version (stringclasses, 25 values) | code (stringlengths, 75-178k) | apis (sequence) | full_version (stringlengths, 1-6) | repo_name (stringlengths, 9-78) | hexsha (stringlengths, 40) |
---|---|---|---|---|---|
1.6 | # -*- coding: utf-8 -*-
"""
@Time : 2021/1/14 5:34 PM
@FileName: bert.py
@author: 王炳宁
@contact: [email protected]
"""
import sys
import time
import apex
import torch
import torch.distributed as dist
from apex import amp
sys.path.append('..')
from modules.BERT import Bert
from train.parser import get_argument_parser
from utils import *
np.random.seed(1000)
torch.manual_seed(1024)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args = get_argument_parser()
print(args.local_rank, dist.get_rank(), dist.get_world_size())
torch.cuda.set_device(args.local_rank)
vocab_size = 50000
n_embedding = 128
n_hidden = 768
n_layer = 12
n_head = 12
batch_size = 8
max_learning_rate = 4e-5
doc_max_length_size = 1024
train_data = load_file(args.train_file_path)
dev_data = load_file(args.dev_file_path)
dev_data = sorted(dev_data, key=lambda x: len(x[0]))
remove_data_size = len(dev_data) % dist.get_world_size()
thread_dev_data = [dev_data[x + args.local_rank] for x in
range(0, len(dev_data) - remove_data_size, dist.get_world_size())]
print('train data size is {} test size {}'.format(len(train_data), len(dev_data)))
model = Bert(vocab_size, n_embedding, n_hidden, n_layer, n_head)
filename = args.pretrain_model
state_dict = load_file(filename)
for name, para in model.named_parameters():
if name not in state_dict:
if dist.get_rank() == 0:
print('{} not load'.format(name))
continue
para.data = torch.FloatTensor(state_dict[name])
print('model size {}'.format(get_model_parameters(model)))
model.cuda()
if args.optimizer.lower() == 'adam':
optimizer = apex.optimizers.FusedAdam
elif args.optimizer.lower() == 'lamb':
optimizer = apex.optimizers.FusedLAMB
else:
optimizer = apex.optimizers.FusedSGD
optim = optimizer(
model.parameters(),
eps=2.0e-7,
lr=1.0e-7,
)
model, optim = amp.initialize(model, optim, opt_level="O2", verbosity=0)
model = apex.parallel.DistributedDataParallel(model)
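# Linear LR schedule: the learning rate is ramped up to max_learning_rate over the
# first warm_up_steps updates, then decayed by max_learning_rate / 1e6 per update.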
warm_up_steps = 500
lr_opt_steps = max_learning_rate / 1000000
warm_up_lr_opt_steps = max_learning_rate / warm_up_steps
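# Distributed metric helpers: dist.reduce aggregates onto rank 0, so the returned
# values are only meaningful on the rank-0 process.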
def metric_sum(val):
tensor = torch.tensor(val).cuda()
dist.reduce(tensor, 0)
return tensor.item()
def metric_mean(val):
tensor = torch.tensor(val).cuda()
dist.reduce(tensor, 0)
return tensor.item() / dist.get_world_size()
def get_shuffle_train_data():
pool = {}
for one in train_data:
length = len(one[0]) // 5
if length not in pool:
pool[length] = []
pool[length].append(one)
for one in pool:
np.random.shuffle(pool[one])
length_lst = list(pool.keys())
np.random.shuffle(length_lst)
whole_data = [x for y in length_lst for x in pool[y]]
remove_data_size = len(whole_data) % dist.get_world_size()
thread_data = [whole_data[x + args.local_rank] for x in
range(0, len(whole_data) - remove_data_size, dist.get_world_size())]
return thread_data
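# Pads each batch to max_len, replaces the -1 sentinels with the vocab_size id,
# then labels every position holding vocab_size with 1 if it originally held -1
# (a true end position) and 0 otherwise.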
def get_train_data(batch, max_len=doc_max_length_size):
batch, _ = padding(batch, max_len=max_len)
seq = batch.flatten()
real_end_pos = np.where(seq == -1)[0]
np.put(seq, real_end_pos, vocab_size)
all_end_pos_seq = np.where(seq == vocab_size)[0]
label = np.zeros(shape=len(all_end_pos_seq), dtype='float32')
for i, j in enumerate(all_end_pos_seq):
if j in real_end_pos:
label[i] = 1
batch = seq.reshape(batch.shape)
return batch, label
current_number = 0
update_number = 0
def evaluation(epo):
results = []
for i in range(dist.get_world_size()):
results.extend(load_file('{}.tmp.obj'.format(i)))
os.remove('{}.tmp.obj'.format(i))
print('epoch:{},total:{}'.format(epo, len(results)))
threshold = 0.5
precision, recall, f1, macro_f1, accuracy = evaluate_comqa(results, threshold)
print('threshold:{}\nprecision:{}\nrecall:{}\nf1:{}\nmacro_f1:{}\naccuracy:{}\n{}'.format(
threshold, precision,
recall, f1,
macro_f1, accuracy,
'===' * 10))
return [precision, recall, macro_f1, f1, accuracy]
def dev(epo):
model.eval()
total = len(thread_dev_data)
results = []
with torch.no_grad():
for i in tqdm(range(0, total, batch_size)):
sample = thread_dev_data[i:i + batch_size]
context_raw = [x[0] for x in sample]
paras = [x[1] for x in sample]
batch, label = get_train_data(context_raw, 1024)
batch = torch.LongTensor(batch)
mask_idx = torch.eq(batch, vocab_size)
answer_logits = model([batch.cuda(), None])
end_num = mask_idx.sum(1).data.numpy().tolist()
answer_logits = answer_logits.cpu().data.numpy().tolist()
start = 0
for one_sent_end_num, para in zip(end_num, paras):
pred = answer_logits[start:start + one_sent_end_num]
results.append([pred, para])
start += one_sent_end_num
dump_file(results, '{}.tmp.obj'.format(dist.get_rank()))
dist.barrier()
if dist.get_rank() == 0:
return evaluation(epo)
return None
def train(epo):
global current_number, update_number
model.train()
data = get_shuffle_train_data()
total = len(data)
total_loss = 0
num = 0
pre_time = None
instance_number = 0
for i in range(0, total, batch_size):
context = [x[0] for x in data[i:i + batch_size]]
batch, label = get_train_data(context)
batch = torch.LongTensor(batch)
loss = model([batch.cuda(), torch.FloatTensor(label).cuda()])
with amp.scale_loss(loss, optim) as scaled_loss:
scaled_loss.backward()
total_loss += loss.item() * len(context)
instance_number += len(context)
optim.step()
optim.zero_grad()
update_number += 1
for param_group in optim.param_groups:
if update_number > warm_up_steps:
param_group['lr'] -= lr_opt_steps
else:
param_group['lr'] += warm_up_lr_opt_steps
num += 1
if num % args.log_interval == 0:
if pre_time is None:
eclipse = 0
else:
eclipse = time.time() - pre_time
total_loss = metric_sum(total_loss)
instance_number = metric_sum(instance_number)
if dist.get_rank() == 0:
print('epoch {}, mask loss is {:5.4f}, ms per batch is {:7.4f}, eclipse {:4.3f}% lr={:e}'.format(
epo, total_loss / instance_number, 1000 * eclipse / instance_number,
i * 100 / total, optim.param_groups[0]['lr']))
pre_time = time.time()
total_loss = 0
instance_number = 0
if __name__ == '__main__':
results = []
best_f1 = 0
for i in range(args.epoch):
train(i)
results = dev(i)
output = {}
if dist.get_rank() == 0:
print('epoch {} done!! result is {}'.format(i, results))
if results[2] > best_f1:
best_f1 = results[2]
for name, param in model.module.named_parameters():
output[name] = param.data.cpu().numpy()
dump_file(output, args.model_save_path)
| [
"torch.distributed.get_world_size",
"torch.eq",
"torch.distributed.init_process_group",
"torch.FloatTensor",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.LongTensor",
"torch.tensor",
"torch.distributed.reduce",
"torch.distributed.get_rank",
"torch.distributed.barrier"
] | 1.6.0 | benywon/ComQA | 6731d63d16b731d6c3654b2dc7d2503cf333127f |
1.1 | import torch.nn as nn
from .gen_resblock import GenBlock
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = args.gf_dim
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(self.ch)
self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
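# Generator pipeline: project the latent vector to bottom_width^2 feature maps,
# apply three upsampling GenBlocks (each presumably doubling spatial resolution),
# then BatchNorm, activation, and a Tanh-bounded 3-channel output convolution.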
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
"""Discriminator"""
def _downsample(x):
# Downsample (2x2 average pooling)
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), ssup=False):
super(Discriminator, self).__init__()
self.ch = args.df_dim
self.activation = activation
self.ssup = ssup
self.block1 = OptimizedDisBlock(args, 3, self.ch)
self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.softmax = nn.Softmax()
if ssup:
self.fully_connect_rot = nn.Linear(self.ch, 4, bias=False)
self.fully_connect_gan = nn.Linear(self.ch, 1, bias=False)
if args.d_spectral_norm:
self.fully_connect_gan = nn.utils.spectral_norm(self.fully_connect_gan)
if ssup:
self.fully_connect_rot = nn.utils.spectral_norm(self.fully_connect_rot)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.activation(h)
# GAN logits
# Global sum pooling over spatial dimensions
h = h.sum(2).sum(2)
gan_logits = self.fully_connect_gan(h)
rot_logits, rot_prob = -1, -1
if self.ssup:
rot_logits = self.fully_connect_rot(h)
rot_prob = self.softmax(rot_logits)
return gan_logits, rot_logits, rot_prob
| [
"torch.nn.Linear",
"torch.nn.Softmax",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.utils.spectral_norm"
] | 1.1.0 | sudarshanregmi/ICRGAN-and-SSGAN | c9e7b01d89cba19505e566892a678932717b8039 |
1.8 | from typing import Iterable, Optional, Sequence
import numpy as np
import torch
from torch.distributions import Categorical, Normal
from torch.distributions import kl_divergence as kl
from torch.nn import functional as F
from scvi import _CONSTANTS
from scvi._compat import Literal
from scvi.module.base import LossRecorder, auto_move_data
from scvi.nn import Decoder, Encoder
from ._classifier import Classifier
from ._utils import broadcast_labels
from ._vae import VAE
class SCANVAE(VAE):
"""
Single-cell annotation using variational inference.
This is an implementation of the scANVI model described in [Xu21]_,
inspired by the M1 + M2 model described in (https://arxiv.org/pdf/1406.5298.pdf).
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer
n_latent
Dimensionality of the latent space
n_layers
Number of hidden layers used for encoder and decoder NNs
n_continuous_cov
Number of continuous covariates
n_cats_per_cov
Number of categories for each extra categorical covariate
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
gene_likelihood
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
y_prior
If None, initialized to uniform probability over cell types
labels_groups
Label group designations
use_labels_groups
Whether to use the label groups
use_batch_norm
Whether to use batch norm in layers
use_layer_norm
Whether to use layer norm in layers
**vae_kwargs
Keyword args for :class:`~scvi.module.VAE`
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
n_continuous_cov: int = 0,
n_cats_per_cov: Optional[Iterable[int]] = None,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
gene_likelihood: str = "zinb",
y_prior=None,
labels_groups: Sequence[int] = None,
use_labels_groups: bool = False,
classifier_parameters: dict = dict(),
use_batch_norm: Literal["encoder", "decoder", "none", "both"] = "both",
use_layer_norm: Literal["encoder", "decoder", "none", "both"] = "none",
**vae_kwargs
):
super().__init__(
n_input,
n_hidden=n_hidden,
n_latent=n_latent,
n_layers=n_layers,
n_continuous_cov=n_continuous_cov,
n_cats_per_cov=n_cats_per_cov,
dropout_rate=dropout_rate,
n_batch=n_batch,
dispersion=dispersion,
log_variational=log_variational,
gene_likelihood=gene_likelihood,
use_batch_norm=use_batch_norm,
use_layer_norm=use_layer_norm,
**vae_kwargs
)
use_batch_norm_encoder = use_batch_norm == "encoder" or use_batch_norm == "both"
use_batch_norm_decoder = use_batch_norm == "decoder" or use_batch_norm == "both"
use_layer_norm_encoder = use_layer_norm == "encoder" or use_layer_norm == "both"
use_layer_norm_decoder = use_layer_norm == "decoder" or use_layer_norm == "both"
self.n_labels = n_labels
# Classifier takes n_latent as input
cls_parameters = {
"n_layers": n_layers,
"n_hidden": n_hidden,
"dropout_rate": dropout_rate,
}
cls_parameters.update(classifier_parameters)
self.classifier = Classifier(
n_latent,
n_labels=n_labels,
use_batch_norm=use_batch_norm_encoder,
use_layer_norm=use_layer_norm_encoder,
**cls_parameters
)
self.encoder_z2_z1 = Encoder(
n_latent,
n_latent,
n_cat_list=[self.n_labels],
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
use_batch_norm=use_batch_norm_encoder,
use_layer_norm=use_layer_norm_encoder,
)
self.decoder_z1_z2 = Decoder(
n_latent,
n_latent,
n_cat_list=[self.n_labels],
n_layers=n_layers,
n_hidden=n_hidden,
use_batch_norm=use_batch_norm_decoder,
use_layer_norm=use_layer_norm_decoder,
)
self.y_prior = torch.nn.Parameter(
y_prior
if y_prior is not None
else (1 / n_labels) * torch.ones(1, n_labels),
requires_grad=False,
)
self.use_labels_groups = use_labels_groups
self.labels_groups = (
np.array(labels_groups) if labels_groups is not None else None
)
if self.use_labels_groups:
if labels_groups is None:
raise ValueError("Specify label groups")
unique_groups = np.unique(self.labels_groups)
self.n_groups = len(unique_groups)
if not (unique_groups == np.arange(self.n_groups)).all():
raise ValueError()
self.classifier_groups = Classifier(
n_latent, n_hidden, self.n_groups, n_layers, dropout_rate
)
self.groups_index = torch.nn.ParameterList(
[
torch.nn.Parameter(
torch.tensor(
(self.labels_groups == i).astype(np.uint8),
dtype=torch.uint8,
),
requires_grad=False,
)
for i in range(self.n_groups)
]
)
@auto_move_data
def classify(self, x, batch_index=None):
if self.log_variational:
x = torch.log(1 + x)
qz_m, _, z = self.z_encoder(x, batch_index)
# We classify using the inferred mean parameter of z_1 in the latent space
z = qz_m
if self.use_labels_groups:
w_g = self.classifier_groups(z)
unw_y = self.classifier(z)
w_y = torch.zeros_like(unw_y)
for i, group_index in enumerate(self.groups_index):
unw_y_g = unw_y[:, group_index]
w_y[:, group_index] = unw_y_g / (
unw_y_g.sum(dim=-1, keepdim=True) + 1e-8
)
w_y[:, group_index] *= w_g[:, [i]]
else:
w_y = self.classifier(z)
return w_y
@auto_move_data
def classification_loss(self, labelled_dataset):
x = labelled_dataset[_CONSTANTS.X_KEY]
y = labelled_dataset[_CONSTANTS.LABELS_KEY]
batch_idx = labelled_dataset[_CONSTANTS.BATCH_KEY]
classification_loss = F.cross_entropy(
self.classify(x, batch_idx), y.view(-1).long()
)
return classification_loss
def loss(
self,
tensors,
inference_outputs,
generative_outputs,
feed_labels=False,
kl_weight=1,
labelled_tensors=None,
classification_ratio=None,
):
px_r = generative_outputs["px_r"]
px_rate = generative_outputs["px_rate"]
px_dropout = generative_outputs["px_dropout"]
qz1_m = inference_outputs["qz_m"]
qz1_v = inference_outputs["qz_v"]
z1 = inference_outputs["z"]
x = tensors[_CONSTANTS.X_KEY]
batch_index = tensors[_CONSTANTS.BATCH_KEY]
if feed_labels:
y = tensors[_CONSTANTS.LABELS_KEY]
else:
y = None
is_labelled = False if y is None else True
# Enumerate choices of label
ys, z1s = broadcast_labels(y, z1, n_broadcast=self.n_labels)
qz2_m, qz2_v, z2 = self.encoder_z2_z1(z1s, ys)
pz1_m, pz1_v = self.decoder_z1_z2(z2, ys)
reconst_loss = self.get_reconstruction_loss(x, px_rate, px_r, px_dropout)
# KL Divergence
mean = torch.zeros_like(qz2_m)
scale = torch.ones_like(qz2_v)
kl_divergence_z2 = kl(
Normal(qz2_m, torch.sqrt(qz2_v)), Normal(mean, scale)
).sum(dim=1)
loss_z1_unweight = -Normal(pz1_m, torch.sqrt(pz1_v)).log_prob(z1s).sum(dim=-1)
loss_z1_weight = Normal(qz1_m, torch.sqrt(qz1_v)).log_prob(z1).sum(dim=-1)
if not self.use_observed_lib_size:
ql_m = inference_outputs["ql_m"]
ql_v = inference_outputs["ql_v"]
(
local_library_log_means,
local_library_log_vars,
) = self._compute_local_library_params(batch_index)
kl_divergence_l = kl(
Normal(ql_m, torch.sqrt(ql_v)),
Normal(local_library_log_means, torch.sqrt(local_library_log_vars)),
).sum(dim=1)
else:
kl_divergence_l = 0.0
if is_labelled:
loss = reconst_loss + loss_z1_weight + loss_z1_unweight
kl_locals = {
"kl_divergence_z2": kl_divergence_z2,
"kl_divergence_l": kl_divergence_l,
}
if labelled_tensors is not None:
classifier_loss = self.classification_loss(labelled_tensors)
loss += classifier_loss * classification_ratio
return LossRecorder(
loss,
reconst_loss,
kl_locals,
kl_global=torch.tensor(0.0),
classification_loss=classifier_loss,
n_labelled_tensors=labelled_tensors[_CONSTANTS.X_KEY].shape[0],
)
return LossRecorder(
loss,
reconst_loss,
kl_locals,
kl_global=torch.tensor(0.0),
)
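# Unlabelled branch: marginalise over the unknown label by weighting each
# label-specific reconstruction/KL term with the classifier probabilities, and add
# a KL term between the predicted label distribution and the (uniform) y_prior.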
probs = self.classifier(z1)
reconst_loss += loss_z1_weight + (
(loss_z1_unweight).view(self.n_labels, -1).t() * probs
).sum(dim=1)
kl_divergence = (kl_divergence_z2.view(self.n_labels, -1).t() * probs).sum(
dim=1
)
kl_divergence += kl(
Categorical(probs=probs),
Categorical(probs=self.y_prior.repeat(probs.size(0), 1)),
)
kl_divergence += kl_divergence_l
loss = torch.mean(reconst_loss + kl_divergence * kl_weight)
if labelled_tensors is not None:
classifier_loss = self.classification_loss(labelled_tensors)
loss += classifier_loss * classification_ratio
return LossRecorder(
loss,
reconst_loss,
kl_divergence,
kl_global=torch.tensor(0.0),
classification_loss=classifier_loss,
n_labelled_tensors=labelled_tensors[_CONSTANTS.X_KEY].shape[0],
)
return LossRecorder(
loss, reconst_loss, kl_divergence, kl_global=torch.tensor(0.0)
)
| [
"torch.distributions.Categorical",
"torch.sqrt",
"torch.distributions.Normal",
"torch.ones",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like",
"torch.log",
"torch.mean"
] | 1.8.0 | jules-samaran/scvi-tools | 7dcbb819cdc6a7991469fdca6b292276c59a946d |
2.0 | #!/usr/bin/env python3
import argparse
import datetime
import os
import pickle
import pprint
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from examples.atari.atari_network import QRDQN
from examples.atari.atari_wrapper import make_atari_env
from examples.offline.utils import load_buffer
from tianshou.data import Collector, VectorReplayBuffer
from tianshou.policy import DiscreteCQLPolicy
from tianshou.trainer import offline_trainer
from tianshou.utils import TensorboardLogger, WandbLogger
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="PongNoFrameskip-v4")
parser.add_argument("--seed", type=int, default=1626)
parser.add_argument("--eps-test", type=float, default=0.001)
parser.add_argument("--lr", type=float, default=0.0001)
parser.add_argument("--gamma", type=float, default=0.99)
parser.add_argument("--num-quantiles", type=int, default=200)
parser.add_argument("--n-step", type=int, default=1)
parser.add_argument("--target-update-freq", type=int, default=500)
parser.add_argument("--min-q-weight", type=float, default=10.)
parser.add_argument("--epoch", type=int, default=100)
parser.add_argument("--update-per-epoch", type=int, default=10000)
parser.add_argument("--batch-size", type=int, default=32)
parser.add_argument("--hidden-sizes", type=int, nargs="*", default=[512])
parser.add_argument("--test-num", type=int, default=10)
parser.add_argument("--frames-stack", type=int, default=4)
parser.add_argument("--scale-obs", type=int, default=0)
parser.add_argument("--logdir", type=str, default="log")
parser.add_argument("--render", type=float, default=0.)
parser.add_argument("--resume-path", type=str, default=None)
parser.add_argument("--resume-id", type=str, default=None)
parser.add_argument(
"--logger",
type=str,
default="tensorboard",
choices=["tensorboard", "wandb"],
)
parser.add_argument("--wandb-project", type=str, default="offline_atari.benchmark")
parser.add_argument(
"--watch",
default=False,
action="store_true",
help="watch the play of pre-trained policy only"
)
parser.add_argument("--log-interval", type=int, default=100)
parser.add_argument(
"--load-buffer-name", type=str, default="./expert_DQN_PongNoFrameskip-v4.hdf5"
)
parser.add_argument(
"--buffer-from-rl-unplugged", action="store_true", default=False
)
parser.add_argument(
"--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu"
)
args = parser.parse_known_args()[0]
return args
def test_discrete_cql(args=get_args()):
# envs
env, _, test_envs = make_atari_env(
args.task,
args.seed,
1,
args.test_num,
scale=args.scale_obs,
frame_stack=args.frames_stack,
)
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.action_space.shape or env.action_space.n
# should be N_FRAMES x H x W
print("Observations shape:", args.state_shape)
print("Actions shape:", args.action_shape)
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# model
net = QRDQN(*args.state_shape, args.action_shape, args.num_quantiles, args.device)
optim = torch.optim.Adam(net.parameters(), lr=args.lr)
# define policy
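# DiscreteCQLPolicy wraps QRDQN-style distributional Q-learning with the CQL
# conservative regularizer; min_q_weight scales that regularization term.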
policy = DiscreteCQLPolicy(
net,
optim,
args.gamma,
args.num_quantiles,
args.n_step,
args.target_update_freq,
min_q_weight=args.min_q_weight,
).to(args.device)
# load a previous policy
if args.resume_path:
policy.load_state_dict(torch.load(args.resume_path, map_location=args.device))
print("Loaded agent from: ", args.resume_path)
# buffer
if args.buffer_from_rl_unplugged:
buffer = load_buffer(args.load_buffer_name)
else:
assert os.path.exists(args.load_buffer_name), \
"Please run atari_dqn.py first to get expert's data buffer."
if args.load_buffer_name.endswith(".pkl"):
buffer = pickle.load(open(args.load_buffer_name, "rb"))
elif args.load_buffer_name.endswith(".hdf5"):
buffer = VectorReplayBuffer.load_hdf5(args.load_buffer_name)
else:
print(f"Unknown buffer format: {args.load_buffer_name}")
exit(0)
print("Replay buffer size:", len(buffer), flush=True)
# collector
test_collector = Collector(policy, test_envs, exploration_noise=True)
# log
now = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
args.algo_name = "cql"
log_name = os.path.join(args.task, args.algo_name, str(args.seed), now)
log_path = os.path.join(args.logdir, log_name)
# logger
if args.logger == "wandb":
logger = WandbLogger(
save_interval=1,
name=log_name.replace(os.path.sep, "__"),
run_id=args.resume_id,
config=args,
project=args.wandb_project,
)
writer = SummaryWriter(log_path)
writer.add_text("args", str(args))
if args.logger == "tensorboard":
logger = TensorboardLogger(writer)
else: # wandb
logger.load(writer)
def save_best_fn(policy):
torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth"))
def stop_fn(mean_rewards):
return False
# watch agent's performance
def watch():
print("Setup test envs ...")
policy.eval()
policy.set_eps(args.eps_test)
test_envs.seed(args.seed)
print("Testing agent ...")
test_collector.reset()
result = test_collector.collect(n_episode=args.test_num, render=args.render)
pprint.pprint(result)
rew = result["rews"].mean()
print(f'Mean reward (over {result["n/ep"]} episodes): {rew}')
if args.watch:
watch()
exit(0)
result = offline_trainer(
policy,
buffer,
test_collector,
args.epoch,
args.update_per_epoch,
args.test_num,
args.batch_size,
stop_fn=stop_fn,
save_best_fn=save_best_fn,
logger=logger,
)
pprint.pprint(result)
watch()
if __name__ == "__main__":
test_discrete_cql(get_args())
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"torch.utils.tensorboard.SummaryWriter"
] | 2.0.0 | BFAnas/tianshou | 6e86a0bed7d1117c5ad6a421b483b45a6adfe336 |
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
from convs.dyres_conv import *
from convs.condconv import *
__all__ = ['DyResA_ResNet18']
class DyRes_BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, stride=1, num_experts=3):
super().__init__()
self.conv1 = DyResConv(in_channels, channels, kernel_size=3, stride=stride, padding=1,
num_experts=num_experts, mode='A')
self.bn1 = nn.BatchNorm2d(channels)
self.conv2 = DyResConv(channels, channels, kernel_size=3, stride=1, padding=1,
num_experts=num_experts, mode='A')
self.bn2 = nn.BatchNorm2d(channels)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion*channels,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*channels)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
# Addition
out += self.shortcut(x)
out = F.relu(out)
return out
class CondConv_BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, stride=1, num_experts=3):
super().__init__()
self.conv1 = CondConv(in_channels, channels, kernel_size=3, stride=stride, padding=1, num_experts=num_experts)
self.bn1 = nn.BatchNorm2d(channels)
self.conv2 = CondConv(channels, channels, kernel_size=3, stride=1, padding=1, num_experts=num_experts)
self.bn2 = nn.BatchNorm2d(channels)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion*channels,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*channels)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
# Addition
out += self.shortcut(x)
out = F.relu(out)
return out
class DyResA_ResNet(nn.Module):
def __init__(self, block1, block2, num_blocks, num_classes=100, num_experts=3):
super().__init__()
self.in_channels = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block1, 64, num_blocks[0], stride=1, num_experts=num_experts)
self.layer2 = self._make_layer(block1, 128, num_blocks[1], stride=2, num_experts=num_experts)
self.layer3 = self._make_layer(block2, 256, num_blocks[2], stride=2, num_experts=num_experts)
self.layer4 = self._make_layer(block2, 512, num_blocks[3], stride=2, num_experts=num_experts)
self.linear = nn.Linear(512*block2.expansion, num_classes)
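# Only the first block of each stage applies the stage stride; remaining blocks use stride 1.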
def _make_layer(self, block, channels, num_blocks, stride, num_experts):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, channels, stride, num_experts))
self.in_channels = channels * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DyResA_ResNet18(num_experts=3):
return DyResA_ResNet(DyRes_BasicBlock, CondConv_BasicBlock, [2, 2, 2, 2], num_experts=num_experts)
def test():
x = torch.randn(128, 3, 32, 32)
net1 = DyResA_ResNet18()
y1 = net1(x); print(y1.size())
# test() | [
"torch.nn.Linear",
"torch.nn.functional.avg_pool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.randn"
] | 1.4.0 | Nyquixt/DyConv | 255193068424aaa83352bee258d34cb8b32b6ee6 |
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['CondConv_Inf']
class route_func(nn.Module):
def __init__(self, in_channels, num_experts):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(in_channels, num_experts)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = self.sigmoid(x)
return x
class CondConv_Inf(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, num_experts=3, stride=1, padding=0, groups=1, reduction=16, mode='A'):
super().__init__()
self.num_experts = num_experts
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
# routing function
self.routing_func = route_func(in_channels, num_experts)
# convs
self.convs = nn.ParameterList([nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size, kernel_size)) for i in range(num_experts)])
def forward(self, x):
routing_weight = self.routing_func(x) # N x k
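# CondConv at inference: blend the expert kernels with the input-dependent routing
# weights and run one standard convolution with the mixed kernel. As written, the
# blending only broadcasts correctly for batch size 1 (a single scalar route per expert).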
convs = []
for i in range(self.num_experts):
route = routing_weight[:, i]
weight = self.convs[i]
weight = weight * route
convs.append(weight)
conv = sum(convs)
output = F.conv2d(x, weight=conv, stride=self.stride, padding=self.padding, groups=self.groups)
return output
def test():
x = torch.randn(1, 16, 32, 32)
conv = CondConv_Inf(16, 64, 3, padding=1)
y = conv(x)
print(y.shape)
conv = CondConv_Inf(16, 64, 3, padding=1)
y = conv(x)
print(y.shape)
# test() | [
"torch.nn.Linear",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool2d",
"torch.Tensor",
"torch.nn.functional.conv2d",
"torch.randn"
] | 1.4.0 | Nyquixt/DyConv | 255193068424aaa83352bee258d34cb8b32b6ee6 |
1.9 | import torch
import torch.nn as nn
class CosineSimilarity:
"""
Cosine similarity between the two vector.
Given two vector v1 and v2, the cosine similarity between the two vector
is the cosine of theta, where the theta is the angle between the two vector on therir inner product space.
The cosine of the theta can be derived from Euclidean dot product of the two vectors.
"""
def __init__(
self,
**kwargs
) -> None:
super(CosineSimilarity, self).__init__()
def __call__(
self,
v1: torch.Tensor,
v2: torch.Tensor,
) -> torch.Tensor:
if v1.dim() == 1:
v1 = v1.unsqueeze(0)
if v2.dim() == 1:
v2 = v2.unsqueeze(0)
v1 = v1.unsqueeze(1)
v2 = v2.unsqueeze(-1)
return v1.matmul(v2).squeeze(1).squeeze(1).div(v1.pow(2).sum().sqrt() * v2.pow(2).sum().sqrt())
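# A minimal usage sketch (assumed, not part of the original file):
#     sim = CosineSimilarity()
#     sim(torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0]))  # ~0 for orthogonal vectors
#     sim(torch.tensor([1.0, 0.0]), torch.tensor([2.0, 0.0]))  # ~1 for parallel vectors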
class AsymmertricSimilarity(nn.Module):
def __init__(
self,
n_dim: int,
**kwargs,
) -> None:
super(AsymmertricSimilarity, self).__init__()
self.Phi_src = nn.Linear(n_dim, n_dim, bias=False)
self.Phi_dst = nn.Linear(n_dim, n_dim, bias=False)
nn.init.xavier_normal_(self.Phi_src.weight)
nn.init.xavier_normal_(self.Phi_dst.weight)
def forward(
self,
z_src: torch.Tensor,
z_dst: torch.Tensor,
) -> torch.Tensor:
return self.Phi_src(z_src).unsqueeze(-2).matmul(self.Phi_dst(z_dst).unsqueeze(-1)).squeeze()
| [
"torch.nn.Linear",
"torch.nn.init.xavier_normal_"
] | 1.9.1 | helloybz/CLANE | 60e6f0503642ac63d3bcde136885e47954067c17 |
1.6 | import os
from typing import Text
import torch
import unittest
import torch.nn as nn
import torch.optim as optim
from allennlp.models import Model
from allennlp.data.vocabulary import Vocabulary
from zsl_kg.class_encoders.auto_gnn import AutoGNN
from zsl_kg.example_encoders.text_encoder import TextEncoder
from zsl_kg.data.snips import SnipsDataset
from allennlp.data.iterators import BasicIterator
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from zsl_kg.common.graph import NeighSampler
from zsl_kg.knowledge_graph.conceptnet import ConceptNetKG
from allennlp.common.tqdm import Tqdm
class BiLinearModel(Model):
def __init__(
self,
vocab: Vocabulary,
example_encoder: object,
class_encoder: object,
joint_dim: int,
bias: bool = False,
):
super().__init__(vocab)
self.example_encoder = example_encoder
self.class_encoder = class_encoder
self.text_joint = nn.Linear(
self.example_encoder.output_dim, joint_dim, bias=bias
)
self.class_joint = nn.Linear(
self.class_encoder.output_dim, joint_dim, bias=bias
)
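# Scores every example against every class: the text encoding and the class (graph)
# encoding are both projected into a shared joint space, and the logits are the
# dot products between them (batch_size x num_classes).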
def forward(self, batch, node_idx, kg):
encoder_out = self.example_encoder(batch)
text_rep = self.text_joint(encoder_out)
# get label representation
class_out = self.class_encoder(node_idx, kg)
class_rep = self.class_joint(class_out)
logits = torch.matmul(text_rep, class_rep.t())
return logits
class TestIntentClassification(unittest.TestCase):
def setUp(
self,
):
label_maps = {
"train": ["weather", "music", "restaurant"],
"dev": ["search", "movie"],
"test": ["book", "playlist"],
}
data_path = "tests/test_data/datasets/snips/"
datasets = []
for split in ["train", "dev", "test"]:
labels = label_maps[split]
label_to_idx = dict(
[(label, idx) for idx, label in enumerate(labels)]
)
reader = SnipsDataset(label_to_idx)
path = os.path.join(data_path, f"{split}.txt")
_dataset = reader.read(path)
datasets.append(_dataset)
self.train_dataset, self.dev_dataset, self.test_dataset = datasets
vocab = Vocabulary.from_instances(
self.train_dataset + self.dev_dataset + self.test_dataset
)
# create the iterator
self.iterator = BasicIterator(batch_size=32)
self.iterator.index_with(vocab)
print("Loading GloVe...")
# token embed
token_embed_path = os.path.join(data_path, "word_emb.pt")
token_embedding = torch.load(token_embed_path)
print("word embeddings created...")
word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding})
# create the text encoder
print("Loading the text encoder...")
self.example_encoder = TextEncoder(word_embeddings, 300, 32, 20)
trgcn = {
"input_dim": 300,
"output_dim": 64,
"type": "trgcn",
"gnn": [
{
"input_dim": 300,
"output_dim": 64,
"activation": nn.ReLU(),
"normalize": True,
"sampler": NeighSampler(100, mode="topk"),
"fh": 100,
},
{
"input_dim": 64,
"output_dim": 64,
"activation": nn.ReLU(),
"normalize": True,
"sampler": NeighSampler(50, mode="topk"),
},
],
}
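# The dict above configures a two-layer TrGCN class encoder: 300-d ConceptNet node
# features -> 64-d embeddings, with top-k neighbour sampling (100 then 50 neighbours).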
self.class_encoder = AutoGNN(trgcn)
self.train_graph = ConceptNetKG.load_from_disk(
"tests/test_data/subgraphs/snips/train_graph"
)
node_to_idx = dict(
[(node, idx) for idx, node in enumerate(self.train_graph.nodes)]
)
#
self.train_nodes = torch.tensor(
[
node_to_idx[node]
for node in [
"/c/en/weather",
"/c/en/music",
"/c/en/restaurant",
]
]
)
self.model = BiLinearModel(
vocab, self.example_encoder, self.class_encoder, joint_dim=20
)
self.optimizer = optim.Adam(
self.model.parameters(), lr=1e-03, weight_decay=5e-04
)
self.loss_function = nn.CrossEntropyLoss()
def test_intent_classification_train(self):
self.model.train()
total_batch_loss = 0.0
generator_tqdm = Tqdm.tqdm(
self.iterator(self.train_dataset, num_epochs=1, shuffle=False),
total=self.iterator.get_num_batches(self.train_dataset),
)
for batch in generator_tqdm:
self.optimizer.zero_grad()
logits = self.model(
batch["sentence"], self.train_nodes, self.train_graph
)
loss = self.loss_function(logits, batch["labels"])
total_batch_loss += loss.item()
loss.backward()
self.optimizer.step()
self.assertLessEqual(total_batch_loss, 100.0)
| [
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.tensor",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.6.0 | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 |
1.6 | import unittest
from zsl_kg.common.graph import NeighSampler
import torch
from allennlp.common.params import Params
from zsl_kg.knowledge_graph.conceptnet import ConceptNetKG
from zsl_kg.gnn.attention_agg import AttnAggregator
class TestAttnAggregator(unittest.TestCase):
def setUp(self) -> None:
params = Params({"bidirectional": True})
nodes = [
"/c/en/cat",
"/c/en/dog",
"/c/en/elephant",
]
relations = [
"/r/IsA",
"/r/RelatedTo",
]
# (u, r, v)
edges = [
(
0,
0,
1,
),
(
0,
1,
2,
),
(
1,
0,
2,
),
]
features = torch.randn((3, 10))
self.kg_obj = ConceptNetKG(
nodes,
features,
edges,
relations,
params,
)
self.kg_obj.run_random_walk()
attn_args = {
"features": None,
"input_dim": 10,
"output_dim": 20,
"sampler": NeighSampler(-1, "none"),
"feature_dropout": 0.1,
"leaky_relu_neg_slope": 0.4,
"self_loop": True,
}
self.graph_agg = AttnAggregator(**attn_args)
def test_forward(self):
"""testing forward function from the attention aggregator"""
features = self.graph_agg(torch.tensor([0, 1]), self.kg_obj)
self.assertEqual(features.size(0), 2)
self.assertEqual(features.size(1), 20)
| [
"torch.tensor",
"torch.randn"
] | 1.6.0 | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 |
1.3 | #!/h/haoran/anaconda3/bin/python
import sys
import os
sys.path.append(os.getcwd())
import pandas as pd
import numpy as np
import argparse
import Constants
import torch
import torch.nn as nn
from torch.utils import data
import pickle
from pytorch_pretrained_bert import BertTokenizer, BertModel
from run_classifier_dataset_utils import InputExample, convert_examples_to_features
from pathlib import Path
from tqdm import tqdm
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from gradient_reversal import GradientReversal
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, log_loss, mean_squared_error, classification_report
import random
import json
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from utils import create_hdf_key, Classifier, get_emb_size, MIMICDataset, extract_embeddings, EarlyStopping, load_checkpoint
from sklearn.model_selection import ParameterGrid
parser = argparse.ArgumentParser('Fine-tunes a pre-trained BERT model on a certain target for one fold. Outputs fine-tuned BERT model and classifier, ' +
'as well as a pickled dictionary mapping id: predicted probability')
parser.add_argument("--df_path",help = 'must have the following columns: seqs, num_seqs, fold, with note_id as index', type=str)
parser.add_argument("--model_path", type=str)
parser.add_argument('--fold_id', help = 'what fold to use as the DEV fold. Dataframe must have a "fold" column',nargs = '+', type=str, dest = 'fold_id', default = [])
parser.add_argument('--target_col_name', help = 'name of target to train on. Must be a column in the dataframe', type=str)
parser.add_argument("--output_dir", help = 'folder to output model/results', type=str)
parser.add_argument('--use_adversary', help = "whether or not to use an adversary. If True, must not have --freeze_bert", action = 'store_true')
parser.add_argument('--lm', help = 'lambda value for the adversary', type = float, default = 1.0)
parser.add_argument('--protected_group', help = 'name of protected group, must be a column in the dataframe', type = str, default = 'insurance')
parser.add_argument('--adv_layers', help = 'number of layers in adversary', type = int, default = 2)
parser.add_argument('--freeze_bert', help = 'freeze all BERT layers and only use pre-trained representation', action = 'store_true')
parser.add_argument('--train_batch_size', help = 'batch size to use for training', type = int)
parser.add_argument('--max_num_epochs', help = 'maximum number of epochs to train for', type = int, default = 20)
parser.add_argument('--es_patience', help = 'patience for the early stopping', type = int, default = 3)
parser.add_argument('--other_fields', help = 'other fields to add, must be columns in df', nargs = '+', type = str, dest = 'other_fields', default = [])
parser.add_argument('--seed', type = int, default = 42, help = 'random seed for initialization')
parser.add_argument('--dropout', type = float, default = 0, help = 'dropout probability for classifier')
parser.add_argument('--lr', type = float, default = 5e-4, help = 'learning rate for BertAdam optimizer')
parser.add_argument('--predictor_layers', type = int, default = 2, help = 'number of layers for classifier, ignored if gridsearch_classifier')
parser.add_argument('--emb_method', default = 'last', const = 'last', nargs = '?', choices = ['last', 'sum4', 'cat4'], help = 'what embedding layer to take')
parser.add_argument('--fairness_def', default = 'demo', const = 'demo', nargs = '?', choices = ['demo', 'odds'], help = 'what fairness definition to use: demographic parity, equality of odds')
parser.add_argument('--task_type', default = 'binary', const = 'binary', nargs = '?', choices = ['binary', 'multiclass', 'regression'], help = 'what type of data the target_col_name is')
parser.add_argument('--save_embs', help = 'save computed embeddings at the end', action = 'store_true')
parser.add_argument('--output_train_stats', help = 'export training set predictions into the dataframe', action = 'store_true')
parser.add_argument('--gridsearch_classifier', help = 'whether to run a grid search over the classifier parameters, using AUPRC as metric', action = 'store_true')
parser.add_argument('--average', help = 'whether to aggregate sequences to a single prediction by simple average, or by using the NYU agg function', action = 'store_true')
parser.add_argument('--gridsearch_c', help = 'whether to run a grid search over the NYU agg c parameter, using AUPRC as metric, only valid if not --average, and --gridsearch_classifier', action = 'store_true')
parser.add_argument('--use_new_mapping', help = 'whether to use new mapping for adversarial training', action = 'store_true')
parser.add_argument('--pregen_emb_path', help = '''if embeddings have been precomputed, can provide a path here (as a pickled dictionary mapping note_id:numpy array).
Will only be used if freeze_bert. note_ids in this dictionary must be a superset of the note_ids in df_path''', type = str)
parser.add_argument('--overwrite', help = 'whether to overwrite existing model/predictions', action = 'store_true')
args = parser.parse_args()
if os.path.isfile(os.path.join(args.output_dir, 'preds.pkl')) and not args.overwrite:
print("File already exists; exiting.")
sys.exit()
print('Reading dataframe...', flush = True)
df = pd.read_pickle(args.df_path)
if 'note_id' in df.columns:
df = df.set_index('note_id')
tokenizer = BertTokenizer.from_pretrained(args.model_path)
model = BertModel.from_pretrained(args.model_path)
target = args.target_col_name
assert(target in df.columns)
#even if no adversary, must have valid protected group column for code to work
if args.use_adversary:
protected_group = args.protected_group
assert(protected_group in df.columns)
if args.use_new_mapping:
mapping = Constants.newmapping
for i in Constants.drop_groups[protected_group]:
df = df[df[protected_group] != i]
else:
mapping = Constants.mapping
other_fields_to_include = args.other_fields
if args.freeze_bert:
for param in model.parameters():
param.requires_grad = False
assert('fold' in df.columns)
for i in args.fold_id:
assert(i in df['fold'].unique())
assert('test' in df['fold'].unique())
fold_id = args.fold_id
if args.gridsearch_c:
assert(args.task_type == 'binary')
c_grid = [0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.5, 0.7, 1, 1.2, 1.5, 2, 3, 5, 10, 20, 50, 100, 1000]
else:
c_grid = [2]
Path(args.output_dir).mkdir(parents = True, exist_ok = True)
EMB_SIZE = get_emb_size(args.emb_method)
train_df = df[~df.fold.isin(['test', 'NA', *fold_id])]
val_df = df[df.fold.isin(fold_id)]
test_df = df[df.fold == 'test']
def convert_input_example(note_id, text, seqIdx, target, group, other_fields = []):
return InputExample(guid = '%s-%s'%(note_id,seqIdx), text_a = text, text_b = None, label = target, group = mapping[protected_group][group] if args.use_adversary else 0, other_fields = other_fields)
# in training generator, return all folds except this.
# in validation generator, return only this fold
print('Converting input examples to appropriate format...', flush = True)
examples_train = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in train_df.iterrows()
for c, i in enumerate(row.seqs)]
examples_eval = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in val_df.iterrows()
for c, i in enumerate(row.seqs)]
examples_test = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in test_df.iterrows()
for c, i in enumerate(row.seqs)]
def convert_examples_to_features_emb(examples, embs):
features = []
for i in examples:
note_id, seq_id = i.guid.split('-')
emb = embs[note_id][int(seq_id), :]
features.append(EmbFeature(emb, y = i.label, guid = i.guid, group = i.group, other_fields = i.other_fields))
return features
class EmbFeature():
def __init__(self, emb, y, guid, group, other_fields):
self.emb = emb
self.y = y
self.guid = guid
self.group = group
self.other_fields = other_fields
class Embdataset(data.Dataset):
def __init__(self, features, gen_type):
self.features = features #list of EmbFeatures
self.gen_type = gen_type
self.length = len(features)
def __len__(self):
return self.length
def __getitem__(self, index):
emb = torch.tensor(self.features[index].emb, dtype = torch.float32)
if args.task_type in ['binary', 'regression']:
y = torch.tensor(self.features[index].y, dtype = torch.float32)
else:
y = torch.tensor(self.features[index].y, dtype = torch.long)
other_fields = self.features[index].other_fields
guid = self.features[index].guid
return emb, y, guid, other_fields
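# Adversary for debiasing: a gradient-reversal layer followed by a small MLP that
# predicts the protected group from the embedding; reversing the gradient pushes the
# encoder toward representations that are uninformative about the protected group.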
class Discriminator(nn.Module):
def __init__(self, input_dim, num_layers, num_categories, lm):
super(Discriminator, self).__init__()
self.num_layers = num_layers
assert(num_layers >= 1)
self.input_dim = input_dim
self.num_categories = num_categories
self.lm = lm
self.layers = [GradientReversal(lambda_ = lm)]
for c, i in enumerate(range(num_layers)):
if c != num_layers-1:
self.layers.append(nn.Linear(input_dim // (2**c), input_dim // (2**(c+1))))
self.layers.append(nn.ReLU())
else:
self.layers.append(nn.Linear(input_dim // (2**c), num_categories))
self.layers.append(nn.Softmax(dim = 0))
self.layers = nn.ModuleList(self.layers)
def forward(self, x):
for i in range(len(self.layers)):
x = self.layers[i](x)
return x
if args.gridsearch_classifier:
assert(args.freeze_bert)
grid = list(ParameterGrid({
'num_layers': [2,3,4],
'dropout_prob': [0, 0.2],
'decay_rate': [2,4,6]
}))
grid.append({
'num_layers': 1,
'dropout_prob': 0,
'decay_rate': 2
})
for i in grid: # adds extra fields to input arguments
i['input_dim'] = EMB_SIZE + len(other_fields_to_include)
i['task_type'] = args.task_type
else:
grid = [{ # only one parameter combination
'input_dim': EMB_SIZE + len(other_fields_to_include),
'num_layers': args.predictor_layers,
'dropout_prob': args.dropout,
'task_type': args.task_type
}]
if args.task_type == 'multiclass':
for i in grid:
i['multiclass_nclasses'] = len(df[target].unique())
if args.use_adversary:
discriminator = Discriminator(EMB_SIZE + int(args.fairness_def == 'odds'), args.adv_layers, len(mapping[protected_group]), args.lm)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
model.to(device)
if args.use_adversary:
discriminator.to(device)
seed = args.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed)
if args.task_type == 'binary':
criterion = nn.BCELoss()
elif args.task_type == 'multiclass':
criterion = nn.CrossEntropyLoss()
elif args.task_type == 'regression':
criterion = nn.MSELoss()
criterion_adv = nn.CrossEntropyLoss()
if n_gpu > 1:
model = torch.nn.DataParallel(model)
criterion = torch.nn.DataParallel(criterion)
if args.use_adversary:
discriminator = torch.nn.DataParallel(discriminator)
criterion_adv = torch.nn.DataParallel(criterion_adv)
def get_embs(generator):
'''
given a generator, runs all the data through one pass of the model to calculate embeddings
used when BERT weights are frozen, calculates embeddings first to save compute
'''
features = []
model.eval()
with torch.no_grad():
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
emb = bert_out[c,:].detach().cpu().numpy()
features.append(EmbFeature(emb = emb, y = y[c], guid = i, group = group, other_fields= [i[c] for i in other_vars]))
return features
print('Featurizing examples...', flush = True)
if not args.pregen_emb_path:
features_train = convert_examples_to_features(examples_train,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
features_eval = convert_examples_to_features(examples_eval,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
features_test = convert_examples_to_features(examples_test,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
training_set = MIMICDataset(features_train, 'train' ,args.task_type)
training_generator = data.DataLoader(training_set, shuffle = True, batch_size = args.train_batch_size, drop_last = True)
val_set = MIMICDataset(features_eval, 'val', args.task_type)
val_generator = data.DataLoader(val_set, shuffle = False, batch_size = args.train_batch_size)
test_set = MIMICDataset(features_test, 'test', args.task_type)
test_generator = data.DataLoader(test_set, shuffle = False, batch_size = args.train_batch_size)
if args.freeze_bert: #only need to precalculate for training and val set
if args.pregen_emb_path:
pregen_embs = pickle.load(open(args.pregen_emb_path, 'rb'))
features_train_embs = convert_examples_to_features_emb(examples_train, pregen_embs)
features_val_embs = convert_examples_to_features_emb(examples_eval, pregen_embs)
features_test_embs = convert_examples_to_features_emb(examples_test, pregen_embs)
else:
features_train_embs = get_embs(training_generator)
features_val_embs = get_embs(val_generator)
features_test_embs = get_embs(test_generator)
training_generator = data.DataLoader(Embdataset(features_train_embs, 'train'), shuffle = True, batch_size = args.train_batch_size, drop_last = True)
val_generator = data.DataLoader(Embdataset(features_val_embs, 'val'), shuffle = False, batch_size = args.train_batch_size)
test_generator= data.DataLoader(Embdataset(features_test_embs, 'test'), shuffle = False, batch_size = args.train_batch_size)
num_train_epochs = args.max_num_epochs
learning_rate = args.lr
num_train_optimization_steps = len(training_generator) * num_train_epochs
warmup_proportion = 0.1
PREDICTOR_CHECKPOINT_PATH = os.path.join(args.output_dir, 'predictor.chkpt')
MODEL_CHECKPOINT_PATH = os.path.join(args.output_dir, 'model.chkpt')
grid_auprcs = []
es_models = []
optimal_cs = []
actual_val = val_df[target]
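# NYU-style aggregation of a note's sequence-level probabilities: a weighted blend of
# the max and the mean, where the weight on the mean grows with the number of
# sequences relative to c.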
def merge_probs(probs, c):
return (np.max(probs) + np.mean(probs)*len(probs)/float(c))/(1+len(probs)/float(c))
def avg_probs(probs):
return np.mean(probs)
def avg_probs_multiclass(probs):
return np.argmax(np.mean(probs, axis = 0))
def merge_regression(preds):
return np.mean(preds)
def evaluate_on_set(generator, predictor, emb_gen = False, c_val=2):
'''
Input: a pytorch data loader, whether the generator is an embedding or text generator
Outputs:
prediction_dict: a dictionary mapping note_id (str) to list of predicted probabilities
merged_preds: a dictionary mapping note_id (str) to a single merged probability
embs: a dictionary mapping note_id (str) to a numpy 2d array (shape num_seq * 768)
'''
model.eval()
predictor.eval()
if generator.dataset.gen_type == 'val':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in val_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in val_df.iterrows()}
elif generator.dataset.gen_type == 'test':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in test_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in test_df.iterrows()}
elif generator.dataset.gen_type == 'train':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in train_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in train_df.iterrows()}
if emb_gen:
with torch.no_grad():
for embs, y, guid, other_vars in generator:
embs = embs.to(device)
y = y.to(device)
for i in other_vars:
embs = torch.cat([embs, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(embs).detach().cpu()
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
if args.task_type in ['binary', 'regression']:
prediction_dict[note_id][int(seq_id)] = preds[c].item()
else:
prediction_dict[note_id][int(seq_id)] = preds[c,:].numpy()
else:
with torch.no_grad():
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out).detach().cpu()
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
if args.task_type in ['binary', 'regression']:
prediction_dict[note_id][int(seq_id)] = preds[c].item()
else:
prediction_dict[note_id][int(seq_id)] = preds[c,:].numpy()
embs[note_id][int(seq_id), :] = bert_out[c,:EMB_SIZE].detach().cpu().numpy()
merged_preds = merge_preds(prediction_dict, c_val)
return (prediction_dict, merged_preds, embs)
def merge_preds(prediction_dict, c=2):
merged_preds = {}
for i in prediction_dict:
if args.task_type == 'binary':
if args.average:
merged_preds[i] = avg_probs(prediction_dict[i])
else:
merged_preds[i] = merge_probs(prediction_dict[i], c)
elif args.task_type == 'regression':
merged_preds[i] = merge_regression(prediction_dict[i])
elif args.task_type == 'multiclass':
merged_preds[i] = avg_probs_multiclass(np.array(prediction_dict[i]))
return merged_preds
for predictor_params in grid:
print(predictor_params, flush = True)
predictor = Classifier(**predictor_params).to(device)
if n_gpu > 1:
predictor = torch.nn.DataParallel(predictor)
if not(args.freeze_bert) and not(args.use_adversary):
param_optimizer = list(model.named_parameters()) + list(predictor.named_parameters())
elif args.freeze_bert and not(args.use_adversary):
param_optimizer = list(predictor.named_parameters())
elif args.freeze_bert and args.use_adversary:
raise Exception('No purpose in using an adversary if BERT layers are frozen')
else:
param_optimizer = list(model.named_parameters()) + list(predictor.named_parameters()) + list(discriminator.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
es = EarlyStopping(patience = args.es_patience)
optimizer = BertAdam(optimizer_grouped_parameters,
lr=learning_rate,
warmup=warmup_proportion,
t_total=num_train_optimization_steps)
warmup_linear = WarmupLinearSchedule(warmup=warmup_proportion,
t_total=num_train_optimization_steps)
for epoch in range(1, num_train_epochs+1):
# training
if not args.freeze_bert:
model.train()
else:
model.eval()
predictor.train()
if args.use_adversary:
discriminator.train()
running_loss = 0.0
num_steps = 0
with tqdm(total=len(training_generator), desc="Epoch %s"%epoch) as pbar:
if not args.freeze_bert:
for input_ids, input_mask, segment_ids, y, group, _, other_vars in training_generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out)
loss = criterion(preds, y)
if args.use_adversary:
adv_input = bert_out[:, :-len(other_vars)]
if args.fairness_def == 'odds':
adv_input = torch.cat([adv_input, y.unsqueeze(dim = 1)], 1)
adv_pred = discriminator(adv_input)
adv_loss = criterion_adv(adv_pred, group)
if n_gpu > 1:
loss = loss.mean()
if args.use_adversary:
adv_loss = adv_loss.mean()
if args.use_adversary:
loss += adv_loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
num_steps += 1
running_loss += loss.item()
mean_loss = running_loss/num_steps
pbar.update(1)
pbar.set_postfix_str("Running Training Loss: %.5f" % mean_loss)
else: # if frozen, use precomputed embeddings to save time
for embs, y,_, other_vars in training_generator:
embs = embs.to(device)
y = y.to(device)
for i in other_vars:
embs = torch.cat([embs, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(embs)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
num_steps += 1
running_loss += loss.item()
mean_loss = running_loss/num_steps
pbar.update(1)
pbar.set_postfix_str("Running Training Loss: %.5f" % mean_loss)
# evaluate here
model.eval()
predictor.eval()
val_loss = 0
with torch.no_grad():
if args.freeze_bert:
checkpoints = {PREDICTOR_CHECKPOINT_PATH: predictor}
for embs, y, guid, other_vars in val_generator:
embs = embs.to(device)
y = y.to(device)
for i in other_vars:
embs = torch.cat([embs, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(embs)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
val_loss += loss.item()
val_loss /= len(val_generator)
# early stopping uses val loss as metric
# model selection/c selection uses AUPRC as metric
else:
checkpoints = {PREDICTOR_CHECKPOINT_PATH: predictor,
MODEL_CHECKPOINT_PATH: model}
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in val_generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
if args.use_adversary:
adv_loss = adv_loss.mean()
if args.use_adversary:
loss += adv_loss
val_loss += loss.item()
val_loss /= len(val_generator)
print('Val loss: %s'%val_loss, flush = True)
es(val_loss, checkpoints)
if es.early_stop:
break
print('Trained for %s epochs' % epoch)
predictor.load_state_dict(load_checkpoint(PREDICTOR_CHECKPOINT_PATH))
os.remove(PREDICTOR_CHECKPOINT_PATH)
if not args.freeze_bert:
model.load_state_dict(load_checkpoint(MODEL_CHECKPOINT_PATH))
os.remove(MODEL_CHECKPOINT_PATH)
if args.gridsearch_classifier:
auprcs = [] #one value for each in c grid
prediction_dict, _, _ = evaluate_on_set(val_generator, predictor, emb_gen = args.freeze_bert)
for c_val in c_grid:
merged_preds_val = merge_preds(prediction_dict, c_val)
merged_preds_val_list = [merged_preds_val[str(i)] for i in actual_val.index]
auprcs.append(average_precision_score(actual_val.values.astype(int), merged_preds_val_list))
print(auprcs, flush = True)
print(c_grid, flush = True)
idx_max = np.argmax(auprcs)
grid_auprcs.append(auprcs[idx_max])
es_models.append(predictor.cpu())
optimal_cs.append(c_grid[idx_max])
print('val AUPRC:%.5f optimal c: %s' %(auprcs[idx_max], c_grid[idx_max] ))
# find best predictor here, move back to cpu
if args.gridsearch_classifier:
idx_max = np.argmax(grid_auprcs)
predictor = es_models[idx_max].to(device)
opt_c = optimal_cs[idx_max]
else:
opt_c = 2.0
# evaluate on val set
prediction_dict_val, merged_preds_val, embs_val = evaluate_on_set(val_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
merged_preds_val_list = [merged_preds_val[str(i)] for i in actual_val.index]
if args.task_type == 'binary':
acc = accuracy_score(actual_val.values.astype(int), np.array(merged_preds_val_list).round())
auprc = average_precision_score(actual_val.values.astype(int), merged_preds_val_list)
ll = log_loss(actual_val.values.astype(int), merged_preds_val_list)
roc = roc_auc_score(actual_val.values.astype(int), merged_preds_val_list)
print('Accuracy: %.5f' % acc)
print('AUPRC: %.5f' % auprc)
print('Log Loss: %.5f' % ll)
print('AUROC: %.5f' % roc)
elif args.task_type == 'regression':
mse = mean_squared_error(actual_val, merged_preds_val_list)
print('MSE: %.5f' % mse)
elif args.task_type == 'multiclass':
report = classification_report(actual_val.values.astype(int), np.array(merged_preds_val_list))
print(report)
prediction_dict_test, merged_preds_test, embs_test = evaluate_on_set(test_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
if args.output_train_stats:
prediction_dict_train, merged_preds_train, embs_train = evaluate_on_set(training_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
else:
merged_preds_train, embs_train = {}, {}
# save predictor
json.dump(predictor_params, open(os.path.join(args.output_dir, 'predictor_params.json'), 'w'))
torch.save(predictor.state_dict(), os.path.join(args.output_dir, 'predictor.pt'))
# save model
if not args.freeze_bert:
    model_to_save = model.module if hasattr(model, 'module') else model # Only save the model itself
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# save args
json.dump(vars(args), open(os.path.join(args.output_dir, 'argparse_args.json'), 'w'))
#saves embeddings
if args.save_embs:
embs = {**embs_val, **embs_test, **embs_train}
pickle.dump(embs, open(os.path.join(args.output_dir, 'embs.pkl'), 'wb'))
rough_preds = {**merged_preds_val, **merged_preds_test, **merged_preds_train}
pickle.dump(rough_preds, open(os.path.join(args.output_dir, 'preds.pkl'), 'wb'))
# saves gridsearch info
pickle.dump({
'grid_auprcs':grid_auprcs,
'optimal_cs': optimal_cs,
'opt_c': opt_c
}, open(os.path.join(args.output_dir, 'gs_info.pkl'), 'wb'))
| [
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel",
"torch.nn.Softmax",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.tensor",
"torch.cuda.manual_seed_all",
"torch.cuda.device_count",
"torch.nn.ReLU",
"torch.nn.MSELoss",
"torch.no_grad"
] | 1.3.0 | MLforHealth/HurtfulWords | b59181585aa70152f0fbe79fa2611ded928bf9f1 |
1.4 | # Copyright (c) 2020, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import platform
import numpy as np
from torch import Tensor, FloatTensor
class Spectrogram(object):
"""
    Create a spectrogram from an audio signal.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
feature_extract_by (str): which library to use for feature extraction (default: torch)
"""
def __init__(
self,
sample_rate: int = 16000,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'torch'
) -> None:
self.sample_rate = sample_rate
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'kaldi':
            # torchaudio is only supported on Linux and macOS
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
try:
import torchaudio
except ImportError:
raise ImportError("Please install torchaudio: `pip install torchaudio`")
self.transforms = torchaudio.compliance.kaldi.spectrogram
self.frame_length = frame_length
self.frame_shift = frame_shift
else:
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
def __call__(self, signal):
if self.feature_extract_by == 'kaldi':
spectrogram = self.transforms(
Tensor(signal).unsqueeze(0),
frame_length=self.frame_length,
frame_shift=self.frame_shift,
sample_frequency=self.sample_rate,
).transpose(0, 1)
else:
spectrogram = torch.stft(
Tensor(signal), self.n_fft, hop_length=self.hop_length,
win_length=self.n_fft, window=torch.hamming_window(self.n_fft),
center=False, normalized=False, onesided=True
)
spectrogram = (spectrogram[:, :, 0].pow(2) + spectrogram[:, :, 1].pow(2)).pow(0.5)
spectrogram = np.log1p(spectrogram.numpy())
return spectrogram
class MelSpectrogram(object):
"""
Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram and MelScale.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
        n_mels (int): Number of mel filterbanks to generate. (Default: 80)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
feature_extract_by (str): which library to use for feature extraction (default: librosa)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mels: int = 80,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'librosa'
) -> None:
self.sample_rate = sample_rate
self.n_mels = n_mels
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'torchaudio':
            # torchaudio is only supported on Linux and macOS
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
import torchaudio
self.amplitude_to_db = torchaudio.transforms.AmplitudeToDB()
self.transforms = torchaudio.transforms.MelSpectrogram(
sample_rate=sample_rate,
win_length=frame_length,
hop_length=self.hop_length,
n_fft=self.n_fft,
n_mels=n_mels,
)
else:
import librosa
self.transforms = librosa.feature.melspectrogram
self.amplitude_to_db = librosa.amplitude_to_db
def __call__(self, signal):
if self.feature_extract_by == 'torchaudio':
melspectrogram = self.transforms(Tensor(signal))
melspectrogram = self.amplitude_to_db(melspectrogram)
melspectrogram = melspectrogram.numpy()
elif self.feature_extract_by == 'librosa':
melspectrogram = self.transforms(
signal,
sr=self.sample_rate,
n_mels=self.n_mels,
n_fft=self.n_fft,
hop_length=self.hop_length,
)
melspectrogram = self.amplitude_to_db(melspectrogram, ref=np.max)
else:
raise ValueError("Unsupported library : {0}".format(self.feature_extract_by))
return melspectrogram
class MFCC(object):
"""
Create the Mel-frequency cepstrum coefficients (MFCCs) from an audio signal.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
n_mfcc (int): Number of mfc coefficients to retain. (Default: 40)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
feature_extract_by (str): which library to use for feature extraction(default: librosa)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mfcc: int = 40,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'librosa'
) -> None:
self.sample_rate = sample_rate
self.n_mfcc = n_mfcc
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'torchaudio':
            # torchaudio is only supported on Linux and macOS
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
import torchaudio
self.transforms = torchaudio.transforms.MFCC(
sample_rate=sample_rate,
n_mfcc=n_mfcc,
log_mels=True,
win_length=frame_length,
hop_length=self.hop_length,
n_fft=self.n_fft,
)
else:
import librosa
self.transforms = librosa.feature.mfcc
def __call__(self, signal):
if self.feature_extract_by == 'torchaudio':
mfcc = self.transforms(FloatTensor(signal))
mfcc = mfcc.numpy()
elif self.feature_extract_by == 'librosa':
mfcc = self.transforms(
y=signal,
sr=self.sample_rate,
n_mfcc=self.n_mfcc,
n_fft=self.n_fft,
hop_length=self.hop_length,
)
else:
raise ValueError("Unsupported library : {0}".format(self.feature_extract_by))
return mfcc
class FilterBank(object):
"""
Create a fbank from a raw audio signal. This matches the input/output of Kaldi’s compute-fbank-feats
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
        n_mels (int): Number of mel filterbanks to generate. (Default: 80)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mels: int = 80,
frame_length: int = 20,
frame_shift: int = 10
) -> None:
import torchaudio
self.transforms = torchaudio.compliance.kaldi.fbank
self.sample_rate = sample_rate
self.n_mels = n_mels
self.frame_length = frame_length
self.frame_shift = frame_shift
def __call__(self, signal):
return self.transforms(
Tensor(signal).unsqueeze(0),
num_mel_bins=self.n_mels,
frame_length=self.frame_length,
frame_shift=self.frame_shift,
).transpose(0, 1).numpy()
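# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Demonstrates the transforms above on a dummy 1-second, 16 kHz signal. The random
# signal and the parameter choices are assumptions; Spectrogram's default 'torch'
# backend relies on the pre-1.8 torch.stft real-valued output this file targets,
# and MFCC's default 'librosa' backend requires librosa to be installed.
if __name__ == '__main__':
    dummy_signal = np.random.randn(16000).astype(np.float32)  # assumed 1 s of audio at 16 kHz
    spectrogram = Spectrogram(sample_rate=16000, frame_length=20, frame_shift=10)(dummy_signal)
    mfcc = MFCC(sample_rate=16000, n_mfcc=40, frame_length=20, frame_shift=10)(dummy_signal)
    print(spectrogram.shape)  # (n_fft // 2 + 1, num_frames)
    print(mfcc.shape)         # (40, num_frames)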
| [
"torch.hamming_window",
"torch.FloatTensor",
"torch.Tensor"
] | 1.4.0 | jungwook518/KoSpeech | 77b8daf2f821c8fa755e937096fdbc3536cafd81 |
1.4 | import torch
import numpy as np
from hipo_rank import Embeddings, SentenceEmbeddings, SectionEmbedding, \
PairIndices, SentenceSimilarities, SectionSimilarities, Similarities
from typing import List, Tuple
from numpy import ndarray
class CosSimilarity:
def __init__(self, threshold = 0):
self.threshold = threshold
def _compute_similarities(self, embeds1: ndarray, embeds2: ndarray) -> ndarray:
embeds1 = torch.from_numpy(embeds1)
embeds2 = torch.from_numpy(embeds2)
similarities = torch.cosine_similarity(embeds1, embeds2).numpy()
similarities = similarities / 2 + 0.5 # normalize to a range [0,1]
similarities = np.clip(similarities, self.threshold, 1)
return similarities
def _get_pairwise_similarities(self, embeds: ndarray) -> Tuple[ndarray, PairIndices]:
pair_indices = self._get_pair_indices(len(embeds))
pair_indices_i = [x[0] for x in pair_indices]
pair_indices_j = [x[1] for x in pair_indices]
similarities = self._compute_similarities(embeds[pair_indices_i], embeds[pair_indices_j])
return similarities, pair_indices
def _get_pair_indices(self, num_nodes: int) -> PairIndices:
pair_indices = []
for i in range(num_nodes):
for j in range(i+1, num_nodes):
pair_indices += [(i, j)]
return pair_indices
def get_similarities(self, embeds: Embeddings):
sent_to_sent = []
for sent_embeds in embeds.sentence:
id = sent_embeds.id
e = sent_embeds.embeddings
similarities, pair_indices = self._get_pairwise_similarities(e)
directions = ["undirected" for _ in pair_indices]
sent_to_sent += [SentenceSimilarities(id, similarities, pair_indices, directions)]
sent_to_sect = []
sect_embeds = np.stack([s.embedding for s in embeds.section])
num_sect = len(sect_embeds)
for sent_embeds in embeds.sentence:
# TODO: factor out pair indices for one and two matrices
pair_indices = []
num_sent = len(sent_embeds.embeddings)
for i in range(num_sent):
for j in range(num_sect):
pair_indices += [(i,j)]
pair_indices_i = [x[0] for x in pair_indices]
pair_indices_j = [x[1] for x in pair_indices]
embeds1 = sent_embeds.embeddings[pair_indices_i]
embeds2 = sect_embeds[pair_indices_j]
similarities = self._compute_similarities(embeds1, embeds2)
id = sent_embeds.id
directions = ["undirected" for _ in pair_indices]
sent_to_sect += [SentenceSimilarities(id, similarities, pair_indices, directions)]
similarities, pair_indices = self._get_pairwise_similarities(sect_embeds)
directions = ["undirected" for _ in pair_indices]
sect_to_sect = SectionSimilarities(similarities, pair_indices, directions)
return Similarities(sent_to_sent, sect_to_sect, sent_to_sect)
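# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Exercises the low-level helpers on random embeddings; the 4x8 shape is an arbitrary
# assumption. get_similarities() itself expects the hipo_rank Embeddings dataclasses,
# so only the ndarray-level path is shown here.
if __name__ == '__main__':
    sim = CosSimilarity(threshold=0.0)
    dummy_embeds = np.random.randn(4, 8).astype(np.float32)
    similarities, pair_indices = sim._get_pairwise_similarities(dummy_embeds)
    print(pair_indices)        # [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
    print(similarities.shape)  # (6,), cosine similarities rescaled into [threshold, 1]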
| [
"torch.cosine_similarity",
"torch.from_numpy"
] | 1.4 | mukul-mehta/HipoRank | b44490c4f1f3e0ff8015e3eb0f2b1955947dfe80 |
1.9 | import torch
import torch.nn as nn
from vformer.functional import PatchMerging
from vformer.utils import ENCODER_REGISTRY
encoder_modules = ENCODER_REGISTRY.get_list()
def test_VanillaEncoder():
test_tensor = torch.randn(2, 65, 1024)
encoder = ENCODER_REGISTRY.get("VanillaEncoder")(
embedding_dim=1024, depth=6, num_heads=16, head_dim=64, mlp_dim=2048
)
out = encoder(test_tensor)
assert out.shape == test_tensor.shape # shape remains same
del encoder, test_tensor
def test_SwinEncoder():
test_tensor = torch.randn(3, 3136, 96)
# when downsampled
encoder = ENCODER_REGISTRY.get("SwinEncoder")(
dim=96,
input_resolution=(224 // 4, 224 // 4),
depth=2,
num_heads=3,
window_size=7,
downsample=PatchMerging,
)
out = encoder(test_tensor)
assert out.shape == (3, 784, 192)
del encoder
# when not downsampled
encoder = ENCODER_REGISTRY.get("SwinEncoder")(
dim=96,
input_resolution=(224 // 4, 224 // 4),
depth=2,
num_heads=3,
window_size=7,
downsample=None,
use_checkpoint=True,
)
out = encoder(test_tensor)
assert out.shape == (3, 3136, 96)
del encoder
encoder_block = ENCODER_REGISTRY.get("SwinEncoderBlock")(
dim=96, input_resolution=(224 // 4, 224 // 4), num_heads=3, window_size=7
)
out = encoder_block(test_tensor)
assert out.shape == test_tensor.shape
del encoder_block
def test_PVTEncoder():
test_tensor = torch.randn(4, 3136, 64)
encoder = ENCODER_REGISTRY.get("PVTEncoder")(
dim=64,
depth=3,
qkv_bias=True,
qk_scale=0.0,
p_dropout=0.0,
attn_dropout=0.1,
drop_path=[0.0] * 3,
act_layer=nn.GELU,
sr_ratio=1,
linear=False,
use_dwconv=False,
num_heads=1,
mlp_ratio=4,
)
out = encoder(test_tensor, H=56, W=56)
assert out.shape == test_tensor.shape
del encoder
def test_CrossEncoder():
test_tensor1 = torch.randn(3, 5, 128)
test_tensor2 = torch.randn(3, 5, 256)
encoder = ENCODER_REGISTRY.get("CrossEncoder")(128, 256)
out = encoder(test_tensor1, test_tensor2)
assert out[0].shape == test_tensor1.shape
assert out[1].shape == test_tensor2.shape
del encoder
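# Hedged manual runner (added for convenience; not in the original test file).
# pytest normally collects the functions above, but calling them directly lets
# the file double as a quick smoke-test script.
if __name__ == "__main__":
    test_VanillaEncoder()
    test_SwinEncoder()
    test_PVTEncoder()
    test_CrossEncoder()
    print("all encoder smoke tests passed")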
| [
"torch.randn"
] | 1.9.0 | aditya-agrawal-30502/vformer | e1f4950f980238442ff1dc39a8f0791e4fbc9dac |
1.1 | import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
def train_one_epoch(cur_epoch,model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
if total_it_each_epoch == len(train_loader):
dataloader_iter = iter(train_loader)
if rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
data_time = common_utils.AverageMeter()
batch_time = common_utils.AverageMeter()
forward_time = common_utils.AverageMeter()
for cur_it in range(total_it_each_epoch):
end = time.time()
try:
batch = next(dataloader_iter)
except StopIteration:
dataloader_iter = iter(train_loader)
batch = next(dataloader_iter)
print('new iters')
data_timer = time.time()
cur_data_time = data_timer - end
lr_scheduler.step(accumulated_iter)
try:
cur_lr = float(optimizer.lr)
except:
cur_lr = optimizer.param_groups[0]['lr']
if tb_log is not None:
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
model.train()
optimizer.zero_grad()
loss, tb_dict, disp_dict = model_func(model, batch)
forward_timer = time.time()
cur_forward_time = forward_timer - data_timer
loss.backward()
clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
optimizer.step()
accumulated_iter += 1
cur_batch_time = time.time() - end
# average reduce
avg_data_time = commu_utils.average_reduce_value(cur_data_time)
avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
# log to console and tensorboard
if rank == 0:
data_time.update(avg_data_time)
forward_time.update(avg_forward_time)
batch_time.update(avg_batch_time)
disp_dict.update({
'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
})
pbar.update()
pbar.set_postfix(dict(total_it=accumulated_iter))
tbar.set_postfix(disp_dict)
tbar.refresh()
if tb_log is not None:
tb_log.add_scalar('train/loss', loss, cur_epoch)
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
for key, val in tb_dict.items():
tb_log.add_scalar('train/' + key, val, accumulated_iter)
if rank == 0:
pbar.close()
return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None,
lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
merge_all_iters_to_one_epoch=False):
accumulated_iter = start_iter
with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
total_it_each_epoch = len(train_loader)
if merge_all_iters_to_one_epoch:
assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
dataloader_iter = iter(train_loader)
for cur_epoch in tbar:
if train_sampler is not None:
train_sampler.set_epoch(cur_epoch)
# train one epoch
if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
cur_scheduler = lr_warmup_scheduler
else:
cur_scheduler = lr_scheduler
accumulated_iter = train_one_epoch(cur_epoch,
model, optimizer, train_loader, model_func,
lr_scheduler=cur_scheduler,
accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
rank=rank, tbar=tbar, tb_log=tb_log,
leave_pbar=(cur_epoch + 1 == total_epochs),
total_it_each_epoch=total_it_each_epoch,
dataloader_iter=dataloader_iter
)
# save trained model
trained_epoch = cur_epoch + 1
if trained_epoch % ckpt_save_interval == 0 and rank == 0:
ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
ckpt_list.sort(key=os.path.getmtime)
if ckpt_list.__len__() >= max_ckpt_save_num:
for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
os.remove(ckpt_list[cur_file_idx])
ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
save_checkpoint(
checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
)
def model_state_to_cpu(model_state):
model_state_cpu = type(model_state)() # ordered dict
for key, val in model_state.items():
model_state_cpu[key] = val.cpu()
return model_state_cpu
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
optim_state = optimizer.state_dict() if optimizer is not None else None
if model is not None:
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_state = model_state_to_cpu(model.module.state_dict())
else:
model_state = model.state_dict()
else:
model_state = None
try:
import pcdet
version = 'pcdet+' + pcdet.__version__
except:
version = 'none'
return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
if False and 'optimizer_state' in state:
optimizer_state = state['optimizer_state']
state.pop('optimizer_state', None)
optimizer_filename = '{}_optim.pth'.format(filename)
torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
filename = '{}.pth'.format(filename)
torch.save(state, filename)
| [
"torch.save"
] | 1.1 | Bilal-A-Qureshi/OpenPCDet | 633c6026e56fc3fb2112f2a9f7ce08a21619e78f |
1.9 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 5, 2)
self.conv2 = nn.Conv2d(32, 64, 7, 3)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(36864, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
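# --- Hedged shape-check sketch (added for illustration; not part of the original file) ---
# fc1 expects 36864 = 64 * 24 * 24 flattened features, which is consistent with a
# 1 x 300 x 300 grayscale input: 300 -> conv1(k=5, s=2) -> 148 -> conv2(k=7, s=3) -> 48
# -> max_pool2d(2) -> 24. The 300 x 300 size is an assumption used only to show shapes.
if __name__ == "__main__":
    net = Net().eval()
    dummy = torch.randn(2, 1, 300, 300)
    print(net(dummy).shape)  # torch.Size([2, 10]) log-probabilities over 10 classes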
| [
"torch.nn.Linear",
"torch.flatten",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.nn.Dropout2d"
] | 1.9.0 | evanaze/captcha | 62d226742be7f4091e54a7ea960703812bd44fd5 |
1.6 | import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class ViT(nn.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim,
pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_height // patch_height) * (image_width // patch_width)
patch_dim = channels * patch_height * patch_width
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
self.pool = pool
self.to_latent = nn.Identity()
self.mlp_head = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, img):
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
# TODO maybe no need :(n+1), just self.pos_embedding is OK.
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
# x.shape, b, n+1, d
x = self.transformer(x)
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
x = self.to_latent(x)
return self.mlp_head(x)
class AttentionWithMask(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads # 64 x 8
self.heads = heads # 8
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, mask = None):
# n is the patch_num + 1, patch_num = (img_size/patch_size)**2.
# just assume img_size 224, patch_size 32, 224/32=7 it is 7*7+1=50 here.
# yolo-v1 also use patch num 7*7.
b, n, _, h = *x.shape, self.heads # n=50,h=8,
        # self.to_qkv(x) has shape [b, 50, 64x8x3]; chunk it into 3 pieces,
        # i.e. qkv is a 3-tuple whose elements each have shape [b, 50, 64x8]
qkv = self.to_qkv(x).chunk(3, dim = -1)
        # reshape each piece from [b, 50, 64x8] to [b, 8, 50, 64]
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
        # q and k are both [b, 8, 50, 64]; read 50 as the number of tokens and 64 as the per-head feature size
# dots.shape=[b,8,50,50]
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
        # the mask handling below is not the focus here
mask_value = -torch.finfo(dots.dtype).max
if mask is not None:
mask = F.pad(mask.flatten(1), (1, 0), value = True)
assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'
mask = mask[:, None, :] * mask[:, :, None]
dots.masked_fill_(~mask, mask_value)
del mask
        # softmax over the last dimension of the [b, 8, 50, 50] scores
attn = dots.softmax(dim=-1)
        # attn holds the self-attention weights; multiply with v to get out.shape = [b, 8, 50, 64]
out = torch.einsum('bhij,bhjd->bhid', attn, v)
        # out.shape becomes [b, 50, 8x64]
out = rearrange(out, 'b h n d -> b n (h d)')
        # out.shape goes back to [b, 50, dim] after the output projection
out = self.to_out(out)
return out
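# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Instantiates the ViT above with the 224 / 32 setup referenced in the comments
# (7 * 7 = 49 patches plus 1 cls token = 50 tokens); the dim/depth/heads values
# are arbitrary assumptions.
if __name__ == '__main__':
    v = ViT(image_size=224, patch_size=32, num_classes=10,
            dim=128, depth=2, heads=8, mlp_dim=256)
    img = torch.randn(1, 3, 224, 224)
    print(v(img).shape)  # torch.Size([1, 10])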
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Identity",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Softmax",
"torch.einsum",
"torch.finfo",
"torch.nn.GELU",
"torch.randn"
] | 1.6 | rocke2020/vit-pytorch | a1f828da0c952fa56a90a71f7c88c8e0025c1d42 |
1.4 | import torch
import os
import numpy as np
import cv2
from PIL import Image
from collections import defaultdict
from tqdm import tqdm
import mcubes
import open3d as o3d
from plyfile import PlyData, PlyElement
from argparse import ArgumentParser
from models.rendering import *
from models.nerf import *
from utils import load_ckpt
from datasets import dataset_dict
torch.backends.cudnn.benchmark = True
def get_opts():
parser = ArgumentParser()
parser.add_argument('--root_dir', type=str,
default='/home/ubuntu/data/nerf_example_data/nerf_synthetic/lego',
help='root directory of dataset')
parser.add_argument('--dataset_name', type=str, default='blender',
choices=['blender', 'llff'],
help='which dataset to validate')
parser.add_argument('--scene_name', type=str, default='test',
help='scene name, used as output ply filename')
parser.add_argument('--img_wh', nargs="+", type=int, default=[800, 800],
help='resolution (img_w, img_h) of the image')
parser.add_argument('--N_samples', type=int, default=64,
                        help='number of samples to infer the accumulated opacity')
parser.add_argument('--chunk', type=int, default=32*1024,
help='chunk size to split the input to avoid OOM')
parser.add_argument('--ckpt_path', type=str, required=True,
help='pretrained checkpoint path to load')
parser.add_argument('--N_grid', type=int, default=256,
help='size of the grid on 1 side, larger=higher resolution')
parser.add_argument('--x_range', nargs="+", type=float, default=[-1.0, 1.0],
help='x range of the object')
parser.add_argument('--y_range', nargs="+", type=float, default=[-1.0, 1.0],
                        help='y range of the object')
parser.add_argument('--z_range', nargs="+", type=float, default=[-1.0, 1.0],
                        help='z range of the object')
parser.add_argument('--sigma_threshold', type=float, default=20.0,
help='threshold to consider a location is occupied')
parser.add_argument('--occ_threshold', type=float, default=0.2,
help='''threshold to consider a vertex is occluded.
larger=fewer occluded pixels''')
#### method using vertex normals ####
parser.add_argument('--use_vertex_normal', action="store_true",
help='use vertex normals to compute color')
parser.add_argument('--N_importance', type=int, default=64,
                        help='number of fine samples to infer the accumulated opacity')
parser.add_argument('--near_t', type=float, default=1.0,
help='the near bound factor to start the ray')
return parser.parse_args()
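# Hedged example invocation (the script name and all paths below are assumptions):
#   python extract_color_mesh.py --root_dir /path/to/nerf_synthetic/lego \
#       --dataset_name blender --scene_name lego --img_wh 800 800 \
#       --ckpt_path ckpts/lego.ckpt --N_grid 256 --sigma_threshold 20.0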
@torch.no_grad()
def f(models, embeddings, rays, N_samples, N_importance, chunk, white_back):
"""Do batched inference on rays using chunk."""
B = rays.shape[0]
results = defaultdict(list)
for i in range(0, B, chunk):
rendered_ray_chunks = \
render_rays(models,
embeddings,
rays[i:i+chunk],
N_samples,
False,
0,
0,
N_importance,
chunk,
white_back,
test_time=True)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 0)
return results
if __name__ == "__main__":
args = get_opts()
kwargs = {'root_dir': args.root_dir,
'img_wh': tuple(args.img_wh)}
if args.dataset_name == 'llff':
kwargs['spheric_poses'] = True
kwargs['split'] = 'test'
else:
kwargs['split'] = 'train'
dataset = dataset_dict[args.dataset_name](**kwargs)
embedding_xyz = Embedding(3, 10)
embedding_dir = Embedding(3, 4)
embeddings = [embedding_xyz, embedding_dir]
nerf_fine = NeRF()
load_ckpt(nerf_fine, args.ckpt_path, model_name='nerf_fine')
nerf_fine.cuda().eval()
# define the dense grid for query
N = args.N_grid
xmin, xmax = args.x_range
ymin, ymax = args.y_range
zmin, zmax = args.z_range
# assert xmax-xmin == ymax-ymin == zmax-zmin, 'the ranges must have the same length!'
x = np.linspace(xmin, xmax, N)
y = np.linspace(ymin, ymax, N)
z = np.linspace(zmin, zmax, N)
xyz_ = torch.FloatTensor(np.stack(np.meshgrid(x, y, z), -1).reshape(-1, 3)).cuda()
dir_ = torch.zeros_like(xyz_).cuda()
# sigma is independent of direction, so any value here will produce the same result
# predict sigma (occupancy) for each grid location
print('Predicting occupancy ...')
with torch.no_grad():
B = xyz_.shape[0]
out_chunks = []
for i in tqdm(range(0, B, args.chunk)):
xyz_embedded = embedding_xyz(xyz_[i:i+args.chunk]) # (N, embed_xyz_channels)
dir_embedded = embedding_dir(dir_[i:i+args.chunk]) # (N, embed_dir_channels)
xyzdir_embedded = torch.cat([xyz_embedded, dir_embedded], 1)
out_chunks += [nerf_fine(xyzdir_embedded)]
rgbsigma = torch.cat(out_chunks, 0)
sigma = rgbsigma[:, -1].cpu().numpy()
sigma = np.maximum(sigma, 0).reshape(N, N, N)
# perform marching cube algorithm to retrieve vertices and triangle mesh
print('Extracting mesh ...')
vertices, triangles = mcubes.marching_cubes(sigma, args.sigma_threshold)
##### Until mesh extraction here, it is the same as the original repo. ######
vertices_ = (vertices/N).astype(np.float32)
## invert x and y coordinates (WHY? maybe because of the marching cubes algo)
x_ = (ymax-ymin) * vertices_[:, 1] + ymin
y_ = (xmax-xmin) * vertices_[:, 0] + xmin
vertices_[:, 0] = x_
vertices_[:, 1] = y_
vertices_[:, 2] = (zmax-zmin) * vertices_[:, 2] + zmin
vertices_.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
face = np.empty(len(triangles), dtype=[('vertex_indices', 'i4', (3,))])
face['vertex_indices'] = triangles
PlyData([PlyElement.describe(vertices_[:, 0], 'vertex'),
PlyElement.describe(face, 'face')]).write(f'{args.scene_name}.ply')
# remove noise in the mesh by keeping only the biggest cluster
print('Removing noise ...')
mesh = o3d.io.read_triangle_mesh(f"{args.scene_name}.ply")
idxs, count, _ = mesh.cluster_connected_triangles()
max_cluster_idx = np.argmax(count)
triangles_to_remove = [i for i in range(len(face)) if idxs[i] != max_cluster_idx]
mesh.remove_triangles_by_index(triangles_to_remove)
mesh.remove_unreferenced_vertices()
print(f'Mesh has {len(mesh.vertices)/1e6:.2f} M vertices and {len(mesh.triangles)/1e6:.2f} M faces.')
vertices_ = np.asarray(mesh.vertices).astype(np.float32)
triangles = np.asarray(mesh.triangles)
# perform color prediction
# Step 0. define constants (image width, height and intrinsics)
W, H = args.img_wh
K = np.array([[dataset.focal, 0, W/2],
[0, dataset.focal, H/2],
[0, 0, 1]]).astype(np.float32)
# Step 1. transform vertices into world coordinate
N_vertices = len(vertices_)
vertices_homo = np.concatenate([vertices_, np.ones((N_vertices, 1))], 1) # (N, 4)
if args.use_vertex_normal: ## use normal vector method as suggested by the author.
## see https://github.com/bmild/nerf/issues/44
mesh.compute_vertex_normals()
rays_d = torch.FloatTensor(np.asarray(mesh.vertex_normals))
near = dataset.bounds.min() * torch.ones_like(rays_d[:, :1])
far = dataset.bounds.max() * torch.ones_like(rays_d[:, :1])
rays_o = torch.FloatTensor(vertices_) - rays_d * near * args.near_t
nerf_coarse = NeRF()
load_ckpt(nerf_coarse, args.ckpt_path, model_name='nerf_coarse')
nerf_coarse.cuda().eval()
results = f([nerf_coarse, nerf_fine], embeddings,
torch.cat([rays_o, rays_d, near, far], 1).cuda(),
args.N_samples,
args.N_importance,
args.chunk,
dataset.white_back)
else: ## use my color average method. see README_mesh.md
## buffers to store the final averaged color
non_occluded_sum = np.zeros((N_vertices, 1))
v_color_sum = np.zeros((N_vertices, 3))
# Step 2. project the vertices onto each training image to infer the color
print('Fusing colors ...')
for idx in tqdm(range(len(dataset.image_paths))):
## read image of this pose
image = Image.open(dataset.image_paths[idx]).convert('RGB')
image = image.resize(tuple(args.img_wh), Image.LANCZOS)
image = np.array(image)
## read the camera to world relative pose
P_c2w = np.concatenate([dataset.poses[idx], np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
P_w2c = np.linalg.inv(P_c2w)[:3] # (3, 4)
## project vertices from world coordinate to camera coordinate
vertices_cam = (P_w2c @ vertices_homo.T) # (3, N) in "right up back"
vertices_cam[1:] *= -1 # (3, N) in "right down forward"
## project vertices from camera coordinate to pixel coordinate
vertices_image = (K @ vertices_cam).T # (N, 3)
depth = vertices_image[:, -1:]+1e-5 # the depth of the vertices, used as far plane
vertices_image = vertices_image[:, :2]/depth
vertices_image = vertices_image.astype(np.float32)
vertices_image[:, 0] = np.clip(vertices_image[:, 0], 0, W-1)
vertices_image[:, 1] = np.clip(vertices_image[:, 1], 0, H-1)
## compute the color on these projected pixel coordinates
## using bilinear interpolation.
## NOTE: opencv's implementation has a size limit of 32768 pixels per side,
## so we split the input into chunks.
colors = []
remap_chunk = int(3e4)
for i in range(0, N_vertices, remap_chunk):
colors += [cv2.remap(image,
vertices_image[i:i+remap_chunk, 0],
vertices_image[i:i+remap_chunk, 1],
interpolation=cv2.INTER_LINEAR)[:, 0]]
colors = np.vstack(colors) # (N_vertices, 3)
## predict occlusion of each vertex
## we leverage the concept of NeRF by constructing rays coming out from the camera
## and hitting each vertex; by computing the accumulated opacity along this path,
## we can know if the vertex is occluded or not.
## for vertices that appear to be occluded from every input view, we make the
## assumption that its color is the same as its neighbors that are facing our side.
## (think of a surface with one side facing us: we assume the other side has the same color)
## ray's origin is camera origin
rays_o = torch.FloatTensor(dataset.poses[idx][:, -1]).expand(N_vertices, 3)
## ray's direction is the vector pointing from camera origin to the vertices
rays_d = torch.FloatTensor(vertices_) - rays_o # (N_vertices, 3)
rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
near = dataset.bounds.min() * torch.ones_like(rays_o[:, :1])
## the far plane is the depth of the vertices, since what we want is the accumulated
## opacity along the path from camera origin to the vertices
far = torch.FloatTensor(depth) * torch.ones_like(rays_o[:, :1])
results = f([nerf_fine], embeddings,
torch.cat([rays_o, rays_d, near, far], 1).cuda(),
args.N_samples,
0,
args.chunk,
dataset.white_back)
opacity = results['opacity_coarse'].cpu().numpy()[:, np.newaxis] # (N_vertices, 1)
opacity = np.nan_to_num(opacity, 1)
non_occluded = np.ones_like(non_occluded_sum) * 0.1/depth # weight by inverse depth
# near=more confident in color
non_occluded += opacity < args.occ_threshold
v_color_sum += colors * non_occluded
non_occluded_sum += non_occluded
# Step 3. combine the output and write to file
if args.use_vertex_normal:
v_colors = results['rgb_fine'].cpu().numpy() * 255.0
else: ## the combined color is the average color among all views
v_colors = v_color_sum/non_occluded_sum
v_colors = v_colors.astype(np.uint8)
v_colors.dtype = [('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertices_.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
vertex_all = np.empty(N_vertices, vertices_.dtype.descr+v_colors.dtype.descr)
for prop in vertices_.dtype.names:
vertex_all[prop] = vertices_[prop][:, 0]
for prop in v_colors.dtype.names:
vertex_all[prop] = v_colors[prop][:, 0]
face = np.empty(len(triangles), dtype=[('vertex_indices', 'i4', (3,))])
face['vertex_indices'] = triangles
PlyData([PlyElement.describe(vertex_all, 'vertex'),
PlyElement.describe(face, 'face')]).write(f'{args.scene_name}.ply')
print('Done!')
| [
"torch.cat",
"torch.norm",
"torch.FloatTensor",
"torch.zeros_like",
"torch.no_grad",
"torch.ones_like"
] | 1.4.0 | U-sepSick/NeRF | c5910f84321eb5f72e3332507b0384f1b23f51f7 |
0.4 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from .file_utils import cached_path
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
}
CONFIG_NAME = 'bert_config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
            initializer_range: The standard deviation of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
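# Hedged usage sketch (added for illustration; not part of the original file).
# Builds a small config from keyword arguments and round-trips it through the
# helpers above; 30522 is the usual BERT-base vocabulary size and is only an
# assumed value here.
# >>> config = BertConfig(vocab_size_or_config_json_file=30522, hidden_size=256,
# ...                     num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024)
# >>> config = BertConfig.from_dict(config.to_dict())  # round trip
# >>> print(config.to_json_string())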
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None):
seq_length = input_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer
class BertDeSelfOutput(nn.Module):
def __init__(self, config):
super(BertDeSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qdense = nn.Linear(config.hidden_size, config.hidden_size)
self.qLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.qdropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, chidden_states,qhidden_states, cinput_tensor,qinput_tensor):
chidden_states = self.dense(chidden_states)
chidden_states = self.dropout(chidden_states)
chidden_states = self.LayerNorm(chidden_states + cinput_tensor)
        qhidden_states = self.qdense(qhidden_states)
        qhidden_states = self.qdropout(qhidden_states)
        qhidden_states = self.qLayerNorm(qhidden_states + qinput_tensor)
return chidden_states,qhidden_states
class BertDeAttention(nn.Module):
def __init__(self, config):
super(BertDeAttention, self).__init__()
self.self = BertMultiAttention(config)
self.output = BertDeSelfOutput(config) #Can use De here
def forward(self, cinput_tensor,qinput_tensor, attention_mask, qattention_mask):
cself_output,qself_output = self.self(cinput_tensor,qinput_tensor, attention_mask,qattention_mask)
cattention_output,qattention_output = self.output(cself_output,qself_output, cinput_tensor,qinput_tensor)
return cattention_output,qattention_output
class BertDeIntermediate(nn.Module):
def __init__(self, config):
super(BertDeIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.qdense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, chidden_states,qhidden_states):
#print('In DeIntermediate -dim of chidden_states is',chidden_states.size())
chidden_states = self.dense(chidden_states)
chidden_states = self.intermediate_act_fn(chidden_states)
qhidden_states = self.qdense(qhidden_states)
qhidden_states = self.intermediate_act_fn(qhidden_states)
return chidden_states,qhidden_states
class BertDeOutput(nn.Module):
def __init__(self, config):
super(BertDeOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qdense = nn.Linear(config.intermediate_size, config.hidden_size)
self.qLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.qdropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, chidden_states,qhidden_states, cinput_tensor,qinput_tensor):
#print('In BertDeOutput - size of chidden_states is',chidden_states.size())
chidden_states = self.dense(chidden_states)
chidden_states = self.dropout(chidden_states)
chidden_states = self.LayerNorm(chidden_states + cinput_tensor)
qhidden_states = self.qdense(qhidden_states)
qhidden_states = self.qdropout(qhidden_states)
qhidden_states = self.qLayerNorm(qhidden_states + qinput_tensor)
return chidden_states,qhidden_states
class BertDeLayer(nn.Module):
def __init__(self, config):
super(BertDeLayer, self).__init__()
self.attention = BertDeAttention(config)
self.intermediate = BertDeIntermediate(config)
self.output = BertDeOutput(config)
def forward(self, chidden_states,qhidden_states, attention_mask,qattention_mask):
cattention_output,qattention_output = self.attention(chidden_states,qhidden_states, attention_mask,qattention_mask)
        # Call this once more to also compute qattention_output
#print('In DeLayer - dim of cattention_output',cattention_output.size())
cintermediate_output,qintermediate_output = self.intermediate(cattention_output,qattention_output)
clayer_output,qlayer_output = self.output(cintermediate_output,qintermediate_output,cattention_output,qattention_output)
return clayer_output,qlayer_output
class BertDecoder(nn.Module):
def __init__(self, config):
super(BertDecoder, self).__init__()
layer = BertDeLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(6)])
def forward(self, hidden_states, cattention_mask, qattention_mask, output_all_deencoded_layers=True):
call_deencoder_layers = []
qall_deencoder_layers = []
chidden_states = hidden_states
qhidden_states = hidden_states
for layer_module in self.layer:
chidden_states,qhidden_states = layer_module(chidden_states,qhidden_states, cattention_mask,qattention_mask)
if output_all_deencoded_layers:
call_deencoder_layers.append(chidden_states)
qall_deencoder_layers.append(qhidden_states)
if not output_all_deencoded_layers:
call_deencoder_layers.append(chidden_states)
qall_deencoder_layers.append(qhidden_states)
return call_deencoder_layers,qall_deencoder_layers
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertMultiAttention(nn.Module):
def __init__(self, config):
super(BertMultiAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
#print('config.hidden_size is',config.hidden_size)
self.query = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.query.weight)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.key.weight)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.value.weight)
self.qquery = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.qquery.weight)
self.qkey = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.qkey.weight)
self.qvalue = nn.Linear(config.hidden_size, self.all_head_size)
torch.nn.init.xavier_uniform_(self.qvalue.weight)
self.cdropout = nn.Dropout(config.attention_probs_dropout_prob)
self.qdropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
#NOTE -
# 1. enc_hidden_states is context embeddings
# 2. dec_hidden_states is question embeddings
    # This experiment takes the query from dec_hidden_states, with the
    # key and value coming from the context (enc_hidden_states).
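    # The mirrored Q2C block below swaps the roles (query from enc_hidden_states, key and
    # value from dec_hidden_states), so forward() returns both a question-aware context
    # representation and a context-aware question representation.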
def forward(self, enc_hidden_states,dec_hidden_states, attention_mask,qattention_mask):
#print('forward of decoder')
#print('shape of dec_hidden_states is',dec_hidden_states.shape)
#print('size of self.all_head_size is',self.all_head_size)
mixed_query_layer = self.query(dec_hidden_states)
mixed_key_layer = self.key(enc_hidden_states)
mixed_value_layer = self.value(enc_hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel's forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.cdropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
#--- Q2C
qmixed_query_layer = self.qquery(enc_hidden_states)
qmixed_key_layer = self.qkey(dec_hidden_states)
qmixed_value_layer = self.qvalue(dec_hidden_states)
qquery_layer = self.transpose_for_scores(qmixed_query_layer)
qkey_layer = self.transpose_for_scores(qmixed_key_layer)
qvalue_layer = self.transpose_for_scores(qmixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
qattention_scores = torch.matmul(qquery_layer, qkey_layer.transpose(-1, -2))
qattention_scores = qattention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel's forward() function)
qattention_scores = qattention_scores + qattention_mask
# Normalize the attention scores to probabilities.
qattention_probs = nn.Softmax(dim=-1)(qattention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
qattention_probs = self.qdropout(qattention_probs)
q_layer = torch.matmul(qattention_probs, qvalue_layer)
q_layer = q_layer.permute(0, 2, 1, 3).contiguous()
new_q_layer_shape = q_layer.size()[:-2] + (self.all_head_size,)
q_layer = q_layer.view(*new_q_layer_shape)
return context_layer,q_layer
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask):
self_output = self.self(input_tensor, attention_mask)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask):
attention_output = self.attention(hidden_states, attention_mask)
#print('attention_output is',attention_output.shape)
intermediate_output = self.intermediate(attention_output)
#print('intermediate_output is',intermediate_output.shape)
layer_output = self.output(intermediate_output, attention_output)
#print('layer_output is',layer_output.shape)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True):
all_encoder_layers = []
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
#print('size of hidden_states in BertEncoder is',hidden_states.shape)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(bert_model_embedding_weights.size(0)))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-large-cased`
. `bert-base-multilingual-uncased`
. `bert-base-multilingual-cased`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
else:
archive_file = pretrained_model_name
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except FileNotFoundError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name,
', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading archive file {}".format(archive_file))
else:
logger.info("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
tempdir = None
if os.path.isdir(resolved_archive_file):
serialization_dir = resolved_archive_file
else:
# Extract archive to temp dir
tempdir = tempfile.mkdtemp()
logger.info("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
with tarfile.open(resolved_archive_file, 'r:gz') as archive:
archive.extractall(tempdir)
serialization_dir = tempdir
# Load config
config_file = os.path.join(serialization_dir, CONFIG_NAME)
config = BertConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
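        # Older TensorFlow-style checkpoints name the LayerNorm parameters 'gamma'/'beta';
        # rename them to the 'weight'/'bias' names used by this PyTorch implementation so the
        # state dict loads correctly.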
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if tempdir:
# Clean up temp dir
shutil.rmtree(tempdir)
return model
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated with the first token of the
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
#print('extended_attention_mask',extended_attention_mask)
#print('attention_mask',attention_mask)
#print('token_type_ids',token_type_ids)
qattention_mask = attention_mask - token_type_ids
cattention_mask = attention_mask - qattention_mask
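        # Derive per-segment masks from the token type ids (assuming the usual SQuAD packing:
        # question tokens are segment A / type 0, passage tokens are segment B / type 1):
        # qattention_mask is 1 only on question tokens, cattention_mask only on passage tokens.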
#print('*************************')
#print('cattention_mask',cattention_mask)
#print('qattention_mask',qattention_mask)
cextended_attention_mask = cattention_mask.unsqueeze(1).unsqueeze(2)
cextended_attention_mask = cextended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
cextended_attention_mask = (1.0 - cextended_attention_mask) * -10000.0
qextended_attention_mask = qattention_mask.unsqueeze(1).unsqueeze(2)
qextended_attention_mask = qextended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
qextended_attention_mask = (1.0 - qextended_attention_mask) * -10000.0
#raise SystemExit
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
#print('*** encoded_layers is',encoded_layers.shape)
sequence_output = encoded_layers[-1]
#print('*** sequence_output is',sequence_output.shape)
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return cextended_attention_mask,qextended_attention_mask,sequence_output#encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertForMaskedLM(PreTrainedBertModel):
"""BERT model with the masked language modeling head.
This module comprises the BERT model followed by the masked language modeling head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
is only computed for the labels set in [0, ..., vocab_size]
Outputs:
if `masked_lm_labels` is not `None`:
Outputs the masked language modeling loss.
if `masked_lm_labels` is `None`:
Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForMaskedLM(config)
masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
prediction_scores = self.cls(sequence_output)
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
return masked_lm_loss
else:
return prediction_scores
class BertForNextSentencePrediction(PreTrainedBertModel):
"""BERT model with next sentence prediction head.
This module comprises the BERT model followed by the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `next_sentence_label` is not `None`:
            Outputs the next sentence classification loss.
if `next_sentence_label` is `None`:
Outputs the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForNextSentencePrediction(config)
seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False)
        seq_relationship_score = self.cls(pooled_output)
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
return next_sentence_loss
else:
return seq_relationship_score
class BertForSequenceClassification(PreTrainedBertModel):
"""BERT model for classification.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForSequenceClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
_, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForMultipleChoice(PreTrainedBertModel):
"""BERT model for multiple choice tasks.
This module is composed of the BERT model with a linear layer on top of
the pooled output.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_choices`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length]
with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A`
and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
with indices selected in [0, ..., num_choices].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
            Outputs the classification logits of shape [batch_size, num_choices].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]])
input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]])
token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_choices = 2
model = BertForMultipleChoice(config, num_choices)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_choices=2):
super(BertForMultipleChoice, self).__init__(config)
self.num_choices = num_choices
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
_, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, self.num_choices)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return loss
else:
return reshaped_logits
class BertForTokenClassification(PreTrainedBertModel):
"""BERT model for token-level classification.
This module is composed of the BERT model with a linear layer on top of
the full hidden state of the last layer.
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
`num_labels`: the number of classes for the classifier. Default = 2.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
with indices selected in [0, ..., num_labels].
Outputs:
if `labels` is not `None`:
Outputs the CrossEntropy classification loss of the output with the labels.
if `labels` is `None`:
Outputs the classification logits of shape [batch_size, sequence_length, num_labels].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
num_labels = 2
model = BertForTokenClassification(config, num_labels)
logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config, num_labels=2):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return loss
else:
return logits
class BertForQuestionAnswering(PreTrainedBertModel):
"""BERT model for Question Answering (span extraction).
This module is composed of the BERT model with a linear layer on top of
the sequence output that computes start_logits and end_logits
Params:
`config`: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
`end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size].
Positions are clamped to the length of the sequence and position outside of the sequence are not taken
into account for computing the loss.
Outputs:
if `start_positions` and `end_positions` are not `None`:
Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions.
if `start_positions` or `end_positions` is `None`:
Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end
position tokens of shape [batch_size, sequence_length].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForQuestionAnswering(config)
start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.bert = BertModel(config)
self.enc_trans = nn.Linear(2*config.max_position_embeddings,config.max_position_embeddings)
# TODO check with Google if it's normal there is no dropout on the token classifier of SQuAD in the TF version
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
#print('hidden size in QA is',config.hidden_size)
self.apply(self.init_bert_weights)
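        # 1-D conv that fuses the two decoder streams (context-aware and question-aware,
        # stacked as 2 input channels) back into a single hidden vector per token.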
self.conv1d = nn.Conv1d(in_channels=2,out_channels=1,kernel_size=3,padding=1)
self.decoder = BertDecoder(config)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
ks=3
#convs
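        # Convolutional head: progressively reduces the channel dimension
        # 768 -> 512 -> 256 -> 128 -> 64 -> 32 -> 16 -> 8 -> 4 -> 2 along the token axis;
        # kernel_size=3 with padding=1 preserves the sequence length, and the final two
        # channels are split into start/end logits in forward().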
self.conv_512_1= nn.Conv1d(in_channels=768, out_channels=512, kernel_size=ks, padding=1)
self.conv_512_2= nn.Conv1d(in_channels=512, out_channels=512, kernel_size=ks, padding=1)
self.conv_256_1= nn.Conv1d(in_channels=512, out_channels=256, kernel_size=ks, padding=1)
self.conv_256_2= nn.Conv1d(in_channels=256, out_channels=256, kernel_size=ks, padding=1)
self.conv_128_1= nn.Conv1d(in_channels=256, out_channels=128, kernel_size=ks, padding=1)
self.conv_128_2= nn.Conv1d(in_channels=128, out_channels=128, kernel_size=ks, padding=1)
self.conv_64_1= nn.Conv1d(in_channels=128, out_channels=64, kernel_size=ks, padding=1)
self.conv_64_2= nn.Conv1d(in_channels=64, out_channels=64, kernel_size=ks, padding=1)
self.conv_32_1= nn.Conv1d(in_channels=64, out_channels=32, kernel_size=ks, padding=1)
self.conv_32_2= nn.Conv1d(in_channels=32, out_channels=32, kernel_size=ks, padding=1)
self.conv_16_1= nn.Conv1d(in_channels=32, out_channels=16, kernel_size=ks, padding=1)
self.conv_16_2= nn.Conv1d(in_channels=16, out_channels=16, kernel_size=ks, padding=1)
self.conv_8_1= nn.Conv1d(in_channels=16, out_channels=8, kernel_size=ks, padding=1)
self.conv_8_2= nn.Conv1d(in_channels=8, out_channels=8, kernel_size=ks, padding=1)
self.conv_4_1= nn.Conv1d(in_channels=8, out_channels=4, kernel_size=ks, padding=1)
self.conv_4_2= nn.Conv1d(in_channels=4, out_channels=4, kernel_size=ks, padding=1)
self.conv_out=nn.Conv1d(in_channels=4, out_channels=2, kernel_size=ks, padding=1)
#Freeze embedding layers of Bert
#for param in self.bert.parameters():
# param.requires_grad = False
# print(param)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None):
c_attention_mask,q_attention_mask,sequence_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
#print('shape of sequence_output',sequence_output.shape)
#Ankit addition - Decoder
cdeencoded_layers,qdeencoded_layers = self.decoder(sequence_output, #2d --> 1d translated
c_attention_mask,q_attention_mask,
output_all_deencoded_layers=False)#output_all_encoded_layers)
cdeencoded_layers = cdeencoded_layers[-1]
qdeencoded_layers = qdeencoded_layers[-1]
cdeencoded_layers = cdeencoded_layers.unsqueeze(-1)
qdeencoded_layers = qdeencoded_layers.unsqueeze(-1)
enc_cat = torch.cat((cdeencoded_layers,qdeencoded_layers), dim=-1)
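        # enc_cat has shape (batch, seq_len, hidden, 2); below it is reshaped and permuted to
        # (batch*seq_len, 2, hidden) so the 2-channel conv1d can fuse the two streams,
        # then reshaped back to (batch, seq_len, hidden).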
#print('enc_cat size is',enc_cat.size())
#enc_cat = enc_cat.permute(0,2,1)
encshape = enc_cat.shape
#print('AFTERPERMUTE - enc_cat size is',enc_cat.size())
#sequence_output1d = self.enc_trans(enc_cat)
#print('Dim of sequence_output is',sequence_output1d.size())
enc_cat = enc_cat.reshape(-1,enc_cat.shape[2],enc_cat.shape[3]).contiguous()
#print('AFTER : enc_cat size is',enc_cat.size())
enc_cat = enc_cat.permute(0,2,1).contiguous()
sequence_output1d = self.conv1d(enc_cat)
#print('shape of sequence_output1d',sequence_output1d.shape)
sequence_output1d = sequence_output1d.squeeze(1).contiguous()
sequence_output1d = sequence_output1d.reshape(encshape[0],encshape[1],encshape[2])
#Skip connection with bert embeddings
sequence_output1d = self.LayerNorm(sequence_output + sequence_output1d)
sequence_output1d = sequence_output1d.permute(0,2,1).contiguous()
#print('seq after perm: ', sequence_output.shape)
out_512_1=self.conv_512_1(sequence_output1d)
#print('out 512 1 shape', out_512_1.shape)
out_512_2=self.conv_512_2(out_512_1)
#print('out 512 2 shape', out_512_2.shape)
#elem_1=self.LayerNorm(sequence_output+out_512_2)
#print('512 size: ', elem_1.shape)
out_256_1=self.conv_256_1(out_512_2)
out_256_2=self.conv_256_2(out_256_1)
#elem_2=self.LayerNorm(sequence_output+out_256_2)
#print('256 size: ', elem_2.shape)
out_128_1=self.conv_128_1(out_256_2)
out_128_2=self.conv_128_2(out_128_1)
#elem_3=self.LayerNorm(sequence_output+out_128_2)
#print('128 size: ', elem_3.shape)
out_64_1=self.conv_64_1(out_128_2)
out_64_2=self.conv_64_2(out_64_1)
#elem_4=self.LayerNorm(sequence_output+out_64_2)
#print('64 size: ', elem_4.shape)
out_32_1=self.conv_32_1(out_64_2)
out_32_2=self.conv_32_2(out_32_1)
#elem_5=self.LayerNorm(sequence_output+out_32_2)
#print('32 size: ', elem_5.shape)
out_16_1=self.conv_16_1(out_32_2)
out_16_2=self.conv_16_2(out_16_1)
#elem_6=self.LayerNorm(sequence_output+out_16_2)
#print('16 size: ', elem_6.shape)
out_8_1=self.conv_8_1(out_16_2)
out_8_2=self.conv_8_2(out_8_1)
#elem_7=self.LayerNorm(sequence_output+out_8_2)
#print('8 size: ', elem_7.shape)
out_4_1=self.conv_4_1(out_8_2)
out_4_2=self.conv_4_2(out_4_1)
#elem_8=self.LayerNorm(sequence_output+out_4_2)
#print('4 size: ', elem_8.shape)
out=self.conv_out(out_4_2)
#print('out before perm: ', out.shape)
out = out.permute(0,2,1).contiguous()
#out = self.LayerNorm2(out)
#print('out after perm: ', out.shape)
logits=out
#logits = self.qa_outputs(sequence_output1d)
#print('Dim of logits is',logits.size())
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the position tensors may carry an extra trailing dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
return total_loss
else:
return start_logits, end_logits
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.cat",
"torch.zeros",
"torch.sqrt",
"torch.arange",
"torch.nn.Conv1d",
"torch.nn.Softmax",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.nn.init.xavier_uniform_",
"torch.ones",
"torch.load",
"torch.ones_like",
"torch.zeros_like",
"torch.matmul",
"torch.nn.Embedding"
] | 0.4.1 | ankit-ai/BertQA-Attention-on-Steroids | 49c3de360f88f55c8442b9f8153af56c28a689a9 |
1.11 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import OrderedDict
import pytest
import funsor
from funsor.domains import Bint
from funsor.einsum import (
einsum,
naive_contract_einsum,
naive_einsum,
naive_plated_einsum,
)
from funsor.interpretations import normalize, reflect
from funsor.interpreter import reinterpret
from funsor.optimizer import apply_optimizer
from funsor.tensor import Tensor
from funsor.terms import Variable
from funsor.testing import (
assert_close,
make_chain_einsum,
make_einsum_example,
make_hmm_einsum,
make_plated_hmm_einsum,
)
from funsor.util import get_backend
# TODO: make this file backend agnostic
pytestmark = pytest.mark.skipif(
get_backend() != "torch",
reason="jax backend does not have pyro.ops.contract.einsum equivalent",
)
if get_backend() == "torch":
import torch
from pyro.ops.contract import einsum as pyro_einsum
from funsor.torch.distributions import Categorical
OPTIMIZED_EINSUM_EXAMPLES = [make_chain_einsum(t) for t in range(2, 50, 10)] + [
make_hmm_einsum(t) for t in range(2, 50, 10)
]
@pytest.mark.parametrize("equation", OPTIMIZED_EINSUM_EXAMPLES)
@pytest.mark.parametrize(
"backend", ["pyro.ops.einsum.torch_log", "pyro.ops.einsum.torch_map"]
)
@pytest.mark.parametrize("einsum_impl", [naive_einsum, naive_contract_einsum])
def test_optimized_einsum(equation, backend, einsum_impl):
inputs, outputs, sizes, operands, funsor_operands = make_einsum_example(equation)
expected = pyro_einsum(equation, *operands, backend=backend)[0]
with normalize:
naive_ast = einsum_impl(equation, *funsor_operands, backend=backend)
optimized_ast = apply_optimizer(naive_ast)
actual = reinterpret(optimized_ast) # eager by default
assert isinstance(actual, funsor.Tensor) and len(outputs) == 1
if len(outputs[0]) > 0:
actual = actual.align(tuple(outputs[0]))
assert expected.shape == actual.data.shape
assert torch.allclose(expected, actual.data)
for output in outputs:
for i, output_dim in enumerate(output):
assert output_dim in actual.inputs
assert actual.inputs[output_dim].dtype == sizes[output_dim]
@pytest.mark.parametrize(
"eqn1,eqn2", [("a,ab->b", "bc->"), ("ab,bc,cd->d", "de,ef,fg->")]
)
@pytest.mark.parametrize("optimize1", [False, True])
@pytest.mark.parametrize("optimize2", [False, True])
@pytest.mark.parametrize(
"backend1", ["torch", "pyro.ops.einsum.torch_log", "pyro.ops.einsum.torch_map"]
)
@pytest.mark.parametrize(
"backend2", ["torch", "pyro.ops.einsum.torch_log", "pyro.ops.einsum.torch_map"]
)
@pytest.mark.parametrize("einsum_impl", [naive_einsum, naive_contract_einsum])
def test_nested_einsum(
eqn1, eqn2, optimize1, optimize2, backend1, backend2, einsum_impl
):
inputs1, outputs1, sizes1, operands1, _ = make_einsum_example(eqn1, sizes=(3,))
inputs2, outputs2, sizes2, operands2, funsor_operands2 = make_einsum_example(
eqn2, sizes=(3,)
)
# normalize the probs for ground-truth comparison
operands1 = [
operand.abs() / operand.abs().sum(-1, keepdim=True) for operand in operands1
]
expected1 = pyro_einsum(eqn1, *operands1, backend=backend1, modulo_total=True)[0]
expected2 = pyro_einsum(
outputs1[0] + "," + eqn2,
*([expected1] + operands2),
backend=backend2,
modulo_total=True
)[0]
with normalize:
funsor_operands1 = [
Categorical(
probs=Tensor(
operand,
inputs=OrderedDict([(d, Bint[sizes1[d]]) for d in inp[:-1]]),
)
)(value=Variable(inp[-1], Bint[sizes1[inp[-1]]])).exp()
for inp, operand in zip(inputs1, operands1)
]
output1_naive = einsum_impl(eqn1, *funsor_operands1, backend=backend1)
with reflect:
output1 = apply_optimizer(output1_naive) if optimize1 else output1_naive
output2_naive = einsum_impl(
outputs1[0] + "," + eqn2, *([output1] + funsor_operands2), backend=backend2
)
with reflect:
output2 = apply_optimizer(output2_naive) if optimize2 else output2_naive
actual1 = reinterpret(output1)
actual2 = reinterpret(output2)
assert torch.allclose(expected1, actual1.data)
assert torch.allclose(expected2, actual2.data)
PLATED_EINSUM_EXAMPLES = [
make_plated_hmm_einsum(num_steps, num_obs_plates=b, num_hidden_plates=a)
for num_steps in range(3, 50, 6)
for (a, b) in [(0, 1), (0, 2), (0, 0), (1, 1), (1, 2)]
]
@pytest.mark.parametrize("equation,plates", PLATED_EINSUM_EXAMPLES)
@pytest.mark.parametrize(
"backend", ["pyro.ops.einsum.torch_log", "pyro.ops.einsum.torch_map"]
)
def test_optimized_plated_einsum(equation, plates, backend):
inputs, outputs, sizes, operands, funsor_operands = make_einsum_example(equation)
expected = pyro_einsum(equation, *operands, plates=plates, backend=backend)[0]
actual = einsum(equation, *funsor_operands, plates=plates, backend=backend)
if len(equation) < 10:
actual_naive = naive_plated_einsum(
equation, *funsor_operands, plates=plates, backend=backend
)
assert_close(actual, actual_naive)
assert isinstance(actual, funsor.Tensor) and len(outputs) == 1
if len(outputs[0]) > 0:
actual = actual.align(tuple(outputs[0]))
assert expected.shape == actual.data.shape
assert torch.allclose(expected, actual.data)
for output in outputs:
for i, output_dim in enumerate(output):
assert output_dim in actual.inputs
assert actual.inputs[output_dim].dtype == sizes[output_dim]
| [
"torch.allclose"
] | 1.11.0 | fritzo/funsor | 1d07af18c21894dd56e2f4f877c7845430c3b729 |
1.9 | # Copyright 2021 MosaicML. All Rights Reserved.
"""Core ColOut classes and functions."""
from __future__ import annotations
import logging
import textwrap
import weakref
from typing import TypeVar
import torch
from PIL.Image import Image as PillowImage
from torchvision.datasets import VisionDataset
from composer.algorithms.utils.augmentation_common import image_as_type
from composer.core import Algorithm, Event, Logger, State
from composer.core.types import Tensor
from composer.datasets.utils import add_vision_dataset_transform
log = logging.getLogger(__name__)
ImgT = TypeVar("ImgT", torch.Tensor, PillowImage)
__all__ = ["ColOut", "ColOutTransform", "colout_batch"]
def colout_batch(X: ImgT, p_row: float = 0.15, p_col: float = 0.15) -> ImgT:
"""Applies ColOut augmentation to a batch of images, dropping the same random rows and columns from all images in a
batch.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from composer.algorithms.colout import colout_batch
new_X = colout_batch(
X=X_example,
p_row=0.15,
p_col=0.15
)
Args:
X: :class:`PIL.Image.Image` or :class:`torch.Tensor` of image data. In
the latter case, must be a single image of shape ``CHW`` or a batch
of images of shape ``NCHW``.
p_row: Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col: Fraction of columns to drop (drop along W). Default: ``0.15``.
Returns:
        torch.Tensor or PIL Image: Input image(s) with randomly dropped rows and columns, returned as the same type as the input.
"""
# Convert image to Tensor if needed
X_tensor = image_as_type(X, torch.Tensor)
# Get the dimensions of the image
row_size = X_tensor.shape[-2]
col_size = X_tensor.shape[-1]
# Determine how many rows and columns to keep
kept_row_size = int((1 - p_row) * row_size)
kept_col_size = int((1 - p_col) * col_size)
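    # e.g. with p_row = 0.15 and a 224-pixel-high image, kept_row_size = int(0.85 * 224) = 190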
# Randomly choose indices to keep. Must be sorted for slicing
kept_row_idx = sorted(torch.randperm(row_size)[:kept_row_size].numpy())
kept_col_idx = sorted(torch.randperm(col_size)[:kept_col_size].numpy())
# Keep only the selected row and columns
X_colout = X_tensor[..., kept_row_idx, :]
X_colout = X_colout[..., :, kept_col_idx]
# convert back to same type as input, and strip added batch dim if needed;
# we can't just reshape to input shape because we've reduced the spatial size
if not isinstance(X, torch.Tensor) or (X.ndim < X_colout.ndim):
X_colout = X_colout.reshape(X_colout.shape[-3:])
X_colout = image_as_type(X_colout, type(X))
return X_colout
class ColOutTransform:
"""Torchvision-like transform for performing the ColOut augmentation, where random rows and columns are dropped from
a single image.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from torchvision import datasets, transforms
from composer.algorithms.colout import ColOutTransform
colout_transform = ColOutTransform(p_row=0.15, p_col=0.15)
transforms = transforms.Compose([colout_transform, transforms.ToTensor()])
Args:
p_row (float): Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col (float): Fraction of columns to drop (drop along W). Default: ``0.15``.
"""
def __init__(self, p_row: float = 0.15, p_col: float = 0.15):
self.p_row = p_row
self.p_col = p_col
def __call__(self, img: ImgT) -> ImgT:
"""Drops random rows and columns from a single image.
Args:
img (torch.Tensor or PIL Image): An input image as a torch.Tensor or PIL image
Returns:
torch.Tensor or PIL Image: A smaller image with rows and columns dropped
"""
return colout_batch(img, self.p_row, self.p_col)
class ColOut(Algorithm):
"""Drops a fraction of the rows and columns of an input image. If the fraction of rows/columns dropped isn't too
large, this does not significantly alter the content of the image, but reduces its size and provides extra
variability.
If ``batch`` is True (the default), this algorithm runs on :attr:`Event.AFTER_DATALOADER` to modify the batch.
Otherwise, if ``batch`` is False, this algorithm runs on :attr:`Event.FIT_START` to insert a dataset transformation.
It is a no-op if this algorithm has already applied itself to the :attr:`State.train_dataloader.dataset`.
See the :doc:`Method Card </method_cards/colout>` for more details.
Example:
.. testcode::
from composer.algorithms import ColOut
from composer.trainer import Trainer
colout_algorithm = ColOut(p_row=0.15, p_col=0.15, batch=True)
trainer = Trainer(
model=model,
train_dataloader=train_dataloader,
eval_dataloader=eval_dataloader,
max_duration="1ep",
algorithms=[colout_algorithm],
optimizers=[optimizer]
)
Args:
p_row (float): Fraction of rows to drop (drop along H). Default: ``0.15``.
p_col (float): Fraction of columns to drop (drop along W). Default: ``0.15``.
batch (bool): Run ColOut at the batch level. Default: ``True``.
"""
def __init__(self, p_row: float = 0.15, p_col: float = 0.15, batch: bool = True):
if not (0 <= p_col <= 1):
raise ValueError("p_col must be between 0 and 1")
if not (0 <= p_row <= 1):
raise ValueError("p_row must be between 0 and 1")
self.p_row = p_row
self.p_col = p_col
self.batch = batch
self._transformed_datasets = weakref.WeakSet()
def match(self, event: Event, state: State) -> bool:
if self.batch:
return event == Event.AFTER_DATALOADER
else:
return event == Event.FIT_START and state.train_dataloader.dataset not in self._transformed_datasets
def _apply_sample(self, state: State) -> None:
"""Add the ColOut dataset transform to the dataloader."""
dataset = state.train_dataloader.dataset
transform = ColOutTransform(p_row=self.p_row, p_col=self.p_col)
if not isinstance(dataset, VisionDataset):
raise TypeError(
textwrap.dedent(f"""\
To use {type(self).__name__}, the dataset must be a
{VisionDataset.__qualname__}, not {type(dataset).__name__}"""))
add_vision_dataset_transform(dataset, transform, is_tensor_transform=False)
self._transformed_datasets.add(dataset)
def _apply_batch(self, state: State) -> None:
"""Transform a batch of images using the ColOut augmentation."""
inputs, labels = state.batch_pair
assert isinstance(inputs, Tensor), "Multiple Tensors not supported yet for ColOut"
new_inputs = colout_batch(inputs, p_row=self.p_row, p_col=self.p_col)
state.batch = (new_inputs, labels)
def apply(self, event: Event, state: State, logger: Logger) -> None:
if self.batch:
self._apply_batch(state)
else:
self._apply_sample(state)
| [
"torch.randperm"
] | 1.9 | anisehsani/composer | 42599682d50409b4a4eb7c91fad85d67418cee13 |
1.0 | """
Functions are modified on top of GFLA.
GFLA's license: https://github.com/RenYurui/Global-Flow-Local-Attention/blob/master/LICENSE.md
"""
import torch
import torch.nn as nn
import torchvision.models as models
import torch.nn.functional as F
import os
import torchvision.transforms as transforms
import numpy as np
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (float) - - label for a real image
target_fake_label (float) - - label for a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in the formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flatten the gradients
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
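# Illustrative sketch (not from the original GFLA code): how ``cal_gradient_penalty`` is
# typically combined with ``GANLoss('wgangp')`` in a WGAN-GP discriminator update.
# ``_example_wgangp_discriminator_loss`` and the toy critic / batches below are hypothetical
# placeholders standing in for the real discriminator and image tensors.
def _example_wgangp_discriminator_loss():
    toy_critic = nn.Linear(64, 1)  # stand-in critic on flat 64-d "images"
    real = torch.randn(8, 64)
    fake = torch.randn(8, 64)
    criterion = GANLoss('wgangp')
    d_loss = criterion(toy_critic(real), True) + criterion(toy_critic(fake), False)
    gp, _ = cal_gradient_penalty(toy_critic, real, fake, device='cpu',
                                 type='mixed', constant=1.0, lambda_gp=10.0)
    return d_loss + gp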
class MultiAffineRegularizationLoss(nn.Module):
def __init__(self, kz_dic):
super(MultiAffineRegularizationLoss, self).__init__()
self.kz_dic=kz_dic
self.method_dic={}
for key in kz_dic:
instance = AffineRegularizationLoss(kz_dic[key])
self.method_dic[key] = instance
self.layers = sorted(kz_dic, reverse=True)
def __call__(self, flow_fields):
loss=0
for i in range(len(flow_fields)):
method = self.method_dic[self.layers[i]]
loss += method(flow_fields[i])
return loss
class AffineRegularizationLoss(nn.Module):
"""docstring for AffineRegularizationLoss"""
# kernel_size: kz
def __init__(self, kz):
super(AffineRegularizationLoss, self).__init__()
self.kz = kz
self.criterion = torch.nn.L1Loss()
from models.networks.block_extractor.block_extractor import BlockExtractor
from models.networks.local_attn_reshape.local_attn_reshape import LocalAttnReshape
self.extractor = BlockExtractor(kernel_size=kz)
self.reshape = LocalAttnReshape()
temp = np.arange(kz)
A = np.ones([kz*kz, 3])
A[:, 0] = temp.repeat(kz)
A[:, 1] = temp.repeat(kz).reshape((kz,kz)).transpose().reshape(kz**2)
AH = A.transpose()
k = np.dot(A, np.dot(np.linalg.inv(np.dot(AH, A)), AH)) - np.identity(kz**2) #K = (A((AH A)^-1)AH - I)
self.kernel = np.dot(k.transpose(), k)
self.kernel = torch.from_numpy(self.kernel).unsqueeze(1).view(kz**2, kz, kz).unsqueeze(1)
def __call__(self, flow_fields):
grid = self.flow2grid(flow_fields)
grid_x = grid[:,0,:,:].unsqueeze(1)
grid_y = grid[:,1,:,:].unsqueeze(1)
weights = self.kernel.type_as(flow_fields)
#import pdb; pdb.set_trace()
loss_x = self.calculate_loss(grid_x, weights)
loss_y = self.calculate_loss(grid_y, weights)
return loss_x+loss_y
def calculate_loss(self, grid, weights):
results = nn.functional.conv2d(grid, weights) # KH K B [b, kz*kz, w, h]
b, c, h, w = results.size()
kernels_new = self.reshape(results, self.kz)
f = torch.zeros(b, 2, h, w).type_as(kernels_new) + float(int(self.kz/2))
grid_H = self.extractor(grid, f)
result = torch.nn.functional.avg_pool2d(grid_H*kernels_new, self.kz, self.kz)
loss = torch.mean(result)*self.kz**2
return loss
def flow2grid(self, flow_field):
b,c,h,w = flow_field.size()
x = torch.arange(w).view(1, -1).expand(h, -1).type_as(flow_field).float()
y = torch.arange(h).view(-1, 1).expand(-1, w).type_as(flow_field).float()
grid = torch.stack([x,y], dim=0)
grid = grid.unsqueeze(0).expand(b, -1, -1, -1)
return flow_field+grid
class VGGLoss(nn.Module):
r"""
Perceptual loss, VGG-based
https://arxiv.org/abs/1603.08155
https://github.com/dxyang/StyleTransfer/blob/master/utils.py
"""
def __init__(self, weights=[1.0, 1.0, 1.0, 1.0, 1.0]):
super(VGGLoss, self).__init__()
self.add_module('vgg', VGG19())
self.criterion = torch.nn.L1Loss()
self.weights = weights
def compute_gram(self, x):
b, ch, h, w = x.size()
f = x.view(b, ch, w * h)
f_T = f.transpose(1, 2)
G = f.bmm(f_T) / (h * w * ch)
return G
def __call__(self, x, y, last_only=False, content_only=False):
# Compute features
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
if not last_only:
content_loss = 0.0
content_loss += self.weights[0] * self.criterion(x_vgg['relu1_1'], y_vgg['relu1_1'])
content_loss += self.weights[1] * self.criterion(x_vgg['relu2_1'], y_vgg['relu2_1'])
content_loss += self.weights[2] * self.criterion(x_vgg['relu3_1'], y_vgg['relu3_1'])
content_loss += self.weights[3] * self.criterion(x_vgg['relu4_1'], y_vgg['relu4_1'])
content_loss += self.weights[4] * self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
if content_only:
return content_loss
# Compute loss
style_loss = 0.0
style_loss += self.criterion(self.compute_gram(x_vgg['relu2_2']), self.compute_gram(y_vgg['relu2_2']))
style_loss += self.criterion(self.compute_gram(x_vgg['relu3_4']), self.compute_gram(y_vgg['relu3_4']))
style_loss += self.criterion(self.compute_gram(x_vgg['relu4_4']), self.compute_gram(y_vgg['relu4_4']))
style_loss += self.criterion(self.compute_gram(x_vgg['relu5_2']), self.compute_gram(y_vgg['relu5_2']))
else:
content_loss = self.criterion(x_vgg['relu5_1'], y_vgg['relu5_1'])
if content_only:
return content_loss
style_loss = self.criterion(self.compute_gram(x_vgg['relu5_2']), self.compute_gram(y_vgg['relu5_2']))
return content_loss, style_loss
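# Illustrative sketch (not from the original GFLA code): calling ``VGGLoss`` on two random
# image batches. Instantiating it downloads the pretrained VGG19 weights on first use and the
# call returns a (content_loss, style_loss) pair; ``_example_vgg_loss`` is a hypothetical name.
def _example_vgg_loss():
    loss_fn = VGGLoss()
    x = torch.rand(2, 3, 64, 64)   # generated images in [0, 1]
    y = torch.rand(2, 3, 64, 64)   # target images
    content_loss, style_loss = loss_fn(x, y)
    return content_loss, style_loss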
class PerceptualCorrectness(nn.Module):
r"""
"""
def __init__(self, layer=['relu1_1','relu2_1','relu3_1','relu4_1']):
super(PerceptualCorrectness, self).__init__()
self.add_module('vgg', VGG19())
self.layer = layer
self.eps=1e-8
from models.networks.resample2d_package.resample2d import Resample2d
self.resample = Resample2d(4, 1, sigma=2)
def __call__(self, target, source, flow_list, used_layers, mask=None, use_bilinear_sampling=False):
used_layers=sorted(used_layers, reverse=True)
# self.target=target
# self.source=source
self.target_vgg, self.source_vgg = self.vgg(target), self.vgg(source)
loss = 0
for i in range(len(flow_list)):
loss += self.calculate_loss(flow_list[i], self.layer[used_layers[i]], mask, use_bilinear_sampling)
return loss
def calculate_loss(self, flow, layer, mask=None, use_bilinear_sampling=False):
target_vgg = self.target_vgg[layer]
source_vgg = self.source_vgg[layer]
[b, c, h, w] = target_vgg.shape
# maps = F.interpolate(maps, [h,w]).view(b,-1)
flow = F.interpolate(flow, [h,w])
target_all = target_vgg.view(b, c, -1) #[b C N2]
source_all = source_vgg.view(b, c, -1).transpose(1,2) #[b N2 C]
source_norm = source_all/(source_all.norm(dim=2, keepdim=True)+self.eps)
target_norm = target_all/(target_all.norm(dim=1, keepdim=True)+self.eps)
correction = torch.bmm(source_norm, target_norm) #[b N2 N2]
(correction_max,max_indices) = torch.max(correction, dim=1)
# interpolate with bilinear sampling
if use_bilinear_sampling:
input_sample = self.bilinear_warp(source_vgg, flow).view(b, c, -1)
else:
input_sample = self.resample(source_vgg, flow).view(b, c, -1)
correction_sample = F.cosine_similarity(input_sample, target_all) #[b 1 N2]
loss_map = torch.exp(-correction_sample/(correction_max+self.eps))
if mask is None:
loss = torch.mean(loss_map) - torch.exp(torch.tensor(-1).type_as(loss_map))
else:
mask=F.interpolate(mask, size=(target_vgg.size(2), target_vgg.size(3)))
mask=mask.view(-1, target_vgg.size(2)*target_vgg.size(3))
loss_map = loss_map - torch.exp(torch.tensor(-1).type_as(loss_map))
loss = torch.sum(mask * loss_map)/(torch.sum(mask)+self.eps)
# print(correction_sample[0,2076:2082])
# print(correction_max[0,2076:2082])
# coor_x = [32,32]
# coor = max_indices[0,32+32*64]
# coor_y = [int(coor%64), int(coor/64)]
# source = F.interpolate(self.source, [64,64])
# target = F.interpolate(self.target, [64,64])
# source_i = source[0]
# target_i = target[0]
# source_i = source_i.view(3, -1)
# source_i[:,coor]=-1
# source_i[0,coor]=1
# source_i = source_i.view(3,64,64)
# target_i[:,32,32]=-1
# target_i[0,32,32]=1
# lists = str(int(torch.rand(1)*100))
# img_numpy = util.tensor2im(source_i.data)
# util.save_image(img_numpy, 'source'+lists+'.png')
# img_numpy = util.tensor2im(target_i.data)
# util.save_image(img_numpy, 'target'+lists+'.png')
return loss
def bilinear_warp(self, source, flow):
[b, c, h, w] = source.shape
x = torch.arange(w).view(1, -1).expand(h, -1).type_as(source).float() / (w-1)
y = torch.arange(h).view(-1, 1).expand(-1, w).type_as(source).float() / (h-1)
grid = torch.stack([x,y], dim=0)
grid = grid.unsqueeze(0).expand(b, -1, -1, -1)
grid = 2*grid - 1
flow = 2*flow/torch.tensor([w, h]).view(1, 2, 1, 1).expand(b, -1, h, w).type_as(flow)
grid = (grid+flow).permute(0, 2, 3, 1)
input_sample = F.grid_sample(source, grid).view(b, c, -1)
return input_sample
class VGG19(torch.nn.Module):
def __init__(self):
super(VGG19, self).__init__()
features = models.vgg19(pretrained=True).features
self.relu1_1 = torch.nn.Sequential()
self.relu1_2 = torch.nn.Sequential()
self.relu2_1 = torch.nn.Sequential()
self.relu2_2 = torch.nn.Sequential()
self.relu3_1 = torch.nn.Sequential()
self.relu3_2 = torch.nn.Sequential()
self.relu3_3 = torch.nn.Sequential()
self.relu3_4 = torch.nn.Sequential()
self.relu4_1 = torch.nn.Sequential()
self.relu4_2 = torch.nn.Sequential()
self.relu4_3 = torch.nn.Sequential()
self.relu4_4 = torch.nn.Sequential()
self.relu5_1 = torch.nn.Sequential()
self.relu5_2 = torch.nn.Sequential()
self.relu5_3 = torch.nn.Sequential()
self.relu5_4 = torch.nn.Sequential()
for x in range(2):
self.relu1_1.add_module(str(x), features[x])
for x in range(2, 4):
self.relu1_2.add_module(str(x), features[x])
for x in range(4, 7):
self.relu2_1.add_module(str(x), features[x])
for x in range(7, 9):
self.relu2_2.add_module(str(x), features[x])
for x in range(9, 12):
self.relu3_1.add_module(str(x), features[x])
for x in range(12, 14):
self.relu3_2.add_module(str(x), features[x])
for x in range(14, 16):
self.relu3_3.add_module(str(x), features[x])
for x in range(16, 18):
self.relu3_4.add_module(str(x), features[x])
for x in range(18, 21):
self.relu4_1.add_module(str(x), features[x])
for x in range(21, 23):
self.relu4_2.add_module(str(x), features[x])
for x in range(23, 25):
self.relu4_3.add_module(str(x), features[x])
for x in range(25, 27):
self.relu4_4.add_module(str(x), features[x])
for x in range(27, 30):
self.relu5_1.add_module(str(x), features[x])
for x in range(30, 32):
self.relu5_2.add_module(str(x), features[x])
for x in range(32, 34):
self.relu5_3.add_module(str(x), features[x])
for x in range(34, 36):
self.relu5_4.add_module(str(x), features[x])
# don't need the gradients, just want the features
for param in self.parameters():
param.requires_grad = False
def forward(self, x):
relu1_1 = self.relu1_1(x)
relu1_2 = self.relu1_2(relu1_1)
relu2_1 = self.relu2_1(relu1_2)
relu2_2 = self.relu2_2(relu2_1)
relu3_1 = self.relu3_1(relu2_2)
relu3_2 = self.relu3_2(relu3_1)
relu3_3 = self.relu3_3(relu3_2)
relu3_4 = self.relu3_4(relu3_3)
relu4_1 = self.relu4_1(relu3_4)
relu4_2 = self.relu4_2(relu4_1)
relu4_3 = self.relu4_3(relu4_2)
relu4_4 = self.relu4_4(relu4_3)
relu5_1 = self.relu5_1(relu4_4)
relu5_2 = self.relu5_2(relu5_1)
relu5_3 = self.relu5_3(relu5_2)
relu5_4 = self.relu5_4(relu5_3)
out = {
'relu1_1': relu1_1,
'relu1_2': relu1_2,
'relu2_1': relu2_1,
'relu2_2': relu2_2,
'relu3_1': relu3_1,
'relu3_2': relu3_2,
'relu3_3': relu3_3,
'relu3_4': relu3_4,
'relu4_1': relu4_1,
'relu4_2': relu4_2,
'relu4_3': relu4_3,
'relu4_4': relu4_4,
'relu5_1': relu5_1,
'relu5_2': relu5_2,
'relu5_3': relu5_3,
'relu5_4': relu5_4,
}
return out
| [
"torch.stack",
"torch.bmm",
"torch.nn.BCEWithLogitsLoss",
"torch.exp",
"torch.sum",
"torch.nn.functional.avg_pool2d",
"torch.tensor",
"torch.nn.functional.conv2d",
"torch.zeros",
"torch.max",
"torch.nn.Sequential",
"torch.nn.functional.cosine_similarity",
"torch.rand",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.functional.interpolate",
"torch.nn.L1Loss",
"torch.from_numpy",
"torch.nn.functional.grid_sample",
"torch.mean"
] | 1.0.0 | fyviezhao/dressing-in-order | 63790663ad0420d9d2dabed22d5c56dd40422313 |
1.8 | import os
import torch
import torchvision.transforms as transforms
import torchvision.utils as vutils
from PIL import Image
from abc import abstractmethod
from numpy import inf
from logger import TensorboardWriter
from model.esrgan.utils.utils import MODEL_KEY, GENERATOR_KEY, DISCRIMINATOR_KEY
from test import save_predictions_as_imgs
# Load base low-resolution image.
fixed_lr = transforms.ToTensor()(Image.open(os.path.join("data/inputs/Set5", "butterfly.png"))).unsqueeze(0)
class BaseTrainer:
"""
Base class for all trainers
"""
def __init__(self, models, criterion, metric_ftns, optimizers, config, device, monitor_cfg_key='monitor',
epochs_cfg_key='epochs'):
self.device = device
self.fixed_lr = fixed_lr.to(self.device)
self.config = config
self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
self.models = models
self.criterion = criterion
self.metric_ftns = metric_ftns
self.optimizers = optimizers
cfg_trainer = config['trainer']
self.epochs = cfg_trainer[epochs_cfg_key]
self.save_period = cfg_trainer['save_period']
self.monitor = cfg_trainer.get(monitor_cfg_key, 'off')
# configuration to monitor model performance and save best
if self.monitor == 'off':
self.mnt_mode = 'off'
self.mnt_best = 0
else:
self.mnt_mode, self.mnt_metric = self.monitor.split()
assert self.mnt_mode in ['min', 'max']
self.mnt_best = inf if self.mnt_mode == 'min' else -inf
self.early_stop = cfg_trainer.get('early_stop', inf)
self.plot_epoch_result = cfg_trainer.get('plot_epoch_result', inf)
if self.early_stop <= 0:
self.early_stop = inf
self.start_epoch = 1
self.checkpoint_dir = config.save_dir
# setup visualization writer instance
self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])
if config.resume is not None:
self._resume_checkpoint(config.resume)
@abstractmethod
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Current epoch number
"""
raise NotImplementedError
def train(self):
"""
Full training logic
"""
not_improved_count = 0
for epoch in range(self.start_epoch, self.epochs + 1):
result = self._train_epoch(epoch)
# save logged informations into log dict
log = {'epoch': epoch}
log.update(result)
# print logged informations to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = False
if self.mnt_mode != 'off':
try:
# check whether model performance improved or not, according to specified metric(mnt_metric)
improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \
(self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)
except KeyError:
self.logger.warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.mnt_metric))
self.mnt_mode = 'off'
improved = False
if improved:
self.mnt_best = log[self.mnt_metric]
not_improved_count = 0
best = True
else:
not_improved_count += 1
if not_improved_count > self.early_stop:
self.logger.info("Validation performance didn\'t improve for {} epochs. "
"Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0:
self._save_checkpoint(epoch, save_best=best)
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param log: logging information of the epoch
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
"""
for i, model in enumerate(self.models):
optimizer = self.optimizers[i]
arch = type(model).__name__
state = {
'arch': arch,
'epoch': epoch,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'monitor_best': self.mnt_best,
'config': self.config
}
filename = str(self.checkpoint_dir / 'checkpoint-{}_epoch_{}.pth'.format(arch, epoch))
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
if save_best:
best_path = str(self.checkpoint_dir / f'model_{arch}_best.pth')
torch.save(state, best_path)
self.logger.info(f'Saving current best: model_{arch}_best.pth ...')
# At the end of each epoch, generate a super-resolved image from the fixed low-resolution input.
arch = type(self.models[MODEL_KEY]).__name__
with torch.no_grad():
sr = self.models[MODEL_KEY](self.fixed_lr)
vutils.save_image(
sr.detach(),
os.path.join(self.checkpoint_dir, f'checkpoint-{arch}_epoch_{epoch}.png'),
normalize=True
)
def _resume_checkpoint(self, resume_paths):
"""
Resume from saved checkpoints
:param resume_paths: List of checkpoint paths to resume from
"""
for i, path in enumerate(resume_paths):
self.logger.info("Loading checkpoint: {} ...".format(path))
checkpoint = torch.load(path)
self.start_epoch = checkpoint['epoch'] + 1
self.mnt_best = checkpoint['monitor_best']
if 'Generator' in checkpoint['arch']:
key = GENERATOR_KEY
arch_param = 'arch_esrgan_gen'
elif 'Discriminator' in checkpoint['arch']:
key = DISCRIMINATOR_KEY
arch_param = 'arch_esrgan_disc'
else:
key = MODEL_KEY
arch_param = 'arch_single'
# load architecture params from checkpoint.
if checkpoint['config'][arch_param] != self.config[arch_param]:
self.logger.warning(
"Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
self.models[key].load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger.warning(
"Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizers[key].load_state_dict(checkpoint['optimizer'])
self.logger.info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
| [
"torch.save",
"torch.no_grad",
"torch.load"
] | 1.8.0 | Lo1s/superresolution | 18052465694bfc2543b9af71d8012d854a516d1a |
1.7 | import torch
import torchaudio
import torchvision
from torchvision import transforms
import nltk
from nltk.stem import WordNetLemmatizer
from collections import defaultdict
# from allennlp.predictors.predictor import Predictor
# import allennlp_models.structured_prediction
import numpy as np
import re
import os
import json
from tqdm import tqdm
from itertools import combinations
from copy import deepcopy
from PIL import Image
from scipy import signal
from kaldiio import ReadHelper
# dep_parser = Predictor.from_path("https://storage.googleapis.com/allennlp-public-models/biaffine-dependency-parser-ptb-2020.04.06.tar.gz")
# dep_parser._model = dep_parser._model.cuda()
# lemmatizer = WordNetLemmatizer()
UNK = "###UNK###"
NULL = "###NULL###"
BLANK = "###BLANK###"
SIL = "SIL"
IGNORED_TOKENS = ["GARBAGE", "+BREATH+", "+LAUGH+", "+NOISE+"]
lemmatizer = WordNetLemmatizer()
def log_normalize(x):
x.add_(1e-6).log_()
mean = x.mean()
std = x.std()
return x.sub_(mean).div_(std + 1e-6)
def fix_embedding_length(emb, L, padding=0):
size = emb.size()[1:]
if emb.size(0) < L:
if padding == 0:
pad = torch.zeros((L-emb.size(0),)+size, dtype=emb.dtype)
else:
pad = padding*torch.ones((L-emb.size(0),)+size, dtype=emb.dtype)
emb = torch.cat([emb, pad], dim=0)
else:
emb = emb[:L]
return emb
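# Illustrative sketch (not part of the original code): ``fix_embedding_length`` either pads an
# embedding matrix up to L rows (with zeros or the given padding value) or truncates it down to
# L rows. ``_example_fix_embedding_length`` is a hypothetical name used only for this sketch.
def _example_fix_embedding_length():
    emb = torch.ones(3, 4)
    padded = fix_embedding_length(emb, 5)      # (5, 4); the last two rows are zeros
    truncated = fix_embedding_length(emb, 2)   # (2, 4); rows beyond L are dropped
    return padded.shape, truncated.shape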
def collate_fn_spoken_word(batch):
audios = [t[0] for t in batch]
phone_labels = [t[1] for t in batch]
labels = [t[2] for t in batch]
input_masks = [t[3] for t in batch]
phone_masks = [t[4] for t in batch]
word_masks = [t[5] for t in batch]
indices = [t[6] for t in batch]
if isinstance(audios[0], list):
audios = [
torch.nn.utils.rnn.pad_sequence(audio)\
for audio in audios
]
input_masks = [
torch.nn.utils.rnn.pad_sequence(input_mask)
for input_mask in input_masks
]
audios = torch.nn.utils.rnn.pad_sequence(audios, batch_first=True) # (bsz, n_seg, n_pos, d)
input_masks = torch.nn.utils.rnn.pad_sequence(input_masks, batch_first=True) # (bsz, n_seg, n_pos)
audios = audios.permute(0, 2, 1, 3)
input_masks = input_masks.permute(0, 2, 1)
else:
audios = torch.nn.utils.rnn.pad_sequence(audios, batch_first=True)
input_masks = torch.nn.utils.rnn.pad_sequence(input_masks, batch_first=True)
phone_labels = torch.nn.utils.rnn.pad_sequence(phone_labels, batch_first=True)
labels = torch.stack(labels)
phone_masks = torch.nn.utils.rnn.pad_sequence(phone_masks, batch_first=True)
return audios, phone_labels, labels, input_masks, phone_masks, indices
def embed(feat, method='average'):
if method == 'average':
return feat.mean(0)
elif method == 'resample':
new_feat = signal.resample(feat.detach().numpy(), 4)
return torch.FloatTensor(new_feat.flatten())
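# Illustrative sketch (not part of the original code): ``embed`` pools a variable-length frame
# sequence into a fixed-size vector, either by mean-pooling over time or by resampling to 4
# frames and flattening, so a (T, d) input becomes a 4*d vector. The helper name is hypothetical.
def _example_embed():
    feat = torch.rand(17, 8)                        # 17 frames of 8-d features
    mean_vec = embed(feat, method='average')        # shape (8,)
    resampled_vec = embed(feat, method='resample')  # shape (32,) == 4 * 8
    return mean_vec.shape, resampled_vec.shape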
class SpokenWordDataset(torch.utils.data.Dataset):
def __init__(
self, data_path,
preprocessor, split,
splits = {
"train": ["train-clean-100", "train-clean-360"],
"validation": ["dev-clean"],
"test": ["dev-clean"],
},
augment=False,
use_segment=False,
audio_feature="cpc",
phone_label="predicted",
ds_method="average",
sample_rate=16000,
min_class_size=50,
n_overlap=0,
debug=False
):
self.preprocessor = preprocessor
if debug:
splits['train'] = [splits['train'][0]]
self.splits = splits[split]
self.data_path = data_path
self.use_segment = use_segment
self.ds_method = ds_method
self.sample_rate = sample_rate
self.n_overlap = n_overlap
self.debug = debug
data = []
for sp in self.splits:
# Load data paths to audio and visual features
examples = load_data_split(preprocessor.dataset_name,
data_path, sp,
min_class_size=min_class_size,
audio_feature=audio_feature,
phone_label=phone_label,
debug=debug)
data.extend(examples)
print("Number of {} audio files = {}".format(split, len(examples)))
# Set up transforms
self.audio_transforms = [
torchaudio.transforms.MelSpectrogram(
sample_rate=sample_rate, win_length=sample_rate * 25 // 1000,
n_mels=preprocessor.num_features,
hop_length=sample_rate * 10 // 1000,
),
torchvision.transforms.Lambda(log_normalize),
]
if augment:
augmentation = [
torchaudio.transforms.FrequencyMasking(27, iid_masks=True),
torchaudio.transforms.FrequencyMasking(27, iid_masks=True),
torchaudio.transforms.TimeMasking(100, iid_masks=True),
torchaudio.transforms.TimeMasking(100, iid_masks=True),
]
self.audio_transforms.extend(augmentation)
self.audio_transforms = torchvision.transforms.Compose(self.audio_transforms)
audio = [example["audio"] for example in data]
text = [example["text"] for example in data]
phonemes = [example["phonemes"] for example in data]
true_phonemes = [example["true_phonemes"] for example in data]
self.dataset = [list(item) for item in zip(audio, text, phonemes, true_phonemes)]
self.class_to_indices = defaultdict(list)
for idx, item in enumerate(self.dataset):
self.class_to_indices[item[1]].append(idx)
self.audio_feature_type = audio_feature
def load_audio(self, audio_file):
if self.audio_feature_type in ["cpc", "cpc_big"]:
if audio_file.split('.')[-1] == "txt":
audio = np.loadtxt(audio_file)
else:
with ReadHelper(f"ark: gunzip -c {audio_file} |") as ark_f:
for k, audio in ark_f:
continue
inputs = torch.FloatTensor(audio)
elif self.audio_feature_type in ["bnf", "bnf+cpc"]:
if audio_file.split('.')[-1] == "txt":
audio = np.loadtxt(audio_file)
else:
with ReadHelper(f"ark: gunzip -c {audio_file} |") as ark_f:
for k, audio in ark_f:
continue
if self.audio_feature_type == "bnf+cpc":
cpc_feat = np.loadtxt(audio_file.replace("bnf", "cpc"))
feat_len = min(audio.shape[0], cpc_feat.shape[0])
audio = np.concatenate([audio[:feat_len], cpc_feat[:feat_len]], axis=-1)
inputs = torch.FloatTensor(audio)
elif self.audio_feature_type in ['vq-wav2vec', 'wav2vec', 'wav2vec2']:
audio, _ = torchaudio.load(audio_file)
inputs = audio.squeeze(0)
else: Exception(f"Audio feature type {self.audio_feature_type} not supported")
input_mask = torch.ones(inputs.size(0))
return inputs, input_mask
def segment(self, feat, segments,
method="average"):
"""
Args:
feat : (num. of frames, feature dim.)
segments : a list of dicts of phoneme boundaries
Returns:
sfeat : (max num. of segments, feature dim.)
mask : (max num. of segments,)
"""
sfeats = []
word_begin = segments[0]["begin"]
dur = segments[-1]["end"] - segments[0]["begin"]
for i, segment in enumerate(segments):
if segment["text"] == SIL:
continue
phn = segment["text"]
begin = int(round((segment["begin"]-word_begin)*100, 3))
end = int(round((segment["end"]-word_begin)*100, 3))
if self.n_overlap > 0:
begin = max(begin - self.n_overlap, 0)
end = min(end + self.n_overlap, feat.size(0))
dur = max(end - begin, 1)
if begin >= feat.size(0):
print(f'Warning: ({phn}, {begin}, {end}) begin idx {begin} >= feature size {feat.size(0)}')
segment_feat = feat[-1]
elif begin != end:
segment_feat = embed(feat[begin:end], method=method)
else:
segment_feat = embed(feat[begin:end+1], method=method)
if torch.any(torch.isnan(segment_feat)):
print(f'Bad segment feature for feature of size {feat.size()}, begin {begin}, end {end}')
sfeats.append(segment_feat)
sfeat = torch.stack(sfeats)
if method == "no-op":
mask = torch.zeros(len(feat), len(sfeats))
else:
mask = torch.ones(len(sfeats))
return sfeat, mask
def unsegment(self, sfeat, segments):
"""
Args:
sfeat : (num. of segments, feature dim.)
segments : a list of dicts of phoneme boundaries
Returns:
feat : (num. of frames, feature dim.)
"""
if sfeat.ndim == 1:
sfeat = sfeat.unsqueeze(-1)
word_begin = segments[0]['begin']
dur = segments[-1]["end"] - segments[0]["begin"]
nframes = int(round(dur * 100, 3))
feat = torch.zeros((nframes, *sfeat.size()[1:]))
for i, segment in enumerate(segments):
if segment["text"] == SIL:
continue
begin = int(round((segment["begin"]-word_begin)*100, 3))
end = int(round((segment["end"]-word_begin)*100, 3))
if i >= sfeat.size(0):
break
if begin != end:
feat[begin:end] = sfeat[i]
else:
feat[begin:end+1] = sfeat[i]
return feat.squeeze(-1)
def __getitem__(self, idx):
audio_file, label, phoneme_dicts, _ = self.dataset[idx]
audio_inputs, input_mask = self.load_audio(audio_file)
if self.use_segment:
audio_inputs, input_mask = self.segment(audio_inputs,
phoneme_dicts,
method=self.ds_method)
phonemes = [phn_dict["text"] for phn_dict in phoneme_dicts]
word_labels = self.preprocessor.to_word_index([label])
phone_labels = self.preprocessor.to_index(phonemes)
if self.use_segment:
word_mask = torch.zeros(1, len(phoneme_dicts), len(phoneme_dicts))
else:
word_mask = torch.zeros(1, len(audio_inputs), len(audio_inputs))
for t in range(len(phoneme_dicts)):
word_mask[0, t, t] = 1.
phone_mask = torch.ones(len(phonemes))
return audio_inputs,\
phone_labels,\
word_labels,\
input_mask,\
phone_mask,\
word_mask,\
idx
def __len__(self):
return len(self.dataset)
class SpokenWordPreprocessor:
def __init__(
self,
dataset_name,
data_path,
num_features,
splits = {
"train": ["train-clean-100", "train-clean-360"],
"validation": ["dev-clean"],
"test": ["dev-clean"]
},
tokens_path=None,
lexicon_path=None,
use_words=False,
prepend_wordsep=False,
audio_feature="mfcc",
phone_label="predicted",
sample_rate=16000,
min_class_size=50,
ignore_index=-100,
use_blank=True,
debug=False,
):
self.dataset_name = dataset_name
self.data_path = data_path
self.num_features = num_features
self.ignore_index = ignore_index
self.min_class_size = min_class_size
self.use_blank = use_blank
self.wordsep = " "
self._prepend_wordsep = prepend_wordsep
if debug:
splits['train'] = [splits['train'][0]]
metadata_file = os.path.join(data_path, f"{dataset_name}.json")
data = []
for split_type, spl in splits.items():
if split_type == 'test_oos':
continue
for sp in spl:
data.extend(load_data_split(dataset_name,
data_path, sp,
audio_feature=audio_feature,
phone_label=phone_label,
min_class_size=self.min_class_size,
debug=debug))
visual_words = set()
tokens = set()
for ex in data:
visual_words.add(ex["text"])
for phn in ex["phonemes"]:
if phone_label == "groundtruth" and not "phoneme" in phn["text"]:
phn["text"] = re.sub(r"[0-9]", "", phn["text"])
tokens.add(phn["text"])
self.tokens = sorted(tokens)
self.visual_words = sorted(visual_words)
if self.use_blank:
self.tokens = [BLANK]+self.tokens
self.visual_words = [BLANK]+self.visual_words
self.tokens_to_index = {t:i for i, t in enumerate(self.tokens)}
self.words_to_index = {t:i for i, t in enumerate(self.visual_words)}
print(f"Preprocessor: number of phone classes: {self.num_tokens}")
print(f"Preprocessor: number of visual word classes: {self.num_visual_words}")
@property
def num_tokens(self):
return len(self.tokens)
@property
def num_visual_words(self):
return len(self.visual_words)
def to_index(self, sent):
tok_to_idx = self.tokens_to_index
return torch.LongTensor([tok_to_idx.get(t, 0) for t in sent])
def to_word_index(self, sent):
tok_to_idx = self.words_to_index
return torch.LongTensor([tok_to_idx.get(t, 0) for t in sent])
def to_text(self, indices):
text = []
for t, i in enumerate(indices):
if (i == 0) and (t != 0):
prev_token = text[t-1]
text.append(prev_token)
else:
text.append(self.tokens[i])
return text
def to_word_text(self, indices):
return [self.visual_words[i] for i in indices]
def tokens_to_word_text(self, indices):
T = len(indices)
path = [self.visual_words[i] for i in indices]
sent = []
for i in range(T):
if path[i] == BLANK:
continue
elif (i != 0) and (path[i] == path[i-1]):
continue
else:
sent.append(path[i])
return sent
def tokens_to_text(self, indices):
T = len(indices)
path = self.to_text(indices)
sent = []
for i in range(T):
if path[i] == BLANK:
continue
elif (i != 0) and (path[i] == path[i-1]):
continue
else:
sent.append(path[i])
return sent
def load_data_split(dataset_name,
data_path, split,
audio_feature="mfcc",
phone_label="predicted",
min_class_size=50,
max_keep_size=1000,
debug=False):
"""
Returns:
examples : a list of mappings of
{ "audio" : filename of audio,
"text" : a list of tokenized words for the class name,
}
"""
examples = []
if os.path.exists(os.path.join(data_path, f'{split}.json')):
word_files = [f'{split}.json']
else:
word_files = [word_file for word_file in os.listdir(data_path) if word_file.split('.')[-1] == 'json']
for word_file in word_files:
word_f = open(os.path.join(data_path, word_file), "r")
label_counts = dict()
for line in word_f:
if debug and len(examples) >= 100:
break
word = json.loads(line.rstrip("\n"))
label = lemmatizer.lemmatize(word["label"].lower())
if not label in label_counts:
label_counts[label] = 1
else:
label_counts[label] += 1
if label_counts[label] > max_keep_size:
continue
audio_path = None
audio_id = word["audio_id"]
word_id = word['word_id']
if audio_feature in ["mfcc", "vq-wav2vec", "wav2vec2", "wav2vec"]:
audio_path = os.path.join(data_path, split, f"{audio_id}_{word_id}.wav")
if not os.path.exists(audio_path):
word_id = int(word_id)
audio_file = f"{audio_id}_{word_id:04d}.wav"
audio_path = os.path.join(data_path, split, audio_file)
elif audio_feature in ["cpc", "cpc_big"]:
audio_path = os.path.join(data_path, f"../{dataset_name}_{audio_feature}_txt/{audio_id}_{word_id}.txt")
if not os.path.exists(audio_path):
audio_path = os.path.join(data_path, f"../{dataset_name}_{audio_feature}/{audio_id}_{word_id}.ark.gz")
if not os.path.exists(audio_path):
word_id = int(word_id)
audio_file = f"{audio_id}_{word_id:04d}.txt"
audio_path = os.path.join(data_path, f"../{dataset_name}_{audio_feature}_txt", audio_file)
elif audio_feature in ["bnf", "bnf+cpc"]:
audio_file = f"{audio_id}_{word_id}.txt"
audio_path = os.path.join(data_path, f"../{dataset_name}_bnf_txt", audio_file)
else: Exception(f"Audio feature type {audio_feature} not supported")
true_phonemes = word["phonemes"]
if "children" in true_phonemes:
true_phonemes = [phn for phn in true_phonemes["children"] if phn["text"] != SIL]
if len(true_phonemes) == 0:
continue
for phn_idx in range(len(true_phonemes)): # In Mboshi, each phoneme is written as ``phoneme{index}''
if not "phoneme" in true_phonemes[phn_idx]["text"]:
true_phonemes[phn_idx]["text"] = re.sub(r"[0-9]", "", true_phonemes[phn_idx]["text"])
noisy = False
for phn in true_phonemes:
if phn["text"] in IGNORED_TOKENS or (phn["text"][0] == "+"):
noisy = True
break
if noisy:
continue
dur = round(true_phonemes[-1]['end'] - true_phonemes[0]['begin'], 3)
phonemes = None
if phone_label == "groundtruth":
phonemes = deepcopy(true_phonemes)
elif phone_label == "multilingual":
phonemes = [phn for phn in word["predicted_segments_multilingual"] if phn["text"] != SIL]
elif phone_label == "multilingual_phones":
phonemes = deepcopy(word["multilingual_phones"])
elif phone_label == "predicted":
phonemes = [phn for phn in word["predicted_segments"] if phn["text"] != SIL]
elif phone_label == "predicted_wav2vec2":
if not "predicted_segments_wav2vec2" in word:
continue
phonemes = [phn for phn in word["predicted_segments_wav2vec2"] if phn["text"] != SIL]
else:
raise ValueError(f"Invalid phone label type: {phone_label}")
phonemes = [phn for phn in phonemes if round(phn['end'] - phonemes[0]['begin'], 3) <= dur]
if not len(phonemes):
print(f'Skip example without segments: {phonemes}')
continue
if phonemes[-1]['end'] - phonemes[0]['begin'] != dur:
phonemes[-1]['end'] = phonemes[0]['begin'] + dur
example = {"audio": audio_path,
"text": label,
"phonemes": phonemes,
"true_phonemes": true_phonemes}
examples.append(example)
word_f.close()
return examples
if __name__ == "__main__":
preproc = SpokenWordPreprocessor(num_features=80, data_path="/ws/ifp-53_2/hasegawa/lwang114/data/flickr30k/")
| [
"torch.cat",
"torch.stack",
"torch.isnan",
"torch.nn.utils.rnn.pad_sequence",
"torch.FloatTensor"
] | 1.7.1 | lwang114/InformationQuantizer | 45419140708e612495fd324a9e5724306d4d4129 |
0.4 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# This one handles the 'cities' problem.
class GraphAttentionLayer(nn.Module):
"""
Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(GraphAttentionLayer, self).__init__()
self.dropout = dropout
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
# Xavier (Glorot) initialization of W; in_features is F in the paper and out_features is F'.
nn.init.xavier_uniform_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(2*out_features, 1)))
nn.init.xavier_uniform_(self.a.data, gain=1.414)
self.leakyrelu = nn.LeakyReLU(self.alpha)
def forward(self, input, adj):
h = torch.mm(input, self.W) # linear projection of node features: h = input @ W
N = h.size()[0] # number of nodes
a_input = torch.cat([h.repeat(1, N).view(N * N, -1), h.repeat(N, 1)], dim=1).view(N, -1, 2 * self.out_features)
e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(2))
zero_vec = -9e15*torch.ones_like(e)
attention = torch.where(adj > 0, e, zero_vec)
attention = F.softmax(attention, dim=1)
attention = F.dropout(attention, self.dropout, training=self.training)
h_prime = torch.matmul(attention, h)
if self.concat:
return F.elu(h_prime)
else:
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
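# Illustrative sketch (not part of the original code): running the dense GAT layer on a tiny
# graph. ``adj`` is a binary adjacency matrix with self-loops; the layer projects 8-d node
# features to 4-d and re-weights neighbours with the learned attention vector ``a``.
# ``_example_dense_gat_layer`` is a hypothetical helper name.
def _example_dense_gat_layer():
    layer = GraphAttentionLayer(in_features=8, out_features=4,
                                dropout=0.0, alpha=0.2, concat=True)
    x = torch.rand(5, 8)            # 5 nodes, 8 features each
    adj = torch.eye(5)              # self-loops only
    adj[0, 1] = adj[1, 0] = 1.0     # one undirected edge
    return layer(x, adj)            # (5, 4) node embeddings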
# This one handles the 'content' problem.
class SpecialSpmmFunction(torch.autograd.Function):
"""Special function for only sparse region backpropataion layer."""
@staticmethod
def forward(ctx, indices, values, shape, b):
assert indices.requires_grad == False
a = torch.sparse_coo_tensor(indices, values, shape)
ctx.save_for_backward(a, b)
ctx.N = shape[0]
return torch.matmul(a, b)
@staticmethod
def backward(ctx, grad_output):
a, b = ctx.saved_tensors
grad_values = grad_b = None
if ctx.needs_input_grad[1]:
grad_a_dense = grad_output.matmul(b.t())
edge_idx = a._indices()[0, :] * ctx.N + a._indices()[1, :]
grad_values = grad_a_dense.view(-1)[edge_idx]
if ctx.needs_input_grad[3]:
grad_b = a.t().matmul(grad_output)
return None, grad_values, None, grad_b
class SpecialSpmm(nn.Module):
def forward(self, indices, values, shape, b):
return SpecialSpmmFunction.apply(indices, values, shape, b)
class SpGraphAttentionLayer(nn.Module):
"""
Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, dropout, alpha, concat=True):
super(SpGraphAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.alpha = alpha
self.concat = concat
self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
nn.init.xavier_normal_(self.W.data, gain=1.414)
self.a = nn.Parameter(torch.zeros(size=(1, 2*out_features)))
nn.init.xavier_normal_(self.a.data, gain=1.414)
self.dropout = nn.Dropout(dropout)
self.leakyrelu = nn.LeakyReLU(self.alpha)
self.special_spmm = SpecialSpmm()
def forward(self, input, adj):
dv = 'cuda' if input.is_cuda else 'cpu'
N = input.size()[0]
edge = adj.nonzero().t()
h = torch.mm(input, self.W)
# h: N x out
assert not torch.isnan(h).any()
# Self-attention on the nodes - Shared attention mechanism
edge_h = torch.cat((h[edge[0, :], :], h[edge[1, :], :]), dim=1).t()
# edge: 2*D x E
edge_e = torch.exp(-self.leakyrelu(self.a.mm(edge_h).squeeze()))
assert not torch.isnan(edge_e).any()
# edge_e: E
e_rowsum = self.special_spmm(edge, edge_e, torch.Size([N, N]), torch.ones(size=(N,1), device=dv))
# e_rowsum: N x 1
edge_e = self.dropout(edge_e)
# edge_e: E
h_prime = self.special_spmm(edge, edge_e, torch.Size([N, N]), h)
assert not torch.isnan(h_prime).any()
# h_prime: N x out
h_prime = h_prime.div(e_rowsum)
# h_prime: N x out
assert not torch.isnan(h_prime).any()
if self.concat:
# if this layer is not last layer,
return F.elu(h_prime)
else:
# if this layer is last layer,
return h_prime
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
| [
"torch.zeros",
"torch.Size",
"torch.nn.Dropout",
"torch.cat",
"torch.isnan",
"torch.nn.LeakyReLU",
"torch.nn.functional.dropout",
"torch.nn.init.xavier_uniform_",
"torch.nn.functional.elu",
"torch.mm",
"torch.ones",
"torch.sparse_coo_tensor",
"torch.nn.functional.softmax",
"torch.ones_like",
"torch.nn.init.xavier_normal_",
"torch.matmul",
"torch.where"
] | 0.4.1 | zhangbo2008/GAT_network | c871a2aceceaa5d638c96c21d23d64ed07c07b4c |
1.10 | import os
import pickle
import random
import zlib
from os import path
from typing import List, Optional
import pandas as pd
import torch
from transformers import DistilBertTokenizerFast
import wandb
from artificial_detection.data.data import BinaryDataset, TextDetectionDataset
class MockDataset:
"""
Mock dataset for testing.
"""
dataset = [
{"ru": "добрый день", "en": "good evening",},
{"ru": "извините", "en": "i am sorry",},
]
_translations = ["good evening", "i am sorry"]
dataset_name = "mock"
@classmethod
def targets(cls) -> List[str]:
return [sample["en"] for sample in cls.dataset]
@classmethod
def translations(cls) -> List[str]:
return cls._translations
@classmethod
def list(cls) -> List[str]:
dataset_list = []
for dct in cls.dataset:
dataset_list.extend([dct["ru"], dct["en"]])
return dataset_list
def get_dvc_storage_path() -> str:
"""
Get the full path to the DVC storage.
Returns
-------
str
Path to the DVC Storage.
"""
dir_path = path.dirname(path.dirname(path.realpath(__file__)))
return path.join(dir_path, "resources/data")
def get_dataset_path(dataset_name: str, langs: Optional[List[str]] = None, ext: str = "bin") -> str:
dvc_path = get_dvc_storage_path()
if langs:
dataset_real_name = f"{dataset_name}.{langs[0]}-{langs[1]}.{ext}"
else:
dataset_real_name = f"{dataset_name}.{ext}"
return path.join(dvc_path, dataset_real_name)
def load_binary_dataset(dataset_name: str, langs: Optional[List[str]] = None, ext: str = "bin") -> BinaryDataset:
dataset_path = get_dataset_path(dataset_name, langs=langs, ext=ext)
with open(dataset_path, "rb") as file:
compressed_dataset = file.read()
dumped_dataset = zlib.decompress(compressed_dataset)
dataset = pickle.loads(dumped_dataset)
return dataset
def save_binary_dataset(
dataset: BinaryDataset, dataset_name: str, langs: Optional[List[str]] = None, ext: str = "bin"
) -> None:
dataset_path = get_dataset_path(dataset_name, langs=langs, ext=ext)
with open(dataset_path, "wb") as file:
dumped_dataset = pickle.dumps(dataset, protocol=pickle.HIGHEST_PROTOCOL)
compressed_dataset = zlib.compress(dumped_dataset)
file.write(compressed_dataset)
def translations_to_torch_dataset(
targets: List[str], translations: List[str], easy_nmt_offline: Optional[bool] = None, device: Optional[str] = None
) -> TextDetectionDataset:
corpus = TextDetectionDataset.get_corpus(targets, translations)
labels = torch.FloatTensor([0, 1] * len(targets))
tokenizer_path = "resources/data/tokenizer" if easy_nmt_offline else "distilbert-base-uncased"
tokenizer = DistilBertTokenizerFast.from_pretrained(tokenizer_path)
encodings = tokenizer(corpus, truncation=True, padding=True)
encodings, labels = TextDetectionDataset.to_device(encodings, labels, device=device)
dataset = TextDetectionDataset(encodings, labels, device=device)
return dataset
def save_translations_texts(
sources: List[str], targets: List[str], translations: List[str], dataset_name: str, src_lang: str, trg_lang: str
) -> None:
"""
Saves data to csv.
"""
print("Saving sources/translations in csv...")
df_data = list(zip(sources, targets, translations))
df = pd.DataFrame(data=df_data, columns=["sources", "targets", "translations"])
csv_path = get_dataset_path(f"{dataset_name}.{src_lang}-{trg_lang}", ext="csv")
df.to_csv(csv_path, index=False)
def ord_cyrillic(c: str) -> int:
if "а" <= c <= "я":
return ord(c) - ord("а") + ord("a") # - cyrillic + latinic
if "А" <= c <= "Я":
return ord(c) - ord("А") + ord("A")
return ord(c)
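# Illustrative sketch (not part of the original code): ``ord_cyrillic`` maps Cyrillic letters
# onto the corresponding Latin code points, so Cyrillic 'а' and Latin 'a' share the same
# ordinal, while non-Cyrillic characters keep their usual ``ord`` value. The helper name is
# hypothetical.
def _example_ord_cyrillic():
    assert ord_cyrillic("а") == ord("a")   # Cyrillic lowercase 'а' -> Latin 'a'
    assert ord_cyrillic("Я") == ord("A") + (ord("Я") - ord("А"))
    assert ord_cyrillic("z") == ord("z")   # non-Cyrillic characters are unchanged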
def setup_experiment_tracking(run_name: str) -> None:
os.environ["WANDB_MODE"] = "offline"
token = os.environ.get("WANDB_TOKEN", None)
wandb.login(key=token)
wandb.init(project="artificial-text-detection", name=run_name)
def stop_experiment_tracking() -> None:
wandb.finish()
def fix_random_seed(seed: int = 42) -> None:
"""
Fixing a random seed.
"""
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
| [
"torch.manual_seed",
"torch.cuda.manual_seed_all"
] | 1.10.0 | MaratSaidov/artificial-text-detection | 74b2100294232ec361db84fdc3a24fdeba1fce49 |
1.7 | # ------------------------------------------------------------------------
# Conditional DETR
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copied from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from util.misc import NestedTensor
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
mask = tensor_list.mask
assert mask is not None
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
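# Illustrative sketch (not part of the original Conditional DETR code): a shape check for the
# sine embedding using a minimal stand-in for ``util.misc.NestedTensor`` (anything exposing
# ``.tensors`` and ``.mask`` works here). ``_example_sine_embedding`` is a hypothetical name.
def _example_sine_embedding():
    from collections import namedtuple
    _FakeNested = namedtuple("_FakeNested", ["tensors", "mask"])
    feats = torch.zeros(2, 256, 16, 16)              # backbone feature map
    mask = torch.zeros(2, 16, 16, dtype=torch.bool)  # no padded positions
    pos_embed = PositionEmbeddingSine(num_pos_feats=128, normalize=True)
    pos = pos_embed(_FakeNested(feats, mask))
    return pos.shape                                 # torch.Size([2, 256, 16, 16])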
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, num_pos_feats=256):
super().__init__()
self.row_embed = nn.Embedding(50, num_pos_feats)
self.col_embed = nn.Embedding(50, num_pos_feats)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.row_embed.weight)
nn.init.uniform_(self.col_embed.weight)
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
h, w = x.shape[-2:]
i = torch.arange(w, device=x.device)
j = torch.arange(h, device=x.device)
x_emb = self.col_embed(i)
y_emb = self.row_embed(j)
pos = torch.cat([
x_emb.unsqueeze(0).repeat(h, 1, 1),
y_emb.unsqueeze(1).repeat(1, w, 1),
], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim // 2
if args.position_embedding in ('v2', 'sine'):
# TODO find a better way of exposing other arguments
position_embedding = PositionEmbeddingSine(N_steps, normalize=True)
elif args.position_embedding in ('v3', 'learned'):
position_embedding = PositionEmbeddingLearned(N_steps)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
| [
"torch.nn.init.uniform_",
"torch.cat",
"torch.nn.Embedding",
"torch.arange"
] | 1.7.0 | miranmanesh/ConditionalDETR | c7d24c221125daa6322adc9915af77701240f063 |
1.8 | """
Adapted from the original WHAMR! script to obtain the Room Impulse Responses (RIRs)
Authors
* Cem Subakan 2021
"""
import os
import pandas as pd
import argparse
import torchaudio
from recipes.WHAMandWHAMR.meta.wham_room import WhamRoom
from scipy.signal import resample_poly
import torch
from speechbrain.pretrained.fetching import fetch
from tqdm import tqdm
import pyroomacoustics
def create_rirs(output_dir, sr=8000):
"""
This function creates the room impulse responses from the WHAMR! dataset
The implementation is based on the scripts from http://wham.whisper.ai/
Arguments:
------
output_dir (str) : directory for saving the RIRs
sr (int) : sampling rate with which we save
"""
assert (
pyroomacoustics.__version__ == "0.3.1"
), "The pyroomacoustics version needs to be 0.3.1"
os.makedirs(output_dir)
metafilesdir = os.path.dirname(os.path.realpath(__file__))
filelist = [
"mix_2_spk_filenames_tr.csv",
"mix_2_spk_filenames_cv.csv",
"mix_2_spk_filenames_tt.csv",
"reverb_params_tr.csv",
"reverb_params_cv.csv",
"reverb_params_tt.csv",
]
savedir = os.path.join(metafilesdir, "data")
for fl in filelist:
if not os.path.exists(os.path.join(savedir, fl)):
fetch(
"metadata/" + fl,
"speechbrain/sepformer-whamr",
savedir=savedir,
save_filename=fl,
)
FILELIST_STUB = os.path.join(
metafilesdir, "data", "mix_2_spk_filenames_{}.csv"
)
SPLITS = ["tr"]
reverb_param_stub = os.path.join(
metafilesdir, "data", "reverb_params_{}.csv"
)
for splt in SPLITS:
wsjmix_path = FILELIST_STUB.format(splt)
wsjmix_df = pd.read_csv(wsjmix_path)
reverb_param_path = reverb_param_stub.format(splt)
reverb_param_df = pd.read_csv(reverb_param_path)
utt_ids = wsjmix_df.output_filename.values
for output_name in tqdm(utt_ids):
utt_row = reverb_param_df[
reverb_param_df["utterance_id"] == output_name
]
room = WhamRoom(
[
utt_row["room_x"].iloc[0],
utt_row["room_y"].iloc[0],
utt_row["room_z"].iloc[0],
],
[
[
utt_row["micL_x"].iloc[0],
utt_row["micL_y"].iloc[0],
utt_row["mic_z"].iloc[0],
],
[
utt_row["micR_x"].iloc[0],
utt_row["micR_y"].iloc[0],
utt_row["mic_z"].iloc[0],
],
],
[
utt_row["s1_x"].iloc[0],
utt_row["s1_y"].iloc[0],
utt_row["s1_z"].iloc[0],
],
[
utt_row["s2_x"].iloc[0],
utt_row["s2_y"].iloc[0],
utt_row["s2_z"].iloc[0],
],
utt_row["T60"].iloc[0],
)
room.generate_rirs()
rir = room.rir_reverberant
for i, mics in enumerate(rir):
for j, source in enumerate(mics):
h = resample_poly(source, sr, 16000)
h_torch = torch.from_numpy(h).float().unsqueeze(0)
torchaudio.save(
os.path.join(
output_dir, "{}_{}_".format(i, j) + output_name,
),
h_torch,
sr,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output-dir",
type=str,
required=True,
help="The output directory for saving the rirs for random augmentation style",
)
args = parser.parse_args()
create_rirs(args.output_dir)
| [
"torch.from_numpy"
] | 1.8.0 | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 |
1.8 | """Library implementing complex-valued convolutional neural networks.
Authors
* Titouan Parcollet 2020
"""
import torch
import torch.nn as nn
import logging
import torch.nn.functional as F
from speechbrain.nnet.CNN import get_padding_elem
from speechbrain.nnet.complex_networks.c_ops import (
unitary_init,
complex_init,
affect_conv_init,
complex_conv_op,
)
logger = logging.getLogger(__name__)
class CConv1d(torch.nn.Module):
"""This function implements complex-valued 1d convolution.
Arguments
---------
out_channels : int
Number of output channels. Please note
that these are complex-valued neurons. If 256
channels are specified, the output dimension
will be 512.
kernel_size : int
Kernel size of the convolutional filters.
stride : int, optional
Stride factor of the convolutional filters (default 1).
dilation : int, optional
Dilation factor of the convolutional filters (default 1).
padding : str, optional
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape.
"causal" results in causal (dilated) convolutions. (default "same")
padding_mode : str, optional
This flag specifies the type of padding. See torch.nn documentation
for more information (default "reflect").
groups : int, optional
This option specifies the convolutional groups. See torch.nn
documentation for more information (default 1).
bias : bool, optional
If True, the additive bias b is adopted (default True).
init_criterion : str, optional
(glorot, he).
This parameter controls the initialization criterion of the weights.
It is combined with weights_init to build the initialization method of
the complex-valued weights. (default "glorot")
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights. "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle. (default "complex")
More details in: "Deep Complex Networks", Trabelsi C. et al.
Example
-------
>>> inp_tensor = torch.rand([10, 16, 30])
>>> cnn_1d = CConv1d(
... input_shape=inp_tensor.shape, out_channels=12, kernel_size=5
... )
>>> out_tensor = cnn_1d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 16, 24])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape,
stride=1,
dilation=1,
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
init_criterion="glorot",
weight_init="complex",
):
super().__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.groups = groups
self.bias = bias
self.padding_mode = padding_mode
self.unsqueeze = False
self.init_criterion = init_criterion
self.weight_init = weight_init
self.in_channels = self._check_input(input_shape) // 2
# Managing the weight initialization and bias by directly setting the
# correct function
(self.k_shape, self.w_shape) = self._get_kernel_and_weight_shape()
self.real_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.imag_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
if self.bias:
self.b = torch.nn.Parameter(torch.Tensor(2 * self.out_channels))
self.b.data.fill_(0)
else:
self.b = None
self.winit = {"complex": complex_init, "unitary": unitary_init}[
self.weight_init
]
affect_conv_init(
self.real_weight,
self.imag_weight,
self.kernel_size,
self.winit,
self.init_criterion,
)
def forward(self, x):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor
(batch, time, channel).
            Input to convolve. 3d tensors are expected.
"""
# (batch, channel, time)
x = x.transpose(1, -1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
elif self.padding == "causal":
num_pad = (self.kernel_size - 1) * self.dilation
x = F.pad(x, (num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same', 'valid' or 'causal'. Got %s."
% (self.padding)
)
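        # complex_conv_op (from c_ops) is assumed to realise the complex product
        # W * h = (A + iB)(x + iy) = (Ax - By) + i(Ay + Bx) with real-valued
        # convolutions, following "Deep Complex Networks" (Trabelsi et al.).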
wx = complex_conv_op(
x,
self.real_weight,
self.imag_weight,
self.b,
stride=self.stride,
padding=0,
dilation=self.dilation,
conv1d=True,
)
wx = wx.transpose(1, -1)
return wx
def _manage_padding(self, x, kernel_size, dilation, stride):
"""This function performs zero-padding on the time axis
        such that its length is unchanged after the convolution.
Arguments
---------
x : torch.Tensor
Input tensor.
kernel_size : int
Kernel size.
dilation : int
Dilation.
stride : int
Stride.
"""
# Detecting input shape
L_in = x.shape[-1]
# Time padding
padding = get_padding_elem(L_in, stride, kernel_size, dilation)
# Applying padding
x = F.pad(x, tuple(padding), mode=self.padding_mode)
return x
def _check_input(self, input_shape):
"""Checks the input and returns the number of input channels.
"""
if len(input_shape) == 3:
in_channels = input_shape[2]
else:
            raise ValueError(
                "ComplexConv1d expects 3d inputs. Got " + str(input_shape)
            )
# Kernel size must be odd
if self.kernel_size % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
# Check complex format
if in_channels % 2 != 0:
            raise ValueError(
                "Complex Tensors must have dimensions divisible by 2."
                " Got in_channels = " + str(in_channels)
            )
return in_channels
def _get_kernel_and_weight_shape(self):
""" Returns the kernel size and weight shape for convolutional layers.
"""
ks = self.kernel_size
w_shape = (self.out_channels, self.in_channels) + tuple((ks,))
return ks, w_shape
class CConv2d(nn.Module):
"""This function implements complex-valued 1d convolution.
Arguments
---------
out_channels : int
Number of output channels. Please note
that these are complex-valued neurons. If 256
channels are specified, the output dimension
will be 512.
kernel_size : int
Kernel size of the convolutional filters.
stride : int, optional
Stride factor of the convolutional filters (default 1).
dilation : int, optional
Dilation factor of the convolutional filters (default 1).
padding : str, optional
(same, valid, causal). If "valid", no padding is performed.
If "same" and stride is 1, output shape is same as input shape.
"causal" results in causal (dilated) convolutions. (default "same")
padding_mode : str, optional
This flag specifies the type of padding (default "reflect").
See torch.nn documentation for more information.
groups : int, optional
This option specifies the convolutional groups (default 1). See torch.nn
documentation for more information.
bias : bool, optional
If True, the additive bias b is adopted (default True).
init_criterion : str , optional
(glorot, he).
This parameter controls the initialization criterion of the weights (default "glorot").
        It is combined with weight_init to build the initialization method of
the complex-valued weights.
weight_init : str, optional
(complex, unitary).
This parameter defines the initialization procedure of the
complex-valued weights (default complex). "complex" will generate random complex-valued
weights following the init_criterion and the complex polar form.
"unitary" will normalize the weights to lie on the unit circle.
More details in: "Deep Complex Networks", Trabelsi C. et al.
Example
-------
>>> inp_tensor = torch.rand([10, 16, 30, 30])
>>> cnn_2d = CConv2d(
... input_shape=inp_tensor.shape, out_channels=12, kernel_size=5
... )
>>> out_tensor = cnn_2d(inp_tensor)
>>> out_tensor.shape
torch.Size([10, 16, 30, 24])
"""
def __init__(
self,
out_channels,
kernel_size,
input_shape,
stride=1,
dilation=1,
padding="same",
groups=1,
bias=True,
padding_mode="reflect",
init_criterion="glorot",
weight_init="complex",
):
super().__init__()
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.groups = groups
self.bias = bias
self.padding_mode = padding_mode
self.unsqueeze = False
self.init_criterion = init_criterion
self.weight_init = weight_init
# k -> [k,k]
if isinstance(self.kernel_size, int):
self.kernel_size = [self.kernel_size, self.kernel_size]
if isinstance(self.dilation, int):
self.dilation = [self.dilation, self.dilation]
if isinstance(self.stride, int):
self.stride = [self.stride, self.stride]
self.in_channels = self._check_input(input_shape) // 2
# Managing the weight initialization and bias by directly setting the
# correct function
(self.k_shape, self.w_shape) = self._get_kernel_and_weight_shape()
self.real_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
self.imag_weight = torch.nn.Parameter(torch.Tensor(*self.w_shape))
if self.bias:
self.b = torch.nn.Parameter(torch.Tensor(2 * self.out_channels))
self.b.data.fill_(0)
else:
self.b = None
self.winit = {"complex": complex_init, "unitary": unitary_init}[
self.weight_init
]
affect_conv_init(
self.real_weight,
self.imag_weight,
self.kernel_size,
self.winit,
self.init_criterion,
)
def forward(self, x, init_params=False):
"""Returns the output of the convolution.
Arguments
---------
x : torch.Tensor
(batch, time, feature, channels).
Input to convolve. 3d or 4d tensors are expected.
"""
if init_params:
self.init_params(x)
# (batch, channel, feature, time)
x = x.transpose(1, -1)
if self.padding == "same":
x = self._manage_padding(
x, self.kernel_size, self.dilation, self.stride
)
        elif self.padding == "causal":
            # kernel_size and dilation are lists here; apply causal padding
            # along the time (last) dimension only
            num_pad = (self.kernel_size[-1] - 1) * self.dilation[-1]
            x = F.pad(x, (num_pad, 0))
elif self.padding == "valid":
pass
else:
raise ValueError(
"Padding must be 'same', 'valid' or 'causal'. Got %s."
% (self.padding)
)
wx = complex_conv_op(
x,
self.real_weight,
self.imag_weight,
self.b,
stride=self.stride,
padding=0,
dilation=self.dilation,
conv1d=False,
)
wx = wx.transpose(1, -1)
return wx
def _get_kernel_and_weight_shape(self):
""" Returns the kernel size and weight shape for convolutional layers.
"""
ks = (self.kernel_size[0], self.kernel_size[1])
w_shape = (self.out_channels, self.in_channels) + (*ks,)
return ks, w_shape
def _manage_padding(self, x, kernel_size, dilation, stride):
"""This function performs zero-padding on the time and frequency axes
        such that their lengths are unchanged after the convolution.
Arguments
---------
x : torch.Tensor
Input tensor.
kernel_size : int
Kernel size.
dilation : int
Dilation.
stride: int
Stride.
"""
# Detecting input shape
L_in = x.shape[-1]
# Time padding
padding_time = get_padding_elem(
L_in, stride[-1], kernel_size[-1], dilation[-1]
)
padding_freq = get_padding_elem(
L_in, stride[-2], kernel_size[-2], dilation[-2]
)
padding = padding_time + padding_freq
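        # F.pad consumes the tuple from the last dimension backwards, so the
        # time padding (last dim) must come before the frequency padding.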
# Applying padding
x = nn.functional.pad(x, tuple(padding), mode=self.padding_mode)
return x
def _check_input(self, input_shape):
"""Checks the input and returns the number of input channels.
"""
if len(input_shape) == 3:
self.unsqueeze = True
in_channels = 1
elif len(input_shape) == 4:
in_channels = input_shape[3]
else:
            raise ValueError("Expected 3d or 4d inputs. Got " + str(input_shape))
# Kernel size must be odd
if self.kernel_size[0] % 2 == 0 or self.kernel_size[1] % 2 == 0:
raise ValueError(
"The field kernel size must be an odd number. Got %s."
% (self.kernel_size)
)
# Check complex format
if in_channels % 2 != 0:
            raise ValueError(
                "Complex Tensors must have dimensions divisible by 2."
                " Got in_channels = " + str(in_channels)
            )
return in_channels
| [
"torch.nn.functional.pad",
"torch.Tensor"
] | 1.8.0 | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 |
1.3 | import os
import sys
# path at level marl/
sys.path.insert(0, os.path.abspath("."))
import time
import argparse
import numpy as np
from functools import partial
from collections import OrderedDict, defaultdict
import torch
# local
from algorithms.masac.utils import get_sample_scheme, dispatch_samples
from algorithms.masac.utils import make_parallel_env, log_results
from algorithms.masac import MASAC
from runners.make_env import ENV_MAP
from runners.sample_batch import EpisodeBatch
from runners.ctde_runner import CTDEEpisodeRunner
from runners.replay_buffer import EpisodeReplayBuffer
from utils.exp_utils import setup_experiment, ExperimentLogger, ExperimentState
from utils.exp_utils import time_left, time_str, merge_dict
#####################################################################################
### arguments
#####################################################################################
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--exp", type=str, default="masac",
help="name of the experiment")
parser.add_argument("--save_dir", type=str, default="./exps",
help="top level path to save experiment/training results")
parser.add_argument("--sub_dir", type=str, nargs='+',
help="sub folders for experiment (hierarchical), e.g. sub=a b c --> local-dir/a/b/c")
parser.add_argument("--tag", type=str, nargs='+',
help="additional info for experiment, i.e. hyperparameters")
parser.add_argument("--seed", default=1, type=int,
help="Random seed, if 0, do not set seed")
parser.add_argument("--restore", type=str, default=None,
help="directory in which training state and model are loaded")
# if specified and not restore, will load model for experiment init
# if also restore, will overwrite default path in restore_experiment
parser.add_argument("--restore_model", type=str, default=None,
help="file in which model are loaded")
## NOTE: episode-wise or transition-wise (per transtion now, easier to log)
parser.add_argument("--log_interval", default=25000, type=int,
help="frequency to log exploration/runner stats")
parser.add_argument("--train_interval", default=0, type=int,
help="number of steps collected before each train")
# parser.add_argument("--steps_per_update", default=100, type=int,
# help="number of env steps collected before 1 training update")
parser.add_argument("--target_update_interval", default=0, type=int,
help="syncing parameters with target networks")
parser.add_argument("--train_log_interval", default=25000, type=int,
help="frequency to log training stats, e.g. losses")
parser.add_argument("--eval_interval", default=25000, type=int,
help="number of steps collected before each evaluation")
parser.add_argument("--save_interval", default=100000, type=int)
# misc
parser.add_argument("--cuda", default=False, action='store_true')
parser.add_argument("--cluster", default=False, action='store_true',
help='if running in cluster (allow more resources)')
parser.add_argument("--overwrite", type=str, nargs='+',
help="overwrite env config with format: nested_name nested_type value ...")
parser.add_argument("--use_tensorboard", default=False, action='store_true',
help="if to use tensorboard for logging")
parser.add_argument("--show_visual_range", default=False, action='store_true',
help='if to show agent visual range when rendering')
# Environment
parser.add_argument("--env", type=str, default="mpe_hier",
help="name of the environment", choices=["mpe", "mpe_hier"])
parser.add_argument("--scenario", type=str, default="simple_spread",
help="name of the scenario script")
parser.add_argument("--env_config", type=str, default="",
help="file to environment scenario config")
## max episode length for termination
parser.add_argument("--episode_length", default=25, type=int,
help="max episode length")
parser.add_argument("--agent_alg", default="MASAC", type=str,
help="agent model type", choices=['MASAC', 'SAC'])
parser.add_argument("--adversary_alg", default="MASAC", type=str,
help="adversary model type", choices=['MASAC', 'SAC'])
parser.add_argument("--discrete_action", action='store_true')
# training
parser.add_argument("--n_episodes", default=20000, type=int,
help="max number of episodes to sample")
## for non-early-terminated episodes, n_env_steps ~= n_episodes * episode_length
parser.add_argument("--n_env_steps", default=500000, type=int,
help="max number of env steps to sample")
## NOTE: episode-wise or step-wise (episode now)
parser.add_argument("--batch_size", default=32, type=int,
help="Batch size for model training per update")
## in case train batch size too large, could use smaller batch size
## but multiple rounds of updates
parser.add_argument("--n_updates_per_train", default=1, type=int,
help="number of updates per training round")
parser.add_argument("--lr", default=0.01, type=float)
parser.add_argument("--tau", default=0.01, type=float)
parser.add_argument("--gamma", type=float, default=0.95,
help="discount factor")
parser.add_argument("--sync_samples", default=False, action='store_true',
help="if to use synchronized samples for each agent training")
# sac parameters
parser.add_argument("--target_entropy", type=float, default=10.0,
help="constraint on SAC entropy target")
# exploration/sampling
## NOTE: episode-wise or transition-wise (per episodes now)
parser.add_argument("--sample_batch_size", default=8, type=int,
help="number of data points sampled () per run")
parser.add_argument("--max_buffer_size", default=40000, type=int,
help="maximum number of samples (episodes) to save in replay buffer")
# parser.add_argument("--max_buffer_size", default=int(1e6), type=int,
# help="maximum number of samples (transitions) to save in replay buffer")
parser.add_argument("--n_exploration_eps", default=25000, type=int,
help="what is this ???")
parser.add_argument("--init_noise_scale", default=0.3, type=float)
parser.add_argument("--final_noise_scale", default=0.0, type=float)
parser.add_argument("--n_step", type=int, default=1,
help="length of multistep value backup")
# model
parser.add_argument("--hidden_dim", default=64, type=int)
parser.add_argument("--critic", type=str, default="mlp",
help="type of critic network", choices=["mlp", "rnn", "gnn"])
parser.add_argument("--actor", type=str, default="mlp",
help="type of actor network", choices=["mlp", "rnn", "gnn"])
# evaluation
parser.add_argument("--no_eval", default=False, action='store_true',
help="do evaluation during training")
parser.add_argument("--no_render", default=False, action='store_true',
help='if to stop rendering in evaluation rollouts')
parser.add_argument("--eval_n_episodes", default=10, type=int)
parser.add_argument("--eval_batch_size", default=2, type=int,
help="number of data points evaluated () per run")
# loggings
parser.add_argument("--log_agent_returns", default=False, action='store_true',
help="if to log per agent returns on tensorboard")
# parallelism
parser.add_argument("--n_rollout_threads", default=4, type=int,
help="number of parallel sampling workers to use")
parser.add_argument("--n_training_threads", default=4, type=int)
args = parser.parse_args()
return args
#####################################################################################
### main
####################################################################################
def run(args):
""" main entry func """
# NOTE: experiment setup
config, is_restore = setup_experiment(args)
logger = ExperimentLogger(config.save_dir, log_std_out=True, use_tensorboard=config.use_tensorboard)
if not config.cuda:
torch.set_num_threads(config.n_training_threads)
# NOTE: init/load experiment state
estate = ExperimentState()
if is_restore:
estate.load_state(config.restore_exp_state)
# make counter copies to reduce writing ...
episode = estate.episode # total episodes so far
    t_env = estate.t_env # total env interaction steps so far
# t_max = config.n_env_steps # max number of steps to runs
t_max = config.n_episodes * config.episode_length
# NOTE: make vectorized env
env_func = ENV_MAP[config.env]
p_env_func = partial(env_func, config.scenario, benchmark=False,
show_visual_range=config.show_visual_range)
env = make_parallel_env(p_env_func, config.env_config, config.sample_batch_size,
config.n_rollout_threads, config.seed)
if not config.no_eval:
eval_env = make_parallel_env(p_env_func, config.env_config,
config.eval_batch_size, 1, config.seed)
# NOTE: make learner agent
if is_restore or config.restore_model is not None:
learner = MASAC.init_from_save(config.restore_model)
else:
learner = MASAC.init_from_env(
env,
agent_alg=config.agent_alg,
adversary_alg=config.adversary_alg,
tau=config.tau,
lr=config.lr,
hidden_dim=config.hidden_dim,
rnn_policy=(config.actor == "rnn"),
rnn_critic=(config.critic == "rnn"),
# sac stuff
target_entropy=config.target_entropy
)
# NOTE: make sampling runner (env wrapper)
scheme = get_sample_scheme(learner.nagents, env.observation_space, env.action_space)
runner = CTDEEpisodeRunner(scheme, env, learner, logger, config.sample_batch_size,
config.episode_length, device=config.device, t_env=t_env,
ma_step_keys=["log_probs"], is_training=True)
if not config.no_eval:
eval_runner = CTDEEpisodeRunner(scheme, eval_env, learner, logger,
config.eval_batch_size, config.episode_length,
device=config.device, t_env=t_env,
ma_step_keys=["log_probs"], is_training=False)
buffer = EpisodeReplayBuffer(scheme, config.max_buffer_size,
config.episode_length, device=config.device, prefill_num=2*config.batch_size)
# NOTE: start training
logger.info("Beginning training")
start_time = time.time()
last_time = start_time
############################################
# while t_env <= t_max:
while episode <= config.n_episodes:
# NOTE: Run for a whole episode at a time
learner.prep_rollouts(device=config.device)
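        # Linearly anneal exploration noise from init_noise_scale down to
        # final_noise_scale over the first n_exploration_eps episodes.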
explr_pct_remaining = max(0, config.n_exploration_eps - episode) / config.n_exploration_eps
learner.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
learner.reset_noise()
episode_batch, _ = runner.run()
buffer.insert_episode_batch(episode_batch)
# update counters
episode += config.sample_batch_size
t_env = runner.t_env
estate.episode = episode
estate.t_env = t_env
############################################
# NOTE: logging (exploration/sampling)
if (estate.last_log_t == 0) or (t_env - estate.last_log_t >= config.log_interval):
logger.info("\n")
logger.info("*** sampling log ***")
# timing
logger.info("t_env: {} / {}, eps: {} / {}".format(
t_env, t_max, episode, config.n_episodes))
logger.info("Estimated time left: {}. Time passed: {}".format(
time_left(last_time, estate.last_log_t, t_env, t_max),
time_str(time.time() - start_time)
))
last_time = time.time()
# log collected episode stats
results = runner.get_summaries()
runner.reset_summaries()
log_results(t_env, results, logger, mode="sample",
log_agent_returns=config.log_agent_returns)
estate.last_log_t = t_env
############################################
# NOTE: training updates
## change to batch_size * n_updates_per_train for n_updates > 1
if buffer.can_sample(config.batch_size) and (t_env - estate.last_train_t >= config.train_interval):
learner.prep_training(device=config.device)
for _ in range(config.n_updates_per_train):
episode_sample = None
for a_i in range(learner.nagents):
if config.sync_samples:
# if not None, reuse episode_sample
if episode_sample is None:
episode_sample = buffer.sample(config.batch_size)
else:
# each agent can have different collective experience samples
episode_sample = buffer.sample(config.batch_size)
# Truncate batch to only filled timesteps
max_ep_t = episode_sample.max_t_filled()
episode_sample = episode_sample[:, :max_ep_t]
if episode_sample.device != config.device:
episode_sample.to(config.device)
# dispatch sample to per agent [(B,T,D)]*N
sample = dispatch_samples(episode_sample, scheme, learner.nagents)
learner.update(sample, a_i) #, logger=logger)
# sync target networks
if t_env - estate.last_target_update_t >= config.target_update_interval:
learner.update_all_targets()
estate.last_target_update_t = t_env
learner.prep_rollouts(device=config.device)
estate.last_train_t = t_env
# collect & log trianing stats
if t_env - estate.last_train_log_t >= config.train_log_interval:
train_results = learner.get_summaries()
learner.reset_summaries()
logger.info("\n")
logger.info("*** training log ***")
log_results(t_env, train_results, logger, mode="train")
estate.last_train_log_t = t_env
############################################
# NOTE: Execute test runs once in a while
if not config.no_eval and ((estate.last_test_t == 0) or (t_env - estate.last_test_t >= config.eval_interval)):
n_test_runs = max(1, config.eval_n_episodes // eval_runner.batch_size)
eval_episodes = []
for _ in range(n_test_runs):
eval_bt, _ = eval_runner.run(render=(not config.no_render))
eval_episodes.append(eval_bt)
# collect evaluation stats
eval_results = eval_runner.get_summaries()
eval_runner.reset_summaries()
eval_episodes = eval_episodes[0].concat(eval_episodes[1:])
logger.info("\n")
logger.info("*** evaluation log ***")
log_results(t_env, eval_results, logger, mode="eval", episodes=eval_episodes,
log_agent_returns=config.log_agent_returns)
estate.last_test_t = t_env
############################################
# NOTE: checkpoint
if (estate.last_save_t == 0) or (t_env - estate.last_save_t >= config.save_interval):
os.makedirs(config.save_dir + "/checkpoints", exist_ok=True)
learner.save(config.save_dir + "/checkpoints" + "/model_{}.ckpt".format(t_env))
learner.save(config.save_dir + "/model.ckpt")
logger.info("\n")
logger.info("*** checkpoint log ***")
logger.info("Saving models to {}".format(
"/checkpoints" + "/model_{}.ckpt".format(t_env)
))
estate.last_save_t = t_env
estate.save_state(config.save_dir + "/exp_state.pkl")
############################################
# NOTE: clean up
learner.save(config.save_dir + "/model.ckpt") # final save
estate.last_save_t = t_env
estate.save_state(config.save_dir + "/exp_state.pkl")
env.close()
logger.export_scalars_to_json("summary.json")
logger.info("Finished Training")
logger.close()
if __name__ == '__main__':
args = parse_args()
run(args)
| [
"torch.set_num_threads"
] | 1.3.1 | Justin-Yuan/learn-to-interact | eb013bb3bab269bda8a8075e64fe3bcd2964d8ae |
1.3 | import os
import sys
# path at level marl/
sys.path.insert(0, os.path.abspath("."))
import time
import argparse
import numpy as np
from functools import partial
from collections import OrderedDict, defaultdict
import torch
# local
from algorithms.rmaddpg.utils import get_sample_scheme, dispatch_samples
from algorithms.rmaddpg.utils import make_parallel_env, log_results
from algorithms.rmaddpg.utils import log_weights
from algorithms.rmaddpg import RMADDPG
from runners.make_env import ENV_MAP
from runners.sample_batch import EpisodeBatch
from runners.episode_runner import EpisodeRunner
from runners.replay_buffer import EpisodeReplayBuffer
from utils.exp_utils import setup_experiment, ExperimentLogger, ExperimentState
from utils.exp_utils import time_left, time_str, merge_dict
#####################################################################################
### arguments
#####################################################################################
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--exp", type=str, default="maddpg",
help="name of the experiment")
parser.add_argument("--save_dir", type=str, default="./exps",
help="top level path to save experiment/training results")
parser.add_argument("--sub_dir", type=str, nargs='+',
help="sub folders for experiment (hierarchical), e.g. sub=a b c --> local-dir/a/b/c")
parser.add_argument("--tag", type=str, nargs='+',
help="additional info for experiment, i.e. hyperparameters")
parser.add_argument("--seed", default=1, type=int,
help="Random seed, if 0, do not set seed")
parser.add_argument("--restore", type=str, default=None,
help="directory in which training state and model are loaded")
# if specified and not restore, will load model for experiment init
# if also restore, will overwrite default path in restore_experiment
parser.add_argument("--restore_model", type=str, default=None,
help="file in which model are loaded")
## NOTE: episode-wise or transition-wise (per transtion now, easier to log)
parser.add_argument("--log_interval", default=25000, type=int,
help="frequency to log exploration/runner stats")
parser.add_argument("--train_interval", default=0, type=int,
help="number of steps collected before each train")
# parser.add_argument("--steps_per_update", default=100, type=int,
# help="number of env steps collected before 1 training update")
parser.add_argument("--target_update_interval", default=0, type=int,
help="syncing parameters with target networks")
parser.add_argument("--train_log_interval", default=25000, type=int,
help="frequency to log training stats, e.g. losses")
parser.add_argument("--eval_interval", default=25000, type=int,
help="number of steps collected before each evaluation")
parser.add_argument("--save_interval", default=500000, type=int)
# misc
parser.add_argument("--cuda", default=False, action='store_true')
parser.add_argument("--cluster", default=False, action='store_true',
help='if running in cluster (allow more resources)')
parser.add_argument("--overwrite", type=str, nargs='+',
help="overwrite env config with format: nested_name nested_type value ...")
parser.add_argument("--use_tensorboard", default=False, action='store_true',
help="if to use tensorboard for logging")
parser.add_argument("--show_visual_range", default=False, action='store_true',
help='if to show agent visual range when rendering')
# Environment
parser.add_argument("--env", type=str, default="mpe_hier",
help="name of the environment", choices=["mpe", "mpe_hier"])
parser.add_argument("--scenario", type=str, default="simple_spread",
help="name of the scenario script")
parser.add_argument("--env_config", type=str, default="",
help="file to environment scenario config")
## max episode length for termination
parser.add_argument("--episode_length", default=25, type=int,
help="max episode length")
parser.add_argument("--agent_alg", default="MADDPG", type=str,
help="agent model type", choices=['MADDPG', 'DDPG'])
parser.add_argument("--adversary_alg", default="MADDPG", type=str,
help="adversary model type", choices=['MADDPG', 'DDPG'])
parser.add_argument("--discrete_action", action='store_true')
# training
parser.add_argument("--n_episodes", default=20000, type=int,
help="max number of episodes to sample")
## for non-early-terminated episodes, n_env_steps ~= n_episodes * episode_length
parser.add_argument("--n_env_steps", default=500000, type=int,
help="max number of env steps to sample")
## NOTE: episode-wise or step-wise (episode now)
parser.add_argument("--batch_size", default=32, type=int,
help="Batch size for model training per update")
## in case train batch size too large, could use smaller batch size
## but multiple rounds of updates
parser.add_argument("--n_updates_per_train", default=1, type=int,
help="number of updates per training round")
parser.add_argument("--lr", default=0.01, type=float)
parser.add_argument("--tau", default=0.01, type=float)
parser.add_argument("--gamma", type=float, default=0.95,
help="discount factor")
parser.add_argument("--sync_samples", default=False, action='store_true',
help="if to use synchronized samples for each agent training")
# exploration/sampling
## NOTE: episode-wise or transition-wise (per episodes now)
parser.add_argument("--sample_batch_size", default=8, type=int,
help="number of data points sampled () per run")
parser.add_argument("--max_buffer_size", default=40000, type=int,
help="maximum number of samples (episodes) to save in replay buffer")
# parser.add_argument("--max_buffer_size", default=int(1e6), type=int,
# help="maximum number of samples (transitions) to save in replay buffer")
parser.add_argument("--n_exploration_eps", default=25000, type=int,
help="what is this ???")
parser.add_argument("--init_noise_scale", default=0.3, type=float)
parser.add_argument("--final_noise_scale", default=0.0, type=float)
parser.add_argument("--n_step", type=int, default=1,
help="length of multistep value backup")
# model
parser.add_argument("--hidden_dim", default=64, type=int)
parser.add_argument("--critic", type=str, default="mlp",
help="type of critic network", choices=["mlp", "rnn", "gnn"])
parser.add_argument("--actor", type=str, default="mlp",
help="type of actor network", choices=["mlp", "rnn", "gnn"])
parser.add_argument("--norm_in", default=False, action='store_true',
help="if to normalize inputs to agent networks")
parser.add_argument("--constrain_out", default=False, action='store_true',
help="if to use tanh for network final activation")
# evaluation
parser.add_argument("--no_eval", default=False, action='store_true',
help="do evaluation during training")
parser.add_argument("--no_render", default=False, action='store_true',
help='if to stop rendering in evaluation rollouts')
parser.add_argument("--eval_n_episodes", default=10, type=int)
parser.add_argument("--eval_batch_size", default=2, type=int,
help="number of data points evaluated () per run")
# loggings
parser.add_argument("--log_agent_returns", default=False, action='store_true',
help="if to log per agent returns on tensorboard")
# parallelism
parser.add_argument("--n_rollout_threads", default=4, type=int,
help="number of parallel sampling workers to use")
parser.add_argument("--n_training_threads", default=4, type=int)
args = parser.parse_args()
return args
#####################################################################################
### main
####################################################################################
def run(args):
""" main entry func """
# NOTE: experiment setup
config, is_restore = setup_experiment(args)
logger = ExperimentLogger(config.save_dir, log_std_out=True, use_tensorboard=config.use_tensorboard)
if not config.cuda:
torch.set_num_threads(config.n_training_threads)
# NOTE: init/load experiment state
estate = ExperimentState()
if is_restore:
estate.load_state(config.restore_exp_state)
# make counter copies to reduce writing ...
episode = estate.episode # total episodes so far
    t_env = estate.t_env # total env interaction steps so far
# t_max = config.n_env_steps # max number of steps to runs
t_max = config.n_episodes * config.episode_length
# NOTE: make vectorized env
env_func = ENV_MAP[config.env]
p_env_func = partial(env_func, config.scenario, benchmark=False,
show_visual_range=config.show_visual_range)
env = make_parallel_env(p_env_func, config.env_config, config.sample_batch_size,
config.n_rollout_threads, config.seed)
if not config.no_eval:
eval_env = make_parallel_env(p_env_func, config.env_config,
config.eval_batch_size, 1, config.seed)
# NOTE: make learner agent
if is_restore or config.restore_model is not None:
learner = RMADDPG.init_from_save(config.restore_model)
else:
learner = RMADDPG.init_from_env(
env,
agent_alg=config.agent_alg,
adversary_alg=config.adversary_alg,
tau=config.tau,
lr=config.lr,
hidden_dim=config.hidden_dim,
rnn_policy=(config.actor == "rnn"),
rnn_critic=(config.critic == "rnn"),
norm_in=config.norm_in,
constrain_out=config.constrain_out
)
# NOTE: make sampling runner (env wrapper)
scheme = get_sample_scheme(learner.nagents, env.observation_space, env.action_space)
runner = EpisodeRunner(scheme, env, learner, logger, config.sample_batch_size,
config.episode_length, device=config.device, t_env=t_env)
if not config.no_eval:
eval_runner = EpisodeRunner(scheme, eval_env, learner, logger,
config.eval_batch_size, config.episode_length,
device=config.device, t_env=t_env,
is_training=False)
buffer = EpisodeReplayBuffer(scheme, config.max_buffer_size,
config.episode_length, device=config.device, prefill_num=2*config.batch_size)
# NOTE: start training
logger.info("Beginning training")
start_time = time.time()
last_time = start_time
############################################
# while t_env <= t_max:
while episode <= config.n_episodes:
# NOTE: Run for a whole episode at a time
learner.prep_rollouts(device=config.device)
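        # Linearly anneal exploration noise from init_noise_scale down to
        # final_noise_scale over the first n_exploration_eps episodes.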
explr_pct_remaining = max(0, config.n_exploration_eps - episode) / config.n_exploration_eps
learner.scale_noise(config.final_noise_scale + (config.init_noise_scale - config.final_noise_scale) * explr_pct_remaining)
learner.reset_noise()
episode_batch, _ = runner.run()
buffer.insert_episode_batch(episode_batch)
# update counters
episode += config.sample_batch_size
t_env = runner.t_env
estate.episode = episode
estate.t_env = t_env
############################################
# NOTE: logging (exploration/sampling)
if (estate.last_log_t == 0) or (t_env - estate.last_log_t >= config.log_interval):
logger.info("\n")
logger.info("*** sampling log ***")
# timing
logger.info("t_env: {} / {}, eps: {} / {}".format(
t_env, t_max, episode, config.n_episodes))
logger.info("Estimated time left: {}. Time passed: {}".format(
time_left(last_time, estate.last_log_t, t_env, t_max),
time_str(time.time() - start_time)
))
last_time = time.time()
# log collected episode stats
results = runner.get_summaries()
runner.reset_summaries()
log_results(t_env, results, logger, mode="sample",
log_agent_returns=config.log_agent_returns)
estate.last_log_t = t_env
############################################
# NOTE: training updates
## change to batch_size * n_updates_per_train for n_updates > 1
if buffer.can_sample(config.batch_size) and (t_env - estate.last_train_t >= config.train_interval):
learner.prep_training(device=config.device)
for _ in range(config.n_updates_per_train):
episode_sample = None
for a_i in range(learner.nagents):
if config.sync_samples:
# if not None, reuse episode_sample
if episode_sample is None:
episode_sample = buffer.sample(config.batch_size)
else:
# each agent can have different collective experience samples
episode_sample = buffer.sample(config.batch_size)
# Truncate batch to only filled timesteps
max_ep_t = episode_sample.max_t_filled()
episode_sample = episode_sample[:, :max_ep_t]
if episode_sample.device != config.device:
episode_sample.to(config.device)
# dispatch sample to per agent [(B,T,D)]*N
sample = dispatch_samples(episode_sample, scheme, learner.nagents)
learner.update(sample, a_i) #, logger=logger)
# sync target networks
if t_env - estate.last_target_update_t >= config.target_update_interval:
learner.update_all_targets()
estate.last_target_update_t = t_env
learner.prep_rollouts(device=config.device)
estate.last_train_t = t_env
# collect & log trianing stats
if t_env - estate.last_train_log_t >= config.train_log_interval:
train_results = learner.get_summaries()
learner.reset_summaries()
logger.info("\n")
logger.info("*** training log ***")
log_results(t_env, train_results, logger, mode="train")
estate.last_train_log_t = t_env
############################################
# NOTE: Execute test runs once in a while
if not config.no_eval and ((estate.last_test_t == 0) or (t_env - estate.last_test_t >= config.eval_interval)):
n_test_runs = max(1, config.eval_n_episodes // eval_runner.batch_size)
eval_episodes = []
for _ in range(n_test_runs):
eval_bt, _ = eval_runner.run(render=(not config.no_render))
eval_episodes.append(eval_bt)
# collect evaluation stats
eval_results = eval_runner.get_summaries()
eval_runner.reset_summaries()
eval_episodes = eval_episodes[0].concat(eval_episodes[1:])
logger.info("\n")
logger.info("*** evaluation log ***")
log_results(t_env, eval_results, logger, mode="eval", episodes=eval_episodes,
log_agent_returns=config.log_agent_returns)
estate.last_test_t = t_env
# NOTE: debug noly, log network weights
log_weights(learner, logger, t_env)
############################################
# NOTE: checkpoint
if (estate.last_save_t == 0) or (t_env - estate.last_save_t >= config.save_interval):
os.makedirs(config.save_dir + "/checkpoints", exist_ok=True)
learner.save(config.save_dir + "/checkpoints" + "/model_{}.ckpt".format(t_env))
learner.save(config.save_dir + "/model.ckpt")
logger.info("\n")
logger.info("*** checkpoint log ***")
logger.info("Saving models to {}".format(
"/checkpoints" + "/model_{}.ckpt".format(t_env)
))
estate.last_save_t = t_env
estate.save_state(config.save_dir + "/exp_state.pkl")
############################################
# NOTE: clean up
learner.save(config.save_dir + "/model.ckpt") # final save
estate.last_save_t = t_env
estate.save_state(config.save_dir + "/exp_state.pkl")
env.close()
logger.export_scalars_to_json("summary.json")
logger.info("Finished Training")
logger.close()
if __name__ == '__main__':
args = parse_args()
run(args)
| [
"torch.set_num_threads"
] | 1.3.1 | Justin-Yuan/learn-to-interact | eb013bb3bab269bda8a8075e64fe3bcd2964d8ae |
1.4 | '''
super slomo
code referred from https://github.com/avinashpaliwal/Super-SloMo.git
'''
# pylint: disable=E1101
import logging
import torch
from slomo import UNet, backWarp
from imageProcess import initModel, getStateDict, getPadBy32, doCrop, identity, Option, extend
from config import config
log = logging.getLogger('Moe')
modelPath = './model/slomo/SuperSloMo.ckpt'
RefTime = 2
WindowSize = 2
ramCoef = [.95 / x for x in (8100., 2484., 8100., 2466., 4014., 1080.)]
getFlowComp = lambda *_: UNet(6, 4)
getFlowIntrp = lambda *_: UNet(20, 5)
getFlowBack = lambda opt: backWarp(opt.width, opt.height, config.device(), config.dtype())
getBatchSize = lambda load, ramCoef: max(1, int((config.calcFreeMem() / load) * ramCoef))
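# getBatchSize estimates how many samples fit in memory: the available free
# memory divided by the per-sample load, scaled by a tuned ramCoef coefficient.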
modules = dict(
flowComp={'weight': 'state_dictFC', 'f': getFlowComp, 'outShape': (1, 4, 1, 1)},
ArbTimeFlowIntrp={'weight': 'state_dictAT', 'f': getFlowIntrp, 'outShape': (1, 5, 1, 1)})
def newOpt(func, ramCoef, align=32, padding=45, scale=1, **_):
opt = Option()
opt.modelCached = func
opt.ramCoef = ramCoef
opt.align = align
opt.padding = padding
opt.scale = scale
opt.squeeze = identity
opt.unsqueeze = identity
return opt
def getOptS(modelPath, modules, ramCoef):
opt = Option(modelPath)
weights = getStateDict(modelPath)
opt.modules = modules
opt.ramOffset = config.getRunType() * len(modules)
for i, key in enumerate(modules):
m = modules[key]
wKey = m['weight']
constructor = m.get('f', 0)
rc = m['ramCoef'][config.getRunType()] if 'ramCoef' in m else ramCoef[opt.ramOffset + i]
o = dict((k, m[k]) for k in ('align', 'padding', 'scale') if k in m)
model = initModel(opt, weights[wKey], key, constructor)
if 'outShape' in m:
opt.__dict__[key] = newOpt(model, rc, **o)
else:
model.ramCoef = rc
opt.__dict__[key] = model
return opt
def setOutShape(opt, height, width):
load = width * height
od = opt.__dict__
for key, o in opt.modules.items():
batchSize = opt.bf(load, od[key].ramCoef)
if 'outShape' in o:
q = o['outShape']
od[key].outShape = [batchSize, *q[1:-2], int(height * q[-2]), int(width * q[-1])]
if 'staticDims' in o:
for i in o['staticDims']:
od[key].outShape[i] = q[i]
if 'streams' in o and (not 0 in o.get('staticDims', {})):
for name in o['streams']:
od[name].send((None, batchSize))
return opt
def getOptP(opt, bf=getBatchSize):
opt.startPadding = 0
opt.i = 0
opt.bf = bf
return opt
extendRes = lambda res, item: res.extend(item) if type(item) == list else (None if item is None else res.append(item))
def makeStreamFunc(func, node, opt, nodes, name, padStates, initFunc, pushFunc):
for n in nodes:
node.append(n)
def f(x):
node.reset()
node.trace(0, p='{} start'.format(name))
if not opt.i:
setOutShape(opt, *initFunc(opt, x))
if opt.end:
for s in padStates:
s.setPadding(opt.end)
opt.end = 0
if opt.start:
opt.startPadding = opt.start
for s in padStates:
s.setPadding(opt.start)
opt.start = 0
last = True if x is None else None
if not last:
pushFunc(opt.pad(x.unsqueeze(0)))
opt.i += 1
out = []
extend(out, opt.out.send(last))
node.trace()
while last:
try:
extend(out, opt.out.send(last))
except StopIteration: break
res = []
for item in out:
extendRes(res, func(opt.unpad(item)))
return res
return f
def getOpt(option):
opt = getOptS(modelPath, modules, ramCoef)
opt.flowBackWarp = None
opt.outStart = 0
opt.batchSize = 0
opt.sf = option['sf']
opt.bf = getBatchSize
if opt.sf < 2:
raise RuntimeError('Error: --sf/slomo factor has to be at least 2')
return opt
def doSlomo(func, node, opt):
# Temporary fix for issue #7 https://github.com/avinashpaliwal/Super-SloMo/issues/7 -
# - Removed per channel mean subtraction for CPU.
def f(data):
node.reset()
node.trace(0, p='slomo start')
batchSize = len(data)
if not batchSize or len(data[0]) < 2:
return
if opt.flowBackWarp is None:
width, height, opt.pad, opt.unpad = getPadBy32(data[0][0], opt)
opt.width = width
opt.height = height
opt.flowBackWarp = initModel(opt, None, None, getFlowBack)
setOutShape(opt, height, width)
opt.batchSize = opt.flowComp.outShape[0]
log.info('Slomo batch size={}'.format(opt.batchSize))
flowBackWarp = opt.flowBackWarp
opt.flowComp.outShape[0] = batchSize
opt.ArbTimeFlowIntrp.outShape[0] = batchSize
sf = opt.sf
tempOut = [0 for _ in range(batchSize * sf + 1)]
# Save reference frames
tempOut[0] = data[0][0]
for i, frames in enumerate(data):
tempOut[(i + 1) * sf] = frames[1]
# Load data
I0 = opt.pad(torch.stack([frames[0] for frames in data]))
I1 = opt.pad(torch.stack([frames[1] for frames in data]))
flowOut = doCrop(opt.flowComp, torch.cat((I0, I1), dim=1))
F_0_1 = flowOut[:,:2,:,:]
F_1_0 = flowOut[:,2:,:,:]
node.trace()
# Generate intermediate frames
for intermediateIndex in range(1, sf):
t = intermediateIndex / sf
temp = -t * (1 - t)
fCoeff = (temp, t * t, (1 - t) * (1 - t), temp)
wCoeff = (1 - t, t)
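      # Intermediate bidirectional flows, assumed to follow the Super SloMo
      # approximation (Jiang et al., 2018):
      #   F_t->0 = -t(1-t) * F_0->1 + t^2 * F_1->0
      #   F_t->1 = (1-t)^2 * F_0->1 - t(1-t) * F_1->0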
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
g_I0_F_t_0 = flowBackWarp(I0, F_t_0)
g_I1_F_t_1 = flowBackWarp(I1, F_t_1)
intrpOut = doCrop(opt.ArbTimeFlowIntrp, torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = torch.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
g_I0_F_t_0_f = flowBackWarp(I0, F_t_0_f)
g_I1_F_t_1_f = flowBackWarp(I1, F_t_1_f)
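      # Blend the two warped frames, weighted by the time step and the soft
      # visibility maps V_t_0 / V_t_1 (normalised so the weights sum to one).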
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
# Save intermediate frame
for i in range(batchSize):
tempOut[intermediateIndex + i * sf] = opt.unpad(Ft_p[i].detach())
node.trace()
if data is None and opt.outEnd:
tempOut = tempOut[:opt.outEnd]
opt.outEnd = 0
res = []
for item in tempOut[opt.outStart:]:
extendRes(res, func(item))
opt.outStart = max(0, opt.outStart - len(tempOut))
return res
return f | [
"torch.sigmoid",
"torch.cat",
"torch.stack"
] | 1.4 | lotress/MoePhoto | 6f47515d2cf236773a46413f57839565fa665796 |
1.1 | #!/usr/bin/env python3
"""
File: anilkfo_cifarfs.py
Author: Seb Arnold - seba1511.net
Email: [email protected]
Github: seba-1511
Description:
Demonstrates how to use the low-level differentiable optimization utilities
to implement ANIL+KFC on CIFAR-FS.
A demonstration of the high-level API is available in:
examples/vision/metacurvature_fc100.py
"""
import random
import numpy as np
import torch
import learn2learn as l2l
class CifarCNN(torch.nn.Module):
"""
Example of a 4-layer CNN network for FC100/CIFAR-FS.
"""
def __init__(self, output_size=5, hidden_size=32, layers=4):
super(CifarCNN, self).__init__()
self.hidden_size = hidden_size
features = l2l.vision.models.ConvBase(
output_size=hidden_size,
hidden=hidden_size,
channels=3,
max_pool=False,
layers=layers,
max_pool_factor=0.5,
)
self.features = torch.nn.Sequential(
features,
l2l.nn.Lambda(lambda x: x.mean(dim=[2, 3])),
l2l.nn.Flatten(),
)
self.linear = torch.nn.Linear(self.hidden_size, output_size, bias=True)
l2l.vision.models.maml_init_(self.linear)
def forward(self, x):
x = self.features(x)
x = self.linear(x)
return x
def accuracy(predictions, targets):
predictions = predictions.argmax(dim=1).view(targets.shape)
return (predictions == targets).sum().float() / targets.size(0)
def fast_adapt(
batch,
features,
classifier,
update,
diff_sgd,
loss,
adaptation_steps,
shots,
ways,
device):
data, labels = batch
data, labels = data.to(device), labels.to(device)
data = features(data)
    # Separate data into adaptation/evaluation sets
adaptation_indices = np.zeros(data.size(0), dtype=bool)
adaptation_indices[np.arange(shots*ways) * 2] = True
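    # Every other sample (even indices) forms the adaptation/support set;
    # the remaining samples form the evaluation/query set.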
evaluation_indices = torch.from_numpy(~adaptation_indices)
adaptation_indices = torch.from_numpy(adaptation_indices)
adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
# Adapt the model & learned update
for step in range(adaptation_steps):
adaptation_error = loss(classifier(adaptation_data), adaptation_labels)
if step > 0: # Update the learnable update function
update_grad = torch.autograd.grad(adaptation_error,
update.parameters(),
create_graph=True,
retain_graph=True)
diff_sgd(update, update_grad)
classifier_updates = update(adaptation_error,
classifier.parameters(),
create_graph=True,
retain_graph=True)
diff_sgd(classifier, classifier_updates)
# Evaluate the adapted model
predictions = classifier(evaluation_data)
eval_error = loss(predictions, evaluation_labels)
eval_accuracy = accuracy(predictions, evaluation_labels)
return eval_error, eval_accuracy
def main(
fast_lr=0.1,
meta_lr=0.003,
num_iterations=10000,
meta_batch_size=16,
adaptation_steps=5,
shots=5,
ways=5,
cuda=1,
seed=1234
):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
device = torch.device('cpu')
if cuda and torch.cuda.device_count():
torch.cuda.manual_seed(seed)
device = torch.device('cuda')
# Create Tasksets using the benchmark interface
tasksets = l2l.vision.benchmarks.get_tasksets(
name='cifarfs',
train_samples=2*shots,
train_ways=ways,
test_samples=2*shots,
test_ways=ways,
root='~/data',
)
# Create model and learnable update
model = CifarCNN(output_size=ways)
model.to(device)
features = model.features
classifier = model.linear
kfo_transform = l2l.optim.transforms.KroneckerTransform(l2l.nn.KroneckerLinear)
fast_update = l2l.optim.ParameterUpdate(
parameters=classifier.parameters(),
transform=kfo_transform,
)
fast_update.to(device)
diff_sgd = l2l.optim.DifferentiableSGD(lr=fast_lr)
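    # fast_update is assumed to pass the classifier's gradients through a
    # learnable Kronecker-factored transform (the KFC/KFO update); diff_sgd then
    # applies the transformed updates differentiably so the transform itself
    # can be meta-learned by the outer optimizer.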
all_parameters = list(model.parameters()) + list(fast_update.parameters())
opt = torch.optim.Adam(all_parameters, meta_lr)
loss = torch.nn.CrossEntropyLoss(reduction='mean')
for iteration in range(num_iterations):
opt.zero_grad()
meta_train_error = 0.0
meta_train_accuracy = 0.0
meta_valid_error = 0.0
meta_valid_accuracy = 0.0
for task in range(meta_batch_size):
# Compute meta-training loss
task_features = l2l.clone_module(features)
task_classifier = l2l.clone_module(classifier)
task_update = l2l.clone_module(fast_update)
batch = tasksets.train.sample()
evaluation_error, evaluation_accuracy = fast_adapt(batch,
task_features,
task_classifier,
task_update,
diff_sgd,
loss,
adaptation_steps,
shots,
ways,
device)
evaluation_error.backward()
meta_train_error += evaluation_error.item()
meta_train_accuracy += evaluation_accuracy.item()
# Compute meta-validation loss
task_features = l2l.clone_module(features)
task_classifier = l2l.clone_module(classifier)
task_update = l2l.clone_module(fast_update)
batch = tasksets.validation.sample()
evaluation_error, evaluation_accuracy = fast_adapt(batch,
task_features,
task_classifier,
task_update,
diff_sgd,
loss,
adaptation_steps,
shots,
ways,
device)
meta_valid_error += evaluation_error.item()
meta_valid_accuracy += evaluation_accuracy.item()
# Print some metrics
print('\n')
print('Iteration', iteration)
print('Meta Train Error', meta_train_error / meta_batch_size)
print('Meta Train Accuracy', meta_train_accuracy / meta_batch_size)
print('Meta Valid Error', meta_valid_error / meta_batch_size)
print('Meta Valid Accuracy', meta_valid_accuracy / meta_batch_size)
# Average the accumulated gradients and optimize
for p in model.parameters():
p.grad.data.mul_(1.0 / meta_batch_size)
for p in fast_update.parameters():
p.grad.data.mul_(1.0 / meta_batch_size)
opt.step()
meta_test_error = 0.0
meta_test_accuracy = 0.0
for task in range(meta_batch_size):
# Compute meta-testing loss
task_features = l2l.clone_module(features)
task_classifier = l2l.clone_module(classifier)
task_update = l2l.clone_module(fast_update)
batch = tasksets.test.sample()
evaluation_error, evaluation_accuracy = fast_adapt(batch,
task_features,
task_classifier,
task_update,
diff_sgd,
loss,
adaptation_steps,
shots,
ways,
device)
meta_test_error += evaluation_error.item()
meta_test_accuracy += evaluation_accuracy.item()
print('Meta Test Error', meta_test_error / meta_batch_size)
print('Meta Test Accuracy', meta_test_accuracy / meta_batch_size)
if __name__ == '__main__':
main()
| [
"torch.nn.Linear",
"torch.device",
"torch.cuda.manual_seed",
"torch.optim.Adam",
"torch.from_numpy",
"torch.manual_seed",
"torch.cuda.device_count",
"torch.nn.CrossEntropyLoss"
] | 1.1.0 | Brikwerk/learn2learn | c0b7c088f15986880b136ec27059644ac513db60 |
1.5 | #!/usr/bin/env python
# coding: utf-8
from tqdm import tqdm
import os
import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use("default")
import seaborn as sns
sns.set_style("ticks")
import sys
sys.path.append("../")
from src.models.CNN import AdaptiveConvNet
from src.utils import get_device, plot_network_mask
import argparse
def argument_parser():
parser = argparse.ArgumentParser(description="Run Nonparametric Bayesian Architecture Learning")
parser.add_argument('--use-cuda', action='store_false',
help="Use CPU or GPU")
parser.add_argument("--prior_temp", type=float, default=1.,
help="Temperature for Concrete Bernoulli from prior")
parser.add_argument("--temp", type=float, default=.5,
help="Temperature for Concrete Bernoulli from posterior")
parser.add_argument("--epsilon", type=float, default=0.01,
help="Epsilon to select the activated layers")
parser.add_argument("--truncation_level", type=int, default=10,
help="K+: Truncation for Z matrix")
parser.add_argument("--a_prior", type=float, default=1.1,
help="a parameter for Beta distribution")
parser.add_argument("--b_prior", type=float, default=10.,
help="b parameter for Beta distribution")
parser.add_argument("--kernel", type=int, default=5,
help="Kernel size. Default is 3.")
parser.add_argument("--num_samples", type=int, default=5,
help="Number of samples of Z matrix")
parser.add_argument("--epochs", type=int, default=50,
help="Number of training epochs.")
parser.add_argument("--lr", type=float, default=0.003,
help="Learning rate.")
parser.add_argument("--l2", type=float, default=1e-6,
help="Coefficient of weight decay.")
parser.add_argument("--batch_size", type=float, default=64,
help="Batch size.")
parser.add_argument("--max_width", type=int, default=64,
help="Dimension of hidden representation.")
return parser.parse_known_args()[0]
args = argument_parser()
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
# Normalize the test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transform_train, download=True)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=args.batch_size, num_workers=4, shuffle=False)
device = get_device(args)
model = AdaptiveConvNet(input_channels=1,
num_classes=10,
num_channels=args.max_width,
kernel_size=args.kernel,
args=args,
device=device).to(device)
model = model.to(device)
print(model)
loss_fn = nn.CrossEntropyLoss(reduction="none")
optimizer = torch.optim.AdamW(model.parameters(), args.lr, weight_decay=args.l2)
if not os.path.exists("results"):
os.mkdir("results")
def evaluate(test_loader):
loglike = 0
error_sum = 0
with torch.no_grad():
model.eval()
for i, (data, labels) in enumerate(test_loader):
data = data.float().to(device)
labels = labels.long().to(device)
output = model(data, args.num_samples)
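            # output is assumed to stack predictions from num_samples sampled
            # architectures along the leading dimension; averaging them gives a
            # Monte Carlo estimate of the predictive distribution.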
pred = output.mean(0)
logits = F.softmax(pred, dim=1)
ll = -F.nll_loss(logits, labels, reduction="sum").item()
loglike += ll
predicted = torch.argmax(logits, 1)
error = predicted.ne(labels).sum().item()
error_sum += error
test_loglikes = loglike / len(test_dataset)
test_err = error_sum / len(test_dataset)
test_metrics = {'test_err': round(test_err * 100, 3),
'test_like': round(test_loglikes, 3)}
return test_metrics
train_losses = []
with tqdm(range(args.epochs)) as tq:
for epoch in tq:
train_loss = 0.0
model.train()
for i, (data, labels) in enumerate(train_loader):
data = data.float().to(device)
labels = labels.long().to(device)
# making grad zero
optimizer.zero_grad()
# sample an architecture
act_vec = model(data, args.num_samples)
loss = model.estimate_ELBO(loss_fn, act_vec, labels, N_train=len(train_dataset), kl_weight=1)
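            # estimate_ELBO is assumed to combine the data-fit (negative
            # log-likelihood) term, averaged over the sampled architectures,
            # with the KL divergence of the structural posterior (hence the
            # N_train and kl_weight arguments).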
loss.backward()
optimizer.step()
# adding losses
train_loss += loss.item()
train_loss = train_loss / len(train_loader)
train_losses.append(train_loss)
test_results = evaluate(test_loader)
print("Test error: {} Test Log likelihood: {}".format(test_results['test_err'], test_results['test_like']))
kl_beta = model.structure_sampler.get_kl()
tq.set_postfix({'Tr. loss': '%.6f' % train_loss, 'KL Beta': '%.6f' % kl_beta})
torch.save(model, "results/model_MNIST.pt")
| [
"torch.argmax",
"torch.no_grad",
"torch.save",
"torch.utils.data.DataLoader",
"torch.nn.functional.softmax",
"torch.nn.functional.nll_loss",
"torch.nn.CrossEntropyLoss"
] | 1.5.0 | kckishan/Depth_and_Dropout | 64bbff9169d588486d92946485e108342daa29b0 |
0.4 | """CLI and utils for training a batch of models and analysing hyper parameter tuning results"""
import train
import models
import data_processor as dp
import commons
import argparse
import torch
import os
import collections
def train_models(training_configs, email=False):
"""Train a batch of models"""
for i, config in enumerate(training_configs):
print('\nTraining Model {} of {}: {}'.format(i + 1, len(training_configs), config.name))
train.train(config, plot_learning_curves=False, cuda=torch.cuda.is_available(), email=email)
print('All Models have been evaluated')
def print_evaluation_report(training_config):
"""Print the training and evaluation results for a model"""
# Training Config
print('Training Config')
for key, val in training_config.__dict__.items():
print('{}\t{}'.format(key, val))
print()
# Checkpoint
model = training_config.get_by_model_key(False)
checkpoint = models.ModelCheckpoint(model)
checkpoint.load(training_config.get_model_path('checkpoint'))
if not checkpoint.loaded:
print('Not evaluated')
return
print('Last checkpoint stats')
for key, val in checkpoint.__dict__.items():
print('{}\t{}'.format(key, val))
def _get_hps_for_autoencoder(training_config, checkpoint):
hps = collections.OrderedDict()
hps['name'] = training_config.name
hps['trainable_params'] = checkpoint.trainable_params
hps['epoch'] = checkpoint.epoch
hps['best_loss'] = checkpoint.best_loss
hps['batch_size'] = training_config.batch_size
hps['lr'] = training_config.model_params['lr']
hps['momentum'] = training_config.model_params['momentum']
hps['num_init_filters'] = training_config.model_params['num_init_filters']
hps['num_pools'] = training_config.model_params['num_pools']
hps['num_fc'] = training_config.model_params['num_fc']
hps['fc_scale_down'] = training_config.model_params['fc_scale_down']
hps['kernel_size'] = training_config.model_params['kernel_size']
hps['shared_weights'] = training_config.model_params['shared_weights']
hps['skip_connections'] = training_config.model_params['skip_connections']
return ["{} : {}".format(key, val) for key, val in hps.items()]
def _get_hps_for_classifier(training_config, checkpoint):
hps = collections.OrderedDict()
hps['name'] = training_config.name
hps['trainable_params'] = checkpoint.trainable_params
hps['epoch'] = checkpoint.epoch
hps['best_loss'] = checkpoint.best_loss
hps['batch_size'] = training_config.batch_size
hps['lr'] = training_config.model_params['lr']
hps['momentum'] = training_config.model_params['momentum']
hps['arch'] = training_config.model_params['arch']
hps['batchnorm'] = training_config.model_params['batchnorm']
return ["{} : {}".format(key, val) for key, val in hps.items()]
def save_evaluation_report(training_configs, config_path):
"""Compile and save hyper-tuning report for all models in the batch"""
hps = []
for i, training_config in enumerate(training_configs):
print('Saving report for Model {}: {}'.format(i + 1, training_config.name))
model = training_config.get_by_model_key(False)
checkpoint = models.ModelCheckpoint(model)
checkpoint.load(training_config.get_model_path('checkpoint'))
if not checkpoint.loaded:
print('Not evaluated')
continue
if training_config.model == 'conv_autoencoder':
hps.append(_get_hps_for_autoencoder(training_config, checkpoint))
elif training_config.model == 'cnn_classifier':
hps.append(_get_hps_for_classifier(training_config, checkpoint))
else:
raise Exception('Invalid model code: {}'.format(training_config.model))
with open(os.path.join(os.path.dirname(config_path), 'hps.txt'), 'w') as rep_file:
rep_file.write('\n'.join(['\t'.join(hp) for hp in hps]))
def save_evaluation_plots(training_configs):
"""Create and save learning curves for all models in the batch"""
for i, training_config in enumerate(training_configs):
print('Saving plot for Model {}: {}'.format(i + 1, training_config.name))
model = training_config.get_by_model_key(False)
checkpoint = models.ModelCheckpoint(model)
checkpoint.load(training_config.get_model_path('checkpoint'))
if not checkpoint.loaded:
print('Not evaluated')
continue
path = os.path.join(training_config.models_dir, "{}_lc.png".format(training_config.name))
commons.save_learning_curve(checkpoint.training_losses, checkpoint.cv_losses, path)
def cli():
"""Runs CLI"""
# Arguments Parser
parser = argparse.ArgumentParser(description='Hyper Parameter tuning related actions')
parser.add_argument('-c', '--config_files_path', help='Path to a file containing a list of training config files')
parser.add_argument('-m', '--mode', choices=['train', 'print-report', 'save-hps', 'save-plots'], default='train',
help='Action to perform')
parser.add_argument('-e', '--email', action='store_true', help='Send emails')
parser.add_argument('-d', '--dataset', action='store_true', help='Print Dataset Details')
# Parse arguments
args = parser.parse_args()
# Get model configs (read a single config file with newline separated paths to model configs)
if args.config_files_path is None:
raise Exception('Config file not specified')
else:
with open(args.config_files_path, 'r') as cfile:
config_files = [line for line in cfile.read().splitlines() if line.strip()]  # skip blank lines
train_configs = [train.TrainingConfig.load_from_file(fl) for fl in config_files]
# Actions
if args.mode == 'train':
# Train a batch of models
train_models(train_configs, email=args.email)
elif args.mode == 'print-report':
# Print report for all models
for i, train_config in enumerate(train_configs):
if args.dataset and i == 0:
dataset_config = dp.DataPrepConfig.load_from_dataset(train_config.dataset_path)
print('Dataset config for Model 1')
for key, val in dataset_config.__dict__.items():
print('{}\t{}'.format(key, val))
print()
print('*' * 10 + 'Model {}: {}'.format(i + 1, train_config.name))
print_evaluation_report(train_config)
print()
elif args.mode == 'save-hps':
# Save hyper parameters for all models
save_evaluation_report(train_configs, args.config_files_path)
elif args.mode == 'save-plots':
# Save learning curves for all models
save_evaluation_plots(train_configs)
else:
raise Exception('Invalid mode: ' + args.mode)
if __name__ == '__main__':
cli()
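# Example invocation (illustrative; the script and config file names are assumptions):
#   python hp_tune.py -c configs/batch_01.txt -m train --email
# where configs/batch_01.txt lists one training-config path per line, e.g.
#   configs/conv_autoencoder_lr01.json
#   configs/cnn_classifier_base.json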
| [
"torch.cuda.is_available"
] | 0.4.1 | rbiswas143/deep-audioviz-experiments-train | 294c648ca9115efce6127fb242ac3f6f51cdf532 |
1.2 | import numpy as np
import scipy.sparse as sp
import torch
from sklearn.model_selection import train_test_split
import torch.sparse as ts
import torch.nn.functional as F
import warnings
def encode_onehot(labels):
"""Convert label to onehot format.
Parameters
----------
labels : numpy.array
node labels
Returns
-------
numpy.array
onehot labels
"""
eye = np.eye(labels.max() + 1)
onehot_mx = eye[labels]
return onehot_mx
def tensor2onehot(labels):
"""Convert label tensor to label onehot tensor.
Parameters
----------
labels : torch.LongTensor
node labels
Returns
-------
torch.LongTensor
onehot labels tensor
"""
eye = torch.eye(labels.max() + 1)
onehot_mx = eye[labels]
return onehot_mx.to(labels.device)
def preprocess(adj, features, labels, preprocess_adj=False, preprocess_feature=False, sparse=False, device='cpu'):
"""Convert adj, features, labels from array or sparse matrix to
torch Tensor, and normalize the input data.
Parameters
----------
adj : scipy.sparse.csr_matrix
the adjacency matrix.
features : scipy.sparse.csr_matrix
node features
labels : numpy.array
node labels
preprocess_adj : bool
whether to normalize the adjacency matrix
preprocess_feature : bool
whether to normalize the feature matrix
sparse : bool
whether to return sparse tensor
device : str
'cpu' or 'cuda'
"""
if preprocess_adj:
adj = normalize_adj(adj)  # use the normalized adjacency (originally assigned to an unused adj_norm)
if preprocess_feature:
features = normalize_feature(features)
labels = torch.LongTensor(labels)
if sparse:
adj = sparse_mx_to_torch_sparse_tensor(adj)
features = sparse_mx_to_torch_sparse_tensor(features)
else:
features = torch.FloatTensor(np.array(features.todense()))
adj = torch.FloatTensor(adj.todense())
return adj.to(device), features.to(device), labels.to(device)
def to_tensor(adj, features, labels=None, device='cpu'):
"""Convert adj, features, labels from array or sparse matrix to
torch Tensor.
Parameters
----------
adj : scipy.sparse.csr_matrix
the adjacency matrix.
features : scipy.sparse.csr_matrix
node features
labels : numpy.array
node labels
device : str
'cpu' or 'cuda'
"""
if sp.issparse(adj):
adj = sparse_mx_to_torch_sparse_tensor(adj)
else:
adj = torch.FloatTensor(adj)
if sp.issparse(features):
features = sparse_mx_to_torch_sparse_tensor(features)
else:
features = torch.FloatTensor(np.array(features))
if labels is None:
return adj.to(device), features.to(device)
else:
labels = torch.LongTensor(labels)
return adj.to(device), features.to(device), labels.to(device)
def normalize_feature(mx):
"""Row-normalize sparse matrix
Parameters
----------
mx : scipy.sparse.csr_matrix
matrix to be normalized
Returns
-------
scipy.sparse.lil_matrix
normalized matrix
"""
if type(mx) is not sp.lil.lil_matrix:
mx = mx.tolil()
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
def normalize_adj(mx):
"""Normalize sparse adjacency matrix,
A' = (D + I)^-1/2 * ( A + I ) * (D + I)^-1/2
Row-normalize sparse matrix
Parameters
----------
mx : scipy.sparse.csr_matrix
matrix to be normalized
Returns
-------
scipy.sparse.lil_matrix
normalized matrix
"""
# TODO: maybe using coo format would be better?
if type(mx) is not sp.lil.lil_matrix:
mx = mx.tolil()
if mx[0, 0] == 0 :
mx = mx + sp.eye(mx.shape[0])
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1/2).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
mx = mx.dot(r_mat_inv)
return mx
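# Illustrative usage sketch (the toy path-graph matrix and helper name are assumptions):
# normalize a small adjacency matrix with the (D + I)^-1/2 (A + I) (D + I)^-1/2 rule above.
def _normalize_adj_example():
    adj = sp.csr_matrix(np.array([[0., 1., 0.],
                                  [1., 0., 1.],
                                  [0., 1., 0.]]))
    return normalize_adj(adj).todense()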
def normalize_sparse_tensor(adj, fill_value=1):
"""Normalize sparse tensor. Need to import torch_scatter
"""
edge_index = adj._indices()
edge_weight = adj._values()
num_nodes= adj.size(0)
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
from torch_scatter import scatter_add
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
values = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
shape = adj.shape
return torch.sparse.FloatTensor(edge_index, values, shape)
def add_self_loops(edge_index, edge_weight=None, fill_value=1, num_nodes=None):
# num_nodes = maybe_num_nodes(edge_index, num_nodes)
loop_index = torch.arange(0, num_nodes, dtype=torch.long,
device=edge_index.device)
loop_index = loop_index.unsqueeze(0).repeat(2, 1)
if edge_weight is not None:
assert edge_weight.numel() == edge_index.size(1)
loop_weight = edge_weight.new_full((num_nodes, ), fill_value)
edge_weight = torch.cat([edge_weight, loop_weight], dim=0)
edge_index = torch.cat([edge_index, loop_index], dim=1)
return edge_index, edge_weight
def normalize_adj_tensor(adj, sparse=False):
"""Normalize adjacency tensor matrix.
"""
device = torch.device("cuda" if adj.is_cuda else "cpu")
if sparse:
# warnings.warn('If you find the training process is too slow, you can uncomment line 207 in deeprobust/graph/utils.py. Note that you need to install torch_sparse')
# TODO if this is too slow, uncomment the following code,
# but you need to install torch_scatter
# return normalize_sparse_tensor(adj)
adj = to_scipy(adj)
mx = normalize_adj(adj)
return sparse_mx_to_torch_sparse_tensor(mx).to(device)
else:
mx = adj + torch.eye(adj.shape[0]).to(device)
rowsum = mx.sum(1)
r_inv = rowsum.pow(-1/2).flatten()
r_inv[torch.isinf(r_inv)] = 0.
r_mat_inv = torch.diag(r_inv)
mx = r_mat_inv @ mx
mx = mx @ r_mat_inv
return mx
def degree_normalize_adj(mx):
"""Row-normalize sparse matrix"""
mx = mx.tolil()
if mx[0, 0] == 0 :
mx = mx + sp.eye(mx.shape[0])
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
# mx = mx.dot(r_mat_inv)
mx = r_mat_inv.dot(mx)
return mx
def degree_normalize_sparse_tensor(adj, fill_value=1):
"""degree_normalize_sparse_tensor.
"""
edge_index = adj._indices()
edge_weight = adj._values()
num_nodes= adj.size(0)
edge_index, edge_weight = add_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
from torch_scatter import scatter_add
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-1)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
values = deg_inv_sqrt[row] * edge_weight
shape = adj.shape
return torch.sparse.FloatTensor(edge_index, values, shape)
def degree_normalize_adj_tensor(adj, sparse=True):
"""degree_normalize_adj_tensor.
"""
device = torch.device("cuda" if adj.is_cuda else "cpu")
if sparse:
# return degree_normalize_sparse_tensor(adj)
adj = to_scipy(adj)
mx = degree_normalize_adj(adj)
return sparse_mx_to_torch_sparse_tensor(mx).to(device)
else:
mx = adj + torch.eye(adj.shape[0]).to(device)
rowsum = mx.sum(1)
r_inv = rowsum.pow(-1).flatten()
r_inv[torch.isinf(r_inv)] = 0.
r_mat_inv = torch.diag(r_inv)
mx = r_mat_inv @ mx
return mx
def accuracy(output, labels):
"""Return accuracy of output compared to labels.
Parameters
----------
output : torch.Tensor
output from model
labels : torch.Tensor or numpy.array
node labels
Returns
-------
float
accuracy
"""
if not hasattr(labels, '__len__'):
labels = [labels]
if type(labels) is not torch.Tensor:
labels = torch.LongTensor(labels)
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
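# Illustrative sketch (toy tensors and helper name are assumptions): two of the three
# argmax predictions below match the labels, so accuracy() returns 2/3.
def _accuracy_example():
    output = torch.tensor([[2.0, 0.1], [0.2, 1.5], [0.9, 0.3]])
    labels = torch.LongTensor([0, 1, 1])
    return accuracy(output, labels)  # tensor(0.6667, dtype=torch.float64)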
def loss_acc(output, labels, targets, avg_loss=True):
if type(labels) is not torch.Tensor:
labels = torch.LongTensor(labels)
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()[targets]
loss = F.nll_loss(output[targets], labels[targets], reduction='mean' if avg_loss else 'none')
if avg_loss:
return loss, correct.sum() / len(targets)
return loss, correct
# correct = correct.sum()
# return loss, correct / len(labels)
def classification_margin(output, true_label):
"""Calculate classification margin for outputs.
`probs_true_label - probs_best_second_class`
Parameters
----------
output: torch.Tensor
output vector (1 dimension)
true_label: int
true label for this node
Returns
-------
list
classification margin for this node
"""
probs = torch.exp(output)
probs_true_label = probs[true_label].clone()
probs[true_label] = 0
probs_best_second_class = probs[probs.argmax()]
return (probs_true_label - probs_best_second_class).item()
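# Illustrative sketch (toy log-probabilities and helper name are assumptions):
# classification_margin expects log-probabilities (it applies torch.exp) and here
# returns P(true class) - P(best other class) = 0.7 - 0.2.
def _classification_margin_example():
    log_probs = torch.log(torch.tensor([0.7, 0.2, 0.1]))
    return classification_margin(log_probs, true_label=0)  # ~0.5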
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
sparserow=torch.LongTensor(sparse_mx.row).unsqueeze(1)
sparsecol=torch.LongTensor(sparse_mx.col).unsqueeze(1)
sparseconcat=torch.cat((sparserow, sparsecol),1)
sparsedata=torch.FloatTensor(sparse_mx.data)
return torch.sparse.FloatTensor(sparseconcat.t(),sparsedata,torch.Size(sparse_mx.shape))
# slower version....
# sparse_mx = sparse_mx.tocoo().astype(np.float32)
# indices = torch.from_numpy(
# np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
# values = torch.from_numpy(sparse_mx.data)
# shape = torch.Size(sparse_mx.shape)
# return torch.sparse.FloatTensor(indices, values, shape)
def to_scipy(tensor):
"""Convert a dense/sparse tensor to scipy matrix"""
if is_sparse_tensor(tensor):
values = tensor._values()
indices = tensor._indices()
return sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=tensor.shape)
else:
indices = tensor.nonzero().t()
values = tensor[indices[0], indices[1]]
return sp.csr_matrix((values.cpu().numpy(), indices.cpu().numpy()), shape=tensor.shape)
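# Illustrative round-trip sketch (helper name is an assumption): a scipy sparse matrix
# converted to a torch sparse tensor and back via to_scipy keeps the same dense values.
def _sparse_roundtrip_example():
    mx = sp.random(4, 4, density=0.5, format='csr')
    t = sparse_mx_to_torch_sparse_tensor(mx)
    return np.allclose(mx.todense(), to_scipy(t).todense())  # True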
def is_sparse_tensor(tensor):
"""Check if a tensor is sparse tensor.
Parameters
----------
tensor : torch.Tensor
given tensor
Returns
-------
bool
whether a tensor is sparse tensor
"""
# if hasattr(tensor, 'nnz'):
if tensor.layout == torch.sparse_coo:
return True
else:
return False
def get_train_val_test(nnodes, val_size=0.1, test_size=0.8, stratify=None, seed=None):
"""This setting follows nettack/mettack, where we split the nodes
into 10% training, 10% validation and 80% testing data
Parameters
----------
nnodes : int
number of nodes in total
val_size : float
size of validation set
test_size : float
size of test set
stratify :
data is expected to split in a stratified fashion. So stratify should be labels.
seed : int or None
random seed
Returns
-------
idx_train :
node training indices
idx_val :
node validation indices
idx_test :
node test indices
"""
assert stratify is not None, 'stratify cannot be None!'
if seed is not None:
np.random.seed(seed)
idx = np.arange(nnodes)
train_size = 1 - val_size - test_size
idx_train_and_val, idx_test = train_test_split(idx,
random_state=None,
train_size=train_size + val_size,
test_size=test_size,
stratify=stratify)
if stratify is not None:
stratify = stratify[idx_train_and_val]
idx_train, idx_val = train_test_split(idx_train_and_val,
random_state=None,
train_size=(train_size / (train_size + val_size)),
test_size=(val_size / (train_size + val_size)),
stratify=stratify)
return idx_train, idx_val, idx_test
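# Illustrative sketch (toy labels and helper name are assumptions): a stratified
# 10%/10%/80% node split over 100 nodes with two balanced classes.
def _train_val_test_example():
    labels = np.array([0] * 50 + [1] * 50)
    idx_train, idx_val, idx_test = get_train_val_test(
        nnodes=100, val_size=0.1, test_size=0.8, stratify=labels, seed=15)
    return len(idx_train), len(idx_val), len(idx_test)  # (10, 10, 80)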
def get_train_test(nnodes, test_size=0.8, stratify=None, seed=None):
"""This function returns training and test set without validation.
It can be used for settings of different label rates.
Parameters
----------
nnodes : int
number of nodes in total
test_size : float
size of test set
stratify :
data is expected to split in a stratified fashion. So stratify should be labels.
seed : int or None
random seed
Returns
-------
idx_train :
node training indices
idx_test :
node test indices
"""
assert stratify is not None, 'stratify cannot be None!'
if seed is not None:
np.random.seed(seed)
idx = np.arange(nnodes)
train_size = 1 - test_size
idx_train, idx_test = train_test_split(idx, random_state=None,
train_size=train_size,
test_size=test_size,
stratify=stratify)
return idx_train, idx_test
def get_train_val_test_gcn(labels, seed=None):
"""This setting follows gcn, where we randomly sample 20 instances for each class
as training data, 500 instances as validation data, 1000 instances as test data.
Note here we are not using fixed splits. When random seed changes, the splits
will also change.
Parameters
----------
labels : numpy.array
node labels
seed : int or None
random seed
Returns
-------
idx_train :
node training indices
idx_val :
node validation indices
idx_test :
node test indices
"""
if seed is not None:
np.random.seed(seed)
idx = np.arange(len(labels))
nclass = labels.max() + 1
idx_train = []
idx_unlabeled = []
for i in range(nclass):
labels_i = idx[labels==i]
labels_i = np.random.permutation(labels_i)
idx_train = np.hstack((idx_train, labels_i[: 20])).astype(np.int)
idx_unlabeled = np.hstack((idx_unlabeled, labels_i[20: ])).astype(np.int)
idx_unlabeled = np.random.permutation(idx_unlabeled)
idx_val = idx_unlabeled[: 500]
idx_test = idx_unlabeled[500: 1500]
return idx_train, idx_val, idx_test
def get_train_test_labelrate(labels, label_rate):
"""Get train test according to given label rate.
"""
nclass = labels.max() + 1
train_size = int(round(len(labels) * label_rate / nclass))
print("=== train_size = %s ===" % train_size)
idx_train, idx_val, idx_test = get_splits_each_class(labels, train_size=train_size)
return idx_train, idx_test
def get_splits_each_class(labels, train_size):
"""We randomly sample n instances for class, where n = train_size.
"""
idx = np.arange(len(labels))
nclass = labels.max() + 1
idx_train = []
idx_val = []
idx_test = []
for i in range(nclass):
labels_i = idx[labels==i]
labels_i = np.random.permutation(labels_i)
idx_train = np.hstack((idx_train, labels_i[: train_size])).astype(np.int)
idx_val = np.hstack((idx_val, labels_i[train_size: 2*train_size])).astype(np.int)
idx_test = np.hstack((idx_test, labels_i[2*train_size: ])).astype(np.int)
return np.random.permutation(idx_train), np.random.permutation(idx_val), \
np.random.permutation(idx_test)
def unravel_index(index, array_shape):
rows = index // array_shape[1]
cols = index % array_shape[1]
return rows, cols
def get_degree_squence(adj):
try:
return adj.sum(0)
except:
return ts.sum(adj, dim=1).to_dense()
def likelihood_ratio_filter(node_pairs, modified_adjacency, original_adjacency, d_min, threshold=0.004):
"""
Filter the input node pairs based on the likelihood ratio test proposed by Zügner et al. 2018, see
https://dl.acm.org/citation.cfm?id=3220078. In essence, for each node pair return 1 if adding/removing the edge
between the two nodes does not violate the unnoticeability constraint, and return 0 otherwise. Assumes unweighted
and undirected graphs.
"""
N = int(modified_adjacency.shape[0])
# original_degree_sequence = get_degree_squence(original_adjacency)
# current_degree_sequence = get_degree_squence(modified_adjacency)
original_degree_sequence = original_adjacency.sum(0)
current_degree_sequence = modified_adjacency.sum(0)
concat_degree_sequence = torch.cat((current_degree_sequence, original_degree_sequence))
# Compute the log likelihood values of the original, modified, and combined degree sequences.
ll_orig, alpha_orig, n_orig, sum_log_degrees_original = degree_sequence_log_likelihood(original_degree_sequence, d_min)
ll_current, alpha_current, n_current, sum_log_degrees_current = degree_sequence_log_likelihood(current_degree_sequence, d_min)
ll_comb, alpha_comb, n_comb, sum_log_degrees_combined = degree_sequence_log_likelihood(concat_degree_sequence, d_min)
# Compute the log likelihood ratio
current_ratio = -2 * ll_comb + 2 * (ll_orig + ll_current)
# Compute new log likelihood values that would arise if we add/remove the edges corresponding to each node pair.
new_lls, new_alphas, new_ns, new_sum_log_degrees = updated_log_likelihood_for_edge_changes(node_pairs,
modified_adjacency, d_min)
# Combination of the original degree distribution with the distributions corresponding to each node pair.
n_combined = n_orig + new_ns
new_sum_log_degrees_combined = sum_log_degrees_original + new_sum_log_degrees
alpha_combined = compute_alpha(n_combined, new_sum_log_degrees_combined, d_min)
new_ll_combined = compute_log_likelihood(n_combined, alpha_combined, new_sum_log_degrees_combined, d_min)
new_ratios = -2 * new_ll_combined + 2 * (new_lls + ll_orig)
# Allowed edges are only those for which the resulting likelihood ratio measure is < than the threshold
allowed_edges = new_ratios < threshold
if allowed_edges.is_cuda:
filtered_edges = node_pairs[allowed_edges.cpu().numpy().astype(np.bool)]
else:
filtered_edges = node_pairs[allowed_edges.numpy().astype(np.bool)]
allowed_mask = torch.zeros(modified_adjacency.shape)
allowed_mask[filtered_edges.T] = 1
allowed_mask += allowed_mask.t()
return allowed_mask, current_ratio
def degree_sequence_log_likelihood(degree_sequence, d_min):
"""
Compute the (maximum) log likelihood of the Powerlaw distribution fit on a degree distribution.
"""
# Determine which degrees are to be considered, i.e. >= d_min.
D_G = degree_sequence[(degree_sequence >= d_min.item())]
try:
sum_log_degrees = torch.log(D_G).sum()
except:
sum_log_degrees = np.log(D_G).sum()
n = len(D_G)
alpha = compute_alpha(n, sum_log_degrees, d_min)
ll = compute_log_likelihood(n, alpha, sum_log_degrees, d_min)
return ll, alpha, n, sum_log_degrees
def updated_log_likelihood_for_edge_changes(node_pairs, adjacency_matrix, d_min):
""" Adopted from https://github.com/danielzuegner/nettack
"""
# For each node pair find out whether there is an edge or not in the input adjacency matrix.
edge_entries_before = adjacency_matrix[node_pairs.T]
degree_sequence = adjacency_matrix.sum(1)
D_G = degree_sequence[degree_sequence >= d_min.item()]
sum_log_degrees = torch.log(D_G).sum()
n = len(D_G)
deltas = -2 * edge_entries_before + 1
d_edges_before = degree_sequence[node_pairs]
d_edges_after = degree_sequence[node_pairs] + deltas[:, None]
# Sum the log of the degrees after the potential changes which are >= d_min
sum_log_degrees_after, new_n = update_sum_log_degrees(sum_log_degrees, n, d_edges_before, d_edges_after, d_min)
# Updated estimates of the Powerlaw exponents
new_alpha = compute_alpha(new_n, sum_log_degrees_after, d_min)
# Updated log likelihood values for the Powerlaw distributions
new_ll = compute_log_likelihood(new_n, new_alpha, sum_log_degrees_after, d_min)
return new_ll, new_alpha, new_n, sum_log_degrees_after
def update_sum_log_degrees(sum_log_degrees_before, n_old, d_old, d_new, d_min):
# Find out whether the degrees before and after the change are above the threshold d_min.
old_in_range = d_old >= d_min
new_in_range = d_new >= d_min
d_old_in_range = d_old * old_in_range.float()
d_new_in_range = d_new * new_in_range.float()
# Update the sum by subtracting the old values and then adding the updated logs of the degrees.
sum_log_degrees_after = sum_log_degrees_before - (torch.log(torch.clamp(d_old_in_range, min=1))).sum(1) \
+ (torch.log(torch.clamp(d_new_in_range, min=1))).sum(1)
# Update the number of degrees >= d_min
new_n = n_old - (old_in_range!=0).sum(1) + (new_in_range!=0).sum(1)
new_n = new_n.float()
return sum_log_degrees_after, new_n
def compute_alpha(n, sum_log_degrees, d_min):
try:
alpha = 1 + n / (sum_log_degrees - n * torch.log(d_min - 0.5))
except:
alpha = 1 + n / (sum_log_degrees - n * np.log(d_min - 0.5))
return alpha
def compute_log_likelihood(n, alpha, sum_log_degrees, d_min):
# Log likelihood under alpha
try:
ll = n * torch.log(alpha) + n * alpha * torch.log(d_min) + (alpha + 1) * sum_log_degrees
except:
ll = n * np.log(alpha) + n * alpha * np.log(d_min) + (alpha + 1) * sum_log_degrees
return ll
def ravel_multiple_indices(ixs, shape, reverse=False):
"""
"Flattens" multiple 2D input indices into indices on the flattened matrix, similar to np.ravel_multi_index.
Does the same as ravel_index but for multiple indices at once.
Parameters
----------
ixs: array of ints shape (n, 2)
The array of n indices that will be flattened.
shape: list or tuple of ints of length 2
The shape of the corresponding matrix.
Returns
-------
array of n ints between 0 and shape[0]*shape[1]-1
The indices on the flattened matrix corresponding to the 2D input indices.
"""
if reverse:
return ixs[:, 1] * shape[1] + ixs[:, 0]
return ixs[:, 0] * shape[1] + ixs[:, 1]
def visualize(your_var):
"""visualize computation graph"""
from graphviz import Digraph
import torch
from torch.autograd import Variable
from torchviz import make_dot
make_dot(your_var).view()
def reshape_mx(mx, shape):
indices = mx.nonzero()
return sp.csr_matrix((mx.data, (indices[0], indices[1])), shape=shape)
# def check_path(file_path):
# if not osp.exists(file_path):
# os.system(f'mkdir -p {file_path}')
| [
"torch.cat",
"torch.LongTensor",
"torch.eye",
"torch.nn.functional.nll_loss",
"torch.exp",
"torch.Size",
"torch.FloatTensor",
"torch.zeros",
"torch.device",
"torch.clamp",
"torch.isinf",
"torch.log",
"torch.arange",
"torch.sparse.FloatTensor",
"torch.diag",
"torch.sparse.sum"
] | 1.2.0 | CrownX/DeepRobust | 276a7048aded2cf3a190d3851ffd4587b7d1dd49 |
1.10 | import logging
import os
from abc import ABC
from typing import Tuple, Any
import numpy as np
import torch
import torchvision
from pandas import read_csv
from torch.utils.data import Dataset, DataLoader
from torchvision.datasets import CIFAR10, CIFAR100
from torchvision.datasets.folder import pil_loader, accimage_loader
from torchvision.transforms import transforms
from tqdm import tqdm
import configs
from functions.evaluate_roxf import configdataset, DATASETS
from functions.mining import SimpleMemoryBank
from utils.augmentations import GaussianBlurOpenCV
class BaseDataset(Dataset, ABC):
def get_img_paths(self):
raise NotImplementedError
class HashingDataset(BaseDataset):
def __init__(self, root,
transform=None,
target_transform=None,
filename='train',
separate_multiclass=False,
ratio=1):
if torchvision.get_image_backend() == 'PIL':
self.loader = pil_loader
else:
self.loader = accimage_loader
self.separate_multiclass = separate_multiclass
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.filename = filename
self.train_data = []
self.train_labels = []
self.ratio = ratio
filename = os.path.join(self.root, self.filename)
is_pkl = False
with open(filename, 'r') as f:
while True:
lines = f.readline()
if not lines:
break
path_tmp = lines.split()[0]
label_tmp = lines.split()[1:]
self.is_onehot = len(label_tmp) != 1
if not self.is_onehot:
label_tmp = lines.split()[1]
if self.separate_multiclass:
assert self.is_onehot, 'if multiclass, please use onehot'
nonzero_index = np.nonzero(np.array(label_tmp, dtype=np.int))[0]
for c in nonzero_index:
self.train_data.append(path_tmp)
label_tmp = ['1' if i == c else '0' for i in range(len(label_tmp))]
self.train_labels.append(label_tmp)
else:
self.train_data.append(path_tmp)
self.train_labels.append(label_tmp)
is_pkl = path_tmp.endswith('.pkl')  # if data is saved as .pkl, make sure a consistent loading style is used
if is_pkl:
self.loader = torch.load
self.train_data = np.array(self.train_data)
self.train_labels = np.array(self.train_labels, dtype=float)
if ratio != 1:
assert 0 < ratio < 1, 'data ratio is in between 0 and 1 exclusively'
N = len(self.train_data)
randidx = np.arange(N)
np.random.shuffle(randidx)
randidx = randidx[:int(ratio * N)]
self.train_data = self.train_data[randidx]
self.train_labels = self.train_labels[randidx]
logging.info(f'Number of data: {self.train_data.shape[0]}')
def filter_classes(self, classes): # only work for single class dataset
new_data = []
new_labels = []
for idx, c in enumerate(classes):
new_onehot = np.zeros(len(classes))
new_onehot[idx] = 1
cmask = self.train_labels.argmax(axis=1) == c
new_data.append(self.train_data[cmask])
new_labels.append(np.repeat([new_onehot], int(np.sum(cmask)), axis=0))
# new_labels.append(self.train_labels[cmask])
self.train_data = np.concatenate(new_data)
self.train_labels = np.concatenate(new_labels)
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.train_data[index], self.train_labels[index]
target = torch.tensor(target)
img = self.loader(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, index
def __len__(self):
return len(self.train_data)
def get_img_paths(self):
return self.train_data
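# Annotation-file format read by HashingDataset above (inferred from the parser; the
# concrete paths are only illustrative): one sample per line, an image path followed by
# either a single class id or a multi-hot label vector, e.g.
#   images/0001.jpg 3
#   images/0002.jpg 0 0 1 0 1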
class IndexDatasetWrapper(BaseDataset):
def __init__(self, ds) -> None:
super(Dataset, self).__init__()
self.__dict__['ds'] = ds
def __setattr__(self, name, value):
setattr(self.ds, name, value)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.ds, attr)
def __getitem__(self, index: int) -> Tuple:
"""
Args:
index (int): Index
Returns:
tuple: (image, target, index) where target is index of the target class.
"""
outs = self.ds.__getitem__(index)
return tuple(list(outs) + [index])
def __len__(self):
return len(self.ds)
def get_img_paths(self):
return self.ds.get_img_paths()
class Denormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
return tensor
class InstanceDiscriminationDataset(BaseDataset):
def augment_image(self, img):
# if use this, please run script with --no-aug and --gpu-mean-transform
return self.transform(self.to_pil(img))
def weak_augment_image(self, img):
# if use this, please run script with --no-aug and --gpu-mean-transform
return self.weak_transform(self.to_pil(img))
def __init__(self, ds, tmode='simclr', imgsize=224, weak_mode=0) -> None:
super(Dataset, self).__init__()
self.__dict__['ds'] = ds
if 'simclr' in tmode:
s = 0.5
size = imgsize
color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=size, scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([color_jitter], p=0.7),
transforms.RandomGrayscale(p=0.2),
GaussianBlurOpenCV(kernel_size=3),
# GaussianBlur(kernel_size=int(0.1 * size)),
transforms.ToTensor(),
# 0.2 * 224 = 44 pixels
transforms.RandomErasing(p=0.2, scale=(0.02, 0.2))])
self.transform = data_transforms
# lazy fix, can be more pretty and general, cibhash part 1/2
elif tmode == 'cibhash':
logging.info('CIBHash Augmentations')
s = 0.5
size = imgsize
color_jitter = transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s)
data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=size, scale=(0.5, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.RandomApply([color_jitter], p=0.7),
transforms.RandomGrayscale(p=0.2),
GaussianBlurOpenCV(kernel_size=3),
# GaussianBlur(kernel_size=3),
transforms.ToTensor()])
self.transform = data_transforms
else:
raise ValueError(f'unknown mode {tmode}')
if weak_mode == 1:
logging.info(f'Weak mode {weak_mode} activated.')
self.weak_transform = transforms.Compose([
transforms.Resize(256), # temp lazy hard code
transforms.CenterCrop(imgsize),
transforms.ToTensor()
])
elif weak_mode == 2:
logging.info(f'Weak mode {weak_mode} activated.')
self.weak_transform = transforms.Compose([
transforms.Resize(256), # temp lazy hard code
transforms.RandomCrop(imgsize),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
self.weak_mode = weak_mode
self.tmode = tmode
self.imgsize = imgsize
self.to_pil = transforms.ToPILImage()
def __setattr__(self, name, value):
setattr(self.ds, name, value)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.ds, attr)
def __getitem__(self, index: int) -> Tuple[Any, Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target, index) where target is index of the target class.
"""
out = self.ds.__getitem__(index)
img, target = out[:2] # exclude index
# if self.tmode == 'simclr':
# aug_imgs = [img, self.augment_image(img)]
# else:
if self.weak_mode != 0:
aug_imgs = [self.weak_augment_image(img), self.augment_image(img)]
else:
aug_imgs = [self.augment_image(img), self.augment_image(img)]
return torch.stack(aug_imgs, dim=0), target, index
def __len__(self):
return len(self.ds)
def get_img_paths(self):
return self.ds.get_img_paths()
class RotationDataset(BaseDataset):
@staticmethod
def rotate_img(img, rot):
img = np.transpose(img.numpy(), (1, 2, 0))
if rot == 0: # 0 degrees rotation
out = img
elif rot == 90: # 90 degrees rotation
out = np.flipud(np.transpose(img, (1, 0, 2)))
elif rot == 180: # 90 degrees rotation
out = np.fliplr(np.flipud(img))
elif rot == 270: # 270 degrees rotation / or -90
out = np.transpose(np.flipud(img), (1, 0, 2))
else:
raise ValueError('rotation should be 0, 90, 180, or 270 degrees')
return torch.from_numpy(np.transpose(out, (2, 0, 1)).copy())
def __init__(self, ds) -> None:
super(Dataset, self).__init__()
self.__dict__['ds'] = ds
def __setattr__(self, name, value):
setattr(self.ds, name, value)
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
return getattr(self.ds, attr)
def __getitem__(self, index: int) -> Tuple[Any, Any, Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target, index) where target is index of the target class.
"""
out = self.ds.__getitem__(index)
img, target = out[:2] # exclude index
# rot_label = np.random.randint(0, 4) # .item()
rot_labels = [0, 1, 2, 3]
rots = [0, 90, 180, 270]
# rots = [0, rots[rot_label]]
rot_imgs = [self.rotate_img(img, rot) for rot in rots]
return torch.stack(rot_imgs, dim=0), torch.tensor(rot_labels), target, index
def __len__(self):
return len(self.ds)
def get_img_paths(self):
return self.ds.get_img_paths()
class LandmarkDataset(BaseDataset):
def __init__(self, root,
transform=None,
target_transform=None,
filename='train.csv',
onehot=False, return_id=False):
self.loader = pil_loader
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.filename = filename
self.train_labels = []
self.set_name = filename[:-4]
self.onehot = onehot
self.return_id = return_id
def get_path(i: str):
return os.path.join(root, self.set_name, i[0], i[1], i[2], i + ".jpg")
filename = os.path.join(self.root, self.filename)
self.df = read_csv(filename)
self.df['path'] = self.df['id'].apply(get_path)
self.max_index = self.df['landmark_id'].max() + 1
logging.info(f'Number of data: {len(self.df)}')
def to_onehot(self, i):
t = torch.zeros(self.max_index)
t[i] = 1
return t
def __getitem__(self, index):
img = self.df['path'][index]
if self.onehot:
target = self.to_onehot(self.df['landmark_id'][index])
else:
target = self.df['landmark_id'][index]
# target = torch.tensor(target)
img = self.loader(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.return_id:
return img, target, (self.df['id'][index], index)
return img, target
def __len__(self):
return len(self.df)
def get_img_paths(self):
return self.df['path'].to_numpy()
class SingleIDDataset(BaseDataset):
"""Dataset with only single class ID
To be merge with Landmark"""
def __init__(self, root,
transform=None,
target_transform=None,
filename='train.csv',
onehot=False):
self.loader = pil_loader
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.filename = filename
self.train_labels = []
self.set_name = filename[:-4]
self.onehot = onehot
def get_path(i: str):
return os.path.join(root, "imgs", i)
filename = os.path.join(self.root, self.filename)
self.df = read_csv(filename)
self.df['path'] = self.df['path'].apply(get_path)
self.max_index = self.df['class_id'].max() + 1
logging.info(f'Number of data: {len(self.df)}')
def to_onehot(self, i):
t = torch.zeros(self.max_index)
t[i] = 1
return t
def __getitem__(self, index):
img = self.df['path'][index]
if self.onehot:
target = self.to_onehot(self.df['class_id'][index])
else:
target = self.df['class_id'][index]
# target = torch.tensor(target)
img = self.loader(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, index
def __len__(self):
return len(self.df)
def get_img_paths(self):
return self.df['path'].to_numpy()
class ROxfordParisDataset(BaseDataset):
def __init__(self,
dataset='roxford5k',
filename='test.txt',
transform=None,
target_transform=None):
self.loader = pil_loader
self.transform = transform
self.target_transform = target_transform
assert filename in ['test.txt', 'database.txt']
self.set_name = filename
assert dataset in DATASETS
self.cfg = configdataset(dataset, os.path.join('data'))
logging.info(f'Number of data: {self.__len__()}')
def __getitem__(self, index):
if self.set_name == 'database.txt':
img = self.cfg['im_fname'](self.cfg, index)
elif self.set_name == 'test.txt':
img = self.cfg['qim_fname'](self.cfg, index)
img = self.loader(img)
if self.set_name == 'test.txt':
img = img.crop(self.cfg['gnd'][index]['bbx'])
if self.transform is not None:
img = self.transform(img)
return img, index, index  # returning (img, None, index) would raise an error, so the index stands in for the target
def __len__(self):
if self.set_name == 'test.txt':
return self.cfg['nq']
elif self.set_name == 'database.txt':
return self.cfg['n']
def get_img_paths(self):
raise NotImplementedError('Not supported.')
class DescriptorDataset(BaseDataset):
def __init__(self, root, filename, ratio=1):
self.data_dict = torch.load(os.path.join(root, filename), map_location=torch.device('cpu'))
self.filename = filename
self.root = root
self.ratio = ratio
if ratio != 1:
assert 0 < ratio < 1, 'data ratio is in between 0 and 1 exclusively'
N = len(self.data_dict['codes'])
randidx = np.arange(N)
np.random.shuffle(randidx)
randidx = randidx[:int(ratio * N)]
for key in self.data_dict:
self.data_dict[key] = self.data_dict[key][randidx]
logging.info(f'Number of data in {filename}: {self.__len__()}')
def __getitem__(self, index):
embed = self.data_dict['codes'][index]
label = self.data_dict['labels'][index]  # labels are used as stored here (no re-indexing, unlike EmbeddingDataset)
return embed, label, index  # returning (embed, None, index) would raise an error
def __len__(self):
return len(self.data_dict['codes'])
def get_img_paths(self):
raise NotImplementedError('Not supported for descriptor dataset. Please try usual Image Dataset if you want to get all image paths.')
class EmbeddingDataset(BaseDataset):
def __init__(self, root,
filename='train.txt'):
self.data_dict = torch.load(os.path.join(root, filename), map_location=torch.device('cpu'))
self.filename = filename
self.root = root
logging.info(f'Number of data in {filename}: {self.__len__()}')
def __getitem__(self, index):
embed = self.data_dict['codes'][index]
if self.filename == 'train.txt':
label = self.data_dict['labels'][index] - 1 # label is 1 indexed, convert to 0-indexed
else:
label = 0
landmark_id = self.data_dict['id'][index]
return embed, label, (landmark_id, index)  # returning (embed, None, index) would raise an error
def __len__(self):
return len(self.data_dict['id'])
def get_img_paths(self):
raise NotImplementedError('Not supported for descriptor dataset. Please try usual Image Dataset if you want to get all image paths.')
class NeighbourDatasetWrapper(BaseDataset):
def __init__(self, ds, model, config) -> None:
super(Dataset, self).__init__()
self.ds = ds
device = config['device']
loader = DataLoader(ds, config['batch_size'],
shuffle=False,
drop_last=False,
num_workers=os.cpu_count())
model.eval()
pbar = tqdm(loader, desc='Obtain Codes', ascii=True, bar_format='{l_bar}{bar:10}{r_bar}',
disable=configs.disable_tqdm)
ret_feats = []
for i, (data, labels, index) in enumerate(pbar):
with torch.no_grad():
data, labels = data.to(device), labels.to(device)
x, code_logits, b = model(data)[:3]
ret_feats.append(x.cpu())
ret_feats = torch.cat(ret_feats)
mbank = SimpleMemoryBank(len(self.ds), model.backbone.in_features, device)
mbank.update(ret_feats)
neighbour_topk = config['dataset_kwargs'].get('neighbour_topk', 5)
indices = mbank.mine_nearest_neighbors(neighbour_topk)
self.indices = indices[:, 1:] # exclude itself
def __getitem__(self, index: int) -> Tuple[Any, Any, Any, Any, Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target, index) where target is index of the target class.
"""
img, target = self.ds.__getitem__(index)
randidx = np.random.choice(self.indices[index], 1)[0]
nbimg, nbtar = self.ds.__getitem__(randidx)
return img, target, index, nbimg, nbtar, randidx
def __len__(self):
return len(self.ds)
def get_img_paths(self):
return self.ds.get_img_paths()
def one_hot(nclass):
def f(index):
index = torch.tensor(int(index)).long()
return torch.nn.functional.one_hot(index, nclass)
return f
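# Illustrative sketch (helper name is an assumption): one_hot(nclass) builds a
# target_transform that maps an integer class id to a one-hot vector.
def _one_hot_example():
    to_onehot = one_hot(5)
    return to_onehot(2)  # tensor([0, 0, 1, 0, 0])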
def cifar(nclass, **kwargs):
transform = kwargs['transform']
ep = kwargs['evaluation_protocol']
fn = kwargs['filename']
reset = kwargs['reset']
CIFAR = CIFAR10 if int(nclass) == 10 else CIFAR100
traind = CIFAR(f'data/cifar{nclass}',
transform=transform, target_transform=one_hot(int(nclass)),
train=True, download=True)
traind = IndexDatasetWrapper(traind)
testd = CIFAR(f'data/cifar{nclass}',
transform=transform, target_transform=one_hot(int(nclass)),
train=False, download=True)
testd = IndexDatasetWrapper(testd)
if ep == 2: # using orig train and test
if fn == 'test.txt':
return testd
else: # train.txt and database.txt
return traind
combine_data = np.concatenate([traind.data, testd.data], axis=0)
combine_targets = np.concatenate([traind.targets, testd.targets], axis=0)
path = f'data/cifar{nclass}/0_0_{ep}_{fn}'
load_data = fn == 'train.txt'
load_data = load_data and (reset or not os.path.exists(path))  # True means: regenerate the split files below
if not load_data:
logging.info(f'Loading {path}')
data_index = torch.load(path)
else:
train_data_index = []
query_data_index = []
db_data_index = []
data_id = np.arange(combine_data.shape[0]) # [0, 1, ...]
for i in range(nclass):
class_mask = combine_targets == i
index_of_class = data_id[class_mask].copy() # index of the class [2, 10, 656,...]
np.random.shuffle(index_of_class)
if ep == 1:
query_n = 100 # // (nclass // 10)
train_n = 500 # // (nclass // 10)
index_for_query = index_of_class[:query_n].tolist()
index_for_db = index_of_class[query_n:].tolist()
index_for_train = index_for_db[:train_n]
elif ep == 2: # ep2 = take all data
query_n = 1000 # // (nclass // 10)
index_for_query = index_of_class[:query_n].tolist()
index_for_db = index_of_class[query_n:].tolist()
index_for_train = index_for_db
elif ep == 3: # Bi-Half Cifar10(II)
query_n = 1000
train_n = 500
index_for_query = index_of_class[:query_n].tolist()
index_for_db = index_of_class[query_n:].tolist()
index_for_train = index_for_db[:train_n]
else:
raise NotImplementedError('')
train_data_index.extend(index_for_train)
query_data_index.extend(index_for_query)
db_data_index.extend(index_for_db)
train_data_index = np.array(train_data_index)
query_data_index = np.array(query_data_index)
db_data_index = np.array(db_data_index)
torch.save(train_data_index, f'data/cifar{nclass}/0_0_{ep}_train.txt')
torch.save(query_data_index, f'data/cifar{nclass}/0_0_{ep}_test.txt')
torch.save(db_data_index, f'data/cifar{nclass}/0_0_{ep}_database.txt')
data_index = {
'train.txt': train_data_index,
'test.txt': query_data_index,
'database.txt': db_data_index
}[fn]
traind.data = combine_data[data_index]
traind.targets = combine_targets[data_index]
return traind
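# Illustrative sketch (the transform choice and helper name are assumptions): building
# the evaluation-protocol-1 CIFAR-10 splits; calling with 'train.txt' first creates the
# split index files under data/cifar10/ that the other filenames then load.
def _cifar_ep1_example():
    t = transforms.Compose([transforms.Resize(224), transforms.ToTensor()])
    kwargs = dict(transform=t, evaluation_protocol=1, reset=False)
    train_ds = cifar(10, filename='train.txt', **kwargs)
    query_ds = cifar(10, filename='test.txt', **kwargs)
    db_ds = cifar(10, filename='database.txt', **kwargs)
    return len(train_ds), len(query_ds), len(db_ds)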
def imagenet100(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/imagenet{suffix}', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def cars(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = HashingDataset('data/cars', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def landmark(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
return_id = kwargs.get('return_id', False)
d = LandmarkDataset('data/landmark', transform=transform, filename=filename, return_id=return_id)
return d
def nuswide(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
separate_multiclass = kwargs.get('separate_multiclass', False)
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/nuswide_v2_256{suffix}',
transform=transform,
filename=filename,
separate_multiclass=separate_multiclass,
ratio=kwargs.get('ratio', 1))
return d
def nuswide_single(**kwargs):
return nuswide(separate_multiclass=True, **kwargs)
def coco(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/coco{suffix}', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def roxford5k(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = ROxfordParisDataset(dataset='roxford5k', filename=filename, transform=transform)
return d
def rparis6k(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = ROxfordParisDataset(dataset='rparis6k', filename=filename, transform=transform)
return d
def gldv2delgembed(**kwargs):
filename = kwargs['filename']
d = EmbeddingDataset('data/gldv2delgembed', filename=filename)
return d
def roxford5kdelgembed(**kwargs):
filename = kwargs['filename']
d = EmbeddingDataset('data/roxford5kdelgembed', filename=filename)
return d
def rparis6kdelgembed(**kwargs):
filename = kwargs['filename']
d = EmbeddingDataset('data/rparis6kdelgembed', filename=filename)
return d
def descriptor(**kwargs):
filename = kwargs['filename']
data_folder = kwargs['data_folder']
d = DescriptorDataset(data_folder, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def mirflickr(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/mirflickr{suffix}', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def sop_instance(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = SingleIDDataset('data/sop_instance', transform=transform, filename=filename)
return d
def sop(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
suffix = kwargs.get('dataset_name_suffix', '')
d = HashingDataset(f'data/sop{suffix}', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
def food101(**kwargs):
transform = kwargs['transform']
filename = kwargs['filename']
d = HashingDataset('data/food-101', transform=transform, filename=filename, ratio=kwargs.get('ratio', 1))
return d
| [
"torch.zeros",
"torch.cat",
"torch.nn.functional.one_hot",
"torch.stack",
"torch.device",
"torch.save",
"torch.no_grad",
"torch.tensor",
"torch.load"
] | 1.10.0 | jiahuei/cisip-FIRe | bcbda2b74dc5a0b26f0338f707a257d660b688a1 |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import optim
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.callbacks.finetuning import BackboneFinetuning
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.simple_models import ClassificationModel
def test_lr_monitor_single_lr(tmpdir):
""" Test that learning rates are extracted and logged for single lr scheduler. """
tutils.reset_seed()
model = BoringModel()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=0.1,
limit_train_batches=0.5,
callbacks=[lr_monitor],
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert lr_monitor.lrs, 'No learning rates logged'
assert all(v is None for v in lr_monitor.last_momentum_values.values()), \
'Momentum should not be logged by default'
assert len(lr_monitor.lrs) == len(trainer.lr_schedulers), \
'Number of learning rates logged does not match number of lr schedulers'
assert lr_monitor.lr_sch_names == list(lr_monitor.lrs.keys()) == ['lr-SGD'], \
'Names of learning rates not set correctly'
@pytest.mark.parametrize('opt', ['SGD', 'Adam'])
def test_lr_monitor_single_lr_with_momentum(tmpdir, opt: str):
"""Test that learning rates and momentum are extracted and logged for single lr scheduler."""
class LogMomentumModel(BoringModel):
def __init__(self, opt):
super().__init__()
self.opt = opt
def configure_optimizers(self):
if self.opt == 'SGD':
opt_kwargs = {'momentum': 0.9}
elif self.opt == 'Adam':
opt_kwargs = {'betas': (0.9, 0.999)}
optimizer = getattr(optim, self.opt)(self.parameters(), lr=1e-2, **opt_kwargs)
lr_scheduler = optim.lr_scheduler.OneCycleLR(optimizer, max_lr=1e-2, total_steps=10_000)
return [optimizer], [lr_scheduler]
model = LogMomentumModel(opt=opt)
lr_monitor = LearningRateMonitor(log_momentum=True)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=2,
limit_train_batches=5,
log_every_n_steps=1,
callbacks=[lr_monitor],
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert all(v is not None for v in lr_monitor.last_momentum_values.values()), \
'Expected momentum to be logged'
assert len(lr_monitor.last_momentum_values) == len(trainer.lr_schedulers), \
'Number of momentum values logged does not match number of lr schedulers'
assert all(k == f'lr-{opt}-momentum' for k in lr_monitor.last_momentum_values.keys()), \
'Names of momentum values not set correctly'
def test_log_momentum_no_momentum_optimizer(tmpdir):
"""
Test that if optimizer doesn't have momentum then a warning is raised with log_momentum=True.
"""
class LogMomentumModel(BoringModel):
def configure_optimizers(self):
optimizer = optim.ASGD(self.parameters(), lr=1e-2)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = LogMomentumModel()
lr_monitor = LearningRateMonitor(log_momentum=True)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=2,
limit_train_batches=5,
log_every_n_steps=1,
callbacks=[lr_monitor],
)
with pytest.warns(RuntimeWarning, match="optimizers do not have momentum."):
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert all(v == 0 for v in lr_monitor.last_momentum_values.values()), \
'Expected momentum to be logged'
assert len(lr_monitor.last_momentum_values) == len(trainer.lr_schedulers), \
'Number of momentum values logged does not match number of lr schedulers'
assert all(k == 'lr-ASGD-momentum' for k in lr_monitor.last_momentum_values.keys()), \
'Names of momentum values not set correctly'
def test_lr_monitor_no_lr_scheduler(tmpdir):
tutils.reset_seed()
class CustomBoringModel(BoringModel):
def configure_optimizers(self):
optimizer = optim.SGD(self.parameters(), lr=0.1)
return optimizer
model = CustomBoringModel()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=0.1,
limit_train_batches=0.5,
callbacks=[lr_monitor],
)
with pytest.warns(RuntimeWarning, match='have no learning rate schedulers'):
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
def test_lr_monitor_no_logger(tmpdir):
tutils.reset_seed()
model = BoringModel()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
callbacks=[lr_monitor],
logger=False,
)
with pytest.raises(MisconfigurationException, match='`Trainer` that has no logger'):
trainer.fit(model)
@pytest.mark.parametrize("logging_interval", ['step', 'epoch'])
def test_lr_monitor_multi_lrs(tmpdir, logging_interval: str):
""" Test that learning rates are extracted and logged for multi lr schedulers. """
tutils.reset_seed()
class CustomBoringModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def configure_optimizers(self):
optimizer1 = optim.Adam(self.parameters(), lr=1e-2)
optimizer2 = optim.Adam(self.parameters(), lr=1e-2)
lr_scheduler1 = optim.lr_scheduler.StepLR(optimizer1, 1, gamma=0.1)
lr_scheduler2 = optim.lr_scheduler.StepLR(optimizer2, 1, gamma=0.1)
return [optimizer1, optimizer2], [lr_scheduler1, lr_scheduler2]
model = CustomBoringModel()
model.training_epoch_end = None
lr_monitor = LearningRateMonitor(logging_interval=logging_interval)
log_every_n_steps = 2
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
log_every_n_steps=log_every_n_steps,
limit_train_batches=7,
limit_val_batches=0.1,
callbacks=[lr_monitor],
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert lr_monitor.lrs, 'No learning rates logged'
assert len(lr_monitor.lrs) == len(trainer.lr_schedulers), \
'Number of learning rates logged does not match number of lr schedulers'
assert lr_monitor.lr_sch_names == ['lr-Adam', 'lr-Adam-1'], \
'Names of learning rates not set correctly'
if logging_interval == 'step':
expected_number_logged = trainer.global_step // log_every_n_steps
if logging_interval == 'epoch':
expected_number_logged = trainer.max_epochs
assert all(len(lr) == expected_number_logged for lr in lr_monitor.lrs.values()), \
'Length of logged learning rates do not match the expected number'
def test_lr_monitor_param_groups(tmpdir):
""" Test that learning rates are extracted and logged for single lr scheduler. """
tutils.reset_seed()
class CustomClassificationModel(ClassificationModel):
def configure_optimizers(self):
param_groups = [{
'params': list(self.parameters())[:2],
'lr': self.lr * 0.1
}, {
'params': list(self.parameters())[2:],
'lr': self.lr
}]
optimizer = optim.Adam(param_groups)
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.1)
return [optimizer], [lr_scheduler]
model = CustomClassificationModel()
dm = ClassifDataModule()
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=0.1,
limit_train_batches=0.5,
callbacks=[lr_monitor],
)
trainer.fit(model, datamodule=dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert lr_monitor.lrs, 'No learning rates logged'
assert len(lr_monitor.lrs) == 2 * len(trainer.lr_schedulers), \
'Number of learning rates logged does not match number of param groups'
assert lr_monitor.lr_sch_names == ['lr-Adam']
assert list(lr_monitor.lrs.keys()) == ['lr-Adam/pg1', 'lr-Adam/pg2'], \
'Names of learning rates not set correctly'
def test_lr_monitor_custom_name(tmpdir):
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer, [scheduler] = super().configure_optimizers()
lr_scheduler = {'scheduler': scheduler, 'name': 'my_logging_name'}
return optimizer, [lr_scheduler]
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=0.1,
limit_train_batches=0.5,
callbacks=[lr_monitor],
progress_bar_refresh_rate=0,
weights_summary=None,
)
trainer.fit(TestModel())
assert lr_monitor.lr_sch_names == list(lr_monitor.lrs.keys()) == ['my_logging_name']
def test_lr_monitor_custom_pg_name(tmpdir):
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD([{'params': list(self.layer.parameters()), 'name': 'linear'}], lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=2,
limit_train_batches=2,
callbacks=[lr_monitor],
progress_bar_refresh_rate=0,
weights_summary=None,
)
trainer.fit(TestModel())
assert lr_monitor.lr_sch_names == ['lr-SGD']
assert list(lr_monitor.lrs) == ['lr-SGD/linear']
def test_lr_monitor_duplicate_custom_pg_names(tmpdir):
tutils.reset_seed()
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.linear_a = torch.nn.Linear(32, 16)
self.linear_b = torch.nn.Linear(16, 2)
def forward(self, x):
x = self.linear_a(x)
x = self.linear_b(x)
return x
def configure_optimizers(self):
param_groups = [
{
'params': list(self.linear_a.parameters()),
'name': 'linear'
},
{
'params': list(self.linear_b.parameters()),
'name': 'linear'
},
]
optimizer = torch.optim.SGD(param_groups, lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=2,
limit_val_batches=2,
limit_train_batches=2,
callbacks=[lr_monitor],
progress_bar_refresh_rate=0,
weights_summary=None,
)
with pytest.raises(
MisconfigurationException, match='A single `Optimizer` cannot have multiple parameter groups with identical'
):
trainer.fit(TestModel())
def test_multiple_optimizers_basefinetuning(tmpdir):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.backbone = torch.nn.Sequential(
torch.nn.Linear(32, 32),
torch.nn.Linear(32, 32),
torch.nn.Linear(32, 32),
torch.nn.ReLU(True),
)
self.layer = torch.nn.Linear(32, 2)
def training_step(self, batch, batch_idx, optimizer_idx):
return super().training_step(batch, batch_idx)
def forward(self, x):
return self.layer(self.backbone(x))
def configure_optimizers(self):
parameters = list(filter(lambda p: p.requires_grad, self.parameters()))
opt = optim.Adam(parameters, lr=0.1)
opt_2 = optim.Adam(parameters, lr=0.1)
opt_3 = optim.Adam(parameters, lr=0.1)
optimizers = [opt, opt_2, opt_3]
schedulers = [
optim.lr_scheduler.StepLR(opt, step_size=1, gamma=0.5),
optim.lr_scheduler.StepLR(opt_2, step_size=1, gamma=0.5),
]
return optimizers, schedulers
class Check(Callback):
def on_train_epoch_start(self, trainer, pl_module) -> None:
num_param_groups = sum([len(opt.param_groups) for opt in trainer.optimizers])
assert lr_monitor.lr_sch_names == ['lr-Adam', 'lr-Adam-1']
if trainer.current_epoch == 0:
assert num_param_groups == 3
elif trainer.current_epoch == 1:
assert num_param_groups == 4
assert list(lr_monitor.lrs) == ['lr-Adam-1', 'lr-Adam/pg1', 'lr-Adam/pg2']
elif trainer.current_epoch == 2:
assert num_param_groups == 5
assert list(lr_monitor.lrs) == ['lr-Adam/pg1', 'lr-Adam/pg2', 'lr-Adam-1/pg1', 'lr-Adam-1/pg2']
else:
expected = ['lr-Adam/pg1', 'lr-Adam/pg2', 'lr-Adam-1/pg1', 'lr-Adam-1/pg2', 'lr-Adam-1/pg3']
assert list(lr_monitor.lrs) == expected
class TestFinetuning(BackboneFinetuning):
def freeze_before_training(self, pl_module):
self.freeze(pl_module.backbone[0])
self.freeze(pl_module.backbone[1])
self.freeze(pl_module.layer)
def finetune_function(self, pl_module, epoch: int, optimizer, opt_idx: int):
"""Called when the epoch begins."""
if epoch == 1 and opt_idx == 0:
self.unfreeze_and_add_param_group(pl_module.backbone[0], optimizer, lr=0.1)
if epoch == 2 and opt_idx == 1:
self.unfreeze_and_add_param_group(pl_module.layer, optimizer, lr=0.1)
if epoch == 3 and opt_idx == 1:
assert len(optimizer.param_groups) == 2
self.unfreeze_and_add_param_group(pl_module.backbone[1], optimizer, lr=0.1)
assert len(optimizer.param_groups) == 3
lr_monitor = LearningRateMonitor()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=5,
limit_val_batches=0,
limit_train_batches=2,
callbacks=[TestFinetuning(), lr_monitor, Check()],
progress_bar_refresh_rate=0,
weights_summary=None,
checkpoint_callback=False
)
model = TestModel()
model.training_epoch_end = None
trainer.fit(model)
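# Each StepLR halves its learning rates every epoch, and groups unfrozen by TestFinetuning start
# at lr=0.1 when they are added (epochs 1, 2 and 3), which is why the expected lists below have
# different lengths.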
expected = [0.1, 0.05, 0.025, 0.0125, 0.00625]
assert lr_monitor.lrs['lr-Adam/pg1'] == expected
expected = [0.1, 0.05, 0.025, 0.0125]
assert lr_monitor.lrs['lr-Adam/pg2'] == expected
expected = [0.1, 0.05, 0.025, 0.0125, 0.00625]
assert lr_monitor.lrs['lr-Adam-1/pg1'] == expected
expected = [0.1, 0.05, 0.025]
assert lr_monitor.lrs['lr-Adam-1/pg2'] == expected
expected = [0.1, 0.05]
assert lr_monitor.lrs['lr-Adam-1/pg3'] == expected
| [
"torch.nn.Linear",
"torch.optim.lr_scheduler.OneCycleLR",
"torch.optim.lr_scheduler.StepLR",
"torch.optim.Adam",
"torch.optim.SGD",
"torch.nn.ReLU"
] | 1.6 | calebrob6/pytorch-lightning | 4c79b3a5b343866217784c66d122819c59a92c1d |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script will generate 2 traces: one for `training_step` and one for `validation_step`.
The traces can be visualized in 2 ways:
* With Chrome:
1. Open Chrome and copy/paste this url: `chrome://tracing/`.
2. Once tracing opens, click on `Load` at the top-right and load one of the generated traces.
* With PyTorch Tensorboard Profiler (Instructions are here: https://github.com/pytorch/kineto/tree/master/tb_plugin)
1. pip install tensorboard torch-tb-profiler
2. tensorboard --logdir={FOLDER}
"""
import sys
import torch
import torchvision
import torchvision.models as models
import torchvision.transforms as T
from pl_examples import _DATASETS_PATH, cli_lightning_logo
from pytorch_lightning import LightningDataModule, LightningModule
from pytorch_lightning.utilities.cli import LightningCLI
DEFAULT_CMD_LINE = (
"--trainer.max_epochs=1",
"--trainer.limit_train_batches=15",
"--trainer.limit_val_batches=15",
"--trainer.profiler=pytorch",
f"--trainer.gpus={int(torch.cuda.is_available())}",
)
class ModelToProfile(LightningModule):
def __init__(self, name: str = "resnet50"):
super().__init__()
self.model = getattr(models, name)(pretrained=True)
self.criterion = torch.nn.CrossEntropyLoss()
def training_step(self, batch, batch_idx):
inputs, labels = batch
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx):
inputs, labels = batch
outputs = self.model(inputs)
loss = self.criterion(outputs, labels)
self.log("val_loss", loss)
def predict_step(self, batch, batch_idx, dataloader_idx: int = None):
inputs = batch[0]
return self.model(inputs)
def configure_optimizers(self):
return torch.optim.SGD(self.parameters(), lr=0.001, momentum=0.9)
class CIFAR10DataModule(LightningDataModule):
transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
def train_dataloader(self, *args, **kwargs):
trainset = torchvision.datasets.CIFAR10(
root=_DATASETS_PATH, train=True, download=True, transform=self.transform
)
return torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True, num_workers=0)
def val_dataloader(self, *args, **kwargs):
valset = torchvision.datasets.CIFAR10(root=_DATASETS_PATH, train=False, download=True, transform=self.transform)
return torch.utils.data.DataLoader(valset, batch_size=32, shuffle=True, num_workers=0)
def cli_main():
if len(sys.argv) == 1:
sys.argv += DEFAULT_CMD_LINE
LightningCLI(ModelToProfile, CIFAR10DataModule)
if __name__ == '__main__':
cli_lightning_logo()
cli_main()
| [
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader"
] | 1.6 | calebrob6/pytorch-lightning | 4c79b3a5b343866217784c66d122819c59a92c1d |
1.3 | import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# torch.manual_seed(0)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
from pyro.distributions import MultivariateNormal, Normal, Independent
from sklearn.cluster import KMeans, SpectralClustering
from sklearn.metrics import adjusted_rand_score
import scipy
from scipy.sparse import csgraph
from scipy.sparse.linalg import eigsh
import sys
sys.path.append('/home/REDACTED/chf-github/model/')
from utils import check_has_missing, quad_function, convert_XY_pack_pad
sys.path.append('../evaluation/')
from eval_utils import get_cluster_swap_metric, get_cluster_pear_metric
sys.path.append('../plot/')
from plot_utils import plot_latent_labels, plot_delta_comp
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'x-large',
# 'figure.figsize': (10,6),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
class Model(nn.Module):
def __init__(self):
torch.manual_seed(0)
np.random.seed(0)
if torch.cuda.is_available():
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
super(Model, self).__init__()
def forward(self,**kwargs):
raise ValueError('Should be overriden')
def get_masks(self, M):
m_t = ((torch.flip(torch.cumsum(torch.flip(M.sum(-1), (1,)), 1), (1,))>1.)*1)
m_g_t = (m_t.sum(-1)>1)*1.
lens = m_t.sum(-1)
return m_t, m_g_t, lens
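# masked_gaussian_nll_3d returns the element-wise negative log-likelihood of x under
# N(mu, std**2): 0.5*log(2*pi) + log(std) + (x - mu)**2 / (2*std**2). Despite the name,
# no mask is applied here (masked_nll is just nll); callers reduce over dimensions themselves.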
def masked_gaussian_nll_3d(self, x, mu, std):
nll = 0.5*np.log(2*np.pi) + torch.log(std)+((mu-x)**2)/(2*std**2)
masked_nll = nll
return masked_nll
def apply_reg(self, p, reg_type='l2'):
if reg_type == 'l1':
return torch.sum(torch.abs(p))
elif reg_type=='l2':
return torch.sum(p.pow(2))
else:
raise ValueError('bad reg')
def fit(self, train_loader, valid_loader, epochs, lr, eval_freq=1, print_freq=1000, anneal = False, fname = None, verbose=False, plot_debug=False, epoch_debug=False):
if verbose:
eval_freq = 50
opt = torch.optim.Adam(self.parameters(), lr=lr, eps=1e-3)
best_nelbo, best_nll, best_kl, best_ep = 100000, 100000, 100000, -1
training_loss = list()
training_epochs = list()
testing_loss = list()
testing_epochs = list()
test_ari_vals = list()
test_mse_vals = list()
test_pear_vals = list()
test_swaps_vals = list()
train_nelbo = list()
train_nll = list()
train_kl = list()
test_nelbo = list()
test_nll = list()
test_kl = list()
train_likelihood = list()
test_likelihood = list()
train_affinity_num_clusters = list()
test_affinity_num_clusters = list()
if fname is not None:
logging.basicConfig(
filename=fname[:-4]+'_loss.log', filemode='w',
format='%(asctime)s - %(levelname)s \t %(message)s',
level=logging.INFO)
if anneal:
anneal = 0.01
# print ('With annealing starting at ',anneal)
else:
# print ('No annealing')
anneal = 1.
# TODO: consider caching the convert_XY_pad content because it's the bulk of the computation?
"""
if check_has_missing(X) or check_has_missing(Y):
has_missing = True
else:
has_missing = False
XY = concat(X,Y)
newXY, all_seq_lengths = convert_XY_pack_pad(XY)
"""
Y, S, X, M, T = [i for i in train_loader][0]
has_missing = False
newXY = None
all_seq_lengths = None
if check_has_missing(X) or check_has_missing(Y):
has_missing = True
XY = torch.cat([X,Y], axis=2)
newXY, all_seq_lengths = convert_XY_pack_pad(XY, how=self.how_missing)
else:
has_missing = False
# now validation
val_Y, val_S, val_X, val_M, val_T = [i for i in valid_loader][0]
val_has_missing = False
val_newXY = None
val_all_seq_lengths = None
if check_has_missing(val_X) or check_has_missing(val_Y):
val_has_missing = True
val_XY = torch.cat([val_X,val_Y], axis=2)
val_newXY, val_all_seq_lengths = convert_XY_pack_pad(val_XY, how=self.how_missing)
else:
val_has_missing = False
for epoch in range(1, epochs+1):
anneal = min(1, epoch/(epochs*0.5))
self.train()
batch_loss = 0
test_batch_loss = 0
idx = 0
test_idx = 0
for data_tuples in train_loader:
opt.zero_grad()
# if epoch == 3:
(nelbo, nll, kl), loss = self.forward(*data_tuples, anneal = anneal,
has_missing=has_missing,XY=newXY,
all_seq_lengths=all_seq_lengths)
nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
if epoch_debug:
train_nelbo.append(nelbo)
train_nll.append(nll)
train_kl.append(kl)
# from torch.autograd import grad
# grad(loss, model.debug['y_out'], only_inputs=True)
# grad(loss, model.debug['rnn'], only_inputs=True)
# grad(loss, model.debug['enc_h_mu'], only_inputs=True)
loss.backward()
opt.step()
idx +=1
batch_loss += loss.item()
cur_mse = batch_loss/float(idx)
training_loss.append(cur_mse)
if epoch%eval_freq==0:
self.eval()
(nelbo, nll, kl), eval_loss = self.forward(*valid_loader.dataset.tensors, anneal = 1.,
has_missing=val_has_missing,XY=val_newXY,
all_seq_lengths=val_all_seq_lengths)
nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
if nelbo<best_nelbo:
best_nelbo = nelbo; best_nll = nll; best_kl = kl; best_ep = epoch
if fname is not None:
torch.save(self.state_dict(), fname)
if epoch_debug:
test_nelbo.append(nelbo)
test_nll.append(nll)
test_kl.append(kl)
# if kl < 0.:
# print('%.3f' % kl,)
train_Y, train_S, train_X, train_M, train_T = train_loader.dataset.tensors
test_Y, test_S, test_X, test_M, test_T = valid_loader.dataset.tensors
"""
step 1: get z using mu not sampling
step 2: K-means cluster these z and save centers
step 3: return theta_k = g1(z_k) for K clusters
"""
train_z, _ = self.get_mu(train_X,train_Y)
train_z = train_z.detach().numpy()
# likelihood = self.imp_sampling(train_X, train_Y)
# train_likelihood.append(likelihood)
# for different cluster algs, plot labels and true subtypes
K = 2
km = KMeans(n_clusters=K)
km.fit(train_z)
self.subtypes_km = km
test_z, kl = self.get_mu(test_X,test_Y)
test_theta = self.infer_functional_params(test_z)
best_delta = self.get_best_delta(test_X, test_Y, test_M, test_theta, kl)
test_z = test_z.detach().numpy()
test_clusters = self.subtypes_km.predict(test_z)
true_clusters = [int(i) for i in np.squeeze(test_S)]
test_M = torch.ones_like(test_X)
test_mse = self.get_mse(test_X, test_Y, test_M, test_theta, best_delta)
test_ari = adjusted_rand_score(test_clusters, true_clusters)
test_swaps = get_cluster_swap_metric(test_clusters, test_T[:,0,0].detach().numpy(), best_delta.detach().numpy())
test_pear = get_cluster_pear_metric(test_clusters, test_T[:,0,0].detach().numpy(), best_delta.detach().numpy())
test_ari_vals.append(test_ari)
test_mse_vals.append(test_mse)
test_swaps_vals.append(test_swaps)
test_pear_vals.append(test_pear)
test_batch_loss += eval_loss.item()
test_idx += 1
testing_loss.append(test_batch_loss/float(test_idx))
likelihood = self.imp_sampling(train_X, train_Y, imp_samples=50)
train_likelihood.append(likelihood)
likelihood = self.imp_sampling(test_X, test_Y, imp_samples=50)
test_likelihood.append(likelihood)
if plot_debug:
train_Y, train_S, train_X, train_M, train_T = train_loader.dataset.tensors
test_Y, test_S, test_X, test_M, test_T = valid_loader.dataset.tensors
train_z, _ = self.get_mu(train_X,train_Y)
train_z = train_z.detach().numpy()
# for different cluster algs, plot labels and true subtypes
K = 2
km = KMeans(n_clusters=K)
km.fit(train_z)
self.subtypes_km = km
test_z, kl = self.get_mu(test_X,test_Y)
test_theta = self.infer_functional_params(test_z)
best_delta = self.get_best_delta(test_X, test_Y, test_M, test_theta, kl)
test_z = test_z.detach().numpy()
test_clusters = self.subtypes_km.predict(test_z)
true_clusters = [int(i) for i in np.squeeze(test_S)]
test_M = torch.ones_like(test_X)
test_mse = self.get_mse(test_X, test_Y, test_M, test_theta, best_delta)
test_ari = adjusted_rand_score(test_clusters, true_clusters)
plot_latent_labels(test_z, test_S, 'plots/pngs/lr_%.3f_%03d_latent.png' % (lr, epoch), title='Epoch %d, ARI: %.3f' % (epoch, test_ari))
test_pear = get_cluster_pear_metric(test_clusters, test_T[:,0,0].detach().numpy(), best_delta.detach().numpy())  # computed here so the title below is defined even when epoch_debug is off
plot_delta_comp(test_T[:,0,0].detach().numpy(), best_delta.detach().numpy(), 'plots/pngs/lr_%.3f_%03d_delta.png' % (lr, epoch), title='Epoch %d, Pear: %.3f' % (epoch, test_pear))
self.train()
# print ('Best NELBO:%.3f, NLL:%.3f, KL:%.3f@ epoch %d'%(best_nelbo, best_nll, best_kl, best_ep))
self.best_nelbo = best_nelbo
self.best_nll = best_nll
self.best_kl = best_kl
self.best_ep = best_ep
if fname is not None and epochs > eval_freq:
print('loaded state_dict. nelbo: %.4f (ep %d)' % (best_nelbo, best_ep))
self.load_state_dict(torch.load(fname))
self.eval()
self.training_loss = training_loss
self.testing_loss = testing_loss
if plot_debug:
import os
import imageio
png_dir = 'plots/pngs/'
kargs = {'duration': 0.3}
images = []
for file_name in sorted(os.listdir(png_dir)):
if file_name.endswith('_latent.png'):
file_path = os.path.join(png_dir, file_name)
images.append(imageio.imread(file_path))
imageio.mimsave('plots/data%d_latent_%.3f.gif' % (self.data_num, lr), images, **kargs)
images = []
for file_name in sorted(os.listdir(png_dir)):
if file_name.endswith('_delta.png'):
file_path = os.path.join(png_dir, file_name)
images.append(imageio.imread(file_path))
imageio.mimsave('plots/data%d_delta_%.3f.gif' % (self.data_num, lr), images, **kargs)
# delete everything when you're done
for file_name in os.listdir(png_dir):
root = os.getcwd()
complete_fname = os.path.join(root, png_dir+file_name)
if not os.path.isdir(complete_fname):
os.unlink(complete_fname)
if epoch_debug:
import pickle
f = open('data%d_results_lr%.3f.pk' % (self.data_num, lr), 'wb')
results = {'epochs': epochs,
'eval_freq': eval_freq,
'ari': test_ari_vals,
'mse': test_mse_vals,
'swaps': test_swaps_vals,
'pear': test_pear_vals,
'train_likelihood': train_likelihood,
'test_likelihood': test_likelihood,
'train_loss': training_loss,
'test_loss': testing_loss,
'best_nelbo': best_nelbo,
'best_nll': best_nll,
'best_kl': best_kl,
'best_ep': best_ep,
'train_nelbo': train_nelbo,
'train_nll': train_nll,
'train_kl': train_kl,
'test_nelbo': test_nelbo,
'test_nll': test_nll,
'test_kl': test_kl,
# 'train_affinity_num_clusters': train_affinity_num_clusters,
# 'test_affinity_num_clusters': test_affinity_num_clusters,
'train_M_sum': train_M.sum(),
'test_M_sum': test_M.sum()
}
pickle.dump(results, f)
f.close()
return best_nelbo, best_nll, best_kl, best_ep
class TwoLayer(nn.Module):
def __init__(self,dim_input, dim_inner, dim_output):
super(TwoLayer, self).__init__()
self.fc1 = nn.Linear(dim_input,dim_inner)
self.fc2 = nn.Linear(dim_inner,dim_output)
def forward(self, x):
x = self.fc2(F.relu(self.fc1(x)))
return x
class Sublign(Model):
def __init__(self, dim_stochastic, dim_hidden, dim_rnn, C=0.0, dim_biomarkers=3,
reg_type = 'l2', sigmoid=True, learn_time=True, auto_delta=True, max_delta=10.,
plot_debug=False, epoch_debug=False, beta=0.001, device='cpu',
how_missing='linear'):
"""
note no lr here. lr is in fit.
"""
super(Sublign, self).__init__()
self.dim_stochastic = dim_stochastic
self.dim_hidden = dim_hidden
self.dim_rnn = dim_rnn
self.n_biomarkers = dim_biomarkers
self.C = C
self.reg_type = reg_type
self.sigmoid = sigmoid
self.dz_features = self.dim_stochastic
rnn_input_size = self.n_biomarkers + 1
self.subtypes_km = None
self.rnn = nn.RNN(rnn_input_size, self.dim_rnn, 1, batch_first = True)
self.enc_h_mu = nn.Linear(self.dim_rnn, self.dim_stochastic)
self.enc_h_sig = nn.Linear(self.dim_rnn, self.dim_stochastic)
self.how_missing = how_missing
# initialize functions theta = g1(z)
if self.sigmoid:
self.dec_z_beta0 = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
self.dec_z_beta1 = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
else:
self.dec_z_a = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
self.dec_z_b = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
self.dec_z_c = TwoLayer(self.dz_features, self.dim_hidden, self.n_biomarkers)
# experiments for delta
if auto_delta:
self.max_delta = 10.
self.auto_delta = True
self.learn_time = True
elif learn_time:
self.max_delta = max_delta
self.auto_delta = False
self.learn_time = True
else:
self.max_delta = 0.
self.auto_delta = False
self.learn_time = False
if not learn_time:
self.learn_time = False
self.max_delta = 0.
self.auto_delta = False
self.N_delta_bins = 50
if device == 'cpu':
self.device = torch.device('cpu')
else:
self.device = torch.device('cuda')
self.debug = {}
self.beta = beta
self.data_num = 1
def get_delta_options(self, Xvals):
# output delta_options is tensor size N_patients, N_delta_bins
N_patients = Xvals.shape[0]
if self.auto_delta:
max_time_patient = Xvals.max(axis=1).values
max_time_all = max_time_patient.max()
max_delta_patient = max_time_all - max_time_patient
delta_options = torch.zeros(N_patients,self.N_delta_bins).to(self.device)
for i in range(N_patients):
delta_options[i] = torch.linspace(0,max_delta_patient[i,0],self.N_delta_bins)
return delta_options
else:
delta_options = torch.linspace(0, self.max_delta, self.N_delta_bins)
return delta_options[None,:].repeat(N_patients, 1).to(self.device)
def calc_loss_per_delta(self, X, Y, M, theta, delta_options, kl):
"""
input:
- X (N_patients, N_visits, 1)
- Y (N_patients, N_visits, N_biomarkers)
- theta (N_patients, N_biomarkers each component)
- delta_options (N_patients, N_delta_bins)
output:
- loss_per_patient (N_patients, N_delta_bins)
step 1: convert everything to size N_patients, N_visits, N_biomarkers, N_delta_bins
step 2: calculate loss yhat = f(x+delta; theta)
"""
N_patients, N_visits, N_biomarkers = Y.shape
X_repeat = X[:,:,:,None].repeat(1,1,N_biomarkers,self.N_delta_bins)
Y_repeat = Y[:,:,:,None].repeat(1,1,1,self.N_delta_bins)
delta_opt_repeat = delta_options[:,None,None,:].repeat(1,N_visits,N_biomarkers,1)
if self.sigmoid:
beta0 = theta[0][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
beta1 = theta[1][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
sig_input = X_repeat + delta_opt_repeat
mm = torch.nn.Sigmoid()
mm_input = (beta0 + beta1 * sig_input).to(self.device)
yhat = mm(mm_input)
else:
a = theta[0][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
b = theta[1][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
c = theta[2][:,None,:,None].repeat(1,N_visits,1,self.N_delta_bins)
quad_input = X_repeat + delta_opt_repeat
yhat = quad_function(a,b,c,quad_input)
kl_repeat = kl[:,None].repeat(1,self.N_delta_bins)
loss = ((yhat - Y_repeat)**2)
M_repeat = M[:,:,:,None].repeat(1,1,1,self.N_delta_bins)
loss = loss.masked_fill(M_repeat == 0., 0.)
loss_sum = loss.sum(axis=1).sum(axis=1)
delta_term = torch.log(torch.ones_like(loss_sum) / self.N_delta_bins).to(self.device)
kl_repeat = kl_repeat.to(self.device)
return loss_sum + self.beta*kl_repeat + delta_term
def get_best_delta(self, X,Y,M,theta, kl):
"""
output: best_delta is size N_patients
step 1: if subnolign, return 0.
step 2: get all the delta options
step 3: calculate loss for each option
step 4: find best delta option
note that z could be either from sample or get_mu so not included here
"""
# TODO: interpolate X and Y if they're missing
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
M = torch.tensor(M).to(self.device)
N = X.shape[0]
if not self.learn_time:
return torch.zeros(N)
delta_options = self.get_delta_options(X)
loss_per_delta = self.calc_loss_per_delta(X,Y,M,theta, delta_options, kl)
min_delta = loss_per_delta.min(axis=1).indices
best_delta = torch.zeros(N).to(self.device)
for i in range(N):
best_delta[i] = delta_options[i][min_delta[i]]
return best_delta
def predict_Y(self, X,Y,theta,delta):
"""
input:
- X (N_patients, N_visits, 1)
- Y (N_patients, N_visits, N_biomarkers)
- theta (N_patients, N_biomarkers each component)
- delta (N_patients)
output:
- yhat (N_patients, N_visits, N_biomarkers)
step 1: convert everything to size N_patients, N_visits, N_biomarkers
step 2: calculate loss yhat = f(x+delta; theta)
"""
N_patients, N_visits, N_biomarkers = Y.shape
X_repeat = X.repeat(1,1,N_biomarkers)
delta_rep = delta[:,None,None].repeat(1,N_visits,N_biomarkers)
if self.sigmoid:
beta0 = theta[0][:,None,:].repeat(1,N_visits,1)
beta1 = theta[1][:,None,:].repeat(1,N_visits,1)
sig_input = X_repeat + delta_rep
mm = torch.nn.Sigmoid()
mm_input = (beta0 + beta1 * sig_input).to(self.device)
yhat = mm(mm_input)
else:
a = theta[0][:,None,:].repeat(1,N_visits,1)
b = theta[1][:,None,:].repeat(1,N_visits,1)
c = theta[2][:,None,:].repeat(1,N_visits,1)
quad_input = X_repeat + delta_rep
yhat = quad_function(a,b,c,quad_input)
return yhat
def get_loss(self, Y, S, X, M, anneal=1.,
XY=None,all_seq_lengths=None, has_missing=False):
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
M = torch.tensor(M).to(self.device)
z, kl = self.sample(X,Y,XY=XY,
all_seq_lengths=all_seq_lengths, has_missing=has_missing)
theta = self.infer_functional_params(z)
with torch.no_grad():
best_delta = self.get_best_delta(X,Y,M,theta, kl)
yhat = self.predict_Y(X,Y,theta,best_delta)
self.debug['y_out'] = yhat
squared = (Y - yhat)**2
# mask out originally missing values
squared = squared.masked_fill(M == 0., 0)
nll = squared.sum(-1).sum(-1)
delta_term = torch.log(torch.ones_like(nll) / self.N_delta_bins)
# nelbo = nll + self.beta*anneal*kl + delta_term
nelbo = nll + self.beta*anneal*kl
return nelbo, nll, kl
def forward(self, Y, S, X, M, T, anneal = 1.,
XY=None,all_seq_lengths=None, has_missing=False):
if type(M) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
M = torch.tensor(M).to(self.device)
if XY is None and (check_has_missing(X) or check_has_missing(Y)):
has_missing = True
XY = torch.cat([X,Y], axis=2)
newXY, all_seq_lengths = convert_XY_pack_pad(XY, how=self.how_missing)
else:
has_missing = False
(nelbo, nll, kl) = self.get_loss(Y, S, X, M, anneal = anneal, XY=XY,all_seq_lengths=all_seq_lengths, has_missing=has_missing)
reg_loss = nelbo
for name,param in self.named_parameters():
reg_loss += self.C*self.apply_reg(param, reg_type=self.reg_type)
normalizer = torch.sum(M)
norm_nelbo = (torch.sum(nelbo) / normalizer)
norm_nll = (torch.sum(nll)/normalizer)
norm_kl = torch.mean(kl)
norm_reg = torch.sum(reg_loss) / normalizer
return (norm_nelbo, norm_nll, norm_kl), norm_reg
def sample(self, X,Y,mu_std=False,XY=None,all_seq_lengths=None, has_missing=False):
"""
Returns z and KL sampled from observed X,Y
"""
cacheXY = XY
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
XY = torch.cat([X,Y], axis=2)
# import pdb; pdb.set_trace()
if has_missing:
# batch_in, sequences = convert_XY_pack_pad(XY,how=self.how_missing)
pack = torch.nn.utils.rnn.pack_padded_sequence(cacheXY, all_seq_lengths, batch_first=True, enforce_sorted=False)
_, hidden = self.rnn(pack)
elif check_has_missing(XY):
batch_in, sequences = convert_XY_pack_pad(XY,how=self.how_missing)
pack = torch.nn.utils.rnn.pack_padded_sequence(batch_in, sequences, batch_first=True, enforce_sorted=False)  # pack the interpolated batch with its own lengths, as in get_mu()
_, hidden = self.rnn(pack)
else:
_, hidden = self.rnn(XY)
self.debug['rnn'] = hidden
hid = torch.squeeze(hidden)
hid = hid.to(self.device)
# The original sketch mixed an amortized encoder with a per-datapoint variational table,
#   mu = gamma * enc_h_mu(hid) + (1 - gamma) * mu_param[idx]
# where gamma = 1 means learning with the inference network and gamma = 0 means SVI, with
# mu_param a randomly initialized (N x latent-dim) tensor and idx the current datapoint indices.
# Neither mu_param, idx nor gamma is defined in this module, so only the inference-network
# path (gamma = 1) is used here.
mu = self.enc_h_mu(hid)
sig = torch.exp(self.enc_h_sig(hid))
q_dist = Independent(Normal(mu, sig), 1)
z = torch.squeeze(q_dist.rsample((1,)))
p_dist = Independent(Normal(torch.zeros_like(mu), torch.ones_like(sig)), 1)
kl = q_dist.log_prob(z)-p_dist.log_prob(z)
self.debug['hid'] = hid
self.debug['kl'] = kl
self.debug['mu'] = mu
self.debug['sig'] = sig
if mu_std:
return z, kl, mu
else:
return z, kl
def get_mu(self, X,Y):
N = X.shape[0]
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
XY = torch.cat([X,Y], axis=2)
if check_has_missing(XY):
batch_in, sequences = convert_XY_pack_pad(XY)
pack = torch.nn.utils.rnn.pack_padded_sequence(batch_in, sequences, batch_first=True, enforce_sorted=False)
_, hidden = self.rnn(pack)
else:
_, hidden = self.rnn(XY)
hid = torch.squeeze(hidden)
mu = self.enc_h_mu(hid)
return mu, torch.zeros(N)
def infer_functional_params(self, z):
if self.sigmoid:
return [self.dec_z_beta0(z), self.dec_z_beta1(z)]
else:
return [self.dec_z_a(z), self.dec_z_b(z), self.dec_z_c(z)]
def get_subtypes(self, X, Y, K=2):
"""
step 1: get z using mu not sampling
step 2: K-means cluster these z and save centers
step 3: return theta_k = g1(z_k) for K clusters
"""
z, _ = self.get_mu(X,Y)
if z.get_device() > -1:
z = z.cpu().detach().numpy()
else:
z = z.detach().numpy()
# for different cluster algs, plot labels and true subtypes
km = KMeans(n_clusters=K)
if np.isnan(z).any():
print('z has nan in it')
import pdb; pdb.set_trace()
km.fit(z)
self.subtypes_km = km
z_mus = km.cluster_centers_
N_dims = Y.shape[2]
if self.sigmoid:
cent_lst = np.zeros((K,N_dims,2))
else:
cent_lst = np.zeros((K,N_dims,3))
for k_ix in range(K):
z_mu = z_mus[k_ix]
z_mu = torch.tensor(z_mu[None,:]).to(self.device)
theta = self.infer_functional_params(z_mu)
if theta[0].get_device() > -1:
theta = [t.cpu().detach().numpy() for t in theta]
else:
theta = [t.detach().numpy() for t in theta]
for param_i, param_component in enumerate(theta):
for dim_i, dim_val in enumerate(param_component[0]):
cent_lst[k_ix,dim_i,param_i] = dim_val
return cent_lst
def get_param_subtypes(self, X, Y, K=2):
"""
step 1: get z using mu not sampling
step 2: K-means cluster these z and save centers
step 3: return theta_k = g1(z_k) for K clusters
"""
# The original lines called self.get_params(X, Y), left a stray `pdb` statement and then used
# an undefined `z`; cluster the latent means instead, mirroring get_subtypes().
z, _ = self.get_mu(X, Y)
z = z.detach().numpy()
# for different cluster algs, plot labels and true subtypes
km = KMeans(n_clusters=K)
km.fit(z)
self.subtypes_km = km
z_mus = km.cluster_centers_
cent_lst = list()
for k_ix in range(K):
z_mu = z_mus[k_ix]
z_mu = torch.tensor(z_mu[None,:]).to(self.device)
theta = self.infer_functional_params(z_mu)
theta = [t.detach().numpy() for t in theta]
cent_lst.append(theta)
return cent_lst
def get_params(self, X, Y):
"""
different from get_subtypes because now there is one theta per person
NOT num subtypes
"""
z, _ = self.get_mu(X,Y)
# z = z.detach().numpy()
if self.sigmoid:
return [self.dec_z_beta0(z), self.dec_z_beta1(z)]
else:
return [self.dec_z_a(z), self.dec_z_b(z), self.dec_z_c(z)]
def get_labels(self, data_dict):
X = torch.tensor(data_dict['obs_t_collect']).to(self.device)
Y = torch.tensor(data_dict['Y_collect']).to(self.device)
z, _ = self.get_mu(X,Y)
if z.get_device() > -1:
z = z.cpu().detach().numpy()
else:
z = z.detach().numpy()
labels = self.subtypes_km.predict(z)
return labels
def get_deltas(self, data_dict):
X = torch.tensor(data_dict['obs_t_collect']).to(self.device)
Y = torch.tensor(data_dict['Y_collect']).to(self.device)
M = torch.tensor(data_dict['mask_collect']).to(self.device)
z, kl = self.get_mu(X,Y)
theta = self.infer_functional_params(z)
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
M = torch.tensor(M).to(self.device)
best_delta = self.get_best_delta(X,Y,M,theta, kl)
return best_delta
def get_mse(self,X,Y,M,theta,best_delta):
yhat = self.predict_Y(X,Y,theta,best_delta)
squared = (Y - yhat)**2
nll = squared.sum(-1).sum(-1)
normsum = torch.sum(M)
return torch.sum(nll) / normsum
def score(self, train_data_dict, test_data_dict, K=2):
"""
step 1: get delta
step 2: get subtype assignments
step 3: get performance metrics
"""
for col in ['Y_collect', 'obs_t_collect', 's_collect', 't_collect']:
if col not in test_data_dict:
print('ERROR: %s not in test_data_dict' % col)
return
cent_lst = self.get_subtypes(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'], K=K)
test_X = torch.tensor(test_data_dict['obs_t_collect']).to(self.device)
test_Y = torch.tensor(test_data_dict['Y_collect']).to(self.device)
test_M = torch.tensor(test_data_dict['mask_collect']).to(self.device)
test_z, kl = self.get_mu(test_X,test_Y)
test_theta = self.infer_functional_params(test_z)
best_delta = self.get_best_delta(test_X,test_Y, test_M, test_theta, kl)
test_z = test_z.detach().numpy()
test_clusters = self.subtypes_km.predict(test_z)
true_clusters = [int(i) for i in np.squeeze(test_data_dict['s_collect'])]
test_M = torch.ones_like(test_X)
test_mse = self.get_mse(test_X, test_Y, test_M, test_theta, best_delta)
test_ari = adjusted_rand_score(test_clusters, true_clusters)
test_swaps = get_cluster_swap_metric(test_clusters, test_data_dict['t_collect'][:,0,0], best_delta.detach().numpy())
test_pear = get_cluster_pear_metric(test_clusters, test_data_dict['t_collect'][:,0,0], best_delta.detach().numpy())
results = {
'mse': test_mse,
'ari': test_ari,
'swaps': test_swaps,
'pear': test_pear,
'cent_lst': cent_lst
}
return results
def imp_sampling(self, X, Y, imp_samples=10, delta_gran = 20):
delta_gran = self.N_delta_bins
if type(X) == np.ndarray:
X = torch.tensor(X).to(self.device)
Y = torch.tensor(Y).to(self.device)
ll_estimates = torch.zeros((imp_samples,delta_gran,X.shape[0])).to(X.device)
ll_priors = torch.zeros((imp_samples,delta_gran,X.shape[0])).to(X.device)
ll_posteriors = torch.zeros((imp_samples,delta_gran,X.shape[0])).to(X.device)
# TODO: fix this
N_latent_dim = self.dz_features
mu_prior, std_prior = torch.zeros(N_latent_dim), torch.ones(N_latent_dim)
M = torch.ones_like(Y)
for sample in range(imp_samples):
z, kl, qz_mu = self.sample(X,Y,mu_std=True)
qz_sig = torch.ones(N_latent_dim)
theta = self.infer_functional_params(z)
ll_estimate_list, ll_posterior_list, ll_prior_list = [],[],[]
for dval in np.linspace(0,5,delta_gran):
best_delta = self.get_best_delta(X,Y,M,theta, kl)
dval = best_delta*0.+dval
#print (best_delta.shape, dval)
#best_delta = dval
yhat = self.predict_Y(X,Y,theta,best_delta)
nll = (yhat - Y) ** 2
ll_estimate_list.append(-1*nll.sum(-1).sum(-1))
ll_prior_list.append((-1*self.masked_gaussian_nll_3d(z, mu_prior, std_prior)).sum(-1))
ll_posterior_list.append((-1*self.masked_gaussian_nll_3d(z, qz_mu, qz_sig)).sum(-1))
ll_priors[sample] = torch.stack(ll_prior_list)
ll_estimates[sample] = torch.stack(ll_estimate_list)
ll_posteriors[sample] = torch.stack(ll_posterior_list)
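# Importance-sampling estimate: logsumexp over the (imp_samples x delta_gran) proposals of
# log p(Y|z, delta) + log p(z) - log q(z|X, Y), minus log(imp_samples * delta_gran); the squared
# error above acts as a surrogate for the Gaussian log-likelihood of Y. Note the leading -1:
# nll_estimate (and hence log_p below) is a per-patient negative log-likelihood estimate,
# averaged over patients, despite its name.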
nll_estimate = -1*(torch.logsumexp(ll_estimates.view(imp_samples*delta_gran,-1) + ll_priors.view(imp_samples*delta_gran,-1) - ll_posteriors.view(imp_samples*delta_gran,-1), dim=0) - np.log(imp_samples*delta_gran))
log_p = torch.mean(nll_estimate)
return log_p
def get_subtypes_datadict(self, data_dict, K=2):
"""
assumes you've already fit a model
"""
X = torch.tensor(data_dict['obs_t_collect']).to(self.device)
Y = torch.tensor(data_dict['Y_collect']).to(self.device)
M = torch.tensor(data_dict['mask_collect']).to(self.device)
z, _ = self.get_mu(X,Y)
if z.get_device() > -1:
z = z.cpu().detach().numpy().copy()
else:
z = z.detach().numpy().copy()
if self.subtypes_km is None:
# for different cluster algs, plot labels and true subtypes
km = KMeans(n_clusters=K)
km.fit(z)
self.subtypes_km = km
labels = self.subtypes_km.predict(z)
return labels
def get_hyperparameters(data_format_num):
if data_format_num < 3:
C, ds, dh, drnn, reg_type, lr = 0., 10, 20, 50, 'l1', 0.01
if data_format_num == 5 or data_format_num == 3:
C, ds, dh, drnn, reg_type, lr = 0.01, 20, 20, 100, 'l2', 0.01
# if data_format_num == 4:
# C, ds, dh, drnn, reg_type, lr = 0.0, 30, 10, 50, 'l1', 0.001
if data_format_num == 1:
C, ds, dh, drnn, reg_type, lr = 0.0, 20, 30, 150, 'l1', 0.001
# C, ds, dh, drnn, reg_type, lr = 0.0, 20, 20, 100, 'l1', 0.001
if data_format_num == 11:
C, ds, dh, drnn, reg_type, lr = 0.0, 20, 30, 150, 'l1', 0.001
elif data_format_num > 2:
C, ds, dh, drnn, reg_type, lr = 0., 20, 50, 100, 'l1', 0.01
return C, ds, dh, drnn, reg_type, lr
def main():
import argparse
import os
import sys
sys.path.append('../data')
sys.path.append('../plot')
from load import sigmoid, quadratic, chf, parkinsons, load_data_format
from data_utils import parse_data, change_missing
from plot_utils import plot_subtypes, plot_latent
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', action='store', type=int, default=800, help="Number of epochs")
parser.add_argument('--trials', action='store', type=int, default=1, help="Number of trials")
parser.add_argument('--model_name', action='store', type=str, default='SubLign', help="Model name for Latex table making")
# datasets
parser.add_argument('--data_num', action='store', type=int, help="Data Format Number")
parser.add_argument('--chf', action='store_true', help="Use CHF dataset")
parser.add_argument('--ppmi', action='store_true', help="Use PPMI dataset")
# delta setup
# parser.add_argument('--auto_delta', action='store_true', help="Learn delta dynamically for each patient")
parser.add_argument('--max_delta', action='store', type=float, help="Maximum possible delta")
parser.add_argument('--no_time', action='store_true', help="Learn time at all")
# debugging
parser.add_argument('--verbose', action='store_true', help="Plot everything")
parser.add_argument('--missing', action='store', type=float, default=0., help="What percent of data to make missing")
parser.add_argument('--plot_debug', action='store_true', help="Make animated gif about alignment / clusterings over epochs")
parser.add_argument('--epoch_debug', action='store_true', help="Save pickle about epoch differences over training")
parser.add_argument('--likelihood', action='store_true', help="Print likelihood")
parser.add_argument('--lr', action='store', type=float, help="Learning rate override")
parser.add_argument('--eval_freq', action='store', type=int, help="Make this larger than epochs for faster results", default=25)
# other experiments
args = parser.parse_args()
trial_results = np.zeros((args.trials, 4))
data_format_num = args.data_num
if args.max_delta is None:
auto_delta = True
else:
auto_delta = False
for trial_num in range(args.trials):
# datasets
if data_format_num is not None:
max_visits = 4
num_output_dims = 3 if data_format_num < 3 else 1
use_sigmoid = data_format_num < 3
if data_format_num > 10:
use_sigmoid = True
num_output_dims = 3
C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
if args.lr != None:
print('Learning rate: %.3f' % args.lr)
lr = args.lr
data = load_data_format(data_format_num, trial_num, cache=True)
shuffle = False
elif args.chf:
print('HERE2')
data = chf()
max_visits = 38
shuffle = True
elif args.ppmi:
data = parkinsons()
max_visits = 17
shuffle = True
# data = data[data['subtype'] == 1]
train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=max_visits, test_per=0.2, valid_per=0.2, shuffle=shuffle)
# train_data_loader, train_data_dict, test_data_loader, test_data_dict, p_ids, full_p_ids = parse_data(data.values, max_visits=max_visits, test_per=0.2, shuffle=shuffle)
# pickle.dump((train_data_loader, train_data_dict, test_data_loader, test_data_dict, p_ids, full_p_ids), open('../synthetic_runs/data.pk', 'wb'))
# import pickle
# train_data_loader, train_data_dict, test_data_loader, test_data_dict, p_ids, full_p_ids = pickle.load(open('../synthetic_runs/data.pk', 'rb'))
if args.missing > 0.:
train_data_loader, train_data_dict = change_missing(train_data_dict, args.missing)
data_loader, collect_dict, unique_pid = parse_data(data.values, max_visits=max_visits)
"""
best params found through hypertuning (cross_validation/hpsearch.py)
# sigmoid: C (0.01), dim_h (20), ds (10 mid), dim_rnn (50 mid), reg_type (l1), lr (0.1)
# quad: C (0.1), dim_h (50), ds (10), dim_rnn (100), reg_type (l1), lr (0.1)
ppmi: (0.0, 10, 10, 50, 'l1', 0.1)
"""
# dim_stochastic, dim_hidden, dim_rnn, C, dim_biomarkers=3, reg_type = 'l2',
if data_format_num is not None:
model = Sublign(d_s, d_h, d_rnn, C, num_output_dims, sigmoid=use_sigmoid, reg_type=reg_type, auto_delta=auto_delta, max_delta=args.max_delta, learn_time=(not args.no_time))
model.fit(train_data_loader, test_data_loader, args.epochs, lr, verbose=args.verbose, fname='runs/data%d_trial%d.pt' % (data_format_num, trial_num), eval_freq=args.eval_freq,epoch_debug=args.epoch_debug, plot_debug=args.plot_debug)
elif args.chf:
args.verbose = False
model = Sublign(10, 20, 50, 0.1, data.shape[1] - 4, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=args.max_delta, learn_time=(not args.no_time))
model.fit(data_loader, data_loader, args.epochs, 0.01, verbose=args.verbose)
subtypes = model.get_subtypes(collect_dict['obs_t_collect'], collect_dict['Y_collect'], K=3)
labels = model.get_labels(collect_dict)  # get_labels/get_deltas take the full data dict, as in the PPMI branch below
deltas = model.get_deltas(collect_dict)
zs = model.get_mu(collect_dict['obs_t_collect'], collect_dict['Y_collect'])
import pickle
pickle.dump((labels, deltas, subtypes, unique_pid, collect_dict, zs), open('../clinical_runs/chf_sublign_hera3.pk', 'wb'))
return
elif args.ppmi:
args.verbose = False
# (0.0, 10, 10, 50, 'l1', 0.1)
# C (0.1), dim_h (50), ds (10), dim_rnn (100), reg_type (l1), lr (0.1)
model = Sublign(10, 10, 20, 0., data.shape[1] - 4, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=args.max_delta, learn_time=(not args.no_time))
# model.fit(train_data_loader, test_data_loader, args.epochs, 0.1, verbose=args.verbose)
# subtypes = model.get_subtypes(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'], K=2)
# labels = model.get_labels(train_data_dict)
# deltas = model.get_deltas(train_data_dict)
model.fit(data_loader, data_loader, args.epochs, 0.1, verbose=args.verbose)
subtypes = model.get_subtypes(collect_dict['obs_t_collect'], collect_dict['Y_collect'], K=3)
labels = model.get_labels(collect_dict)
deltas = model.get_deltas(collect_dict)
# gt_labels = [int(i) for i in test_data_dict['s_collect'].squeeze()]
# print('ARI: %.3f' % adjusted_rand_score(gt_labels, labels))
import pickle
pickle.dump((labels, deltas, subtypes, unique_pid, collect_dict), open('../clinical_runs/ppmi_sublign_PDonly.pk', 'wb'))
return
subtypes = model.get_subtypes(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'], K=2)
train_results = model.score(train_data_dict, train_data_dict)
test_results = model.score(train_data_dict, test_data_dict)
Y = test_data_dict['Y_collect']
X = test_data_dict['obs_t_collect']
M = test_data_dict['mask_collect']
S = None
T = None
if args.likelihood:
log_p = model.imp_sampling(X,Y,imp_samples=50)
print('Test Likelihood: %.3f' % log_p)
(nelbo, nll, kl), _ = model.forward(Y, S, X, M, T, anneal=1.)
# def forward(self, Y, S, X, M, T, anneal = 1.):
nelbo, nll, kl = nelbo.mean().detach().numpy(), nll.mean().detach().numpy(), kl.mean().detach().numpy()
if args.verbose:
plot_subtypes(subtypes, use_sigmoid, train_data_dict)  # args has no `sigmoid` attribute; use the flag derived from data_format_num
plot_latent(model, test_data_dict)
trial_results[trial_num] = [test_results['mse'],test_results['ari'], test_results['swaps'], test_results['pear']]
if args.trials == 1:
print('Train: %.3f, %.3f, %.3f, %.3f' % (train_results['mse'], train_results['ari'], train_results['swaps'], train_results['pear']))
print('Test : %.3f, %.3f, %.3f, %.3f' % (test_results['mse'], test_results['ari'], test_results['swaps'], test_results['pear']))
print('NELBO: %.3f, NLL: %.3f, KL: %.3f' % (nelbo, nll, kl))
else:
line_str = list()
for i,j in zip(trial_results.mean(axis=0), trial_results.std(axis=0)):
line_str.append('%.3f $\\pm$ %.3f' % (i,j))
print(' & '.join([args.model_name] + line_str) + '\\\\')
trials_fname = 'runs/%s.txt' % args.model_name
if not os.path.exists(trials_fname):
f = open(trials_fname, 'w')
else:
f = open(trials_fname, 'a')
# f.write(' & '.join([args.model_name] + line_str) + '\\\\' + '\n')
# f.close()
if __name__=='__main__':
main() | [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.ones",
"torch.squeeze",
"torch.cuda.is_available",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.load",
"torch.nn.RNN",
"torch.sum",
"torch.manual_seed",
"torch.abs",
"torch.tensor",
"torch.zeros_like",
"torch.zeros",
"torch.device",
"torch.linspace",
"torch.log",
"torch.nn.Sigmoid",
"torch.no_grad",
"torch.ones_like",
"torch.mean"
] | 1.3.0 | irenetrampoline/clustering-interval-censored | f6ab06a6cf3098ffe006d1b95d1b4f1d158b0bc4 |
1.2 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from blink.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
def load_biencoder(params):
# Init model
biencoder = BiEncoderRanker(params)
return biencoder
class BiEncoderModule(torch.nn.Module):
def __init__(self, params):
super(BiEncoderModule, self).__init__()
ctxt_bert = BertModel.from_pretrained(params["bert_model"])
cand_bert = BertModel.from_pretrained(params['bert_model'])
self.context_encoder = BertEncoder(
ctxt_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.cand_encoder = BertEncoder(
cand_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.config = ctxt_bert.config
def forward(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
token_idx_cands,
segment_idx_cands,
mask_cands,
):
embedding_ctxt = None
if token_idx_ctxt is not None:
embedding_ctxt = self.context_encoder(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt
)
embedding_cands = None
if token_idx_cands is not None:
embedding_cands = self.cand_encoder(
token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_ctxt, embedding_cands
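# The two towers share no weights: passing None for the context (or candidate) inputs skips that
# encoder, which is how encode_context / encode_candidate below obtain one-sided embeddings.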
class BiEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(BiEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# init tokenizer
self.NULL_IDX = 0
self.START_TOKEN = "[CLS]"
self.END_TOKEN = "[SEP]"
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
# init model
self.build_model()
model_path = params.get("path_to_model", None)
if model_path is not None:
self.load_model(model_path)
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False):
if cpu:
state_dict = torch.load(fname, map_location=lambda storage, location: "cpu")
else:
state_dict = torch.load(fname)
self.model.load_state_dict(state_dict)
def build_model(self):
self.model = BiEncoderModule(self.params)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
# WEIGHTS_NAME / CONFIG_NAME are not imported in this module; fall back to the standard file names.
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
output_config_file = os.path.join(output_dir, "config.json")
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def get_optimizer(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def encode_context(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
embedding_context, _ = self.model(
token_idx_cands, segment_idx_cands, mask_cands, None, None, None
)
return embedding_context.cpu().detach()
def encode_candidate(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None, token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_cands.cpu().detach()
# TODO: why do we need cpu here?
# return embedding_cands
# Score candidates given context input and label input
# If cand_encs is provided (pre-computed), cand_vecs is ignored
def score_candidate(
self,
text_vecs,
cand_vecs,
random_negs=True,
cand_encs=None, # pre-computed candidate encoding.
):
# Encode contexts first
token_idx_ctxt, segment_idx_ctxt, mask_ctxt = to_bert_input(
text_vecs, self.NULL_IDX
)
embedding_ctxt, _ = self.model(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt, None, None, None
)
# Candidate encoding is given, do not need to re-compute
# Directly return the score of context encoding and candidate encoding
if cand_encs is not None:
return embedding_ctxt.mm(cand_encs.t())
# Train time. We compare with all elements of the batch
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cand_vecs, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None, token_idx_cands, segment_idx_cands, mask_cands
)
if random_negs:
# train on random negatives
return embedding_ctxt.mm(embedding_cands.t())
else:
# train on hard negatives
embedding_ctxt = embedding_ctxt.unsqueeze(1) # batchsize x 1 x embed_size
embedding_cands = embedding_cands.unsqueeze(2) # batchsize x embed_size x 1
scores = torch.bmm(embedding_ctxt, embedding_cands) # batchsize x 1 x 1
scores = torch.squeeze(scores)
return scores
# label_input -- negatives provided
# If label_input is None, train on in-batch negatives
def forward(self, context_input, cand_input, label_input=None):
flag = label_input is None
scores = self.score_candidate(context_input, cand_input, flag)
bs = scores.size(0)
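# With in-batch negatives, scores is a (bs x bs) similarity matrix whose diagonal holds the
# positive pairs, so the cross-entropy target below is simply 0..bs-1.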
if label_input is None:
target = torch.LongTensor(torch.arange(bs))
target = target.to(self.device)
loss = F.cross_entropy(scores, target, reduction="mean")
else:
loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
# TODO: add parameters?
loss = loss_fct(scores, label_input)
return loss, scores
def to_bert_input(token_idx, null_idx):
""" token_idx is a 2D tensor int.
return token_idx, segment_idx and mask
"""
segment_idx = token_idx * 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
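# Illustrative usage (not part of the original BLINK code): with the null/pad index 0,
# segment ids are zeroed and padding positions are masked out, e.g.
# >>> tok, seg, mask = to_bert_input(torch.tensor([[101, 2054, 102, 0, 0]]), null_idx=0)
# >>> mask.tolist()
# [[True, True, True, False, False]]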
| [
"torch.arange",
"torch.bmm",
"torch.cuda.device_count",
"torch.squeeze",
"torch.nn.functional.cross_entropy",
"torch.cuda.is_available",
"torch.load",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.DataParallel"
] | 1.2.0 | pbmstrk/BLINK | 7380cf7d63ff76563f017adc39fa5729ba36742a |
1.7 | import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from unbalancedot.utils import euclidean_cost
from unbalancedot.sinkhorn import BatchVanillaSinkhorn
from unbalancedot.entropy import (
KullbackLeibler,
Balanced,
TotalVariation,
Range,
PowerEntropy,
)
path = os.getcwd() + "/output"
if not os.path.isdir(path):
os.mkdir(path)
def template_measure(nsample):
x1 = np.linspace(0.0, 0.2, nsample)
a1 = np.ones(nsample)
a1[0], a1[-1] = 0.0, 0.0
a1 = a1 / np.sum(a1)
x2 = np.linspace(0.9, 1.0, nsample)
a2 = 0.05 - np.abs(x2 - 0.95)
a2[0], a2[-1] = 0.0, 0.0
a2 = a2 / np.sum(a2)
x = np.concatenate((x1, x2))
a = np.concatenate((0.65 * a1, 0.35 * a2))
a = a / np.sum(a)
y1 = np.linspace(0.2, 0.4, nsample)
b1 = np.linspace(0.0, 1.0, nsample)
b1[0], b1[-1] = 0.0, 0.0
b1 = b1 / np.sum(b1)
y2 = np.linspace(0.5, 0.9, nsample)
b2 = np.sqrt(np.abs(1 - ((y2 - 0.7) / 0.2) ** 2))
b2[0], b2[-1] = 0.0, 0.0
b2 = b2 / np.sum(b2)
y = np.concatenate((y1, y2))
b = np.concatenate((0.45 * b1, 0.55 * b2))
b = b / np.sum(b)
return a, x, b, y
# Init of measures and solvers
a, x, b, y = template_measure(1000)
A, X, B, Y = (
torch.from_numpy(a)[None, :],
torch.from_numpy(x)[None, :, None],
torch.from_numpy(b)[None, :],
torch.from_numpy(y)[None, :, None],
)
p, blur, reach = 2, 1e-3, 0.1
cost = euclidean_cost(p)
solver = BatchVanillaSinkhorn(
nits=10000, nits_grad=1, tol=1e-5, assume_convergence=True
)
list_entropy = [
Balanced(blur),
KullbackLeibler(blur, reach),
TotalVariation(blur, reach),
Range(blur, 0.7, 1.3),
PowerEntropy(blur, reach, 0.0),
]
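# Each entropy defines a different marginal penalty: Balanced enforces the marginals exactly
# (classical OT), while KullbackLeibler, TotalVariation, Range and PowerEntropy relax them,
# so the transported marginals plotted below are allowed to create or destroy mass.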
# Init of plot
blue = (0.55, 0.55, 0.95)
red = (0.95, 0.55, 0.55)
fig = plt.figure(figsize=(8, 4))
plt.fill_between(x, 0, a, color="b")
plt.fill_between(y, 0, b, color="r")
plt.tight_layout()
plt.savefig(path + "/comparison_entropy_reference.eps", format="eps")
# Plotting each entropy separately
for entropy in list_entropy:
fig = plt.figure(figsize=(8, 4))
f, g = solver.sinkhorn_asym(A, X, B, Y, cost, entropy)
C = cost(X, Y)
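# Recover the primal transport plan from the dual potentials:
# pi_ij = exp((f_i + g_j - C_ij) / blur) * a_i * b_j; its marginals pi_1 and pi_2 are then
# plotted against the input measures a and b.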
pi = (
((f[:, :, None] + g[:, None, :] - C) / blur).exp()
* A[:, :, None]
* B[:, None, :]
)
pi_1, pi_2 = pi.sum(dim=2), pi.sum(dim=1)
pi_1, pi_2 = pi_1[0, :].data.numpy(), pi_2[0, :].data.numpy()
plt.plot(x, a, color="b", linestyle="--")
plt.plot(y, b, color="r", linestyle="--")
plt.fill_between(x, 0, pi_1, color=red)
plt.fill_between(y, 0, pi_2, color=blue)
plt.tight_layout()
plt.savefig(path + f"/comparison_entropy_{entropy.__name__}.eps",
format="eps")
| [
"torch.from_numpy"
] | 1.7 | thibsej/unbalanced-ot-functionals | bfd098e98ed10b90a36e0dbe7b099c1c31770931 |
1.6 | """Finetuning the library models for sequence classification on GLUE."""
import dataclasses
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Callable, Dict, Optional
import torch
import numpy as np
import transformers
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction
from transformers import GlueDataTrainingArguments as DataTrainingArguments
from transformers import HfArgumentParser, TrainingArguments, set_seed
from src.dataset import FewShotDataset
from src.models import BertForPromptFinetuning, RobertaForPromptFinetuning, resize_token_type_embeddings
from src.trainer import Trainer
from src.processors import processors_mapping, num_labels_mapping, output_modes_mapping, compute_metrics_mapping, bound_mapping
from filelock import FileLock
from datetime import datetime
from copy import deepcopy
from tqdm import tqdm
import json
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
# Few-shot type
# - finetune: standard fine-tuning
# - prompt: prompt-based fine-tuning
# - prompt-demo: prompt-based fine-tuning with demonstrations
few_shot_type: str = field(
default='prompt-demo',
metadata={"help": "Few-shot learning model type. Choice: finetune, prompt, prompt-demo"}
)
# Only for BERT-type model
random_segment: bool = field(
default=False,
metadata={"help": "Whether to reinitialize the token type embeddings (only for BERT)."}
)
@dataclass
class DynamicDataTrainingArguments(DataTrainingArguments):
"""
Arguments for dynamic training.
"""
num_k: Optional[int] = field(
default=16,
metadata={"help": "Number of training instances per class"}
)
num_sample: Optional[int] = field(
default=16,
metadata={"help": "Number of samples (for inference) in fine-tuning with demonstrations"}
)
num_demo: Optional[int] = field(
default=1,
metadata={"help": "Number of demonstrations from each class"}
)
auto_demo: bool = field(
default=True,
metadata={"help": "Automatically generate template for using demonstrations"}
)
# For prompting
template: str = field(
default=None,
metadata={"help": "Template"}
)
mapping: str = field(
default=None,
metadata={"help": "Label word mapping"}
)
template_path: str = field(
default=None,
metadata={"help": "Path to a txt file that stores all the templates, one per line. Do not set this when prompt_path is used"}
)
mapping_path: str = field(
default=None,
metadata={"help": "Path to a txt file that stores all the label word mappings, one per line. Do not set this when prompt_path is used"}
)
prompt_path: str = field(
default=None,
metadata={"help": "Path to a txt file that stores all the prompts (templates and mappings), one per line"}
)
template_id: int = field(
default=None,
metadata={"help": "Template id if using template_path"}
)
mapping_id: int = field(
default=None,
metadata={"help": "Mapping id if using template_path"}
)
prompt_id: int = field(
default=None,
metadata={"help": "Prompt id if using prompt_path"}
)
top_n_template: int = field(
default=None,
metadata={"help": "Use top-n template in the template path"}
)
# For logging
tag: str = field(
default='',
metadata={"help": "Set the tag and find the result easier in the log."}
)
# For filtering when using demonstrations
demo_filter: bool = field(
default=False,
metadata={"help": "Only use similar instances in demonstrations"}
)
demo_filter_rate: float = field(
default=0.5,
metadata={"help": "Only use top-x\% similar instances in demonstrations"}
)
demo_filter_model: str = field(
default=None,
metadata={"help": "Model name for demonstration filter embeddings. Will load embeddings based on the model name."}
)
debug_mode: bool = field(
default=False,
metadata={"help": "Debug mode"}
)
# For max length
double_demo: bool = field(
default=False,
metadata={"help": "Use double length for using demonstrations"}
)
first_sent_limit: int = field(
default=None,
metadata={"help": "Limit the length of the first sentence (i.e., sent_0)"}
)
other_sent_limit: int = field(
default=None,
metadata={"help": "Limit the length of sentences other than the first sentence"}
)
use_full_length: bool = field(
default=None,
metadata={"help": "Use the full length (512)"}
)
# GPT-3's in-context learning
gpt3_in_context_head: bool = field(
default=False,
metadata={"help": "GPT-3's in-context learning (context at the beginning)"}
)
gpt3_in_context_tail: bool = field(
default=False,
metadata={"help": "GPT-3's in-context learning (context at the end)"}
)
gpt3_in_context_num: int = field(
default=32,
metadata={"help": "Number of context examples"}
)
truncate_head: bool = field(
default=False,
metadata={"help": "When exceeding the maximum length, truncate the head instead of the tail."}
)
# Do not set up the following fields. They are set up automatically.
prompt: bool = field(
default=False,
metadata={"help": "Whether to use prompt-based fine-tuning"}
)
template_list: list = field(
default=None,
metadata={"help": "(DO NOT List of templates (only initialized after the program starts."}
)
@dataclass
class DynamicTrainingArguments(TrainingArguments):
# For ensemble
array_id: int = field(
default=-1,
metadata={"help": "Array ID (contains seed and hyper-paramter search) to idenfity the model"}
)
model_id: int = field(
default=-1,
metadata={"help": "Model ID (contains template information) to identify the model"}
)
save_logit: bool = field(
default=False,
metadata={"help": "Save test file logit with name $TASK-$MODEL_ID-$ARRAY_ID.npy"}
)
save_logit_dir: str = field(
default=None,
metadata={"help": "Where to save the prediction result"}
)
# Regularization
fix_layers: int = field(
default=0,
metadata={"help": "Fix bottom-n layers when optimizing"}
)
# Training
save_at_last: bool = field(
default=False,
metadata={"help": "Instead of saving the best (dev performance) checkpoint, save the last checkpoint"}
)
# Turn off train/test
no_train: bool = field(
default=False,
metadata={"help": "No training"}
)
no_predict: bool = field(
default=False,
metadata={"help": "No test"}
)
alpha: float = field(
default=0,
metadata={"help": "Number of context examples"}
)
def main():
parser = HfArgumentParser((ModelArguments, DynamicDataTrainingArguments, DynamicTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if 'prompt' in model_args.few_shot_type:
data_args.prompt = True
if training_args.no_train:
training_args.do_train = False
if training_args.no_predict:
training_args.do_predict = False
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
# Load prompt/template/mapping file
if data_args.prompt:
if data_args.prompt_path is not None:
assert data_args.prompt_id is not None
prompt_list = []
with open(data_args.prompt_path) as f:
for line in f:
line = line.strip()
template, mapping = line.split('\t')
prompt_list.append((template, mapping))
data_args.template, data_args.mapping = prompt_list[data_args.prompt_id]
logger.info("Specify load the %d-th prompt: %s | %s" % (data_args.prompt_id, data_args.template, data_args.mapping))
else:
if data_args.template_path is not None:
with open(data_args.template_path) as f:
data_args.template_list = []
for line in f:
line = line.strip()
if len(line) > 0:
data_args.template_list.append(line)
# Load top-n templates
if data_args.top_n_template is not None:
data_args.template_list = data_args.template_list[:data_args.top_n_template]
logger.info("Load top-%d templates from %s" % (len(data_args.template_list), data_args.template_path))
# ... or load i-th template
if data_args.template_id is not None:
data_args.template = data_args.template_list[data_args.template_id]
data_args.template_list = None
logger.info("Specify load the %d-th template: %s" % (data_args.template_id, data_args.template))
if data_args.mapping_path is not None:
assert data_args.mapping_id is not None # Only can use one label word mapping
with open(data_args.mapping_path) as f:
mapping_list = []
for line in f:
line = line.strip()
mapping_list.append(line)
data_args.mapping = mapping_list[data_args.mapping_id]
logger.info("Specify using the %d-th mapping: %s" % (data_args.mapping_id, data_args.mapping))
# Check save path
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(f"Output directory ({training_args.output_dir}) already exists.")
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
try:
num_labels = num_labels_mapping[data_args.task_name]
output_mode = output_modes_mapping[data_args.task_name]
logger.info("Task name: {}, number of labels: {}, output mode: {}".format(data_args.task_name, num_labels, output_mode))
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name))
# Automatically generate template for using demonstrations
if data_args.auto_demo and model_args.few_shot_type == 'prompt-demo':
# GPT-3's in-context learning
if data_args.gpt3_in_context_head or data_args.gpt3_in_context_tail:
logger.info("Automatically convert the template to GPT-3's in-context learning.")
assert data_args.template_list is None
old_template = data_args.template
new_template = old_template + ''
old_template = old_template.replace('*cls*', '')
# Single sentence or sentence pair?
sent_num = 1
if "_1" in old_template:
sent_num = 2
for instance_id in range(data_args.gpt3_in_context_num):
sub_template = old_template + ''
# Replace sent_id
for sent_id in range(sent_num):
sub_template = sub_template.replace("_{}*".format(sent_id), "_{}*".format(sent_num + sent_num * instance_id + sent_id))
# Replace mask
sub_template = sub_template.replace("*mask*", "*labelx_{}*".format(instance_id))
if data_args.gpt3_in_context_tail:
new_template = new_template + sub_template # Put context at the end
else:
new_template = sub_template + new_template # Put context at the beginning
logger.info("| {} => {}".format(data_args.template, new_template))
data_args.template = new_template
else:
logger.info("Automatically convert the template to using demonstrations.")
if data_args.template_list is not None:
for i in range(len(data_args.template_list)):
old_template = data_args.template_list[i]
new_template = old_template + ''
old_template = old_template.replace('*cls*', '')
# Single sentence or sentence pair?
sent_num = 1
if "_1" in old_template:
sent_num = 2
for label_id in range(num_labels):
sub_template = old_template + ''
# Replace sent id
for sent_id in range(sent_num):
sub_template = sub_template.replace("_{}*".format(sent_id), "_{}*".format(sent_num + sent_num * label_id + sent_id))
# Replace mask
sub_template = sub_template.replace("*mask*", "*label_{}*".format(label_id))
new_template = new_template + sub_template
logger.info("| {} => {}".format(data_args.template_list[i], new_template))
data_args.template_list[i] = new_template
else:
old_template = data_args.template
new_template = old_template + ''
old_template = old_template.replace('*cls*', '')
# Single sentence or sentence pair?
sent_num = 1
if "_1" in old_template:
sent_num = 2
for label_id in range(num_labels):
sub_template = old_template + ''
# Replace sent id
for sent_id in range(sent_num):
sub_template = sub_template.replace("_{}".format(sent_id), "_{}".format(sent_num + sent_num * label_id + sent_id))
# Replace mask
sub_template = sub_template.replace("*mask*", "*label_{}*".format(label_id))
new_template = new_template + sub_template
logger.info("| {} => {}".format(data_args.template, new_template))
data_args.template = new_template
# Create config
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
if 'prompt' in model_args.few_shot_type:
if config.model_type == 'roberta':
model_fn = RobertaForPromptFinetuning
elif config.model_type == 'bert':
model_fn = BertForPromptFinetuning
else:
raise NotImplementedError
elif model_args.few_shot_type == 'finetune':
model_fn = AutoModelForSequenceClassification
else:
raise NotImplementedError
special_tokens = []
# Create tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
additional_special_tokens=special_tokens,
cache_dir=model_args.cache_dir,
)
# Get our special datasets.
train_dataset = (
FewShotDataset(data_args, tokenizer=tokenizer, mode="train", use_demo=("demo" in model_args.few_shot_type))
)
eval_dataset = (
FewShotDataset(data_args, tokenizer=tokenizer, mode="dev", use_demo=("demo" in model_args.few_shot_type))
if training_args.do_eval
else None
)
test_dataset = (
FewShotDataset(data_args, tokenizer=tokenizer, mode="test", use_demo=("demo" in model_args.few_shot_type))
if training_args.do_predict
else None
)
set_seed(training_args.seed)
model = model_fn.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# For BERT, increase the size of the segment (token type) embeddings
if config.model_type == 'bert':
model.resize_token_embeddings(len(tokenizer))
resize_token_type_embeddings(model, new_num_types=10, random_segment=model_args.random_segment)
# Pass dataset and argument information to the model
if data_args.prompt:
model.label_word_list = torch.tensor(train_dataset.label_word_list).long().cuda()
if output_modes_mapping[data_args.task_name] == 'regression':
# lower / upper bounds
model.lb, model.ub = bound_mapping[data_args.task_name]
model.model_args = model_args
model.data_args = data_args
model.tokenizer = tokenizer
# Build metric
def build_compute_metrics_fn(task_name: str) -> Callable[[EvalPrediction], Dict]:
def compute_metrics_fn(p: EvalPrediction):
# Note: the eval dataloader is sequential, so the examples are in order.
# We average the logits over each sample for using demonstrations.
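            # (Illustrative shapes, assumed: with num_sample=16 samplings of demonstrations and,
            #  say, 200 eval examples over 3 labels, p.predictions is (16*200, 3); reshaping to
            #  (16, 200, 3) and averaging over axis 0 leaves one logit row per example.)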
predictions = p.predictions
num_logits = predictions.shape[-1]
logits = predictions.reshape([eval_dataset.num_sample, -1, num_logits])
logits = logits.mean(axis=0)
if num_logits == 1:
preds = np.squeeze(logits)
else:
preds = np.argmax(logits, axis=1)
# Just for sanity, assert label ids are the same.
label_ids = p.label_ids.reshape([eval_dataset.num_sample, -1])
label_ids_avg = label_ids.mean(axis=0)
label_ids_avg = label_ids_avg.astype(p.label_ids.dtype)
assert (label_ids_avg - label_ids[0]).mean() < 1e-2
label_ids = label_ids[0]
return compute_metrics_mapping[task_name](task_name, preds, label_ids)
return compute_metrics_fn
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=build_compute_metrics_fn(data_args.task_name)
)
# Training
if training_args.do_train:
trainer.train(model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None, alpha = training_args.alpha)
        # Early stopping is used, so do not save the model at the end (unless save_at_last is specified)
if training_args.save_at_last:
trainer.save_model(training_args.output_dir)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
torch.save(model_args, os.path.join(training_args.output_dir, "model_args.bin"))
torch.save(data_args, os.path.join(training_args.output_dir, "data_args.bin"))
# Reload the best checkpoint (for eval)
model = model_fn.from_pretrained(training_args.output_dir)
model = model.to(training_args.device)
trainer.model = model
if data_args.prompt:
model.label_word_list = torch.tensor(train_dataset.label_word_list).long().cuda()
if output_modes_mapping[data_args.task_name] == 'regression':
# lower / upper bounds
model.lb, model.ub = bound_mapping[data_args.task_name]
model.model_args = model_args
model.data_args = data_args
model.tokenizer = tokenizer
# Evaluation
final_result = {
'time': str(datetime.today()),
}
eval_results = {}
if training_args.do_eval:
logger.info("*** Validate ***")
eval_datasets = [eval_dataset]
for eval_dataset in eval_datasets:
trainer.compute_metrics = build_compute_metrics_fn(eval_dataset.args.task_name)
output = trainer.evaluate(eval_dataset=eval_dataset)
eval_result = output.metrics
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(eval_dataset.args.task_name))
for key, value in eval_result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
final_result[eval_dataset.args.task_name + '_dev_' + key] = value
eval_results.update(eval_result)
test_results = {}
if training_args.do_predict:
logging.info("*** Test ***")
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
test_datasets.append(
FewShotDataset(mnli_mm_data_args, tokenizer=tokenizer, mode="test", use_demo=('demo' in model_args.few_shot_type))
)
for test_dataset in test_datasets:
trainer.compute_metrics = build_compute_metrics_fn(test_dataset.args.task_name)
output = trainer.evaluate(eval_dataset=test_dataset)
test_result = output.metrics
output_test_file = os.path.join(
training_args.output_dir, f"test_results_{test_dataset.args.task_name}.txt"
)
if trainer.is_world_master():
with open(output_test_file, "w") as writer:
logger.info("***** Test results {} *****".format(test_dataset.args.task_name))
for key, value in test_result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
final_result[test_dataset.args.task_name + '_test_' + key] = value
if training_args.save_logit:
predictions = output.predictions
num_logits = predictions.shape[-1]
logits = predictions.reshape([test_dataset.num_sample, -1, num_logits]).mean(axis=0)
np.save(os.path.join(training_args.save_logit_dir, "{}-{}-{}.npy".format(test_dataset.task_name, training_args.model_id, training_args.array_id)), logits)
test_results.update(test_result)
with FileLock('log.lock'):
with open('log', 'a') as f:
final_result.update(vars(model_args))
final_result.update(vars(training_args))
final_result.update(vars(data_args))
if 'evaluation_strategy' in final_result:
final_result.pop('evaluation_strategy')
f.write(str(final_result) + '\n')
return eval_results
if __name__ == "__main__":
main()
| [
"torch.tensor"
] | 1.6.0 | leeyy2020/LM-BFF | 2c80b2ea3987c403c4d4abc6e202d280ea846210 |
1.5 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : train-duration.py
@Date : 2021/01/05, Tue
@Author : Atomicoo
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2020-2021, ShiGroup-NLP-XMU
@Desc    :   Synthesize sentences into speech.
'''
__author__ = 'Atomicoo'
import argparse
import os
import os.path as osp
import time
from scipy.io.wavfile import write
import torch
from utils.hparams import HParam
from utils.transform import StandardNorm
from helpers.synthesizer import Synthesizer
import vocoder.models
from vocoder.layers import PQMF
from utils.audio import dynamic_range_decompression
from datasets.dataset import TextProcessor
from models import ParallelText2Mel
from utils.utils import select_device, get_last_chkpt_path
try:
from helpers.manager import GPUManager
except ImportError as err:
print(err); gm = None
else:
gm = GPUManager()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--batch_size", default=8, type=int, help="Batch size")
parser.add_argument("--checkpoint", default=None, type=str, help="Checkpoint file path")
parser.add_argument("--melgan_checkpoint", default=None, type=str, help="Checkpoint file path of melgan")
parser.add_argument("--input_texts", default=None, type=str, help="Input text file path")
parser.add_argument("--outputs_dir", default=None, type=str, help="Output wave file directory")
parser.add_argument("--device", default=None, help="cuda device or cpu")
parser.add_argument("--name", default="parallel", type=str, help="Append to logdir name")
parser.add_argument("--config", default=None, type=str, help="Config file path")
args = parser.parse_args()
if torch.cuda.is_available():
index = args.device if args.device else str(0 if gm is None else gm.auto_choice())
else:
index = 'cpu'
device = select_device(index)
hparams = HParam(args.config) \
if args.config else HParam(osp.join(osp.abspath(os.getcwd()), "config", "default.yaml"))
    logdir = osp.join(hparams.trainer.logdir, "%s-%s" % (hparams.data.dataset, args.name))
checkpoint = args.checkpoint or get_last_chkpt_path(logdir)
normalizer = StandardNorm(hparams.audio.spec_mean, hparams.audio.spec_std)
processor = TextProcessor(hparams.text)
text2mel = ParallelText2Mel(hparams.parallel)
text2mel.eval()
synthesizer = Synthesizer(
model=text2mel,
checkpoint=checkpoint,
processor=processor,
normalizer=normalizer,
device=device
)
print('Synthesizing...')
since = time.time()
text_file = args.input_texts or hparams.synthesizer.inputs_file_path
with open(text_file, 'r', encoding='utf-8') as fr:
texts = fr.read().strip().split('\n')
melspecs = synthesizer.inference(texts)
print(f"Inference {len(texts)} spectrograms, total elapsed {time.time()-since:.3f}s. Done.")
vocoder_checkpoint = args.melgan_checkpoint or \
osp.join(hparams.trainer.logdir, f"{hparams.data.dataset}-melgan", hparams.melgan.checkpoint)
ckpt = torch.load(vocoder_checkpoint, map_location=device)
# Ref: https://github.com/kan-bayashi/ParallelWaveGAN/issues/169
decompressed = dynamic_range_decompression(melspecs)
decompressed_log10 = torch.log10(decompressed)
mu = torch.tensor(ckpt['stats']['mu']).to(device).unsqueeze(1)
var = torch.tensor(ckpt['stats']['var']).to(device).unsqueeze(1)
sigma = torch.sqrt(var)
melspecs = (decompressed_log10 - mu) / sigma
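    # Note (inferred from the code above and the linked issue): the acoustic model emits
    # dynamic-range-compressed (log) mel spectrograms, while this vocoder checkpoint carries
    # per-bin mean/var statistics of log10 mels, so we decompress, take log10 and standardize
    # with the checkpoint's mu/sigma before feeding the vocoder.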
Generator = getattr(vocoder.models, ckpt['gtype'])
vocoder = Generator(**ckpt['config']).to(device)
vocoder.remove_weight_norm()
if ckpt['config']['out_channels'] > 1:
vocoder.pqmf = PQMF().to(device)
vocoder.load_state_dict(ckpt['model'])
if ckpt['config']['out_channels'] > 1:
waves = vocoder.pqmf.synthesis(vocoder(melspecs)).squeeze(1)
else:
waves = vocoder(melspecs).squeeze(1)
print(f"Generate {len(texts)} audios, total elapsed {time.time()-since:.3f}s. Done.")
print('Saving audio...')
outputs_dir = args.outputs_dir or hparams.synthesizer.outputs_dir
os.makedirs(outputs_dir, exist_ok=True)
for i, wav in enumerate(waves, start=1):
wav = wav.cpu().detach().numpy()
filename = osp.join(outputs_dir, f"{time.strftime('%Y-%m-%d')}_{i:03d}.wav")
write(filename, hparams.audio.sampling_rate, wav)
print(f"Audios saved to {outputs_dir}. Done.")
print(f'Done. ({time.time()-since:.3f}s)')
| [
"torch.sqrt",
"torch.log10",
"torch.cuda.is_available",
"torch.tensor",
"torch.load"
] | 1.5.0 | f2re/FCH-TTS | 54ddee710694929d978943356fe913609ed0aab5 |
0.4 | from __future__ import absolute_import
import torch as tr
from base.dataloader import BaseDataLoader
from torchvision.datasets import MNIST, FashionMNIST
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import numpy as np
class MnistDataLoader(BaseDataLoader):
def __init__(self, img_size = 2, train_batch_size=64, test_batch_size=64, get_tensor=True, supervised=True, classes = None):
super(MnistDataLoader, self).__init__(img_size, None, train_batch_size, test_batch_size, get_tensor,
supervised, classes)
def get_data(self):
train_dataset = MNIST('../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(self.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
test_dataset = MNIST('../data/mnist', train=False, download=True,
transform=transforms.Compose([
transforms.Resize(self.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
train_data = np.array([x[0].numpy() for x in train_dataset])
train_labels = np.array([x[1].numpy() for x in train_dataset])
test_data = np.array([x[0].numpy() for x in test_dataset])
test_labels = np.array([x[1].numpy() for x in test_dataset])
if self.classes:
train_data = train_data[np.where(np.isin(train_labels, self.classes))]
train_labels = train_labels[np.where(np.isin(train_labels, self.classes))]
test_data = test_data[np.where(np.isin(test_labels, self.classes))]
test_labels = test_labels[np.where(np.isin(test_labels, self.classes))]
return train_data, test_data, train_labels, test_labels
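# Example usage (illustrative only; relies on the BaseDataLoader interface assumed above):
#   loader = MnistDataLoader(img_size=32, train_batch_size=64, classes=[0, 1])
#   train_data, test_data, train_labels, test_labels = loader.get_data()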
class FashionMnistDataLoader(BaseDataLoader):
def __init__(self, train_batch_size=32, test_batch_size=32, get_tensor=True):
super(FashionMnistDataLoader, self).__init__((28, 28), None, train_batch_size, test_batch_size, get_tensor,
supervised=True)
def get_data(self):
FashionMNIST('../data/fashion', download=True)
train_data, train_labels = tr.load('../data/fashion/processed/training.pt')
test_data, test_labels = tr.load('../data/fashion/processed/test.pt')
train_data = normalize_mnist_images(train_data)
test_data = normalize_mnist_images(test_data)
return train_data, test_data, train_labels, test_labels
class MixedMnistDataLoader(BaseDataLoader):
def __init__(self, img_size = 2, train_batch_size=64, test_batch_size=64, get_tensor=True, supervised=True, classes = None):
super(MixedMnistDataLoader, self).__init__(img_size, None, train_batch_size, test_batch_size, get_tensor,
supervised, classes)
def get_data(self):
mnist_train_dataset = MNIST('../data/mnist', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(self.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
mnist_test_dataset = MNIST('../data/mnist', train=False, download=True,
transform=transforms.Compose([
transforms.Resize(self.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
mnist_train_data = np.array([x[0].numpy() for x in mnist_train_dataset])
mnist_train_labels = np.array([x[1].numpy() for x in mnist_train_dataset])
mnist_test_data = np.array([x[0].numpy() for x in mnist_test_dataset])
mnist_test_labels = np.array([x[1].numpy() for x in mnist_test_dataset])
fashion_train_dataset = FashionMNIST('../data/fashion', train = True, download=True,
transform=transforms.Compose([
transforms.Resize(self.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
fashion_test_dataset = FashionMNIST('../data/fashion', train = False, download=True,
transform=transforms.Compose([
transforms.Resize(self.img_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]))
fashion_train_data = np.array([x[0].numpy() for x in fashion_train_dataset])
fashion_train_labels = np.array([x[1].numpy() for x in fashion_train_dataset])
fashion_test_data = np.array([x[0].numpy() for x in fashion_test_dataset])
fashion_test_labels = np.array([x[1].numpy() for x in fashion_test_dataset])
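        # Merge the two datasets into a single 20-class problem: MNIST keeps labels 0-9 and
        # FashionMNIST labels are shifted by +10 so they occupy 10-19.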
train_data = np.concatenate((mnist_train_data, fashion_train_data))
train_labels = np.concatenate((mnist_train_labels, 10 + fashion_train_labels))
test_data = np.concatenate((mnist_test_data, fashion_test_data))
test_labels = np.concatenate((mnist_test_labels, 10 + fashion_test_labels))
return train_data, test_data, train_labels, test_labels
| [
"torch.load"
] | 0.4.1 | maharshi95/GANTree | 5541c5fb0ba3d856081c03f37870a85fdd654681 |
1.1 | # modify from mmcv and mmdetection
import warnings
import torch.nn as nn
from .norm import build_norm_layer
from .act import build_act_layer
from .registry import UTILS
conv_cfg = {
'Conv': nn.Conv2d,
# TODO: octave conv
}
def build_conv_layer(cfg, *args, **kwargs):
""" Build convolution layer
Args:
cfg (None or dict): cfg should contain:
type (str): identify conv layer type.
layer args: args needed to instantiate a conv layer.
Returns:
layer (nn.Module): created conv layer
"""
assert isinstance(cfg, dict) and 'type' in cfg
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in conv_cfg:
        raise KeyError('Unrecognized conv type {}'.format(layer_type))
else:
conv_layer = conv_cfg[layer_type]
layer = conv_layer(*args, **kwargs, **cfg_)
return layer
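# Example (illustrative): build_conv_layer(dict(type='Conv'), 3, 64, kernel_size=3, padding=1)
# returns an nn.Conv2d(3, 64, kernel_size=3, padding=1).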
@UTILS.register_module
class ConvModule(nn.Module):
"""A conv block that contains conv/norm/activation layers.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d.
padding (int or tuple[int]): Same as nn.Conv2d.
dilation (int or tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool or str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
conv_cfg (dict): Config dict for convolution layer.
norm_cfg (dict): Config dict for normalization layer.
act_cfg (str or None): Config dict for activation layer.
order (tuple[str]): The order of conv/norm/activation layers. It is a
sequence of "conv", "norm" and "act". Examples are
("conv", "norm", "act") and ("act", "conv", "norm").
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias='auto',
conv_cfg=dict(type='Conv'),
norm_cfg=None,
act_cfg=dict(type='Relu', inplace=True),
order=('conv', 'norm', 'act'),
dropout=None):
super(ConvModule, self).__init__()
assert isinstance(conv_cfg, dict)
assert norm_cfg is None or isinstance(norm_cfg, dict)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.order = order
assert isinstance(self.order, tuple) and len(self.order) == 3
assert set(order) == set(['conv', 'norm', 'act'])
self.with_norm = norm_cfg is not None
self.with_act = act_cfg is not None
self.with_dropout = dropout is not None
# if the conv layer is before a norm layer, bias is unnecessary.
if bias == 'auto':
bias = False if self.with_norm else True
self.with_bias = bias
if self.with_norm and self.with_bias:
warnings.warn('ConvModule has norm and bias at the same time')
# build convolution layer
self.conv = build_conv_layer(conv_cfg,
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
# export the attributes of self.conv to a higher level for convenience
self.in_channels = self.conv.in_channels
self.out_channels = self.conv.out_channels
self.kernel_size = self.conv.kernel_size
self.stride = self.conv.stride
self.padding = self.conv.padding
self.dilation = self.conv.dilation
self.transposed = self.conv.transposed
self.output_padding = self.conv.output_padding
self.groups = self.conv.groups
# build normalization layers
if self.with_norm:
# norm layer is after conv layer
if order.index('norm') > order.index('conv'):
norm_channels = out_channels
else:
norm_channels = in_channels
self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
self.add_module(self.norm_name, norm)
# build activation layer
if self.with_act:
            # activation layer is after conv layer
if order.index('act') > order.index('conv'):
act_channels = out_channels
else:
act_channels = in_channels
self.act_name, act = build_act_layer(act_cfg, act_channels)
self.add_module(self.act_name, act)
if self.with_dropout:
self.dropout = nn.Dropout2d(p=dropout)
@property
def norm(self):
return getattr(self, self.norm_name)
@property
def activate(self):
return getattr(self, self.act_name)
def forward(self, x, activate=True, norm=True):
for layer in self.order:
if layer == 'conv':
x = self.conv(x)
elif layer == 'norm' and norm and self.with_norm:
x = self.norm(x)
elif layer == 'act' and activate and self.with_act:
x = self.activate(x)
if self.with_dropout:
x = self.dropout(x)
return x
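# Example usage (illustrative; assumes a norm layer type such as 'BN' is registered with
# build_norm_layer):
#   block = ConvModule(3, 64, 3, padding=1, norm_cfg=dict(type='BN'))
#   y = block(torch.randn(1, 3, 32, 32))  # conv -> norm -> act with the default order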
@UTILS.register_module
class ConvModules(nn.Module):
"""Head
Args:
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias='auto',
conv_cfg=dict(type='Conv'),
norm_cfg=None,
act_cfg=dict(type='Relu', inplace=True),
order=('conv', 'norm', 'act'),
dropouts=None,
num_convs=1):
super().__init__()
if dropouts is not None:
assert num_convs == len(dropouts)
dropout = dropouts[0]
else:
dropout = None
layers = [
ConvModule(in_channels, out_channels, kernel_size, stride, padding,
dilation, groups, bias, conv_cfg, norm_cfg, act_cfg,
order, dropout),
]
for ii in range(1, num_convs):
if dropouts is not None:
dropout = dropouts[ii]
else:
dropout = None
layers.append(
ConvModule(out_channels, out_channels, kernel_size, stride,
padding, dilation, groups, bias, conv_cfg, norm_cfg, act_cfg,
order, dropout))
self.block = nn.Sequential(*layers)
def forward(self, x):
feat = self.block(x)
return feat
| [
"torch.nn.Sequential",
"torch.nn.Dropout2d"
] | 1.1.0 | E18301194/vedaseg | c62c8ea46dbba12f03262452dd7bed22969cfe4e |
0.4 | import argparse
import csv
import logging
import os
import sys
from ast import literal_eval
from datetime import datetime
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
from torch.optim.lr_scheduler import MultiStepLR
from tqdm import tqdm, trange
from collections import OrderedDict
import actquant
import models
from clr import CyclicLR # Until it will be included in official PyTorch release
from data import get_dataset
from logger import CsvLogger
from preprocess import get_transform
from utils.log import save_checkpoint
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ConvNet Training')
parser.add_argument('--results_dir', metavar='RESULTS_DIR', default='./results', help='results dir')
parser.add_argument('--datapath', metavar='DATA_PATH', default='./results', help='datasets dir')
parser.add_argument('--save', metavar='SAVE', default='', help='saved folder')
parser.add_argument('--dataset', metavar='DATASET', default='imagenet', help='dataset name or folder')
parser.add_argument('--model', '-a', metavar='MODEL', default='alexnet', choices=model_names,
help='model architecture: ' + ' | '.join(model_names) + ' (default: alexnet)')
parser.add_argument('--input_size', type=int, default=None, help='image input size')
parser.add_argument('--model_config', default='', help='additional architecture configuration')
parser.add_argument('--type', default='float32', help='type of tensor - e.g float16')
parser.add_argument('--gpus', default='0', help='gpus used for training - e.g 0,1,3')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=200, type=int, metavar='N', help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--optimizer', default='SGD', type=str, metavar='OPT', help='optimizer function used')
parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', type=str, metavar='FILE', help='evaluate model FILE on validation set')
parser.add_argument('--no-quantization', action='store_true', default=False, help='disables quantization')
parser.add_argument('--no-noise', action='store_true', default=False, help='noise')
parser.add_argument('--bitwidth', default=32, type=int, metavar='N', help='Quantization bitwidth (default: 32)')
parser.add_argument('--scale', default=1, type=float, metavar='N', help='scale of MobileNet2')
parser.add_argument('--step', default=2, type=int, metavar='N',
help='portion of net to be quantized at second stage(default: 2)')
parser.add_argument('--depth', default=18, type=int, metavar='N', help='depth of the model(default: 18)')
parser.add_argument('--act-bitwidth', default=32, type=int, metavar='N',
                    help='Quantization activation bitwidth (default: 32)')
parser.add_argument('--no-act-quantization', action='store_true', default=False, help='disables quantization')
parser.add_argument('--start-from-zero', action='store_true', default=False, help='Start from epoch 0')
parser.add_argument('--no-quant-edges', action='store_true', default=False,
help='no quantization for first and last layers')
#parser.add_argument('--step-setup', default='15,9', help='start of steps and interval- e.g 0,1')
parser.add_argument('--quant_start_stage', default=0, type=int, metavar='N', help='from which level of quant to start')
parser.add_argument('--quant_epoch_step', type=float, default=1.0, help='how often to change the quantization stage')
# CLR
parser.add_argument('--clr', dest='clr', action='store_true', help='Use CLR')
parser.add_argument('--min-lr', type=float, default=1e-5, help='Minimal LR for CLR.')
parser.add_argument('--max-lr', type=float, default=1, help='Maximal LR for CLR.')
parser.add_argument('--epochs-per-step', type=int, default=20,
help='Number of epochs per step in CLR, recommended to be between 2 and 10.')
parser.add_argument('--mode', default='triangular2', help='CLR mode. One of {triangular, triangular2, exp_range}')
parser.add_argument('--find-clr', dest='find_clr', action='store_true',
help='Run search for optimal LR in range (min_lr, max_lr)')
# Optimization options
parser.add_argument('--learning_rate', '-lr', type=float, default=0.1, help='The learning rate.')
parser.add_argument('--momentum', '-m', type=float, default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float, default=4e-5, help='Weight decay (L2 penalty).')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma at scheduled epochs.')
parser.add_argument('--schedule', type=int, nargs='+', default=[81, 122, 164],
help='Decrease learning rate at these epochs.')
parser.add_argument('--val_batch_size', default=64, type=int , help='val mini-batch size (default: 64)')
# NICE
parser.add_argument('--param-std-cutoff', type=float, default=3, help='how many std to include before cutoff')
parser.add_argument('--quant-dataloader', action='store_true', default=False, help='Load quantized data loader')
parser.add_argument('-sb', '--act_stats_batch_size', default=64, type=int, metavar='N', help='activation statistics mini-batch size (default: 64)')
parser.add_argument('--no_pre_process_normalize', action='store_true', default=False, help='disable normalization in the preprocessing')
parser.add_argument('--noise_mask', type=float, default=0.05, help='Probability to add noise')
clamp_stats_dict = {}
cos_loss_dict = {}
def load_model(model, checkpoint):
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict'].items():
        name = k if k[0:7] != 'module.' else k[7:]  # remove the 'module.' prefix if needed (happens when the model was created with DataParallel)
new_state_dict[name] = v if v.dim() > 1 or 'num_batches_tracked' in name else v*v.new_ones(1)
# load params
    model.load_state_dict(new_state_dict, strict=False)  # strict=False in case the loaded checkpoint doesn't have all variables (e.g. running mean)
def main():
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
global args, best_prec1
best_prec1 = 0
args = parser.parse_args()
time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if args.evaluate:
args.results_dir = '/tmp'
    if args.save == '':
args.save = time_stamp
save_path = os.path.join(args.results_dir, args.save)
if not os.path.exists(save_path):
os.makedirs(save_path)
args.noise = not args.no_noise
args.quant = not args.no_quantization
args.act_quant = not args.no_act_quantization
args.quant_edges = not args.no_quant_edges
logging.info("saving to %s", save_path)
logging.debug("run arguments: %s", args)
if args.gpus is not None:
args.gpus = [int(i) for i in args.gpus.split(',')]
device = 'cuda:' + str(args.gpus[0])
cudnn.benchmark = True
else:
device = 'cpu'
dtype = torch.float32
args.step_setup = None
model = models.__dict__[args.model]
model_config = {'scale': args.scale, 'input_size': args.input_size, 'dataset': args.dataset,
'bitwidth': args.bitwidth, 'quantize': args.quant, 'noise': args.noise, 'step': args.step,
'depth': args.depth, 'act_bitwidth': args.act_bitwidth, 'act_quant': args.act_quant,
'quant_edges': args.quant_edges, 'step_setup': args.step_setup,
'quant_epoch_step': args.quant_epoch_step, 'quant_start_stage': args.quant_start_stage,
'normalize': args.no_pre_process_normalize,
'noise_mask': args.noise_mask}
    if args.model_config != '':
model_config = dict(model_config, **literal_eval(args.model_config))
# create model
model = model(**model_config)
logging.info("creating model %s", args.model)
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("number of parameters: ", params)
logging.info("created model with configuration: %s", model_config)
print(model)
data = None
checkpoint_epoch=0
# optionally resume from a checkpoint
if args.evaluate:
if not os.path.isfile(args.evaluate):
parser.error('invalid checkpoint: {}'.format(args.evaluate))
checkpoint = torch.load(args.evaluate, map_location=device)
load_model(model, checkpoint)
logging.info("loaded checkpoint '%s' (epoch %s)",
args.evaluate, checkpoint['epoch'])
print("loaded checkpoint {0} (epoch {1})".format(args.evaluate, checkpoint['epoch']))
elif args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=device)
if not args.start_from_zero:
args.start_epoch = checkpoint['epoch'] - 1
best_test = checkpoint['best_prec1']
checkpoint_epoch = checkpoint['epoch']
load_model(model, checkpoint)
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
elif os.path.isdir(args.resume):
checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
csv_path = os.path.join(args.resume, 'results.csv')
print("=> loading checkpoint '{}'".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location=device)
best_test = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
data = []
with open(csv_path) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
data.append(row)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
if args.gpus is not None:
        model = torch.nn.DataParallel(model, [args.gpus[0]])  # Statistics need to be calculated on a single GPU to be consistent with data among multiple GPUs
# Data loading code
default_transform = {
'train': get_transform(args.dataset, input_size=args.input_size, augment=True,
integer_values=args.quant_dataloader, norm=not args.no_pre_process_normalize),
'eval': get_transform(args.dataset, input_size=args.input_size, augment=False,
integer_values=args.quant_dataloader, norm=not args.no_pre_process_normalize)
}
transform = getattr(model.module, 'input_transform', default_transform)
val_data = get_dataset(args.dataset, 'val', transform['eval'], datasets_path=args.datapath)
val_loader = torch.utils.data.DataLoader(
val_data,
batch_size=args.val_batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
train_data = get_dataset(args.dataset, 'train', transform['train'], datasets_path=args.datapath)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
statistics_train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=args.act_stats_batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.decay,
nesterov=True)
model, criterion = model.to(device, dtype), criterion.to(device, dtype)
if args.clr:
scheduler = CyclicLR(optimizer, base_lr=args.min_lr, max_lr=args.max_lr,
step_size=args.epochs_per_step * len(train_loader), mode=args.mode)
else:
scheduler = MultiStepLR(optimizer, milestones=args.schedule, gamma=args.gamma)
csv_logger = CsvLogger(filepath=save_path, data=data)
csv_logger.save_params(sys.argv, args)
csv_logger_training_stats = os.path.join(save_path, 'training_stats.csv')
# pre-training activation and parameters statistics calculation ####
if check_if_need_to_collect_statistics(model):
for layer in model.modules():
if isinstance(layer, actquant.ActQuantBuffers):
layer.pre_training_statistics = True # Turn on pre-training activation statistics calculation
model.module.statistics_phase = True
validate(statistics_train_loader, model, criterion, device, epoch=0, num_of_batches=80, stats_phase=True) # Run validation on training set for statistics
model.module.quantize.get_act_max_value_from_pre_calc_stats(list(model.modules()))
_ = model.module.quantize.set_weight_basis(list(model.modules()), None)
for layer in model.modules():
if isinstance(layer, actquant.ActQuantBuffers):
layer.pre_training_statistics = False # Turn off pre-training activation statistics calculation
model.module.statistics_phase = False
else: # Maximal activation values still need to be derived from loaded stats
model.module.quantize.assign_act_clamp_during_val(list(model.modules()), print_clamp_val=True)
model.module.quantize.assign_weight_clamp_during_val(list(model.modules()), print_clamp_val=True)
# model.module.quantize.get_act_max_value_from_pre_calc_stats(list(model.modules()))
if args.gpus is not None: # Return to Multi-GPU after statistics calculations
model = torch.nn.DataParallel(model.module, args.gpus)
model, criterion = model.to(device, dtype), criterion.to(device, dtype)
# pre-training activation statistics calculation ####
if args.evaluate:
val_loss, val_prec1, val_prec5 = validate(val_loader, model, criterion, device, epoch=0)
print("val_prec1: ", val_prec1)
return
# fast forward to curr stage
for i in range(args.quant_start_stage):
model.module.switch_stage(0)
for epoch in trange(args.start_epoch, args.epochs + 1):
if not isinstance(scheduler, CyclicLR):
scheduler.step()
# scheduler.optimizer = optimizer
train_loss, train_prec1, train_prec5 = train(
train_loader, model, criterion, device, epoch, optimizer, scheduler,
training_stats_logger=csv_logger_training_stats)
for layer in model.modules():
if isinstance(layer, actquant.ActQuantBuffers):
layer.print_clamp()
# evaluate on validation set
val_loss, val_prec1, val_prec5 = validate(
val_loader, model, criterion, device, epoch)
# remember best prec@1 and save checkpoint
is_best = val_prec1 > best_prec1
best_prec1 = max(val_prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'model': args.model,
'config': args.model_config,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'layers_b_dict': model.module.layers_b_dict #TODO this doesn't work for multi gpu - need to del
}, is_best, path=save_path)
# New type of logging
csv_logger.write({'epoch': epoch + 1, 'val_error1': 1 - val_prec1, 'val_error5': 1 - val_prec5,
'val_loss': val_loss, 'train_error1': 1 - train_prec1,
'train_error5': 1 - train_prec5, 'train_loss': train_loss})
csv_logger.plot_progress(title=args.model+str(args.depth))
csv_logger.write_text('Epoch {}: Best accuracy is {:.2f}% top-1'.format(epoch + 1, best_prec1 * 100.))
def check_if_need_to_collect_statistics(model):
for layer in model.modules():
# for layer in model.module.layers_list():
if isinstance(layer, actquant.ActQuantBuffers):
if hasattr(layer, 'running_std') and float(layer.running_std) != 0:
return False
return True
def forward(data_loader, model, criterion, device, epoch=0, num_of_batches=None, training=True, optimizer=None,
scheduler=None, training_stats_logger=None, stats_phase=False):
correct1, correct5 = 0, 0
print_correct_1 , print_correct_5 = 0, 0
print_batch_counter = 0
quant_stage_counter = 0
quant_stage_correct_1 = 0
t = time.time()
for batch_idx, (inputs, target) in enumerate(tqdm(data_loader)):
if num_of_batches:
if batch_idx > num_of_batches: # Debug
break
if isinstance(scheduler, CyclicLR):
scheduler.batch_step()
inputs, target = inputs.to(device=device), target.to(device=device)
if (training):
epoch_progress = epoch + batch_idx/len(data_loader)
stage_switch = model.module.switch_stage(epoch_progress)
if stage_switch:
quant_stage_counter = 0
quant_stage_correct_1 = 0
# compute output
output = model(inputs)
loss = criterion(output, target)
if type(output) is list:
output = output[0]
# measure accuracy and record loss
corr = correct(output, target, topk=(1, 5))
correct1 += corr[0]
correct5 += corr[1]
if training:
# compute gradient
optimizer.zero_grad()
loss.backward()
optimizer.step()
quant_stage_correct_1 += corr[0]
print_correct_1 += corr[0]
print_correct_5 += corr[1]
print_batch_counter += output.shape[0]
quant_stage_counter += output.shape[0]
if batch_idx % args.print_freq == 0:
if stats_phase:
tqdm.write('Stats phase : [{}/{} ({:.0f}%)]\tLoss: {:.6f}. Top-1 accuracy: {:.2f}%({:.2f}%). '
'Top-5 accuracy: '
'{:.2f}%({:.2f}%).'.format(batch_idx, len(data_loader),
100. * batch_idx / len(data_loader), loss.item(),
100. * print_correct_1 / print_batch_counter,
100. * correct1 / (args.act_stats_batch_size * (batch_idx + 1)),
100. * print_correct_5 / print_batch_counter,
100. * correct5 / (args.act_stats_batch_size * (batch_idx + 1))))
elif training:
tqdm.write('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}. Top-1 accuracy: {:.2f}%({:.2f}%). '
'Top-5 accuracy: {:.2f}%({:.2f}%). \t'
'lr: {:.2e}.'.format(epoch, batch_idx, len(data_loader),
100. * batch_idx / len(data_loader), loss.item(),
100. * print_correct_1 / print_batch_counter,
100. * correct1 / (args.batch_size * (batch_idx + 1)),
100. * print_correct_5 / print_batch_counter,
100. * correct5 / (args.batch_size * (batch_idx + 1)),
scheduler.get_lr()[0] if scheduler is not None else 0))
dur = time.time() - t
with open(training_stats_logger, 'a') as f: #TODO add title
f.write('{},{},{},{},{},{},{},{},{},{},{},{},{} \n'.format(epoch, batch_idx, len(data_loader),
epoch * len(data_loader) + batch_idx,
100. * batch_idx / len(data_loader), loss.item(),
100. * print_correct_1 / print_batch_counter,
100. * correct1 / (args.batch_size * (batch_idx + 1)),
100. * print_correct_5 / print_batch_counter,
100. * correct5 / (args.batch_size * (batch_idx + 1)),
scheduler.get_lr()[0] if scheduler is not None else 0,
dur ,
100. * quant_stage_correct_1 / quant_stage_counter,
)
)
else:
tqdm.write('Validation Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}. Top-1 accuracy: {:.2f}%({:.2f}%). '
'Top-5 accuracy: '
'{:.2f}%({:.2f}%).'.format(epoch, batch_idx, len(data_loader),
100. * batch_idx / len(data_loader), loss.item(),
100. * print_correct_1 / print_batch_counter,
100. * correct1 / (args.val_batch_size * (batch_idx + 1)),
100. * print_correct_5 / print_batch_counter,
100. * correct5 / (args.val_batch_size * (batch_idx + 1))))
print_correct_1, print_correct_5 = 0 , 0
print_batch_counter = 0
return loss.item(), correct1 / len(data_loader.dataset), correct5 / len(data_loader.dataset)
def train(data_loader, model, criterion, device, epoch, optimizer, scheduler,
training_stats_logger=None, num_of_batches=None):
# switch to train mode
model.train()
return forward(data_loader, model, criterion, device, epoch, training=True, optimizer=optimizer,
scheduler=scheduler, training_stats_logger=training_stats_logger,num_of_batches=num_of_batches)
def validate(data_loader, model, criterion, device, epoch, num_of_batches=None, stats_phase=False):
# switch to evaluate mode
model.eval()
return forward(data_loader, model, criterion, device, epoch, num_of_batches=num_of_batches,
training=False, optimizer=None, scheduler=None, stats_phase=stats_phase)
# TODO: separate file
def correct(output, target, topk=(1,)):
"""Computes the correct@k for the specified values of k"""
maxk = max(topk)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t().type_as(target)
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0).item()
res.append(correct_k)
return res
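# Illustrative example (assumed numbers): for a batch of 4 samples with topk=(1, 5), `correct`
# might return [3.0, 4.0], i.e. 3 samples correct at top-1 and all 4 within the top-5.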
def plot_bn_statistic(model):
# plot histogram
i = 0
for m in model.module.modules():
if isinstance(m, torch.nn.BatchNorm2d):
for p in m._parameters:
if m._parameters[p] is not None:
gaussian_numbers = m._parameters[p].view(-1).cpu().detach().numpy()
plt.hist(gaussian_numbers, bins=256)
file_name = p + '_layer_' + str(i)
directory = './plot_stats'
if not os.path.isdir(directory):
os.mkdir(directory)
file_name = os.path.join(directory, file_name + '.png')
plt.savefig(file_name)
plt.close()
for b in m._buffers:
if m._buffers[b] is not None:
gaussian_numbers = m._buffers[b].view(-1).cpu().detach().numpy()
plt.hist(gaussian_numbers, bins=256)
file_name = b + '_layer_' + str(i)
directory = './plot_stats'
if not os.path.isdir(directory):
os.mkdir(directory)
file_name = os.path.join(directory, file_name + '.png')
plt.savefig(file_name)
plt.close()
i += 1
def migrate_models(model, target_model, best_epoch, model_name='marvis_mobilenet_multi_gpu'):
"""
    This code snippet is meant to adapt a pre-trained model to a new model containing buffers
"""
module_list = [m for m in list(model.modules()) if isinstance(m, torch.nn.Conv2d) or
isinstance(m, torch.nn.Linear) or isinstance(m, torch.nn.BatchNorm2d)]
if args.gpus is not None:
target_model = torch.nn.DataParallel(target_model, args.gpus)
target_module_list = [m for m in list(target_model.modules()) if isinstance(m, torch.nn.Conv2d) or
isinstance(m, torch.nn.Linear) or isinstance(m, torch.nn.BatchNorm2d)]
for idx, m in enumerate(module_list):
for p in m._parameters:
if m._parameters[p] is not None:
target_module_list[idx]._parameters[p].data = m._parameters[p].data.clone()
for b in m._buffers: # For batchnorm stats
if m._buffers[b] is not None:
target_module_list[idx]._buffers[b].data = m._buffers[b].data.clone()
save_dir = os.path.join('./trained_models', model_name)
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
save_checkpoint({
'epoch': best_epoch,
'model': args.model,
'config': args.model_config,
'state_dict': target_model.state_dict(),
'best_prec1': best_epoch
}, True, path=save_dir)
def gather_clamp_statistic(model):
act_layer_num = 0
conv_linear_layer_num = 0
    # Activation clamps are taken from the model itself
for layer in list(model.modules()):
if isinstance(layer, actquant.ActQuantBuffers):
layer_name = 'Activation_{}_clamp_val'.format(act_layer_num)
if layer.clamp_val.data is not None:
if layer_name not in clamp_stats_dict:
clamp_stats_dict[layer_name] = []
clamp_stats_dict[layer_name].append(layer.clamp_val.data.item())
else:
clamp_stats_dict[layer_name].append(layer.clamp_val.data.item())
act_layer_num += 1
for layer in list(model.modules()):
if isinstance(layer, torch.nn.Conv2d) or isinstance(layer, torch.nn.Linear):
for p in layer._parameters:
if layer._parameters[p] is not None:
if p == 'layer_basis':
layer_name = 'Conv_Fc_{}_clamp_val'.format(conv_linear_layer_num)
lb = layer._parameters[p]
if lb is not None:
clamp_val = (2 ** (args.bitwidth - 1) - 1) * lb * layer.layer_b
if layer_name not in clamp_stats_dict:
clamp_stats_dict[layer_name] = []
clamp_stats_dict[layer_name].append(clamp_val.item())
else:
clamp_stats_dict[layer_name].append(clamp_val.item())
conv_linear_layer_num += 1
def plot_clamp_statistic(stats_dict, save_path):
# plot histogram
for k, v in stats_dict.items():
epoch = len(stats_dict[k])
plt.plot(list(range(epoch)), v,'.')
file_name = k
directory = os.path.join(save_path, 'clamp_plot_stats')
# directory = 'clamp_plot_stats'
if not os.path.isdir(directory):
os.mkdir(directory)
file_name = os.path.join(directory, file_name + '.png')
plt.savefig(file_name)
plt.close()
def plot_clamp_statistic_from_file(dict_file, act_layers_list, save_path):
plt.figure()
file_name = os.path.join(save_path,'unified_activation_clamp.png')
stats_dict = np.load(dict_file)
dict_keys = list(stats_dict.item().keys())
for layer in act_layers_list:
act_vals = stats_dict.item()[dict_keys[layer]]
epoch = len(act_vals)
plt.plot(list(range(epoch)), act_vals)
plt.xlabel('epoch')
plt.ylabel('Clamp Value')
plt.savefig(file_name)
plt.show()
def plot_cos_loss(stats_dict, save_path):
for k, v in stats_dict.items():
epoch = len(stats_dict[k])
plt.plot(list(range(epoch)), v,'.')
file_name = k
directory = os.path.join(save_path, 'cos_loss')
if not os.path.isdir(directory):
os.mkdir(directory)
file_name = os.path.join(directory, file_name + '.png')
plt.savefig(file_name)
plt.close()
def gather_cos_loss(model):
num_layers = len(model.module.quantize.cosine_sim_loss)
total_cosine_loss=0
layer_num = 0
for layer, cos_loss in model.module.quantize.cosine_sim_loss.items():
total_cosine_loss += cos_loss
layer_string = "cos_loss_layer_{}".format(layer_num)
if layer_string not in cos_loss_dict:
cos_loss_dict[layer_string] = []
cos_loss_dict[layer_string].append(cos_loss)
else:
cos_loss_dict[layer_string].append(cos_loss)
layer_num += 1
if 'total_cosine_loss' not in cos_loss_dict:
cos_loss_dict['total_cosine_loss'] = []
cos_loss_dict['total_cosine_loss'].append(total_cosine_loss/num_layers)
else:
cos_loss_dict['total_cosine_loss'].append(total_cosine_loss/num_layers)
return
def plot_act_quant_error_statistic(model, save_path):
for layer in model.module.modules():
# if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Linear):
if isinstance(layer, actquant.ActQuantBuffers):
i = layer.layer_num
plt.hist(layer.quant_error, bins=256)
file_name = 'layer_' + str(i)
directory = os.path.join(save_path, 'act_quant_error_stats')
if not os.path.isdir(directory):
os.mkdir(directory)
file_name = os.path.join(directory, file_name + '.png')
plt.savefig(file_name)
plt.close()
return
def plot_weight_quant_error_statistic(model, save_path):
i = 0
for layer, stats in model.module.quantize.quant_error.items():
if isinstance(layer, torch.nn.Conv2d) or isinstance(layer, torch.nn.Linear):
plt.hist(np.concatenate(stats), bins=256)
file_name = 'layer_' + str(i)
directory = os.path.join(save_path, 'weight_quant_error_stats')
if not os.path.isdir(directory):
os.mkdir(directory)
full_path = os.path.join(directory, file_name + '.png')
plt.savefig(full_path)
plt.close()
i += 1
return
if __name__ == '__main__':
main()
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel"
] | 0.4.0 | yukuzntcva/Denoising-drone-rotors | 0122b020fc959dd3869b3863989fee3520aede73 |
1.2 | """
Copyright 2020 The Microsoft DeepSpeed Team
DeepSpeed library
Create a new wheel via the following command: python setup.py bdist_wheel
The wheel will be located at: dist/*.whl
"""
import os
import torch
from deepspeed import __version__ as ds_version
from setuptools import setup, find_packages
from torch.utils.cpp_extension import CUDAExtension, BuildExtension
cmdclass = {}
ext_modules = []
cmdclass['build_ext'] = BuildExtension
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if not torch.cuda.is_available():
    # Fix to allow docker builds, similar to https://github.com/NVIDIA/apex/issues/486
print(
"[WARNING] Torch did not find cuda available, if cross-compling or running with cpu only "
"you can ignore this message. Adding compute capability for Pascal, Volta, and Turing "
"(compute capabilities 6.0, 6.1, 6.2)")
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
# Fix from apex that might be relevant for us as well, related to https://github.com/NVIDIA/apex/issues/456
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
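# These -D macros are passed into the extension's compile args below so the C++/CUDA sources can
# guard torch-version-specific APIs (e.g. with #ifdef VERSION_GE_1_3).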
ext_modules.append(
CUDAExtension(name='fused_lamb_cuda',
sources=['csrc/fused_lamb_cuda.cpp',
'csrc/fused_lamb_cuda_kernel.cu'],
extra_compile_args={
'cxx': [
'-O3',
] + version_dependent_macros,
'nvcc': ['-O3',
'--use_fast_math'] + version_dependent_macros
}))
setup(name='deepspeed',
version=ds_version,
description='DeepSpeed library',
author='DeepSpeed Team',
author_email='[email protected]',
url='http://aka.ms/deepspeed',
packages=find_packages(exclude=["docker",
"third_party",
"csrc"]),
scripts=['bin/deepspeed',
'bin/deepspeed.pt',
'bin/ds',
'bin/ds_ssh'],
classifiers=['Programming Language :: Python :: 3.6'],
ext_modules=ext_modules,
cmdclass=cmdclass)
| [
"torch.cuda.is_available",
"torch.__version__.split",
"torch.utils.cpp_extension.CUDAExtension"
] | 1.2 | sj6077/DeepSpeed | c70b472a68bc9ca387b14a1b35814c582d0ec94b |
1.4 | import os
import sys
import importlib.util
if importlib.util.find_spec('torch_itl') is None:
path_to_lib = os.getcwd()[:-15]
sys.path.append(path_to_lib)
from torch_itl.estimator import IQR
from torch_itl.kernel import Gaussian, LearnableGaussian
from torch_itl.model import DecomposableIdentity
from torch_itl.sampler import LinearSampler
from torch_itl.datasets import import_data_toy_quantile
import torch
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# %%
# Defining a simple toy dataset:
print("Creating the dataset")
x_train, y_train, _ = import_data_toy_quantile(150)
n = x_train.shape[0]
m = 10
plt.figure()
plt.scatter(x_train, y_train, marker='.')
plt.show()
# %%
# Defining an ITL model, first without a learnable kernel
print("Defining the model")
kernel_input = Gaussian(3.5)
kernel_output = Gaussian(9)
model = DecomposableIdentity(kernel_input, kernel_output, 1)
lbda = 0.001
lbda_cross = 0.01
sampler = LinearSampler(0.1, 0.9, 10, 0)
sampler.m = 10
est = IQR(model, lbda, lbda_cross, sampler)
#%%
# Learning the coefficients of the model
print("Fitting the coefficients of the model")
est.fit_alpha_gd(x_train, y_train, n_epochs=40,
lr=0.001, line_search_fn='strong_wolfe')
#%%
# Plotting the loss over the course of learning
plt.figure()
plt.title("Loss evolution with time")
plt.plot(est.losses)
plt.show()
best_loss = est.losses[-1]
# Plotting the model on test points
probs = est.sampler.sample(30)
x_test = torch.linspace(0, 1.4, 100).view(-1, 1)
y_pred = est.model.forward(x_test, probs).detach().numpy()
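# y_pred holds one column per sampled quantile level (30 here), evaluated at each of the 100 test points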
colors = [cm.viridis(x.item()) for x in torch.linspace(0, 1, 30)]
plt.figure()
plt.title("Conditional Quantiles output by our model")
plt.scatter(x_train, y_train, marker='.')
for i in range(30):
plt.plot(x_test, y_pred[:, i], c=colors[i])
plt.show()
# %%
# Let's learn the input kernel with ITL
# First define a neural net
n_h = 40
d_out = 10
model_kernel_input = torch.nn.Sequential(
torch.nn.Linear(x_train.shape[1], n_h),
torch.nn.ReLU(),
torch.nn.Linear(n_h, n_h),
torch.nn.Linear(n_h, d_out),
)
gamma = 3
optim_params = dict(lr=0.01, momentum=0, dampening=0,
weight_decay=0, nesterov=False)
kernel_input = LearnableGaussian(gamma, model_kernel_input, optim_params)
est.model.kernel_input = kernel_input
# %%
est.fit_kernel_input(x_train, y_train)
# plot the loss over the course of learning the kernel
#%%
plt.figure()
plt.title("Loss evolution when learning the kernel")
plt.plot(est.model.kernel_input.losses)
plt.show()
# %%
# Now retrain the parameters alpha of the model
est.fit_alpha_gd(x_train, y_train, n_epochs=40,
lr=0.01, line_search_fn='strong_wolfe')
# plot the loss
plt.figure()
plt.title("Loss evolution when learning model coefficients again")
plt.plot(est.losses)
plt.show()
y_pred = est.model.forward(x_test, probs).detach().numpy()
colors = [cm.viridis(x.item()) for x in torch.linspace(0, 1, 30)]
plt.figure()
plt.title('Conditional Quantiles with learned kernel')
plt.scatter(x_train, y_train, marker='.')
for i in range(30):
plt.plot(x_test, y_pred[:, i], c=colors[i])
plt.show()
print('Loss gain from learning the kernel: ',
best_loss - est.losses[-1])
| [
"torch.nn.Linear",
"torch.linspace",
"torch.nn.ReLU"
] | 1.4.0 | mathurinm/torch_itl | e3d92d753bd51ccf585029129110c93bbf9b5fd0 |
1.7 | import math
from torch import nn
import torch.nn.functional as F
import torch
import torchvision.models as models
def kp2gaussian(kp, spatial_size, kp_variance):
"""
    Transform a keypoint into a Gaussian-like representation
"""
mean = kp['value']
coordinate_grid = make_coordinate_grid(spatial_size, mean.type())
number_of_leading_dimensions = len(mean.shape) - 1
shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape
coordinate_grid = coordinate_grid.view(*shape)
repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1)
coordinate_grid = coordinate_grid.repeat(*repeats)
# Preprocess kp shape
shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2)
mean = mean.view(*shape)
mean_sub = (coordinate_grid - mean)
out = torch.exp(-0.5 * (mean_sub ** 2).sum(-1) / kp_variance)
return out
def make_coordinate_grid(spatial_size, type):
"""
Create a meshgrid [-1,1] x [-1,1] of given spatial_size.
"""
h, w = spatial_size
x = torch.arange(w).type(type)
y = torch.arange(h).type(type)
x = (2 * (x / (w - 1)) - 1)
y = (2 * (y / (h - 1)) - 1)
yy = y.view(-1, 1).repeat(1, w)
xx = x.view(1, -1).repeat(h, 1)
meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)
return meshed
class ResBlock2d(nn.Module):
"""
Res block, preserve spatial resolution.
"""
def __init__(self, in_features, kernel_size, padding):
super(ResBlock2d, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
padding=padding)
self.conv2 = nn.Conv2d(in_channels=in_features, out_channels=in_features, kernel_size=kernel_size,
padding=padding)
self.norm1 = nn.BatchNorm2d(in_features, affine=True)
self.norm2 = nn.BatchNorm2d(in_features, affine=True)
def forward(self, x):
out = self.norm1(x)
out = F.relu(out)
out = self.conv1(out)
out = self.norm2(out)
out = F.relu(out)
out = self.conv2(out)
out += x
return out
class UpBlock2d(nn.Module):
"""
Upsampling block for use in decoder.
"""
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
super(UpBlock2d, self).__init__()
self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
padding=padding, groups=groups)
self.norm = nn.BatchNorm2d(out_features, affine=True)
def forward(self, x):
out = F.interpolate(x, scale_factor=2)
out = self.conv(out)
out = self.norm(out)
out = F.relu(out)
return out
class GADEUpBlock2d(nn.Module):
"""
Geometrically-Adaptive Denormalization Upsampling block for use in decoder.
"""
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1, z_size=1280):
super(GADEUpBlock2d, self).__init__()
self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
padding=padding, groups=groups)
self.norm = nn.BatchNorm2d(out_features, affine=True)
self.fc_1 = nn.Linear(z_size, out_features)
self.fc_2 = nn.Linear(z_size, out_features)
self.conv_f = nn.Conv2d(out_features, out_features, kernel_size=3, stride=1, padding=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x, z):
out = F.interpolate(x, scale_factor=2)
out = self.conv(out)
out = self.norm(out)
m = self.sigmoid(self.conv_f(out))
r = self.fc_1(z).unsqueeze(-1).unsqueeze(-1).expand_as(out)
beta = self.fc_2(z).unsqueeze(-1).unsqueeze(-1).expand_as(out)
addin_z = r * out + beta
out = m * addin_z + (1 - m) * out
out = F.relu(out)
return out
class DownBlock2d(nn.Module):
"""
Downsampling block for use in encoder.
"""
def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):
super(DownBlock2d, self).__init__()
self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features, kernel_size=kernel_size,
padding=padding, groups=groups)
self.norm = nn.BatchNorm2d(out_features, affine=True)
self.pool = nn.AvgPool2d(kernel_size=(2, 2))
def forward(self, x):
out = self.conv(x)
out = self.norm(out)
out = F.relu(out)
out = self.pool(out)
return out
class SameBlock2d(nn.Module):
"""
Simple block, preserve spatial resolution.
"""
def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1):
super(SameBlock2d, self).__init__()
self.conv = nn.Conv2d(in_channels=in_features, out_channels=out_features,
kernel_size=kernel_size, padding=padding, groups=groups)
self.norm = nn.BatchNorm2d(out_features, affine=True)
def forward(self, x):
out = self.conv(x)
out = self.norm(out)
out = F.relu(out)
return out
class Encoder(nn.Module):
"""
Hourglass Encoder
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Encoder, self).__init__()
down_blocks = []
for i in range(num_blocks):
down_blocks.append(DownBlock2d(in_features if i == 0 else min(max_features, block_expansion * (2 ** i)),
min(max_features, block_expansion * (2 ** (i + 1))),
kernel_size=3, padding=1))
self.down_blocks = nn.ModuleList(down_blocks)
def forward(self, x):
outs = [x]
for down_block in self.down_blocks:
outs.append(down_block(outs[-1]))
return outs
class Decoder(nn.Module):
"""
Hourglass Decoder
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Decoder, self).__init__()
up_blocks = []
for i in range(num_blocks)[::-1]:
in_filters = (1 if i == num_blocks - 1 else 2) * min(max_features, block_expansion * (2 ** (i + 1)))
out_filters = min(max_features, block_expansion * (2 ** i))
up_blocks.append(UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1))
self.up_blocks = nn.ModuleList(up_blocks)
self.out_filters = block_expansion + in_features
def forward(self, x):
out = x.pop()
for up_block in self.up_blocks:
out = up_block(out)
skip = x.pop()
out = torch.cat([out, skip], dim=1)
return out
class Hourglass(nn.Module):
"""
Hourglass architecture.
"""
def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):
super(Hourglass, self).__init__()
self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)
self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)
self.out_filters = self.decoder.out_filters
def forward(self, x):
return self.decoder(self.encoder(x))
class AntiAliasInterpolation2d(nn.Module):
"""
Band-limited downsampling, for better preservation of the input signal.
"""
def __init__(self, channels, scale):
super(AntiAliasInterpolation2d, self).__init__()
sigma = (1 / scale - 1) / 2
kernel_size = 2 * round(sigma * 4) + 1
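        # e.g. scale=0.5 gives sigma=0.5 and a 5x5 Gaussian kernel; the blur below is
        # followed by keeping every int(1/scale)-th pixel in forward()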
self.ka = kernel_size // 2
self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka
kernel_size = [kernel_size, kernel_size]
sigma = [sigma, sigma]
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= torch.exp(-(mgrid - mean) ** 2 / (2 * std ** 2))
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.groups = channels
self.scale = scale
inv_scale = 1 / scale
self.int_inv_scale = int(inv_scale)
def forward(self, input):
if self.scale == 1.0:
return input
out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))
out = F.conv2d(out, weight=self.weight, groups=self.groups)
out = out[:, :, ::self.int_inv_scale, ::self.int_inv_scale]
return out
class mymobilenetv2(nn.Module):
def __init__(self, num_classes=1000, image_size=256):
super(mymobilenetv2, self).__init__()
self.model = models.mobilenet_v2(pretrained=True)
self.n_layers = len(self.model.features)
self.dropout = nn.Dropout(0.2)
self.fc = nn.Linear(self.model.last_channel, num_classes)
self.fc.weight.data.zero_()
self.fc.bias.data.zero_()
self.model.classifier = nn.Sequential(
self.dropout,
self.fc,
)
self.scale_factor = 224.0 / image_size
if self.scale_factor != 1:
self.down = AntiAliasInterpolation2d(3, self.scale_factor)
self.feature_maps = {}
def forward(self, x):
if self.scale_factor != 1:
x = self.down(x)
feature = self.model.features[0](x)
for i in range(18):
feature = self.model.features[i+1](feature)
feature = nn.functional.adaptive_avg_pool2d(feature, 1).reshape(feature.shape[0], -1)
code = self.model.classifier(feature)
return code, feature
class ContextualAttention(nn.Module):
"""
Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/model/networks.py
"""
def __init__(self, ksize=3, stride=1, rate=1, fuse_k=3, softmax_scale=10, fuse=True):
super(ContextualAttention, self).__init__()
self.ksize = ksize
self.stride = stride
self.rate = rate
self.fuse_k = fuse_k
self.softmax_scale = softmax_scale # to fit the PyTorch tensor image value range
self.fuse = fuse
if self.fuse:
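            # k x k identity kernel used as a conv filter over the (background x foreground) score map;
            # it sums scores along diagonals, roughly encouraging neighbouring patches to attend to
            # neighbouring locations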
fuse_weight = torch.eye(fuse_k).view(1, 1, fuse_k, fuse_k) # 1*1*k*k
self.register_buffer('fuse_weight', fuse_weight)
def forward(self, f, b, mask):
""" Contextual attention layer implementation.
Contextual attention is first introduced in publication:
Generative Image Inpainting with Contextual Attention, Yu et al.
Args:
f: Input feature to match (foreground).
b: Input feature for match (background).
mask: Input mask for b, indicating patches not available.
ksize: Kernel size for contextual attention.
stride: Stride for extracting patches from b.
rate: Dilation for matching.
softmax_scale: Scaled softmax for attention.
Returns:
torch.tensor: output
"""
# get shapes
raw_int_fs = list(f.size())
raw_int_bs = list(b.size())
# extract patches from background with stride and rate
kernel = 2 * self.rate
# raw_w is extracted for reconstruction
raw_w = extract_image_patches(b, ksizes=[kernel, kernel],
strides=[self.rate*self.stride,
self.rate*self.stride],
rates=[1, 1],
padding='same')
raw_w = raw_w.view(raw_int_bs[0], raw_int_bs[1], kernel, kernel, -1)
raw_w = raw_w.permute(0, 4, 1, 2, 3)
raw_w_groups = torch.split(raw_w, 1, dim=0)
        # downscaling foreground option: downscale both foreground and
        # background for matching, and use the original background for reconstruction.
f = F.interpolate(f, scale_factor=1./self.rate, mode='nearest')
b = F.interpolate(b, scale_factor=1./self.rate, mode='nearest')
int_fs = list(f.size())
int_bs = list(b.size())
f_groups = torch.split(f, 1, dim=0) # split tensors along the batch dimension
w = extract_image_patches(b, ksizes=[self.ksize, self.ksize],
strides=[self.stride, self.stride],
rates=[1, 1],
padding='same')
w = w.view(int_bs[0], int_bs[1], self.ksize, self.ksize, -1)
w = w.permute(0, 4, 1, 2, 3)
w_groups = torch.split(w, 1, dim=0)
mask = F.interpolate(mask, scale_factor=1./(self.rate), mode='nearest')
int_ms = list(mask.size())
m = extract_image_patches(mask, ksizes=[self.ksize, self.ksize],
strides=[self.stride, self.stride],
rates=[1, 1],
padding='same')
m = m.view(int_ms[0], int_ms[1], self.ksize, self.ksize, -1)
m = m.permute(0, 4, 1, 2, 3)
mm = reduce_mean(m, axis=[3, 4]).unsqueeze(-1)
y = []
for i, (xi, wi, raw_wi) in enumerate(zip(f_groups, w_groups, raw_w_groups)):
'''
O => output channel as a conv filter
I => input channel as a conv filter
xi : separated tensor along batch dimension of front;
wi : separated patch tensor along batch dimension of back;
raw_wi : separated tensor along batch dimension of back;
'''
# conv for compare
wi = wi[0]
max_wi = torch.sqrt(reduce_sum(torch.pow(wi, 2) + 1e-4, axis=[1, 2, 3], keepdim=True))
wi_normed = wi / max_wi
xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1])
yi = F.conv2d(xi, wi_normed, stride=1)
if self.fuse:
# make all of depth to spatial resolution
yi = yi.view(1, 1, int_bs[2]*int_bs[3], int_fs[2]*int_fs[3])
yi = same_padding(yi, [self.fuse_k, self.fuse_k], [1, 1], [1, 1])
yi = F.conv2d(yi, self.fuse_weight, stride=1)
yi = yi.contiguous().view(1, int_bs[2], int_bs[3], int_fs[2], int_fs[3])
yi = yi.permute(0, 2, 1, 4, 3)
yi = yi.contiguous().view(1, 1, int_bs[2]*int_bs[3], int_fs[2]*int_fs[3])
yi = same_padding(yi, [self.fuse_k, self.fuse_k], [1, 1], [1, 1])
yi = F.conv2d(yi, self.fuse_weight, stride=1)
yi = yi.contiguous().view(1, int_bs[3], int_bs[2], int_fs[3], int_fs[2])
yi = yi.permute(0, 2, 1, 4, 3).contiguous()
yi = yi.view(1, int_bs[2] * int_bs[3], int_fs[2], int_fs[3])
# softmax to match
yi = yi * mm[i:i+1]
yi = F.softmax(yi*self.softmax_scale, dim=1)
yi = yi * mm[i:i+1]
# deconv for patch pasting
wi_center = raw_wi[0]
yi = F.conv_transpose2d(yi, wi_center, stride=self.rate, padding=1) / 4.0
y.append(yi)
y = torch.cat(y, dim=0)
y.contiguous().view(raw_int_fs)
return y
def extract_image_patches(images, ksizes, strides, rates, padding='same'):
"""
Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/utils/tools.py
Extract patches from images and put them in the C output dimension.
:param padding:
:param images: [batch, channels, in_rows, in_cols]. A 4-D Tensor with shape
:param ksizes: [ksize_rows, ksize_cols]. The size of the sliding window for
each dimension of images
:param strides: [stride_rows, stride_cols]
:param rates: [dilation_rows, dilation_cols]
:return: A Tensor
"""
assert len(images.size()) == 4
assert padding in ['same', 'valid']
batch_size, channel, height, width = images.size()
if padding == 'same':
images = same_padding(images, ksizes, strides, rates)
elif padding == 'valid':
pass
else:
raise NotImplementedError('Unsupported padding type: {}.\
Only "same" or "valid" are supported.'.format(padding))
unfold = torch.nn.Unfold(kernel_size=ksizes,
dilation=rates,
padding=0,
stride=strides)
patches = unfold(images)
return patches
def same_padding(images, ksizes, strides, rates):
"""
Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/utils/tools.py
"""
assert len(images.size()) == 4
batch_size, channel, rows, cols = images.size()
out_rows = (rows + strides[0] - 1) // strides[0]
out_cols = (cols + strides[1] - 1) // strides[1]
effective_k_row = (ksizes[0] - 1) * rates[0] + 1
effective_k_col = (ksizes[1] - 1) * rates[1] + 1
padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows)
padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols)
# Pad the input
padding_top = int(padding_rows / 2.)
padding_left = int(padding_cols / 2.)
padding_bottom = padding_rows - padding_top
padding_right = padding_cols - padding_left
paddings = (padding_left, padding_right, padding_top, padding_bottom)
images = torch.nn.ZeroPad2d(paddings)(images)
return images
def reduce_mean(x, axis=None, keepdim=False):
"""
Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/utils/tools.py
"""
if not axis:
axis = range(len(x.shape))
for i in sorted(axis, reverse=True):
x = torch.mean(x, dim=i, keepdim=keepdim)
return x
def reduce_sum(x, axis=None, keepdim=False):
"""
Borrowed from https://github.com/daa233/generative-inpainting-pytorch/blob/master/utils/tools.py
"""
if not axis:
axis = range(len(x.shape))
for i in sorted(axis, reverse=True):
x = torch.sum(x, dim=i, keepdim=keepdim)
return x
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Unfold",
"torch.nn.ModuleList",
"torch.nn.BatchNorm2d",
"torch.eye",
"torch.nn.functional.pad",
"torch.exp",
"torch.sum",
"torch.nn.AvgPool2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.functional.relu",
"torch.nn.functional.conv2d",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.pow",
"torch.nn.Dropout",
"torch.arange",
"torch.nn.Sigmoid",
"torch.nn.functional.conv_transpose2d",
"torch.nn.functional.interpolate",
"torch.split",
"torch.nn.ZeroPad2d",
"torch.mean"
] | 1.7.1 | shovelingpig/SAFA | 35cd638ab299e58ba303bf64874287abdbcf9fd6 |
1.9 | # this is derived from ClipDraw code
# CLIPDraw: Exploring Text-to-Drawing Synthesis through Language-Image Encoders
# Kevin Frans, L.B. Soros, Olaf Witkowski
# https://arxiv.org/abs/2106.14843
from DrawingInterface import DrawingInterface
import pydiffvg
import torch
import skimage
import skimage.io
import random
import ttools.modules
import argparse
import math
import torchvision
import torchvision.transforms as transforms
import numpy as np
import PIL.Image
from util import str2bool
def bound(value, low, high):
return max(low, min(high, value))
class LineDrawer(DrawingInterface):
@staticmethod
def add_settings(parser):
parser.add_argument("--strokes", type=int, help="number strokes", default=24, dest='strokes')
parser.add_argument("--stroke_length", type=int, help="stroke length", default=8, dest='stroke_length')
parser.add_argument("--min_stroke_width", type=float, help="min width (percent of height)", default=0.5, dest='min_stroke_width')
parser.add_argument("--max_stroke_width", type=float, help="max width (percent of height)", default=2, dest='max_stroke_width')
parser.add_argument("--allow_paper_color", type=str2bool, help="allow paper color to change", default=False, dest='allow_paper_color')
return parser
def __init__(self, settings):
super(DrawingInterface, self).__init__()
self.canvas_width = settings.size[0]
self.canvas_height = settings.size[1]
self.num_paths = settings.strokes
self.stroke_length = settings.stroke_length
def load_model(self, settings, device):
# Use GPU if available
pydiffvg.set_use_gpu(torch.cuda.is_available())
        device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
pydiffvg.set_device(device)
canvas_width, canvas_height = self.canvas_width, self.canvas_height
num_paths = self.num_paths
max_width = settings.max_stroke_width * canvas_height / 100
min_width = settings.min_stroke_width * canvas_height / 100
shapes = []
shape_groups = []
color_vars = []
# background shape
p0 = [0, 0]
p1 = [canvas_width, canvas_height]
path = pydiffvg.Rect(p_min=torch.tensor(p0), p_max=torch.tensor(p1))
shapes.append(path)
# https://encycolorpedia.com/f2eecb
cell_color = torch.tensor([242/255.0, 238/255.0, 203/255.0, 1.0])
path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes)-1]), stroke_color = None, fill_color = cell_color)
shape_groups.append(path_group)
if settings.allow_paper_color:
path_group.fill_color.requires_grad = True
color_vars.append(path_group.fill_color)
# Initialize Random Curves
for i in range(num_paths):
num_segments = self.stroke_length
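            # 2 interior control points per segment, so each segment is a cubic Bezier curve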
num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2
points = []
radius = 0.5
radius_x = 0.5 #radius * canvas_height / canvas_width
p0 = (0.5 + radius_x * (random.random() - 0.5), 0.5 + radius * (random.random() - 0.5))
points.append(p0)
for j in range(num_segments):
radius = 1.0 / (num_segments + 2)
radius_x = radius * canvas_height / canvas_width
p1 = (p0[0] + radius_x * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5))
p2 = (p1[0] + radius_x * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5))
p3 = (p2[0] + radius_x * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5))
points.append(p1)
points.append(p2)
points.append(p3)
p0 = (bound(p3[0],0,1), bound(p3[1],0,1))
points = torch.tensor(points)
points[:, 0] *= canvas_width
points[:, 1] *= canvas_height
path = pydiffvg.Path(num_control_points = num_control_points, points = points, stroke_width = torch.tensor(max_width/10), is_closed = False)
shapes.append(path)
s_col = [0, 0, 0, 1]
path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes)-1]), fill_color = None, stroke_color = torch.tensor(s_col))
shape_groups.append(path_group)
# Just some diffvg setup
scene_args = pydiffvg.RenderFunction.serialize_scene(\
canvas_width, canvas_height, shapes, shape_groups)
render = pydiffvg.RenderFunction.apply
img = render(canvas_width, canvas_height, 2, 2, 0, None, *scene_args)
points_vars = []
stroke_width_vars = []
for path in shapes[1:]:
path.points.requires_grad = True
points_vars.append(path.points)
path.stroke_width.requires_grad = True
stroke_width_vars.append(path.stroke_width)
# for group in shape_groups:
# group.stroke_color.requires_grad = True
# color_vars.append(group.stroke_color)
self.points_vars = points_vars
self.stroke_width_vars = stroke_width_vars
self.color_vars = color_vars
self.img = img
self.shapes = shapes
self.shape_groups = shape_groups
self.max_width = max_width
self.canvas_width = canvas_width
self.canvas_height = canvas_height
def get_opts(self, decay_divisor):
# Optimizers
points_optim = torch.optim.Adam(self.points_vars, lr=1.0/decay_divisor)
width_optim = torch.optim.Adam(self.stroke_width_vars, lr=0.1/decay_divisor)
opts = [points_optim, width_optim]
if len(self.color_vars) > 0:
color_optim = torch.optim.Adam(self.color_vars, lr=0.01/decay_divisor)
opts.append(color_optim)
return opts
def rand_init(self, toksX, toksY):
# TODO
pass
def init_from_tensor(self, init_tensor):
# TODO
pass
def reapply_from_tensor(self, new_tensor):
# TODO
pass
def get_z_from_tensor(self, ref_tensor):
return None
def get_num_resolutions(self):
return None
def synth(self, cur_iteration):
render = pydiffvg.RenderFunction.apply
scene_args = pydiffvg.RenderFunction.serialize_scene(\
self.canvas_width, self.canvas_height, self.shapes, self.shape_groups)
img = render(self.canvas_width, self.canvas_height, 2, 2, cur_iteration, None, *scene_args)
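        # composite the rendered RGBA image over a white background, then keep only the RGB channels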
img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device = pydiffvg.get_device()) * (1 - img[:, :, 3:4])
img = img[:, :, :3]
img = img.unsqueeze(0)
img = img.permute(0, 3, 1, 2) # NHWC -> NCHW
self.img = img
return img
@torch.no_grad()
def to_image(self):
img = self.img.detach().cpu().numpy()[0]
img = np.transpose(img, (1, 2, 0))
img = np.clip(img, 0, 1)
img = np.uint8(img * 254)
# img = np.repeat(img, 4, axis=0)
# img = np.repeat(img, 4, axis=1)
pimg = PIL.Image.fromarray(img, mode="RGB")
return pimg
def clip_z(self):
with torch.no_grad():
for path in self.shapes[1:]:
path.stroke_width.data.clamp_(1.0, self.max_width)
for group in self.shape_groups[1:]:
group.stroke_color.data.clamp_(0.0, 1.0)
def get_z(self):
return None
def get_z_copy(self):
return None
def set_z(self, new_z):
return None
@torch.no_grad()
def to_svg(self):
pydiffvg.save_svg("./lineout.svg", self.canvas_width, self.canvas_height, self.shapes, self.shape_groups)
| [
"torch.zeros",
"torch.device",
"torch.no_grad",
"torch.optim.Adam",
"torch.cuda.is_available",
"torch.tensor"
] | 1.9.0 | q1qgames/pixray | 8bd73869af7979068aa7ff8402f5b3ab2b791255 |
1.1 | # Copyright 2019 Shigeki Karita
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Transformer speech recognition model (pytorch)."""
from argparse import Namespace
from distutils.util import strtobool
from itertools import groupby
import logging
import math
import numpy
import torch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.ctc_prefix_score import CTCPrefixScore
from espnet.nets.e2e_asr_common import end_detect
from espnet.nets.e2e_asr_common import ErrorCalculator
from espnet.nets.pytorch_backend.ctc import CTC
from espnet.nets.pytorch_backend.e2e_asr import CTC_LOSS_THRESHOLD
from espnet.nets.pytorch_backend.e2e_asr import Reporter
from espnet.nets.pytorch_backend.nets_utils import get_subsample
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet.nets.pytorch_backend.nets_utils import th_accuracy
from espnet.nets.pytorch_backend.rnn.decoders import CTC_SCORING_RATIO
from espnet.nets.pytorch_backend.transformer.add_sos_eos import add_sos_eos
from espnet.nets.pytorch_backend.transformer.add_sos_eos import mask_uniform
from espnet.nets.pytorch_backend.transformer.attention import (
MultiHeadedAttention, # noqa: H301
RelPositionMultiHeadedAttention, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.decoder_self_mix import Decoder
from espnet.nets.pytorch_backend.transformer.dynamic_conv import DynamicConvolution
from espnet.nets.pytorch_backend.transformer.dynamic_conv2d import DynamicConvolution2D
from espnet.nets.pytorch_backend.transformer.encoder import Encoder
from espnet.nets.pytorch_backend.transformer.initializer import initialize
from espnet.nets.pytorch_backend.transformer.label_smoothing_loss import (
LabelSmoothingLoss, # noqa: H301
)
from espnet.nets.pytorch_backend.transformer.mask import subsequent_mask
from espnet.nets.pytorch_backend.transformer.mask import target_mask
from espnet.nets.pytorch_backend.transformer.plot import PlotAttentionReport
from espnet.nets.scorers.ctc import CTCPrefixScorer
from espnet.utils.fill_missing_args import fill_missing_args
class E2E(ASRInterface, torch.nn.Module):
"""E2E module.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
@staticmethod
def add_arguments(parser):
"""Add arguments."""
group = parser.add_argument_group("transformer model setting")
group.add_argument(
"--transformer-init",
type=str,
default="pytorch",
choices=[
"pytorch",
"xavier_uniform",
"xavier_normal",
"kaiming_uniform",
"kaiming_normal",
],
help="how to initialize transformer parameters",
)
group.add_argument(
"--transformer-input-layer",
type=str,
default="conv2d",
choices=["conv2d", "linear", "embed"],
help="transformer input layer type",
)
group.add_argument(
"--transformer-attn-dropout-rate",
default=None,
type=float,
help="dropout in transformer attention. use --dropout-rate if None is set",
)
group.add_argument(
"--transformer-lr",
default=10.0,
type=float,
help="Initial value of learning rate",
)
group.add_argument(
"--transformer-warmup-steps",
default=25000,
type=int,
help="optimizer warmup steps",
)
group.add_argument(
"--transformer-length-normalized-loss",
default=True,
type=strtobool,
help="normalize loss by length",
)
group.add_argument(
"--transformer-encoder-selfattn-layer-type",
type=str,
default="selfattn",
choices=[
"selfattn",
"rel_selfattn",
"lightconv",
"lightconv2d",
"dynamicconv",
"dynamicconv2d",
"light-dynamicconv2d",
],
help="transformer encoder self-attention layer type",
)
group.add_argument(
"--transformer-decoder-selfattn-layer-type",
type=str,
default="selfattn",
choices=[
"selfattn",
"lightconv",
"lightconv2d",
"dynamicconv",
"dynamicconv2d",
"light-dynamicconv2d",
],
help="transformer decoder self-attention layer type",
)
# Lightweight/Dynamic convolution related parameters.
# See https://arxiv.org/abs/1912.11793v2
# and https://arxiv.org/abs/1901.10430 for detail of the method.
# Configurations used in the first paper are in
# egs/{csj, librispeech}/asr1/conf/tuning/ld_conv/
group.add_argument(
"--wshare",
default=4,
type=int,
help="Number of parameter shargin for lightweight convolution",
)
group.add_argument(
"--ldconv-encoder-kernel-length",
default="21_23_25_27_29_31_33_35_37_39_41_43",
type=str,
help="kernel size for lightweight/dynamic convolution: "
'Encoder side. For example, "21_23_25" means kernel length 21 for '
"First layer, 23 for Second layer and so on.",
)
group.add_argument(
"--ldconv-decoder-kernel-length",
default="11_13_15_17_19_21",
type=str,
help="kernel size for lightweight/dynamic convolution: "
'Decoder side. For example, "21_23_25" means kernel length 21 for '
"First layer, 23 for Second layer and so on.",
)
group.add_argument(
"--ldconv-usebias",
type=strtobool,
default=False,
help="use bias term in lightweight/dynamic convolution",
)
group.add_argument(
"--dropout-rate",
default=0.0,
type=float,
help="Dropout rate for the encoder",
)
# Encoder
group.add_argument(
"--elayers",
default=4,
type=int,
help="Number of encoder layers (for shared recognition part "
"in multi-speaker asr mode)",
)
group.add_argument(
"--eunits",
"-u",
default=300,
type=int,
help="Number of encoder hidden units",
)
# Attention
group.add_argument(
"--adim",
default=320,
type=int,
help="Number of attention transformation dimensions",
)
group.add_argument(
"--aheads",
default=4,
type=int,
help="Number of heads for multi head attention",
)
# Decoder
group.add_argument(
"--dlayers", default=1, type=int, help="Number of decoder layers"
)
group.add_argument(
"--dunits", default=320, type=int, help="Number of decoder hidden units"
)
# Non-autoregressive training
group.add_argument(
"--decoder-mode",
default="AR",
type=str,
choices=["ar", "maskctc"],
help="AR: standard autoregressive training, "
"maskctc: non-autoregressive training based on Mask CTC",
)
return parser
@property
def attention_plot_class(self):
"""Return PlotAttentionReport."""
return PlotAttentionReport
def __init__(self, idim, odim, args, ignore_id=-1):
"""Construct an E2E object.
:param int idim: dimension of inputs
:param int odim: dimension of outputs
:param Namespace args: argument Namespace containing options
"""
torch.nn.Module.__init__(self)
# fill missing arguments for compatibility
args = fill_missing_args(args, self.add_arguments)
if args.transformer_attn_dropout_rate is None:
args.transformer_attn_dropout_rate = args.dropout_rate
self.encoder = Encoder(
idim=idim,
selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,
attention_dim=args.adim,
attention_heads=args.aheads,
conv_wshare=args.wshare,
conv_kernel_length=args.ldconv_encoder_kernel_length,
conv_usebias=args.ldconv_usebias,
linear_units=args.eunits,
num_blocks=args.elayers,
input_layer=args.transformer_input_layer,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
attention_dropout_rate=args.transformer_attn_dropout_rate,
)
if args.mtlalpha < 1:
self.decoder = Decoder(
odim=odim,
selfattention_layer_type=args.transformer_decoder_selfattn_layer_type,
attention_dim=args.adim,
attention_heads=args.aheads,
conv_wshare=args.wshare,
conv_kernel_length=args.ldconv_decoder_kernel_length,
conv_usebias=args.ldconv_usebias,
linear_units=args.dunits,
num_blocks=args.dlayers,
dropout_rate=args.dropout_rate,
positional_dropout_rate=args.dropout_rate,
self_attention_dropout_rate=args.transformer_attn_dropout_rate,
src_attention_dropout_rate=args.transformer_attn_dropout_rate,
)
self.criterion = LabelSmoothingLoss(
odim,
ignore_id,
args.lsm_weight,
args.transformer_length_normalized_loss,
)
else:
self.decoder = None
self.criterion = None
self.blank = 0
self.decoder_mode = args.decoder_mode
if self.decoder_mode == "maskctc":
self.mask_token = odim - 1
self.sos = odim - 2
self.eos = odim - 2
else:
self.sos = odim - 1
self.eos = odim - 1
self.odim = odim
self.ignore_id = ignore_id
self.subsample = get_subsample(args, mode="asr", arch="transformer")
self.reporter = Reporter()
self.reset_parameters(args)
self.adim = args.adim # used for CTC (equal to d_model)
self.mtlalpha = args.mtlalpha
if args.mtlalpha > 0.0:
self.ctc = CTC(
odim, args.adim, args.dropout_rate, ctc_type=args.ctc_type, reduce=True
)
else:
self.ctc = None
if args.report_cer or args.report_wer:
self.error_calculator = ErrorCalculator(
args.char_list,
args.sym_space,
args.sym_blank,
args.report_cer,
args.report_wer,
)
else:
self.error_calculator = None
self.rnnlm = None
def reset_parameters(self, args):
"""Initialize parameters."""
# initialize parameters
initialize(self, args.transformer_init)
def forward(self, xs_pad, ilens, ys_pad):
"""E2E forward.
:param torch.Tensor xs_pad: batch of padded source sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of source sequences (B)
:param torch.Tensor ys_pad: batch of padded target sequences (B, Lmax)
:return: ctc loss value
:rtype: torch.Tensor
:return: attention loss value
:rtype: torch.Tensor
:return: accuracy in attention decoder
:rtype: float
"""
# 1. forward encoder
xs_pad = xs_pad[:, : max(ilens)] # for data parallel
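        # src_mask: (B, 1, Tmax) boolean mask that is True at non-padded frames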
src_mask = make_non_pad_mask(ilens.tolist()).to(xs_pad.device).unsqueeze(-2)
hs_pad, hs_mask = self.encoder(xs_pad, src_mask)
# print(hs_pad.shape,hs_mask.shape)
self.hs_pad = hs_pad
# 2. forward decoder
if self.decoder is not None:
if self.decoder_mode == "maskctc":
ys_in_pad, ys_out_pad = mask_uniform(
ys_pad, self.mask_token, self.eos, self.ignore_id
)
ys_mask = (ys_in_pad != self.ignore_id).unsqueeze(-2)
else:
ys_in_pad, ys_out_pad = add_sos_eos(
ys_pad, self.sos, self.eos, self.ignore_id
)
ys_mask = target_mask(ys_in_pad, self.ignore_id)
pred_pad, pred_mask, memory_pad ,memory_mask = self.decoder(ys_in_pad, ys_mask, hs_pad, hs_mask)
self.pred_pad = pred_pad
# 3. compute attention loss
loss_att = self.criterion(pred_pad, ys_out_pad)
self.acc = th_accuracy(
pred_pad.view(-1, self.odim), ys_out_pad, ignore_label=self.ignore_id
)
else:
loss_att = None
self.acc = None
# TODO(karita) show predicted text
# TODO(karita) calculate these stats
cer_ctc = None
if self.mtlalpha == 0.0:
loss_ctc = None
else:
batch_size = xs_pad.size(0)
# hs_len = hs_mask.view(batch_size, -1).sum(1)#ctc1
# loss_ctc = self.ctc(hs_pad.view(batch_size, -1, self.adim), hs_len, ys_pad)#ctc1
hs_len = memory_mask.view(batch_size, -1).sum(1)#ctc2
loss_ctc = self.ctc(memory_pad.view(batch_size, -1, self.adim), hs_len, ys_pad)#ctc2
if not self.training and self.error_calculator is not None:
ys_hat = self.ctc.argmax(hs_pad.view(batch_size, -1, self.adim)).data
cer_ctc = self.error_calculator(ys_hat.cpu(), ys_pad.cpu(), is_ctc=True)
# for visualization
if not self.training:
self.ctc.softmax(hs_pad)
# 5. compute cer/wer
if self.training or self.error_calculator is None or self.decoder is None:
cer, wer = None, None
else:
ys_hat = pred_pad.argmax(dim=-1)
cer, wer = self.error_calculator(ys_hat.cpu(), ys_pad.cpu())
# copied from e2e_asr
alpha = self.mtlalpha
if alpha == 0:
self.loss = loss_att
loss_att_data = float(loss_att)
loss_ctc_data = None
elif alpha == 1:
self.loss = loss_ctc
loss_att_data = None
loss_ctc_data = float(loss_ctc)
else:
self.loss = alpha * loss_ctc + (1 - alpha) * loss_att
loss_att_data = float(loss_att)
loss_ctc_data = float(loss_ctc)
loss_data = float(self.loss)
if loss_data < CTC_LOSS_THRESHOLD and not math.isnan(loss_data):
self.reporter.report(
loss_ctc_data, loss_att_data, self.acc, cer_ctc, cer, wer, loss_data
)
else:
logging.warning("loss (=%f) is not correct", loss_data)
return self.loss
def scorers(self):
"""Scorers."""
return dict(decoder=self.decoder, ctc=CTCPrefixScorer(self.ctc, self.eos))
def encode(self, x):
"""Encode acoustic features.
:param ndarray x: source acoustic feature (T, D)
:return: encoder outputs
:rtype: torch.Tensor
"""
self.eval()
x = torch.as_tensor(x).unsqueeze(0)
enc_output, _ = self.encoder(x, None)
return enc_output.squeeze(0)
def recognize(self, x, recog_args, char_list=None, rnnlm=None, use_jit=False):
"""Recognize input speech.
        :param ndarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
# enc_output = self.encode(x).unsqueeze(0)#ctc1
if self.mtlalpha == 1.0:
recog_args.ctc_weight = 1.0
logging.info("Set to pure CTC decoding mode.")
#ctc2
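        # In this variant the CTC branch scores the decoder memory rather than the raw encoder
        # output, so run the decoder once on a dummy <sos> token to obtain that memory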
self.eval()
x = torch.as_tensor(x).unsqueeze(0)
enc_output, enc_output_mask = self.encoder(x, None)
_, _, decoder_output ,_ = self.decoder(torch.tensor([[self.sos]]), torch.tensor([[[True]]]), enc_output, enc_output_mask)
#ctc2
if self.mtlalpha > 0 and recog_args.ctc_weight == 1.0:
from itertools import groupby
# lpz = self.ctc.argmax(enc_output)#ctc1
lpz = self.ctc.argmax(decoder_output)
collapsed_indices = [x[0] for x in groupby(lpz[0])]
hyp = [x for x in filter(lambda x: x != self.blank, collapsed_indices)]
nbest_hyps = [{"score": 0.0, "yseq": [self.sos] + hyp}]
if recog_args.beam_size > 1:
raise NotImplementedError("Pure CTC beam search is not implemented.")
# TODO(hirofumi0810): Implement beam search
return nbest_hyps
elif self.mtlalpha > 0 and recog_args.ctc_weight > 0.0:
# lpz = self.ctc.log_softmax(enc_output)
lpz = self.ctc.log_softmax(decoder_output)
lpz = lpz.squeeze(0)
else:
lpz = None
h = enc_output.squeeze(0)
logging.info("input lengths: " + str(h.size(0)))
        # search params
beam = recog_args.beam_size
penalty = recog_args.penalty
ctc_weight = recog_args.ctc_weight
        # prepare sos
y = self.sos
vy = h.new_zeros(1).long()
if recog_args.maxlenratio == 0:
maxlen = h.shape[0]
else:
# maxlen >= 1
maxlen = max(1, int(recog_args.maxlenratio * h.size(0)))
minlen = int(recog_args.minlenratio * h.size(0))
logging.info("max output length: " + str(maxlen))
logging.info("min output length: " + str(minlen))
# initialize hypothesis
if rnnlm:
hyp = {"score": 0.0, "yseq": [y], "rnnlm_prev": None}
else:
hyp = {"score": 0.0, "yseq": [y]}
if lpz is not None:
ctc_prefix_score = CTCPrefixScore(lpz.detach().numpy(), 0, self.eos, numpy)
hyp["ctc_state_prev"] = ctc_prefix_score.initial_state()
hyp["ctc_score_prev"] = 0.0
if ctc_weight != 1.0:
# pre-pruning based on attention scores
ctc_beam = min(lpz.shape[-1], int(beam * CTC_SCORING_RATIO))
else:
ctc_beam = lpz.shape[-1]
hyps = [hyp]
ended_hyps = []
import six
traced_decoder = None
for i in six.moves.range(maxlen):
logging.debug("position " + str(i))
hyps_best_kept = []
for hyp in hyps:
vy[0] = hyp["yseq"][i]
# get nbest local scores and their ids
ys_mask = subsequent_mask(i + 1).unsqueeze(0)
ys = torch.tensor(hyp["yseq"]).unsqueeze(0)
# FIXME: jit does not match non-jit result
if use_jit:
if traced_decoder is None:
traced_decoder = torch.jit.trace(
self.decoder.forward_one_step, (ys, ys_mask, enc_output)
)
local_att_scores = traced_decoder(ys, ys_mask, enc_output)[0]
else:
local_att_scores = self.decoder.forward_one_step(
ys, ys_mask, enc_output
)[0]
if rnnlm:
rnnlm_state, local_lm_scores = rnnlm.predict(hyp["rnnlm_prev"], vy)
local_scores = (
local_att_scores + recog_args.lm_weight * local_lm_scores
)
else:
local_scores = local_att_scores
if lpz is not None:
local_best_scores, local_best_ids = torch.topk(
local_att_scores, ctc_beam, dim=1
)
ctc_scores, ctc_states = ctc_prefix_score(
hyp["yseq"], local_best_ids[0], hyp["ctc_state_prev"]
)
local_scores = (1.0 - ctc_weight) * local_att_scores[
:, local_best_ids[0]
] + ctc_weight * torch.from_numpy(
ctc_scores - hyp["ctc_score_prev"]
)
if rnnlm:
local_scores += (
recog_args.lm_weight * local_lm_scores[:, local_best_ids[0]]
)
local_best_scores, joint_best_ids = torch.topk(
local_scores, beam, dim=1
)
local_best_ids = local_best_ids[:, joint_best_ids[0]]
else:
local_best_scores, local_best_ids = torch.topk(
local_scores, beam, dim=1
)
for j in six.moves.range(beam):
new_hyp = {}
new_hyp["score"] = hyp["score"] + float(local_best_scores[0, j])
new_hyp["yseq"] = [0] * (1 + len(hyp["yseq"]))
new_hyp["yseq"][: len(hyp["yseq"])] = hyp["yseq"]
new_hyp["yseq"][len(hyp["yseq"])] = int(local_best_ids[0, j])
if rnnlm:
new_hyp["rnnlm_prev"] = rnnlm_state
if lpz is not None:
new_hyp["ctc_state_prev"] = ctc_states[joint_best_ids[0, j]]
new_hyp["ctc_score_prev"] = ctc_scores[joint_best_ids[0, j]]
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(
hyps_best_kept, key=lambda x: x["score"], reverse=True
)[:beam]
# sort and get nbest
hyps = hyps_best_kept
logging.debug("number of pruned hypothes: " + str(len(hyps)))
if char_list is not None:
logging.debug(
"best hypo: "
+ "".join([char_list[int(x)] for x in hyps[0]["yseq"][1:]])
)
            # add eos in the final loop so that there is at least one ended hypothesis
if i == maxlen - 1:
logging.info("adding <eos> in the last postion in the loop")
for hyp in hyps:
hyp["yseq"].append(self.eos)
            # add ended hypotheses to a final list, and remove them from the current hypotheses
            # (this can be a problem when the number of hyps < beam)
remained_hyps = []
for hyp in hyps:
if hyp["yseq"][-1] == self.eos:
# only store the sequence that has more than minlen outputs
# also add penalty
if len(hyp["yseq"]) > minlen:
hyp["score"] += (i + 1) * penalty
if rnnlm: # Word LM needs to add final <eos> score
hyp["score"] += recog_args.lm_weight * rnnlm.final(
hyp["rnnlm_prev"]
)
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
# end detection
if end_detect(ended_hyps, i) and recog_args.maxlenratio == 0.0:
logging.info("end detected at %d", i)
break
hyps = remained_hyps
if len(hyps) > 0:
logging.debug("remeined hypothes: " + str(len(hyps)))
else:
logging.info("no hypothesis. Finish decoding.")
break
if char_list is not None:
for hyp in hyps:
logging.debug(
"hypo: " + "".join([char_list[int(x)] for x in hyp["yseq"][1:]])
)
logging.debug("number of ended hypothes: " + str(len(ended_hyps)))
nbest_hyps = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[
: min(len(ended_hyps), recog_args.nbest)
]
        # check the number of hypotheses
if len(nbest_hyps) == 0:
logging.warning(
"there is no N-best results, perform recognition "
"again with smaller minlenratio."
)
            # should copy because Namespace will be overwritten globally
recog_args = Namespace(**vars(recog_args))
recog_args.minlenratio = max(0.0, recog_args.minlenratio - 0.1)
return self.recognize(x, recog_args, char_list, rnnlm)
logging.info("total log probability: " + str(nbest_hyps[0]["score"]))
logging.info(
"normalized log probability: "
+ str(nbest_hyps[0]["score"] / len(nbest_hyps[0]["yseq"]))
)
return nbest_hyps
def recognize_maskctc(self, x, recog_args, char_list=None):
"""Non-autoregressive decoding using Mask CTC.
        :param ndarray x: input acoustic feature (B, T, D) or (T, D)
        :param Namespace recog_args: argument Namespace containing options
:param list char_list: list of characters
:return: decoding result
:rtype: list
"""
self.eval()
h = self.encode(x).unsqueeze(0)
ctc_probs, ctc_ids = torch.exp(self.ctc.log_softmax(h)).max(dim=-1)
y_hat = torch.stack([x[0] for x in groupby(ctc_ids[0])])
y_idx = torch.nonzero(y_hat != 0).squeeze(-1)
probs_hat = []
cnt = 0
for i, y in enumerate(y_hat.tolist()):
probs_hat.append(-1)
while cnt < ctc_ids.shape[1] and y == ctc_ids[0][cnt]:
if probs_hat[i] < ctc_probs[0][cnt]:
probs_hat[i] = ctc_probs[0][cnt].item()
cnt += 1
probs_hat = torch.from_numpy(numpy.array(probs_hat))
char_mask = "_"
p_thres = recog_args.maskctc_probability_threshold
mask_idx = torch.nonzero(probs_hat[y_idx] < p_thres).squeeze(-1)
confident_idx = torch.nonzero(probs_hat[y_idx] >= p_thres).squeeze(-1)
mask_num = len(mask_idx)
y_in = torch.zeros(1, len(y_idx) + 1, dtype=torch.long) + self.mask_token
y_in[0][confident_idx] = y_hat[y_idx][confident_idx]
y_in[0][-1] = self.eos
logging.info(
"ctc:{}".format(
"".join(
[
char_list[y] if y != self.mask_token else char_mask
for y in y_in[0].tolist()
]
).replace("<space>", " ")
)
)
if not mask_num == 0:
K = recog_args.maskctc_n_iterations
num_iter = K if mask_num >= K and K > 0 else mask_num
for t in range(1, num_iter):
                pred, _, _, _ = self.decoder(  # ctc2 change; originally: pred, _ = self.decoder(
y_in, (y_in != self.ignore_id).unsqueeze(-2), h, None
)
pred_sc, pred_id = pred[0][mask_idx].max(dim=-1)
cand = torch.topk(pred_sc, mask_num // num_iter, -1)[1]
y_in[0][mask_idx[cand]] = pred_id[cand]
mask_idx = torch.nonzero(y_in[0] == self.mask_token).squeeze(-1)
logging.info(
"msk:{}".format(
"".join(
[
char_list[y] if y != self.mask_token else char_mask
for y in y_in[0].tolist()
]
).replace("<space>", " ")
)
)
pred, pred_mask = self.decoder(
y_in, (y_in != self.ignore_id).unsqueeze(-2), h, None
)
y_in[0][mask_idx] = pred[0][mask_idx].argmax(dim=-1)
logging.info(
"msk:{}".format(
"".join(
[
char_list[y] if y != self.mask_token else char_mask
for y in y_in[0].tolist()
]
).replace("<space>", " ")
)
)
ret = y_in.tolist()[0][:-1]
hyp = {"score": 0.0, "yseq": [self.sos] + ret + [self.eos]}
return [hyp]
def calculate_all_attentions(self, xs_pad, ilens, ys_pad):
"""E2E attention calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax, idim)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:return: attention weights (B, H, Lmax, Tmax)
:rtype: float ndarray
"""
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad)
ret = dict()
for name, m in self.named_modules():
if (
isinstance(m, MultiHeadedAttention)
or isinstance(m, DynamicConvolution)
or isinstance(m, RelPositionMultiHeadedAttention)
):
ret[name] = m.attn.cpu().numpy()
if isinstance(m, DynamicConvolution2D):
ret[name + "_time"] = m.attn_t.cpu().numpy()
ret[name + "_freq"] = m.attn_f.cpu().numpy()
self.train()
return ret
def calculate_all_ctc_probs(self, xs_pad, ilens, ys_pad):
"""E2E CTC probability calculation.
:param torch.Tensor xs_pad: batch of padded input sequences (B, Tmax)
:param torch.Tensor ilens: batch of lengths of input sequences (B)
:param torch.Tensor ys_pad: batch of padded token id sequence tensor (B, Lmax)
:return: CTC probability (B, Tmax, vocab)
:rtype: float ndarray
"""
ret = None
if self.mtlalpha == 0:
return ret
self.eval()
with torch.no_grad():
self.forward(xs_pad, ilens, ys_pad)
for name, m in self.named_modules():
if isinstance(m, CTC) and m.probs is not None:
ret = m.probs.cpu().numpy()
self.train()
return ret
| [
"torch.nonzero",
"torch.no_grad",
"torch.nn.Module.__init__",
"torch.from_numpy",
"torch.tensor",
"torch.jit.trace",
"torch.as_tensor",
"torch.topk"
] | 1.1.0 | HongYun0901/ESPnet | 44f78734034991fed4f42359f4d15f15504680bd |
1.0 | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Blenderbot checkpoint."""
import argparse
import torch
from ...models.bart import BartConfig, BartForConditionalGeneration
from ...utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
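    # e.g. an illustrative ParlAI key 'encoder.layers.0.attention.q_lin.weight' is mapped to
    # 'encoder.layers.0.self_attn.q_proj.weight'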
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
k = k.replace(parlai_name, hf_name)
if k.startswith("encoder"):
k = k.replace(".attn", ".self_attn")
k = k.replace("norm1", "self_attn_layer_norm")
k = k.replace("norm2", "final_layer_norm")
elif k.startswith("decoder"):
k = k.replace("norm1", "self_attn_layer_norm")
k = k.replace("norm2", "encoder_attn_layer_norm")
k = k.replace("norm3", "final_layer_norm")
return k
def rename_layernorm_keys(sd):
keys = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
v = sd.pop(k)
new_k = k.replace("layernorm_embedding", "layer_norm")
assert new_k not in sd
sd[new_k] = v
IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
"""
    Copy/paste/tweak model's weights to our BART structure.
"""
model = torch.load(checkpoint_path, map_location="cpu")
sd = model["model"]
cfg = BartConfig.from_json_file(config_json_path)
m = BartForConditionalGeneration(cfg)
valid_keys = m.model.state_dict().keys()
failures = []
mapping = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
new_k = rename_state_dict_key(k)
if new_k not in valid_keys:
failures.append([k, new_k])
else:
mapping[new_k] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(sd)
m.model.load_state_dict(mapping, strict=True)
m.half()
m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| [
"torch.no_grad",
"torch.load"
] | 1.0 | sunjiao123sun/transformers | c60e0e1ee45f4bf1017736b146c51729f120bb83 |
1.6 | #!/usr/bin/env python3
#
# Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
# Xiaomi Corporation (authors: Haowen Qiu)
#
# See ../../../LICENSE for clarification regarding multiple authors
# To run this single test, use
#
# ctest --verbose -R index_test_py
import unittest
import k2
import torch
class TestIndex(unittest.TestCase):
def test(self):
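        # Each FSA below is given in k2's text format: one arc per line as
        # 'src_state dest_state label score', with the final state alone on the last line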
s0 = '''
0 1 1 0.1
0 2 2 0.2
1 2 3 0.3
2 3 -1 0.4
3
'''
s1 = '''
0 1 -1 0.5
1
'''
s2 = '''
0 2 1 0.6
0 1 2 0.7
1 3 -1 0.8
2 1 3 0.9
3
'''
fsa0 = k2.Fsa.from_str(s0).requires_grad_(True)
fsa1 = k2.Fsa.from_str(s1).requires_grad_(True)
fsa2 = k2.Fsa.from_str(s2).requires_grad_(True)
fsa_vec = k2.create_fsa_vec([fsa0, fsa1, fsa2])
new_fsa21 = k2.index(fsa_vec, torch.tensor([2, 1], dtype=torch.int32))
assert new_fsa21.shape == (2, None, None)
assert torch.allclose(
new_fsa21.arcs.values()[:, :3],
torch.tensor([
# fsa 2
[0, 2, 1],
[0, 1, 2],
[1, 3, -1],
[2, 1, 3],
# fsa 1
[0, 1, -1]
]).to(torch.int32))
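        # Weight each gathered score by its position index; after backward, each source FSA's
        # score gradient holds those indices for its selected arcs and zeros elsewhere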
scale = torch.arange(new_fsa21.scores.numel())
(new_fsa21.scores * scale).sum().backward()
assert torch.allclose(fsa0.scores.grad, torch.tensor([0., 0, 0, 0]))
assert torch.allclose(fsa1.scores.grad, torch.tensor([4.]))
assert torch.allclose(fsa2.scores.grad, torch.tensor([0., 1., 2., 3.]))
# now select only a single FSA
fsa0.scores.grad = None
fsa1.scores.grad = None
fsa2.scores.grad = None
new_fsa0 = k2.index(fsa_vec, torch.tensor([0], dtype=torch.int32))
assert new_fsa0.shape == (1, None, None)
scale = torch.arange(new_fsa0.scores.numel())
(new_fsa0.scores * scale).sum().backward()
assert torch.allclose(fsa0.scores.grad, torch.tensor([0., 1., 2., 3.]))
assert torch.allclose(fsa1.scores.grad, torch.tensor([0.]))
assert torch.allclose(fsa2.scores.grad, torch.tensor([0., 0., 0., 0.]))
class TestIndexRaggedInt(unittest.TestCase):
def test(self):
devices = [torch.device('cpu')]
if torch.cuda.is_available():
devices.append(torch.device('cuda', 0))
for device in devices:
src_row_splits = torch.tensor([0, 2, 3, 3, 6],
dtype=torch.int32,
device=device)
src_shape = k2.create_ragged_shape2(src_row_splits, None, 6)
src_values = torch.tensor([1, 2, 3, 4, 5, 6],
dtype=torch.int32,
device=device)
src = k2.RaggedInt(src_shape, src_values)
# index with ragged int
index_row_splits = torch.tensor([0, 2, 2, 3, 7],
dtype=torch.int32,
device=device)
index_shape = k2.create_ragged_shape2(index_row_splits, None, 7)
index_values = torch.tensor([0, 3, 2, 1, 2, 1, 0],
dtype=torch.int32,
device=device)
ragged_index = k2.RaggedInt(index_shape, index_values)
ans = k2.index_ragged_int(src, ragged_index)
expected_row_splits = torch.tensor([0, 5, 5, 5, 9],
dtype=torch.int32,
device=device)
self.assertTrue(
torch.allclose(ans.row_splits(1), expected_row_splits))
expected_values = torch.tensor([1, 2, 4, 5, 6, 3, 3, 1, 2],
dtype=torch.int32,
device=device)
self.assertTrue(torch.allclose(ans.values(), expected_values))
# index with tensor
tensor_index = torch.tensor([0, 3, 2, 1, 2, 1],
dtype=torch.int32,
device=device)
ans = k2.index_ragged_int(src, tensor_index)
expected_row_splits = torch.tensor([0, 2, 5, 5, 6, 6, 7],
dtype=torch.int32,
device=device)
self.assertTrue(
torch.allclose(ans.row_splits(1), expected_row_splits))
expected_values = torch.tensor([1, 2, 4, 5, 6, 3, 3],
dtype=torch.int32,
device=device)
self.assertTrue(torch.allclose(ans.values(), expected_values))
class TestIndexTensorWithRaggedInt(unittest.TestCase):
def test(self):
devices = [torch.device('cpu')]
if torch.cuda.is_available():
devices.append(torch.device('cuda', 0))
for device in devices:
src = torch.tensor([1, 2, 3, 4, 5, 6, 7],
dtype=torch.int32,
device=device)
index_row_splits = torch.tensor([0, 2, 2, 3, 7],
dtype=torch.int32,
device=device)
index_shape = k2.create_ragged_shape2(index_row_splits, None, 7)
index_values = torch.tensor([0, 3, 2, 3, 5, 1, 3],
dtype=torch.int32,
device=device)
ragged_index = k2.RaggedInt(index_shape, index_values)
ans = k2.index_tensor_with_ragged_int(src, ragged_index)
self.assertTrue(torch.allclose(ans.row_splits(1),
index_row_splits))
expected_values = torch.tensor([1, 4, 3, 4, 6, 2, 4],
dtype=torch.int32,
device=device)
self.assertTrue(torch.allclose(ans.values(), expected_values))
if __name__ == '__main__':
unittest.main()
| [
"torch.device",
"torch.cuda.is_available",
"torch.tensor"
] | 1.6.0 | pzelasko/k2 | 2dbb3e09b152fcf98354c946baa271e5b57c8321 |
1.2 | # Copyright (c) 2019, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: MIT
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/MIT
import os
import json
import torch
import torch.nn as nn
from .base import StochasticAgent
from agents.maze_agents.toy_maze.env import Env
from base.modules.normalization import DatasetNormalizer
from agents.maze_agents.modules.density import VQVAEDensity
from agents.maze_agents.modules import StochasticPolicy, Value
from base.learners.skill_discovery.edl import BaseEDLLearner, BaseEDLSiblingRivalryLearner
class DistanceStochasticAgent(StochasticAgent):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.batch_keys += ['goal'] # 'goal' is only used for visualization purposes
def _make_modules(self, policy, skill_embedding, vae):
super()._make_modules(policy, skill_embedding)
self.vae = vae
def step(self, do_eval=False):
super().step(do_eval=do_eval)
self.episode[-1]['goal'] = self.env.goal.detach()
def reset(self, skill=None, *args, **kwargs):
self.reset_skill(skill)
kwargs['goal'] = self.vae.get_centroids(dict(skill=self.curr_skill.view([]))).detach().numpy()
self.env.reset(*args, **kwargs)
self.episode = []
def preprocess_skill(self, curr_skill):
assert curr_skill is not None
return self.skill_embedding(curr_skill).detach()
class SiblingRivalryStochasticAgent(DistanceStochasticAgent):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.batch_keys += ['antigoal']
class VQVAEDiscriminator(VQVAEDensity):
def __init__(self, state_size, hidden_size, codebook_size, code_size, beta=0.25, **kwargs):
super().__init__(num_skills=0, state_size=state_size, hidden_size=hidden_size, codebook_size=codebook_size,
code_size=code_size, beta=beta, **kwargs)
self.softmax = nn.Softmax(dim=1)
def _make_normalizer_module(self):
self.normalizer = DatasetNormalizer(self.input_size) if self.normalize_inputs else None
def compute_logprob(self, batch, with_codes=False):
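        # Reconstruction log-likelihood under the VQ-VAE: encode the state,
        # quantize with straight-through codes, decode, and score the input
        # with the negative squared error summed over feature dimensions.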
x = batch[self.input_key]
z_e_x = self.encoder(x)
z_q_x, selected_codes = self.vq.straight_through(z_e_x)
x_ = self.decoder(z_q_x)
if self.normalizes_inputs:
x_ = self.normalizer.denormalize(x_)
logprob = -1. * self.mse_loss(x, x_).sum(dim=1)
if with_codes:
return logprob, z_e_x, selected_codes
else:
return logprob
def compute_logprob_under_latent(self, batch, z=None):
x = batch[self.input_key]
if z is None:
z = batch['skill']
z_q_x = self.vq.embedding(z).detach()
x_ = self.decoder(z_q_x).detach()
if self.normalizes_inputs:
x_ = self.normalizer.denormalize(x_)
logprob = -1. * self.mse_loss(x, x_).sum(dim=1)
return logprob
def log_approx_posterior(self, batch):
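        # Approximate posterior over discrete skills q(z|s): softmax over the
        # encoder-to-codebook scores, then select the probability of the skill
        # actually taken (assumes compute_distances returns values where larger
        # means closer, e.g. negated distances).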
x, z = batch[self.input_key], batch['skill']
z_e_x = self.encoder(x)
codebook_distances = self.vq.compute_distances(z_e_x)
p = self.softmax(codebook_distances)
p_z = p[torch.arange(0, p.shape[0]), z]
return torch.log(p_z)
def surprisal(self, batch):
with torch.no_grad():
return self.compute_logprob_under_latent(batch).detach()
class EDLLearner(BaseEDLLearner):
def __init__(self, vae_logdir, **kwargs):
self._parse_init_args(vae_logdir, **kwargs)
super().__init__(**kwargs)
def _parse_init_args(self, vae_logdir, **kwargs):
vae_logdir = str(vae_logdir)
if not os.path.isabs(vae_logdir):
root_dir = os.environ.get("ROOT_DIR", os.getcwd()) # useful when loading experiments from a notebook
vae_logdir = os.path.join(root_dir, vae_logdir)
assert os.path.exists(vae_logdir), "Directory not found: {}".format(vae_logdir)
self.vae_args = json.load(open(os.path.join(vae_logdir, "config.json")))["vae_args"]
self.vae_checkpoint_path = os.path.join(vae_logdir, "model.pth.tar")
def create_env(self):
return Env(**self.env_params)
def _make_agent_modules(self):
self.vae = VQVAEDiscriminator(state_size=self._dummy_env.state_size, **self.vae_args)
self.vae.load_checkpoint(self.vae_checkpoint_path)
kwargs = dict(env=self._dummy_env, hidden_size=self.hidden_size, num_layers=self.num_layers,
goal_size=self.vae.code_size, normalize_inputs=self.normalize_inputs)
self.policy = StochasticPolicy(**kwargs)
self.v_module = Value(use_antigoal=False, **kwargs)
def _make_agent(self):
return DistanceStochasticAgent(env=self.create_env(), policy=self.policy, skill_n=self.vae.codebook_size,
skill_embedding=self.vae.vq.embedding, vae=self.vae)
def get_values(self, batch):
return self.v_module(
batch['state'],
self.preprocess_skill(batch['skill'])
)
def get_terminal_values(self, batch):
return self.v_module(
batch['next_state'][-1:],
self.preprocess_skill(batch['skill'][-1:]),
)
def get_policy_lprobs_and_nents(self, batch):
log_prob, n_ent, _ = self.policy(
batch['state'],
self.preprocess_skill(batch['skill']),
action_logit=batch['action_logit']
)
return log_prob.sum(dim=1), n_ent
class EDLSiblingRivalryLearner(BaseEDLSiblingRivalryLearner, EDLLearner):
def __init__(self, **kwargs):
self._parse_init_args(**kwargs)
super().__init__(**kwargs)
def _make_agent_modules(self):
self.vae = VQVAEDiscriminator(state_size=self._dummy_env.state_size, **self.vae_args)
self.vae.load_checkpoint(self.vae_checkpoint_path)
kwargs = dict(env=self._dummy_env, hidden_size=self.hidden_size, num_layers=self.num_layers,
goal_size=self.vae.code_size, normalize_inputs=self.normalize_inputs)
self.policy = StochasticPolicy(**kwargs)
self.v_module = Value(use_antigoal=self.use_antigoal, **kwargs)
def _make_agent(self):
return SiblingRivalryStochasticAgent(env=self.create_env(), policy=self.policy, skill_n=self.vae.codebook_size,
skill_embedding=self.vae.vq.embedding, vae=self.vae)
def get_values(self, batch):
return self.v_module(
batch['state'],
self.preprocess_skill(batch['skill']),
batch.get('antigoal', None)
)
def get_terminal_values(self, batch):
if 'antigoal' in batch:
antigoal = batch['antigoal'][-1:]
else:
antigoal = None
return self.v_module(
batch['next_state'][-1:],
self.preprocess_skill(batch['skill'][-1:]),
antigoal
)
| [
"torch.arange",
"torch.log",
"torch.no_grad",
"torch.nn.Softmax"
] | 1.2.0 | victorcampos7/edl | ffdf23d4e102ca7d69a1408bafa267b0c7d8bfa0 |
3 | import os
from tempfile import NamedTemporaryFile
import h5py
import numpy as np
import torch
from pytorch3dunet.datasets.utils import get_train_loaders
from pytorch3dunet.train import _create_optimizer, _create_lr_scheduler
from pytorch3dunet.unet3d.losses import get_loss_criterion
from pytorch3dunet.unet3d.metrics import get_evaluation_metric
from pytorch3dunet.unet3d.model import get_model
from pytorch3dunet.unet3d.trainer import UNet3DTrainer
from pytorch3dunet.unet3d.utils import DefaultTensorboardFormatter
class TestUNet3DTrainer:
def test_ce_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'CrossEntropyLoss', 'MeanIoU', 'UNet3D')
def test_wce_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'WeightedCrossEntropyLoss', 'MeanIoU', 'UNet3D')
def test_bce_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'BCEWithLogitsLoss', 'DiceCoefficient', 'UNet3D')
def test_dice_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'DiceLoss', 'MeanIoU', 'UNet3D')
def test_pce_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'PixelWiseCrossEntropyLoss', 'MeanIoU', 'UNet3D',
weight_map=True)
def test_residual_unet(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'CrossEntropyLoss', 'MeanIoU', 'ResidualUNet3D')
def test_2d_unet(self, tmpdir, capsys, train_config_2d):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config_2d, 'CrossEntropyLoss', 'MeanIoU', 'UNet2D',
shape=(3, 1, 128, 128))
def assert_train_save_load(tmpdir, train_config, loss, val_metric, model, weight_map=False, shape=(3, 64, 64, 64)):
max_num_epochs = train_config['trainer']['epochs']
log_after_iters = train_config['trainer']['log_after_iters']
validate_after_iters = train_config['trainer']['validate_after_iters']
max_num_iterations = train_config['trainer']['iters']
trainer = _train_save_load(tmpdir, train_config, loss, val_metric, model, weight_map, shape)
assert trainer.num_iterations == max_num_iterations
assert trainer.max_num_epochs == max_num_epochs
assert trainer.log_after_iters == log_after_iters
assert trainer.validate_after_iters == validate_after_iters
assert trainer.max_num_iterations == max_num_iterations
def _train_save_load(tmpdir, train_config, loss, val_metric, model, weight_map, shape):
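    # Helper: builds two random HDF5 datasets, trains the configured model for a
    # few iterations, then reloads the trainer from 'last_checkpoint.pytorch' to
    # exercise the save/load round trip.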
binary_loss = loss in ['BCEWithLogitsLoss', 'DiceLoss', 'BCEDiceLoss', 'GeneralizedDiceLoss']
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
train_config['model']['name'] = model
train_config.update({
# get device to train on
'device': device,
'loss': {'name': loss, 'weight': np.random.rand(2).astype(np.float32), 'pos_weight': 3.},
'eval_metric': {'name': val_metric}
})
train_config['model']['final_sigmoid'] = binary_loss
if weight_map:
train_config['loaders']['weight_internal_path'] = 'weight_map'
loss_criterion = get_loss_criterion(train_config)
eval_criterion = get_evaluation_metric(train_config)
model = get_model(train_config)
model = model.to(device)
if loss in ['BCEWithLogitsLoss']:
label_dtype = 'float32'
train_config['loaders']['train']['transformer']['label'][0]['dtype'] = label_dtype
train_config['loaders']['val']['transformer']['label'][0]['dtype'] = label_dtype
train = _create_random_dataset(shape, binary_loss)
val = _create_random_dataset(shape, binary_loss)
train_config['loaders']['train']['file_paths'] = [train]
train_config['loaders']['val']['file_paths'] = [val]
loaders = get_train_loaders(train_config)
optimizer = _create_optimizer(train_config, model)
lr_scheduler = _create_lr_scheduler(train_config, optimizer)
formatter = DefaultTensorboardFormatter()
trainer = UNet3DTrainer(model, optimizer, lr_scheduler,
loss_criterion, eval_criterion,
device, loaders, tmpdir,
max_num_epochs=train_config['trainer']['epochs'],
log_after_iters=train_config['trainer']['log_after_iters'],
validate_after_iters=train_config['trainer']['log_after_iters'],
max_num_iterations=train_config['trainer']['iters'],
tensorboard_formatter=formatter)
trainer.fit()
# test loading the trainer from the checkpoint
trainer = UNet3DTrainer.from_checkpoint(os.path.join(tmpdir, 'last_checkpoint.pytorch'),
model, optimizer, lr_scheduler,
loss_criterion, eval_criterion,
loaders, tensorboard_formatter=formatter)
return trainer
def _create_random_dataset(shape, channel_per_class):
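    # Writes a temporary HDF5 file with 'raw', 'label' and 'weight_map' datasets;
    # label/weight volumes are kept 3D, with a channel dimension added to the
    # labels only when channel_per_class is True.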
tmp = NamedTemporaryFile(delete=False)
with h5py.File(tmp.name, 'w') as f:
l_shape = w_shape = shape
# make sure that label and weight tensors are 3D
if len(shape) == 4:
l_shape = shape[1:]
w_shape = shape[1:]
if channel_per_class:
l_shape = (2,) + l_shape
f.create_dataset('raw', data=np.random.rand(*shape))
f.create_dataset('label', data=np.random.randint(0, 2, l_shape))
f.create_dataset('weight_map', data=np.random.rand(*w_shape))
return tmp.name
| [
"torch.cuda.is_available"
] | 3 | flavell-lab/pytorch-3dunet | f6b6c13cb0bb6194e95976b0245b76aaa9e9a496 |
1.6 | from torch.nn import functional as F
from torch import nn
import torch
import numpy as np
from utils import layer
from radam import RAdam
from vpn import MVProp
import utils
from torch_critic import Critic as ClassicCritic
class CriticModel(nn.Module):
def __init__(self, env, layer_number, FLAGS):
super().__init__()
self.q_limit = -FLAGS.time_scale
# Set parameters to give critic optimistic initialization near q_init
self.q_init = -0.067
self.q_offset = -np.log(self.q_limit/self.q_init - 1)
self.no_target_net = FLAGS.no_target_net
self.time_scale = FLAGS.time_scale
self.no_attention = FLAGS.no_attention
self.gaussian_attention = FLAGS.gaussian_attention
self.covariance = FLAGS.covariance
self.offset = FLAGS.window_offset
# Dimensions of goal placeholder will differ depending on layer level
if layer_number == FLAGS.layers - 1 or (layer_number == FLAGS.layers -2 and FLAGS.oracle):
self.goal_dim = env.end_goal_dim
else:
self.goal_dim = env.subgoal_dim
self.loss_val = 0
self.state_dim = env.state_dim
# Dimensions of action placeholder will differ depending on layer level
if layer_number == 0:
action_dim = env.action_dim
else:
action_dim = env.subgoal_dim
def forward(self, v_image, actor_pixel_selection):
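        # Look up the value map at the selected (x, y) pixel for every batch item:
        # first gather along the width with x, then along the height with y,
        # and scale the result by the time horizon.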
# v_image shape [batch_size, height, width]
x_coords = actor_pixel_selection[:, 0]
y_coords = actor_pixel_selection[:, 1]
assert (x_coords >= 0).all()
assert (x_coords < v_image.shape[-1]).all(), (torch.min(x_coords), torch.max(x_coords), v_image.shape)
assert (y_coords >= 0).all()
assert (y_coords < v_image.shape[-2]).all()
x_slice = x_coords.long().unsqueeze(1).unsqueeze(2).expand(-1, v_image.shape[1], -1)
value = v_image.gather(2, x_slice)
y_slice = y_coords.long().unsqueeze(1).unsqueeze(2)
values = value.gather(1, y_slice)
return values * self.time_scale
def actor(self, v_image, pos_coords, probs_grid, sigma=None):
if self.gaussian_attention:
assert sigma is not None
if self.covariance:
masked_v = utils.multivariate_gaussian_attention(v_image, pos_coords, cov=sigma)[0]
else:
masked_v = utils.gaussian_attention(v_image, pos_coords, sigma=sigma)[0]
elif self.no_attention:
masked_v = v_image
else:
# Crop V.
masked_v, x_coords, y_coords = utils.attention(v_image, pos_coords, offset=self.offset)
assert masked_v.shape == probs_grid.shape, (v_image.shape, masked_v.shape, probs_grid.shape)
return (masked_v * probs_grid).sum(dim=[1,2]) * self.time_scale
class Critic():
def __init__(self, device, env, layer_number, FLAGS, learning_rate=0.001, gamma=0.98, tau=0.05):
self.device = device # Session in its TF equivalent
self.critic_name = 'vpn_critic_' + str(layer_number)
self.learning_rate = learning_rate
self.q_limit = -FLAGS.time_scale
self.gamma = gamma
self.tau = tau
self.sac = FLAGS.sac
self.td3 = FLAGS.td3
self.vpn = MVProp(self.gamma, FLAGS, env).to(self.device)
self.no_target_net = FLAGS.no_target_net
# Create critic network graph
self.infer_net = CriticModel(env, layer_number, FLAGS).to(device=self.device)
self.no_weights = FLAGS.no_vpn_weights
self.vpn_masking = FLAGS.vpn_masking
self.classic_critic = None
if FLAGS.boost_vpn:
self.classic_critic = ClassicCritic(device, env, layer_number, FLAGS, learning_rate, gamma, tau)
if not self.no_weights:
opt_class = RAdam if FLAGS.radam else torch.optim.Adam
self.optimizer = opt_class(self.vpn.parameters(), learning_rate)
if FLAGS.no_target_net:
self.target_net = self.infer_net
self.vpn_target = self.vpn
else:
self.target_net = self.infer_net
self.vpn_target = MVProp(self.gamma, FLAGS, env).to(self.device)
self.vpn_target.load_state_dict(self.vpn.state_dict())
self.get_pos_image = lambda states, images: env.pos_image(states[..., :2], images[:, 0])
self.get_image_pos = lambda states, images: torch.stack(env.get_image_position(states[..., :2], images), dim=-1)
def get_Q_value(self,state, goal, action, image):
with torch.no_grad():
q = self.infer_net(self.vpn.critic(image), self.get_image_pos(action, image))
return q
def get_target_Q_value(self,state, goal, action, image):
assert not self.no_target_net
with torch.no_grad():
            q = self.infer_net(self.vpn_target.critic(image), self.get_image_pos(action, image))
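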
return q
def update_target_weights(self):
for source, target in zip(self.vpn.parameters(), self.vpn_target.parameters()):
target.data.copy_(self.tau * source + (1.0 - self.tau) * target)
def _value(self, net, vpn_net, images, states, actions, get_extra_loss=False):
pos_image = self.get_pos_image(states, images)
action_image_position = self.get_image_pos(actions, images)
agent_image_position = self.get_image_pos(states, images)
vpn_values, vpn_probs = vpn_net.actor(images, pos_image)
if self.vpn_masking:
vpn_values, extra_loss = vpn_net.mask_image(vpn_values, vpn_probs, pos_image, agent_image_position)
if get_extra_loss:
return net(vpn_values, action_image_position).squeeze(), extra_loss
return net(vpn_values, action_image_position).squeeze()
def update(self, old_states, old_actions, rewards, new_states, old_goals, new_goals, new_actions, is_terminals, is_weights, next_entropy, images, metrics, total_steps_taken=None):
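        # TD update: bootstrap the Bellman target from the target networks, clamp
        # it to [q_limit, 0], and minimize the (is_weights-weighted) squared error
        # plus any extra masking loss returned by the VPN.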
if self.no_weights:
return torch.ones_like(rewards)
if self.classic_critic is not None:
self.classic_critic.update(old_states, old_actions, rewards, new_states, old_goals, new_actions, is_terminals, is_weights, next_entropy, None, metrics)
with torch.no_grad():
wanted_qs = self._value(self.target_net, self.vpn_target, images, new_states, new_actions)
if self.classic_critic is not None:
alpha = 1 - (min(total_steps_taken, 1e-6) / 1e-6)
wanted_qs_classic = torch.stack([net(new_states, new_goals, new_actions) for net in self.classic_critic.target_nets], dim=0)
wanted_qs_classic = torch.min(wanted_qs_classic, dim=0)[0].detach().squeeze()
                # blend the classic-critic and VPN bootstrap targets
                wanted_qs = alpha * wanted_qs_classic + (1 - alpha) * wanted_qs
wanted_qs = rewards + (1 - is_terminals) * (self.gamma * wanted_qs)
if next_entropy is not None:
wanted_qs -= next_entropy
wanted_qs = torch.clamp(wanted_qs, max=0, min=self.q_limit)
infered_Qs, extra_loss = self._value(self.infer_net, self.vpn, images, old_states, old_actions, get_extra_loss=True)
if is_weights is None:
is_weights = torch.ones_like(wanted_qs)
abs_errors = torch.abs(wanted_qs - infered_Qs).detach()
self.optimizer.zero_grad()
difference = (wanted_qs - infered_Qs)
loss = torch.mean(is_weights * torch.mul(difference, difference), dim=0) + extra_loss
loss.backward()
self.optimizer.step()
metrics[self.critic_name + '/Q_loss'] = loss.item()
metrics[self.critic_name + '/Q_val'] = torch.mean(wanted_qs).item()
return abs_errors
def get_gradients_for_actions(self, state, goal, actor, images):
action, image_location, vpn_values, sigma = actor._action_with_intermediate_results(
actor.infer_net, state, images, pixel_probs=True)
Q = self.infer_net.actor(vpn_values, image_location, action, sigma)
return Q
def state_dict(self):
result = {}
if self.no_weights: return result
result['target_net'] = self.target_net.state_dict()
result['infer_net'] = self.infer_net.state_dict()
result['optimizer'] = self.optimizer.state_dict()
result['vpn'] = self.vpn.state_dict()
result['vpn_target'] = self.vpn_target.state_dict()
return result
def load_state_dict(self, state_dict):
if self.no_weights: return
self.target_net.load_state_dict(state_dict['target_net'])
self.infer_net.load_state_dict(state_dict['infer_net'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.vpn.load_state_dict(state_dict['vpn'])
self.vpn_target.load_state_dict(state_dict['vpn_target'])
| [
"torch.mul",
"torch.min",
"torch.max",
"torch.no_grad",
"torch.clamp",
"torch.abs",
"torch.ones_like",
"torch.mean"
] | 1.6.0 | christsa/hide-rl | 47dc3dfd93b817831473c07137a6a6e7f2eda549 |
1.0 | import unittest
import torch
import pyprob
from pyprob import util
from pyprob.nn import EmbeddingFeedForward, EmbeddingCNN2D5C, EmbeddingCNN3D5C
class NNTestCase(unittest.TestCase):
def test_nn_EmbeddingFeedForward(self):
batch_size = 32
input_shape = [100, 100]
output_shape = [128]
input_batch_shape = [batch_size] + input_shape
output_batch_shape_correct = [batch_size] + output_shape
input_batch = torch.zeros(input_batch_shape)
nn = EmbeddingFeedForward(input_shape=torch.Size(input_shape), output_shape=torch.Size(output_shape))
output_batch = nn(input_batch)
output_batch_shape = list(output_batch.size())
util.eval_print('input_shape', 'output_shape', 'batch_size', 'input_batch_shape', 'output_batch_shape', 'output_batch_shape_correct')
self.assertEqual(output_batch_shape, output_batch_shape_correct)
def test_nn_EmbeddingCNN2D5C(self):
batch_size = 32
input_shape = [3, 100, 100]
output_shape = [128]
input_batch_shape = [batch_size] + input_shape
output_batch_shape_correct = [batch_size] + output_shape
input_batch = torch.zeros(input_batch_shape)
nn = EmbeddingCNN2D5C(input_shape=torch.Size(input_shape), output_shape=torch.Size(output_shape))
output_batch = nn(input_batch)
output_batch_shape = list(output_batch.size())
util.eval_print('input_shape', 'output_shape', 'batch_size', 'input_batch_shape', 'output_batch_shape', 'output_batch_shape_correct')
self.assertEqual(output_batch_shape, output_batch_shape_correct)
def test_nn_EmbeddingCNN3D5C(self):
batch_size = 32
input_shape = [2, 25, 25, 25]
output_shape = [128]
input_batch_shape = [batch_size] + input_shape
output_batch_shape_correct = [batch_size] + output_shape
input_batch = torch.zeros(input_batch_shape)
nn = EmbeddingCNN3D5C(input_shape=torch.Size(input_shape), output_shape=torch.Size(output_shape))
output_batch = nn(input_batch)
output_batch_shape = list(output_batch.size())
util.eval_print('input_shape', 'output_shape', 'batch_size', 'input_batch_shape', 'output_batch_shape', 'output_batch_shape_correct')
self.assertEqual(output_batch_shape, output_batch_shape_correct)
if __name__ == '__main__':
pyprob.set_random_seed(123)
pyprob.set_verbosity(1)
unittest.main(verbosity=2)
| [
"torch.zeros",
"torch.Size"
] | 1.0.0 | probprog/pyprob | 0713ff6d25e5db475a5b97d8d5e87bf70e977599 |
1.1 | import torch
import numpy as np
def masked_mae_loss(y_pred, y_true):
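    # Mean absolute error that ignores entries where the target is 0 (missing
    # readings); the mask is rescaled by its own mean so the masked loss stays on
    # the same scale as an unmasked average.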
mask = (y_true != 0).float()
mask /= mask.mean()
loss = torch.abs(y_pred - y_true)
loss = loss * mask
# trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
loss[loss != loss] = 0
return loss.mean()
def masked_mse_loss(y_pred, y_true):
mask = (y_true != 0).float()
mask /= mask.mean()
loss = torch.abs(y_pred - y_true)**2
loss = loss * mask
# trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
loss[loss != loss] = 0
return loss.mean()
def masked_mape_loss(y_pred, y_true):
mask = (y_true != 0).float()
mask /= mask.mean()
loss = torch.abs(y_pred - y_true)/y_true
loss = loss * mask
    # print(mask)  # all ones
# print(mask.mean()) #tensor(1.)
# print(mask.sum())
# print((y_true == 0).float().sum()) #tensor(0.)
# trick for nans: https://discuss.pytorch.org/t/how-to-set-nan-in-tensor-to-0/3918/3
loss[loss != loss] = 0
return loss.mean()
def masked_mse(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = (preds-labels)**2
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def masked_rmse(preds, labels, null_val=np.nan):
return torch.sqrt(masked_mse(preds=preds, labels=labels, null_val=null_val))
def masked_mae(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def masked_mape(preds, labels, null_val=np.nan):
if np.isnan(null_val):
mask = ~torch.isnan(labels)
else:
mask = (labels!=null_val)
mask = mask.float()
mask /= torch.mean((mask))
mask = torch.where(torch.isnan(mask), torch.zeros_like(mask), mask)
loss = torch.abs(preds-labels)/labels
loss = loss * mask
loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)
return torch.mean(loss)
def metric(pred, real):
mae = masked_mae(pred,real,0.0).item()
mape = masked_mape(pred,real,0.0).item()
# mape = masked_mape_loss(pred,real).item()
rmse = masked_rmse(pred,real,0.0).item()
return mae,mape,rmse
def get_normalized_adj(A):
"""
Returns the degree normalized adjacency matrix.
"""
A = A + np.diag(np.ones(A.shape[0], dtype=np.float32))
D = np.array(np.sum(A, axis=1)).reshape((-1,))
D[D <= 10e-5] = 10e-5 # Prevent infs
diag = np.reciprocal(np.sqrt(D))
A_wave = np.multiply(np.multiply(diag.reshape((-1, 1)), A),
diag.reshape((1, -1)))
return A_wave | [
"torch.isnan",
"torch.abs",
"torch.zeros_like",
"torch.mean"
] | 1.1 | kevin-xuan/Traffic-Benchmark | b9f8e40b4df9b58f5ad88432dc070cbbbcdc0228 |
1.9 | import torch
from .utils import periodic_dis
def compute_idx_of_sufficient_stat(L, J, dj, dl, dn):
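    # Enumerates the (j1, k1, j2, k2, ell2, dn1, dn2) index tuples of the wavelet
    # phase-harmonic moments kept in the statistic: same-scale pairs (j1 == j2)
    # first, then cross-scale pairs with j1 < j2 <= j1 + dj; the second angle ell2
    # stays within dl of 0, and translation offsets dn2 are disabled near the
    # coarsest scales.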
L2 = L * 2
idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2 = [], [], [], [], [], [], []
idx_lists = (idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2)
# j1=j2, k1=0,1, k2=0 or 1
for j1 in range(J):
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 0, 1, 0, 0)
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 1, 1, 0, 0)
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 0, 0, 0, 0)
if j1 == J - 1:
max_dn = 0
elif j1 == J - 2:
max_dn = min(1, dn)
else:
max_dn = dn
for n in range(4*max_dn):
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 1, 1, 0, (n+1))
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 0, 0, 0, (n+1))
# k1 = 0,1
# k2 = 0,1 or 2**(j2-j1)
# k2 > k1
# j1+1 <= j2 <= min(j1+dj,J-1)
for j1 in range(J):
for j2 in range(j1 + 1, min(j1 + dj + 1, J)):
if j2 == J - 1:
max_dn = 0
elif j2 == J - 2:
max_dn = min(1, dn)
else:
max_dn = dn
for n in range(4 * max_dn):
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j2, 1, 2 ** (j2 - j1), 0, (n+1))
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j2, 0, 1, 0, (n + 1))
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j2, 1, 2**(j2-j1), 0, 0)
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, dl, j1, j2, 0, 1, 0, 0)
print("Total number of coefficient: " + str(len(idx_k2)))
return get_idx_wph(idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2)
def compute_idx_of_sufficient_stat_PS(L, J, dj, dl, dn):
L2 = L * 2
idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2 = [], [], [], [], [], [], []
idx_lists = (idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2)
# j1=j2, k1=1, k2=1
for j1 in range(J):
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 1, 1, 0, 0)
if j1 == J - 1:
max_dn = 0
elif j1 == J - 2:
max_dn = min(1, dn)
else:
max_dn = dn
for n in range(4*max_dn):
add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 1, 1, 0, (n+1))
print("Total number of coefficient: " + str(len(idx_k2)))
return get_idx_wph(idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2)
def add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, dl, j1, j2, k1, k2, dn1, dn2):
idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2 = idx_lists
for ell2 in range(L2):
#for ell2 in range(0,L2,2):
if periodic_dis(0, ell2, L2) <= dl:
idx_j1.append(j1)
idx_j2.append(j2)
idx_k1.append(k1)
idx_k2.append(k2)
idx_ell2.append(ell2)
idx_dn1.append(dn1)
idx_dn2.append(dn2)
def get_idx_wph(idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2):
idx_wph = dict()
idx_wph['j1'] = torch.tensor(idx_j1).type(torch.long)
idx_wph['k1'] = torch.tensor(idx_k1).type(torch.long)
idx_wph['ell2'] = torch.tensor(idx_ell2).type(torch.long)
idx_wph['j2'] = torch.tensor(idx_j2).type(torch.long)
idx_wph['k2'] = torch.tensor(idx_k2).type(torch.long)
idx_wph['dn1'] = torch.tensor(idx_dn1).type(torch.long)
idx_wph['dn2'] = torch.tensor(idx_dn2).type(torch.long)
return idx_wph
| [
"torch.tensor"
] | 1.9.0 | Eralys/pywph_dev | bb864050c73b168c32a59f37ac0aca71ff159aed |
1.6 | import gc
import os
import pickle as pkl
from captum import attr
import numpy as np
from captum.attr import IntegratedGradients
from datasets import Dataset
import torch
import torch.nn.functional as F
from tqdm.auto import tqdm
import collections
import numpy as np
from transformers import Trainer
import argparse
from omegaconf import OmegaConf
from src.datasets import *
from src.models import *
from src.utils.mapper import configmapper
import pickle as pkl
from IPython.core.display import HTML
from src.utils.viz import format_word_importances, save_to_file
from evaluation.fix_spans import _contiguous_ranges
def postprocess_spans_with_index(
features,
examples,
raw_predictions,
tokenizer,
n_best_size=20,
max_answer_length=30,
squad_v2=False,
):
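    # Map raw start/end logits back to character spans: group features by example,
    # take the n_best_size start/end indices per feature, keep in-bounds candidates
    # no longer than max_answer_length, and rank them by start_logit + end_logit
    # (a null answer is prepended whenever it scores at least as high).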
all_start_logits, all_end_logits = raw_predictions
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(list(examples["id"]))}
features_per_example = collections.defaultdict(list)
columns = ["input_ids", "attention_mask", "token_type_ids"]
features.set_format(type="torch", columns=columns, output_all_columns=True)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
predictions = collections.OrderedDict()
# Logging.
print(
f"Post-processing {len(examples)} example predictions split into {len(features)} features."
)
# Let's loop over all the examples!
for example_index in tqdm(range(len(examples))):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_score = None # Only used if squad_v2 is True.
valid_answers = []
context = examples[example_index]["context"]
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
# This is what will allow us to map some the positions
# in our logits to span of texts in the original context.
offset_mapping = features[feature_index]["offset_mapping"]
# Update minimum null prediction.
cls_index = list(features[feature_index]["input_ids"]).index(
tokenizer.cls_token_id
)
feature_null_score = start_logits[cls_index] + end_logits[cls_index]
if min_null_score is None or min_null_score < feature_null_score:
min_null_score = feature_null_score
# Go through all possibilities for the `n_best_size` greater start and end logits.
start_indexes = np.argsort(start_logits)[
-1 : -n_best_size - 1 : -1
].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers,
# either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that
# is either < 0 or > max_answer_length.
if (
end_index < start_index
or end_index - start_index + 1 > max_answer_length
):
continue
start_char = offset_mapping[start_index][0]
end_char = offset_mapping[end_index][1]
valid_answers.append(
{
"score": start_logits[start_index] + end_logits[end_index],
"text": context[start_char:end_char],
"start": start_char,
"end": end_char,
"start_index": start_index,
"end_index": end_index,
}
)
if len(valid_answers) > 0:
sorted_answers = sorted(
valid_answers, key=lambda x: x["score"], reverse=True
)
else:
# In the very rare edge case we have not a single non-null prediction,
# we create a fake prediction to avoid failure.
sorted_answers = [{"text": "", "score": 0.0, "start": None, "end": None}]
# Let's pick our final answer: the best one or the null answer (only for squad_v2)
if sorted_answers[0]["score"] <= min_null_score:
sorted_answers = [
{"text": "", "score": min_null_score, "start": None, "end": None},
] + sorted_answers
predictions[examples[example_index]["id"]] = sorted_answers
return predictions
def get_spans_token_indices_above_threshold(
model, feature, example, threshold, tokenizer
):
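    # Run span prediction on a single example and return the
    # (start_token_index, end_token_index) pairs whose sigmoid(score) exceeds the
    # given threshold.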
# print(feature)
trainer = Trainer(
model,
)
# print(feature)
raw_predictions = trainer.predict(feature)
feature.set_format(
type=feature.format["type"], columns=list(feature.features.keys())
)
# print(feature)
predictions = postprocess_spans_with_index(
feature, example, raw_predictions.predictions, tokenizer
)
start_end_indices = []
for span in list(predictions.values())[0]: ## Should Contain Only One Example
if torch.sigmoid(torch.tensor(span["score"])) > threshold:
start_end_indices.append((span["start_index"], span["end_index"]))
return start_end_indices
def get_token_token_indices(model, feature, tokenizer):
trainer = Trainer(model)
predictions = trainer.predict(feature)
preds = predictions.predictions
preds = np.argmax(preds, axis=2)
token_indices = []
input_ids = feature["input_ids"][0]
for j, pred in enumerate(preds[0]): ## Should Contain Only One Example
if pred == 1 and input_ids[j] != tokenizer.pad_token_id: ## Toxic
token_indices.append(j)
return sorted(list(set(token_indices)))
def get_token_model_output(
embedding_outputs, model, attention_masks, name="bert", position=None
):
if name == "bert":
extended_attention_masks = model.bert.get_extended_attention_mask(
attention_masks, embedding_outputs.shape, torch.device("cuda")
)
# print(embedding_outputs,attention_masks,extended_attention_masks)
out = model.bert.encoder(
embedding_outputs, extended_attention_masks, return_dict=None
)[0]
else:
extended_attention_masks = model.roberta.get_extended_attention_mask(
attention_masks, embedding_outputs.shape, torch.device("cuda")
)
out = model.roberta.encoder(
embedding_outputs, extended_attention_masks, return_dict=None
)[0]
out = model.dropout(out)
logits = model.classifier(out)
return F.softmax(logits, dim=2)[:, :, 1] ## Select only Toxic Logits
def get_spans_model_output(
embedding_outputs, model, attention_masks, name="bert", position="start"
):
if name == "bert":
extended_attention_masks = model.bert.get_extended_attention_mask(
attention_masks, embedding_outputs.shape, torch.device("cuda")
).cuda()
out = model.bert.encoder(
embedding_outputs, extended_attention_masks, return_dict=None
)[0]
else:
extended_attention_masks = model.roberta.get_extended_attention_mask(
attention_masks, embedding_outputs.shape, torch.device("cuda")
).cuda()
out = model.roberta.encoder(
embedding_outputs, extended_attention_masks, return_dict=None
)[0]
out = model.qa_outputs(out)
start_logits, end_logits = out.split(1, dim=-1)
pred = (
F.softmax(start_logits, dim=1)
if position == "start"
else F.softmax(end_logits, dim=1)
)
return pred.reshape(-1, embedding_outputs.size(-2))
def get_embedding_outputs(model, input_ids, name="bert"):
if name == "bert":
return model.bert.embeddings(input_ids)
else:
return model.roberta.embeddings(input_ids)
def get_token_wise_attributions(
fn,
model,
embedding_outputs,
attention_masks,
name,
position,
token_index,
n_steps,
internal_batch_size=4,
method="riemann_right",
):
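    # Integrated Gradients over the embedding outputs for one target position:
    # captum integrates fn's output w.r.t. the embeddings along n_steps of the
    # chosen Riemann rule, batching the interpolated inputs internally, and also
    # returns the convergence delta of the approximation.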
int_grad = IntegratedGradients(
fn,
multiply_by_inputs=True,
)
attributions, approximation_error = int_grad.attribute(
embedding_outputs,
target=token_index,
n_steps=n_steps,
method=method,
additional_forward_args=(model, attention_masks, name, position),
internal_batch_size=internal_batch_size,
return_convergence_delta=True,
)
return {
"attributions": attributions,
"delta": approximation_error,
}
def get_token_wise_importances(input_ids, attributions, tokenizer):
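    # Collapse per-dimension attributions into one score per token via the L2 norm
    # over the embedding dimension, then normalize so the scores sum to 1 across
    # the sequence.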
tokens = tokenizer.convert_ids_to_tokens(input_ids[0])
token_wise_attributions = torch.linalg.norm(attributions, dim=1)
token_wise_importances = token_wise_attributions / torch.sum(
token_wise_attributions, dim=0
).reshape(
-1, 1
) # Normalize by sum across seq_length
return (
tokens,
token_wise_importances.squeeze(0).detach().cpu().numpy(),
)
def get_word_wise_importances_spans(
input_ids, offset_mapping, importances, text, tokenizer, name="bert"
):
question = text[0]
context = text[1]
tokens = tokenizer.convert_ids_to_tokens(input_ids[0])
offset_mapping = offset_mapping[0]
question_offsets = tokenizer(
"offense", add_special_tokens=False, return_offsets_mapping=True
)["offset_mapping"]
i = 1
while i < len(offset_mapping) and tokens[i] != "[SEP]":
offset_mapping[i] = question_offsets[i - 1]
i += 1
word_wise_importances = []
word_wise_offsets = []
words = []
is_context = False
if name == "bert":
for i, token in enumerate(tokens):
if token == "[SEP]":
is_context = not is_context
continue
if token == "[CLS]":
is_context = False
continue
if token == "[PAD]":
continue
if token.startswith("##"):
if (
tokens[i - 1] == "[SEP]"
): # Tokens can be broked due to stride after the [SEP]
word_wise_importances.append(
importances[i]
) # We just make new entries for them
word_wise_offsets.append(offset_mapping[i])
if is_context:
words.append(
context[word_wise_offsets[-1][0] : word_wise_offsets[-1][1]]
)
else:
words.append(
question[
word_wise_offsets[-1][0] : word_wise_offsets[-1][1]
]
)
else:
word_wise_importances[-1] += importances[i]
word_wise_offsets[-1] = (
word_wise_offsets[-1][0],
offset_mapping[i][1],
) ## Expand the offsets
if is_context:
words[-1] = context[
word_wise_offsets[-1][0] : word_wise_offsets[-1][1]
]
else:
words[-1] = question[
word_wise_offsets[-1][0] : word_wise_offsets[-1][1]
]
else:
word_wise_importances.append(
importances[i]
) # We just make new entries for them
word_wise_offsets.append(offset_mapping[i])
if is_context:
words.append(
context[word_wise_offsets[-1][0] : word_wise_offsets[-1][1]]
)
else:
words.append(
question[word_wise_offsets[-1][0] : word_wise_offsets[-1][1]]
)
else:
raise NotImplementedError("Not defined for any other model name than 'bert'")
return (
words,
word_wise_importances / np.sum(word_wise_importances),
word_wise_offsets,
)
def get_word_wise_importances(
input_ids, offset_mapping, importances, text, tokenizer, name="bert"
):
tokens = tokenizer.convert_ids_to_tokens(input_ids[0])
offset_mapping = offset_mapping[0]
print(offset_mapping)
word_wise_importances = []
word_wise_offsets = []
words = []
if name == "bert":
for i, token in enumerate(tokens):
if token in ["[SEP]", "[PAD]", "[CLS]"]:
continue
if token.startswith("##"):
if (
tokens[i - 1] == "[SEP]"
): # Tokens can be broked due to stride after the [SEP]
word_wise_importances.append(
importances[i]
) # We just make new entries for them
word_wise_offsets.append(offset_mapping[i])
words.append(
text[word_wise_offsets[-1][0] : word_wise_offsets[-1][1]]
)
else:
word_wise_importances[-1] += importances[i]
word_wise_offsets[-1] = (
word_wise_offsets[-1][0],
offset_mapping[i][1],
)
words[-1] = text[
word_wise_offsets[-1][0] : word_wise_offsets[-1][1]
]
else:
word_wise_importances.append(
importances[i]
) # We just make new entries for them
word_wise_offsets.append(offset_mapping[i])
words.append(text[word_wise_offsets[-1][0] : word_wise_offsets[-1][1]])
else:
for i, token in enumerate(tokens):
if token in ["<s>", "</s>", "<pad>"]:
continue
if (
tokens[i - 1] in ["<s>", "</s>"] and token[i] not in ["<s>", "</s>"]
) or token.startswith("Ġ"):
word_wise_importances.append(
importances[i]
) # We just make new entries for them
word_wise_offsets.append(offset_mapping[i])
words.append(text[word_wise_offsets[-1][0] : word_wise_offsets[-1][1]])
else:
word_wise_importances[-1] += importances[i]
word_wise_offsets[-1] = (
word_wise_offsets[-1][0],
offset_mapping[i][1],
)
words[-1] = text[word_wise_offsets[-1][0] : word_wise_offsets[-1][1]]
return (
words,
word_wise_importances / np.sum(word_wise_importances),
word_wise_offsets,
)
def get_importances(
model,
name,
feature,
example,
fn,
tokenizer,
text,
n_steps,
typ="spans",
threshold=None,
):
columns = ["input_ids", "attention_mask", "token_type_ids"]
feature.set_format(
type="torch", columns=columns, device="cuda", output_all_columns=True
)
embedding_outputs = get_embedding_outputs(model, feature["input_ids"], name)
if typ == "spans":
start_end_indices = get_spans_token_indices_above_threshold(
model, feature, example, threshold, tokenizer
)
print(start_end_indices)
feature.set_format(
type="torch", columns=columns, device="cuda", output_all_columns=True
)
start_indices = list(set([temp[0] for temp in start_end_indices]))
end_indices = list(set([temp[1] for temp in start_end_indices]))
all_token_importances = np.array([])
start_attributions_maps = {}
end_attributions_maps = {}
for start_index in start_indices:
start_attributions = get_token_wise_attributions(
fn,
model,
embedding_outputs,
feature["attention_mask"],
name,
"start",
start_index,
n_steps,
)
start_attributions_maps[start_index] = start_attributions
for end_index in end_indices:
end_attributions = get_token_wise_attributions(
fn,
model,
embedding_outputs,
feature["attention_mask"],
name,
"end",
end_index,
n_steps,
)
end_attributions_maps[end_index] = end_attributions
for indices in start_end_indices:
start_pos = indices[0]
end_pos = indices[1]
total_attributions = (
start_attributions_maps[start_pos]["attributions"][0]
+ end_attributions_maps[end_pos]["attributions"][0]
)
tokens, total_importance_scores = get_token_wise_importances(
feature["input_ids"], total_attributions, tokenizer
)
all_token_importances = np.append(
all_token_importances, total_importance_scores
)
all_token_importances = all_token_importances.reshape(
len(start_end_indices), -1
)
avg_token_importances = np.mean(all_token_importances, axis=0)
word_importances = get_word_wise_importances_spans(
feature["input_ids"],
feature["offset_mapping"],
avg_token_importances,
text,
tokenizer,
name,
)
else:
token_indices = get_token_token_indices(model, feature, tokenizer)
print(token_indices)
feature.set_format(
type="torch", columns=columns, device="cuda", output_all_columns=True
)
all_token_importances = np.array([])
for index in token_indices:
pos = [index]
attributions = get_token_wise_attributions(
fn,
model,
embedding_outputs,
feature["attention_mask"],
name,
None,
pos,
n_steps,
)
attributions = attributions["attributions"][0]
tokens, importance_scores = get_token_wise_importances(
feature["input_ids"], attributions, tokenizer
)
all_token_importances = np.append(all_token_importances, importance_scores)
all_token_importances = all_token_importances.reshape(len(token_indices), -1)
avg_token_importances = np.mean(all_token_importances, axis=0)
word_importances = get_word_wise_importances(
feature["input_ids"],
feature["offset_mapping"],
avg_token_importances,
text,
tokenizer,
name,
)
return {
"word_importances": word_importances,
# batches, batch_size, len of examples
"token_importances": (tokens, avg_token_importances),
# batches,len of layers, batch_size, len of examples
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog="integrated_gradients.py",
description="Script to run IG on a model and an example.",
)
parser.add_argument(
"--config",
type=str,
action="store",
help="The configuration for IG",
)
args = parser.parse_args()
ig_config = OmegaConf.load(args.config)
data_config = ig_config.data_config
dataset = configmapper.get_object("datasets", data_config.name)(data_config)
if ig_config.type == "spans":
example_intermediate = dataset.intermediate_test_dataset["test"][
ig_config.sample_index
]
for key in example_intermediate.keys():
example_intermediate[key] = [example_intermediate[key]]
example = Dataset.from_dict(example_intermediate)
# print(example)
potential_feature_indices = [
i
for i, feature in enumerate(dataset.test_tokenized_inputs["test"])
if feature["example_id"] == example[0]["id"]
]
feature_intermediate = dataset.test_tokenized_inputs["test"][
potential_feature_indices[0]
] # Take First Feature
for key in feature_intermediate.keys():
feature_intermediate[key] = [feature_intermediate[key]]
feature = Dataset.from_dict(feature_intermediate)
fn = get_spans_model_output
with open(ig_config.thresh_file, "r") as f:
thresh = float(f.read().split()[0])
text = (example["question"][0], example["context"][0])
ignore_first_word = True
else:
example_intermediate = dataset.test_dataset["test"][ig_config.sample_index]
for key in example_intermediate.keys():
example_intermediate[key] = [example_intermediate[key]]
example = Dataset.from_dict(example_intermediate)
# print(example)
feature_intermediate = dataset.test_tokenized_inputs["test"][
ig_config.sample_index
]
for key in feature_intermediate.keys():
feature_intermediate[key] = [feature_intermediate[key]]
feature = Dataset.from_dict(feature_intermediate)
# print(feature)
fn = get_token_model_output
thresh = None
text = example["text"][0]
ignore_first_word = False
if not os.path.exists(ig_config.word_out_file):
model_class = configmapper.get_object("models", ig_config.model_name)
model = model_class.from_pretrained(**ig_config.pretrained_args)
model.cuda()
model.eval()
tokenizer = AutoTokenizer.from_pretrained(data_config.model_checkpoint_name)
importances = get_importances(
model,
ig_config.name, # bert or roberta
feature,
example,
fn,
tokenizer,
text,
ig_config.n_steps,
ig_config.type, # 'spans' or 'token'
thresh,
)
if not os.path.exists(ig_config.out_dir + "/" + str(ig_config.sample_index)):
os.makedirs(ig_config.out_dir + "/" + str(ig_config.sample_index))
with open(ig_config.word_out_file, "wb") as f:
pkl.dump(importances["word_importances"], f)
with open(ig_config.token_out_file, "wb") as f:
pkl.dump(importances["token_importances"], f)
words, importances, word_wise_offsets = importances["word_importances"]
else:
with open(ig_config.word_out_file, "rb") as f:
words, importances, word_wise_offsets = pkl.load(f)
ground_spans = _contiguous_ranges(
eval(pd.read_csv(ig_config.ground_truths_file)["spans"][ig_config.sample_index])
)
predicted_spans = _contiguous_ranges(
eval(
pd.read_csv(ig_config.predictions_file, header=None, sep="\t")[1][
ig_config.sample_index
]
)
)
ground_text_spans = []
predicted_text_spans = []
if ignore_first_word:
for span in ground_spans:
ground_text_spans.append(text[1][span[0] : span[1] + 1])
for span in predicted_spans:
predicted_text_spans.append(text[1][span[0] : span[1] + 1])
else:
for span in ground_spans:
ground_text_spans.append(text[span[0] : span[1] + 1])
for span in predicted_spans:
predicted_text_spans.append(text[span[0] : span[1] + 1])
# print(words)
# print(importances)
# print(ground_text_spans)
# print(predicted_text_spans)
html = format_word_importances(
words, importances, ground_text_spans, predicted_text_spans
)
save_to_file(html, ig_config.viz_out_file)
| [
"torch.device",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.linalg.norm",
"torch.sum"
] | 1.6.0 | gchhablani/toxic-spans-detection | 5eeba0c069bef8c707d9c5fef8c6048c98d89ba5 |
1.9 | import argparse
import torch
import glob
from pig.models import PeppaPig
import pig.data
import pytorch_lightning as pl
import logging
from torch.utils.data import DataLoader
from dataclasses import dataclass
import pandas as pd
import numpy as np
import torch
import random
import yaml
from copy import deepcopy
random.seed(666)
torch.manual_seed(666)
BATCH_SIZE=8
def data_statistics():
rows = []
for split in ['train', 'val', 'test']:
for fragment_type in ['dialog', 'narration']:
if pig.data.SPLIT_SPEC[fragment_type][split] is not None:
ds = pig.data.PeppaPigIterableDataset(
target_size=(180, 100),
split=[split],
fragment_type=fragment_type,
duration=2.3)
duration = np.array([clip.duration for clip in ds._raw_clips() ])
rows.append({'Split': split, 'Type': fragment_type,
'Size (h)': duration.sum() / 60 / 60,
'# Clips': len(duration)})
data = pd.DataFrame.from_records(rows)
data.to_csv("results/data_statistics.csv", index=False, header=True)
data.to_latex("results/data_statistics.tex", index=False, header=True, float_format="%.2f")
def load_best_model(dirname, higher_better=True):
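    # Scan all Lightning checkpoints under dirname, read the ModelCheckpoint
    # callback state from each, and restore the weights of the checkpoint with the
    # best monitored score (highest wins by default).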
info = []
for path in glob.glob(f"{dirname}/checkpoints/*.ckpt"):
cp = torch.load(path, map_location='cpu')
item = cp['callbacks'][pl.callbacks.model_checkpoint.ModelCheckpoint]
if item['best_model_score'] is not None:
info.append(item)
best = sorted(info, key=lambda x: x['best_model_score'], reverse=higher_better)[0]
logging.info(f"Best {best['monitor']}: {best['best_model_score']} at {best['best_model_path']}")
local_model_path = best['best_model_path'].split("/peppa/")[1]
net = PeppaPig.load_from_checkpoint(local_model_path, hparams_file=f"{dirname}/hparams.yaml")
return net, best['best_model_path']
def score_means(data):
rows = []
for item in data:
row = deepcopy(item)
row['triplet_acc_std'] = row['triplet_acc'].std().item()
row['triplet_acc'] = row['triplet_acc'].mean().item()
row['recall_at_10_fixed_std'] = row['recall_at_10_fixed'].mean(dim=1).std().item()
row['recall_at_10_fixed'] = row['recall_at_10_fixed'].mean(dim=1).mean().item()
row['recall_at_10_jitter_std'] = row['recall_at_10_jitter'].mean(dim=1).std().item()
row['recall_at_10_jitter'] = row['recall_at_10_jitter'].mean(dim=1).mean().item()
rows.append(row)
return pd.DataFrame.from_records(rows)
def full_score(model, gpus, split=['val']):
"""Compute all standard scores for the given model. """
trainer = pl.Trainer(gpus=gpus, logger=False, precision=16)
data = []
if split == ['test']:
types = ['narration']
elif split ==['val']:
types = ['dialog', 'narration']
else:
raise NotImplementedError
for fragment_type in types:
for scrambled_video in [False, True]:
logging.info(f"Evaluating: {fragment_type}, scramble={scrambled_video} triplet")
acc = triplet_score(fragment_type, model, trainer, scrambled_video=scrambled_video, split=split)
logging.info(f"Evaluating: {fragment_type}, scramble={scrambled_video} recall_fixed")
rec_fixed = resampled_retrieval_score(fragment_type,
model,
trainer,
duration=2.3,
jitter=False,
jitter_sd=None,
scrambled_video=scrambled_video,
split=split,
one_to_n=True)
logging.info(f"Evaluating: {fragment_type}, scramble={scrambled_video} recall_jitter")
rec_jitter = resampled_retrieval_score(fragment_type,
model,
trainer,
duration=2.3,
jitter=True,
jitter_sd=0.5,
scrambled_video=scrambled_video,
split=split,
one_to_n=True)
data.append(dict(fragment_type=fragment_type,
scrambled_video=scrambled_video,
triplet_acc=acc,
recall_fixed=rec_fixed,
recall_jitter=rec_jitter,
recall_at_10_fixed=rec_fixed[:,10,:],
recall_at_10_jitter=rec_jitter[:,10,:]))
return data
def retrieval_score(fragment_type, model, trainer, duration=2.3, jitter=False, jitter_sd=None, batch_size=BATCH_SIZE, split=['val']):
base_ds = pig.data.PeppaPigDataset(
target_size=model.config["data"]["target_size"],
split=split,
fragment_type=fragment_type,
duration=duration,
jitter=jitter,
jitter_sd=jitter_sd
)
key = lambda x: x.audio_duration
loader = pig.data.grouped_loader(base_ds, key, pig.data.collate, batch_size=batch_size)
V, A = zip(* [(batch.video, batch.audio) for batch
in trainer.predict(model, loader) ])
V = torch.cat(V, dim=0)
A = torch.cat(A, dim=0)
correct = torch.eye(V.shape[0], device=A.device)
rec10 = pig.metrics.recall_at_n(V, A, correct=correct, n=10).mean().item()
return rec10
def resampled_retrieval_score(fragment_type,
model,
trainer,
duration=2.3,
jitter=False,
jitter_sd=None,
batch_size=BATCH_SIZE,
scrambled_video=False,
split=['val'],
one_to_n=False
):
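    # Retrieval evaluation: encode all clips, then (presumably) estimate recall@k
    # on resampled candidate sets of 100 clips over 500 draws; returns the full
    # recall@1..N curve when one_to_n, otherwise only the recall@10 slice.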
base_ds = pig.data.PeppaPigDataset(
target_size=model.config["data"]["target_size"],
split=split,
fragment_type=fragment_type,
duration=duration,
audio_sample_rate=model.config["data"].get('audio_sample_rate',
pig.data.DEFAULT_SAMPLE_RATE),
jitter=jitter,
jitter_sd=jitter_sd,
scrambled_video=scrambled_video,
)
key = lambda x: x.audio_duration
loader = pig.data.grouped_loader(base_ds, key, pig.data.collate, batch_size=batch_size)
V, A = zip(* [(batch.video, batch.audio) for batch
in trainer.predict(model, loader) ])
V = torch.cat(V, dim=0)
A = torch.cat(A, dim=0)
rec = pig.metrics.resampled_recall_at_1_to_n(V, A, size=100, n_samples=500, N=10)
if one_to_n:
return rec
else:
return rec[:,10,:]
def triplet_score(fragment_type, model, trainer, batch_size=BATCH_SIZE, scrambled_video=False, split=['val']):
from pig.triplet import TripletScorer
scorer = TripletScorer(fragment_type=fragment_type, split=split, target_size=model.config["data"]["target_size"],
audio_sample_rate=model.config["data"].get('audio_sample_rate',
pig.data.DEFAULT_SAMPLE_RATE),
scrambled_video=scrambled_video)
acc = scorer.evaluate(model, trainer=trainer, n_samples=500, batch_size=batch_size)
return acc
def comparative_triplet_score(fragment_type, models, trainer, batch_size=BATCH_SIZE,
scrambled_video=False, split=['val']):
from pig.triplet import TripletScorer, comparative_score_triplets
scorers = [ TripletScorer(fragment_type=fragment_type, split=split,
target_size=model.config["data"]["target_size"],
audio_sample_rate=model.config["data"].get('audio_sample_rate',
pig.data.DEFAULT_SAMPLE_RATE),
scrambled_video=scrambled_video)
for model in models ]
for i in range(len(models)):
scorers[i]._encode(models[i], trainer, batch_size)
result = comparative_score_triplets([ scorer._video for scorer in scorers],
[ scorer._audio for scorer in scorers],
scorers[0]._duration,
n_samples=500)
return result
def pretraining(row):
return { (True, True): "AV",
(True, False): "A",
(False, True): "V",
(False, False): "None"}[row['audio_pretrained'],
row['video_pretrained']]
def format():
data = torch.load("results/full_scores.pt")
data = add_condition(data)
data = score_means(data)
for fragment_type in ['dialog', 'narration']:
table = data.query(f"fragment_type=='{fragment_type}'")
table['pretraining'] = pd.Categorical(table.apply(pretraining, axis=1),
categories=['AV', 'A', 'V', 'None'])
table[['version', 'static', 'jitter', 'pretraining', 'resolution',
'recall_at_10_fixed', 'recall_at_10_jitter', 'triplet_acc']]\
.sort_values(by=['static', 'jitter', 'pretraining', 'resolution'])\
.replace(True, "Yes").replace(False, "")\
.rename(columns=dict(version='ID',
static='Static',
jitter='Jitter',
pretraining='Pretraining',
resolution='Resolution',
recall_at_10_fixed='R@10 (fixed)',
recall_at_10_jitter='R@10 (jitter)',
triplet_acc='Triplet Acc'))\
.to_latex(buf=f"results/scores_{fragment_type}.tex",
index=False,
float_format="%.3f")
def add_condition(data):
rows = []
for row in data:
record = {k:v for k,v in row.items()}
config = yaml.safe_load(open(row['hparams_path']))
record['jitter'] = config['data']['train']['jitter']
record['static'] = config['video'].get('static', False)
record['audio_pretrained'] = config['audio']['pretrained']
record['video_pretrained'] = config['video']['pretrained']
record['resolution'] = 'x'.join(map(str, config['data']['target_size']))
record['freeze_wav2vec'] = config['audio']['freeze_feature_extractor'] \
and config['audio']['freeze_encoder_layers'] == 12
record['sample_rate'] = str(config['data'].get('audio_sample_rate',
pig.data.DEFAULT_SAMPLE_RATE))
rows.append(record)
return rows
def full_run(versions = None, gpus=1):
if versions is None:
conditions = yaml.safe_load(open("conditions.yaml"))
versions = [ version for value in conditions.values() for version in value ]
logging.getLogger().setLevel(logging.INFO)
for version in versions:
rows = []
logging.info(f"Evaluating version {version}")
net, path = load_best_model(f"lightning_logs/version_{version}/")
for row in full_score(net, gpus=gpus, split=['val']):
row['version'] = version
row['checkpoint_path'] = path
row['hparams_path'] = f"lightning_logs/version_{version}/hparams.yaml"
rows.append(row)
torch.save(add_condition(rows), f"results/full_scores_v{version}.pt")
def test_run(gpu=0):
conditions = yaml.safe_load(open("conditions.yaml"))
rows = []
for version in conditions['base']:
logging.info(f"Evaluating version {version}")
net, path = load_best_model(f"lightning_logs/version_{version}/")
for row in full_score(net, gpus=[gpu], split=['test']):
row['version'] = version
row['checkpoint_path'] = path
row['hparams_path'] = f"lightning_logs/version_{version}/hparams.yaml"
rows.append(row)
torch.save(add_condition(rows), f"results/full_test_scores.pt")
def test_table():
data = torch.load(f"results/full_test_scores.pt")
rows = [ datum for datum in data if not datum['scrambled_video'] ]
recall_fixed = torch.cat([ row['recall_at_10_fixed'].mean(dim=1) for row in rows ])
recall_jitter = torch.cat([ row['recall_at_10_jitter'].mean(dim=1) for row in rows ])
triplet_acc = torch.cat([ row['triplet_acc'] for row in rows ])
table = pd.DataFrame.from_records(
[{'R@10 (fixed)':
f"{recall_fixed.mean().item():0.2f} ± {recall_fixed.std().item():0.2f}",
'R@10 (jitter)':
f"{recall_jitter.mean().item():0.2f} ± {recall_jitter.std().item():0.2f}",
'Triplet Acc':
f"{triplet_acc.mean().item():0.2f} ± {triplet_acc.std().item():0.2f}"}]).\
to_latex(buf=f"results/scores_test.tex", index=False)
def duration_effect(gpu=0):
conditions = yaml.safe_load(open("conditions.yaml"))
model_id1 = conditions['pretraining_a']
model_id2 = conditions['static']
out = []
models = []
for model_id in model_id1 + model_id2:
logging.info(f"Loading version {model_id}")
model, _ = load_best_model(f"lightning_logs/version_{model_id}/")
models.append(model)
trainer = pl.Trainer(gpus=[gpu], logger=False, precision=16)
for fragment_type in ['dialog', 'narration']:
logging.info(f"Comparing for {fragment_type}")
result = comparative_triplet_score(fragment_type,
models,
trainer=trainer,
scrambled_video=False,
split=['val'])
result['fragment_type'] = fragment_type
result['model_ids'] = model_id1 + model_id2
out.append(result)
torch.save(out, "results/duration_effect.pt")
| [
"torch.cat",
"torch.save",
"torch.manual_seed",
"torch.eye",
"torch.load"
] | 1.9.1 | mitjanikolaus/peppa | bacfaf3ef09f050dcb503bb4c67e01f8e7ab06f5 |
1.6 | import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
from PIL import Image, ExifTags
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from .utils import xyxy2xywh, xywh2xyxy
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, point_number = 5 ,flip_idx_pair = [[0,1],[3,4]]):
try:
path = str(Path(path)) # os-agnostic
if os.path.isfile(path): # file
with open(path, 'r') as f:
f = f.read().splitlines()
elif os.path.isdir(path): # folder
f = glob.iglob(path + os.sep + '*.*')
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
except:
raise Exception('Error loading data from %s' % (path))
n = len(self.img_files)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
# self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic = 0 # load 4 images at a time into a mosaic (only during training)
self.point_number = point_number
self.flip_idx_pair = flip_idx_pair
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Read image shapes (wh)
sp = path.replace('.txt', '.shapes') # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
except:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
# Sort by aspect ratio
s = np.array(s, dtype=np.float64)
ar = s[:, 1] / s[:, 0] # aspect ratio
            irect = ar.argsort()  # indices that sort images by aspect ratio
            self.img_files = [self.img_files[j] for j in irect]
            self.label_files = [self.label_files[j] for j in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / 64.).astype(np.int) * 64
# Cache labels
self.imgs = [None] * n
self.labels = [np.zeros((0, 5 + 2 * point_number), dtype=np.float32)] * n
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file
if os.path.isfile(np_labels_path):
s = np_labels_path
x = list(np.load(np_labels_path, allow_pickle=True))
if len(x) == n:
self.labels = x
labels_loaded = True
else:
s = path.replace('images', 'labels')
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
if labels_loaded:
l = self.labels[i]
else:
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
except:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
# print(l.shape)
                assert l.shape[1] == 5 + 2 * point_number, 'expected %d label columns: %s' % (5 + 2 * point_number, file)
                l[:, 1:5][l[:, 1:5] < 0] = 0  # clamp normalized box coordinates into [0, 1]
                l[:, 1:5][l[:, 1:5] > 1] = 1
# assert (l[:5] >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:5] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
s, nf, nm, ne, nd, n)
assert nf > 0, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
if not labels_loaded:
print('Saving labels to %s for faster future loading' % np_labels_path)
np.save(np_labels_path, self.labels) # save for next time
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
except:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index , self.point_number)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.point_number > 0:
point_label = labels[:, 5:5 + self.point_number * 2].reshape([-1, self.point_number, 2])
no_trans_index = np.where(point_label == -1)
point_label = point_label * np.array([w, h]) + np.array([pad[0], pad[1]])
point_label[no_trans_index] = -1
labels[:, 5:5 + self.point_number * 2] = point_label.reshape([-1, self.point_number * 2])
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
point_number=self.point_number)
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
            del_index = np.where(labels[:, 3] < 8)[0].tolist() + np.where(labels[:, 4] < 8)[0].tolist()  # drop boxes with width or height < 8 px
del_index = list(set(del_index))
labels = np.delete(labels, del_index, axis=0)
nL = len(labels)
if nL:
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.point_number > 0:
no_tran_index = np.where(labels == -1)
labels[:,6::2] /= img.shape[0] # height
labels[:,5::2] /= img.shape[1] # width
labels[no_tran_index] = -1
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
point_xy = labels[:, 5:5+self.point_number*2].copy()
no_tran_index = np.where(point_xy == -1)
point_xy = point_xy.reshape([-1, self.point_number, 2])
point_xy[:, :, 0] = 1 - point_xy[:, :, 0]
for pair in self.flip_idx_pair:
id1, id2 = pair
tmp = point_xy[:, id2, :].copy()
point_xy[:, id2, :] = point_xy[:, id1, :]
point_xy[:, id1, :] = tmp
point_xy = point_xy.reshape([-1, self.point_number * 2])
point_xy[no_tran_index] = -1
labels[:, 5:5 + self.point_number * 2] = point_xy
# random up-down flip
# ud_flip = False
# if ud_flip and random.random() < 0.5:
# img = np.flipud(img)
# if nL:
# labels[:, 2] = 1 - labels[:, 2]
# img = np.ascontiguousarray(img)
# img_h,img_w,_ = img.shape
# for label in labels:
# cx,cy,w,h = label[1:5]
# x1,y1,x2,y2 = cx - w/2,cy-h/2,cx+w/2,cy+h/2
# box = [int(x1*img_w),int(y1*img_h),int(x2*img_w),int(y2*img_h)]
# cv2.rectangle(img,(box[0],box[1]),(box[2],box[3]),(0,255,0))
# for i in range(self.point_number):
# cv2.circle(img, (int(label[5+i*2]*img_w), int(label[5+i*2+1]*img_h)), 1, (0, 0, 255), 4)
#
# cv2.imwrite("debug_imgs/{}.jpg".format(index), img)
#
labels_out = torch.zeros((nL, 6 + self.point_number *2))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1]
# img = (img - hyp['mean'])/hyp['std']
img = img.transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
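# Hedged usage sketch (paths and the hyp dict are illustrative, not taken from this repo):
# the custom collate_fn above must be handed to the DataLoader so that variable-length
# label tensors get concatenated with a per-image batch index, e.g.
#   dataset = LoadImagesAndLabels('data/train.txt', img_size=416, batch_size=16,
#                                 augment=True, hyp=hyp, point_number=5)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True,
#                                        collate_fn=LoadImagesAndLabels.collate_fn)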
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r < 1 or (self.augment and r != 1): # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index, point_number = 0 ):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
if point_number > 0 :
point_label = labels[:,5:5+point_number*2].reshape([-1, point_number, 2])
no_trans_index = np.where(point_label==-1)
point_label = point_label * np.array([w,h]) + np.array([padw,padh])
point_label[no_trans_index] = -1
labels[:, 5:5 + point_number * 2] = point_label.reshape([-1,point_number * 2])
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:5], 0, 2 * s, out=labels4[:, 1:5]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=-s // 2,
point_number = point_number) # border to remove
return img4, labels4
def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
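# Hedged usage sketch for letterbox (values illustrative): resize while preserving aspect
# ratio, then pad to the target shape; __getitem__ uses the returned ratio and (dw, dh)
# padding to map normalized labels into the padded image.
#   img_lb, ratio, (dw, dh) = letterbox(img, new_shape=(416, 416), auto=False)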
def random_affine(img, targets=(), degrees=10, translate=.1, scale=(0.5,2.0), shear=10, border=0 ,
point_number = 0 ):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = 0
if random.uniform(0, 1) > 0.5:
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
# s = random.uniform(1 - scale, 1 + scale)
s = 1.0
if random.uniform(0,1) > 0.2 :
s = random.uniform(scale[0],scale[1])
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
tran = 0
if random.uniform(0, 1) > 0.5:
tran = translate
T[0, 2] = random.uniform(-tran, tran) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-tran, tran) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
sh = 0
if random.uniform(0, 1) > 0.5:
sh = shear
S[0, 1] = math.tan(random.uniform(-sh, sh) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-sh, sh) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
#point
if point_number > 0 :
point_xy = np.ones((n * point_number ,3))
point_xy[:,:2] = targets[:,5:5+point_number*2].reshape(n * point_number , 2)
no_tran_index = np.where(point_xy==-1)
point_xy = (point_xy @ M.T)
point_xy[no_tran_index] = -1
point_xy = point_xy[:, :2]
point_xy = point_xy.reshape(n, point_number * 2)
targets = targets[i]
targets[:, 1:5] = xy[i]
if point_number > 0:
targets[:, 5:5 + point_number * 2 ] = point_xy[i]
return img, targets | [
"torch.cat",
"torch.stack",
"torch.zeros",
"torch.from_numpy"
] | 1.6.0 | jinglingzhua/blinkblink | 1975be380ef08f895af4c1c07992efaed7af49e9 |
1.6 | import argparse
import torch
from cail.env import make_env
from cail.algo.algo import EXP_ALGOS
from cail.utils import evaluation
def run(args):
env = make_env(args.env_id)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
algo = EXP_ALGOS[args.algo](
state_shape=env.observation_space.shape,
action_shape=env.action_space.shape,
device=device,
path=args.weight
)
mean_return = evaluation(
env=env,
algo=algo,
episodes=args.episodes,
render=args.render,
seed=args.seed,
delay=args.delay
)
if __name__ == '__main__':
p = argparse.ArgumentParser()
# required
p.add_argument('--weight', type=str, required=True,
help='path to the well-trained weights of the agent')
p.add_argument('--env-id', type=str, required=True,
help='name of the environment')
p.add_argument('--algo', type=str, required=True,
help='name of the well-trained agent')
# custom
p.add_argument('--render', action='store_true', default=False,
help='render the environment or not')
# default
p.add_argument('--episodes', type=int, default=10,
help='number of episodes used in evaluation')
p.add_argument('--seed', type=int, default=0,
help='random seed')
p.add_argument('--delay', type=float, default=0,
help='number of seconds to delay while rendering, in case the agent moves too fast')
args = p.parse_args()
run(args)
| [
"torch.cuda.is_available"
] | 1.6.0 | Stanford-ILIAD/Confidence-Aware-Imitation-Learning | 1d8af0e4ab87a025885133a2384d5a937329b2f5 |
1.2 | import pytest
import torch.cuda
from torch import nn
from torch.optim import SGD
from yann.callbacks import (
History, HistoryPlotter, HistoryWriter, Logger, Checkpoint
)
from yann.datasets import TinyDigits
from yann.datasets.wrappers import Slice
from yann.modules import Flatten
from yann.train import Trainer
devices = ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']
@pytest.mark.slow
@pytest.mark.parametrize('device', devices)
def test_train(tmpdir, device):
"""Sanity check train run"""
model = nn.Sequential(
nn.Conv2d(1, 20, 3),
nn.ReLU(inplace=True),
nn.Conv2d(20, 20, 3),
nn.ReLU(inplace=True),
Flatten(),
nn.Linear(320, 10)
)
train = Trainer(
root=tmpdir,
model=model,
dataset=Slice(TinyDigits(), 0, 256),
device=device,
optimizer=SGD(model.parameters(), lr=.01, momentum=0.9, weight_decay=.001),
loss=nn.CrossEntropyLoss(),
callbacks=[
History(),
HistoryPlotter(save=True),
HistoryWriter(),
Logger(batch_freq=20),
Checkpoint()
]
)
train(2)
assert train.paths.checkpoints.is_dir()
assert train.history.metrics
export_path = train.export()
assert export_path
assert export_path.is_dir()
# @pytest.mark.slow
# @pytest.mark.parametrize('device', devices)
# def test_train_resolved(tmpdir, device):
# from yann.data.transform import ImageTransformer
#
# train = Trainer(
# root=tmpdir,
# model='densenet121',
# dataset='CIFAR10',
# loss='CrossEntropy',
# optimizer='SGD',
# transform=ImageTransformer(resize=224)
# )
#
# # train(1)
# def test_transforms():
#
# t = Trainer(
# transform={
# 'mask': 'foo',
# 'label': 'foo'
# }
# ) | [
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.CrossEntropyLoss",
"torch.nn.Linear"
] | 1.2.0 | michalwols/yann | b3c0f35ec7515ddaeb1f04d365af7b6d136f56cf |
1.2 | import base64
import os
import io
import numpy as np
import pathlib
import torch
import random
from PIL import Image
from torchvision import transforms as tvt
from torchvision.transforms.functional import to_pil_image
from torchvision import transforms
from ..utils import truthy
class Transformer:
def __init__(self, load=None, transform=None, to_tensor=None):
self.load = load
self.transform = transform
self.to_tensor = to_tensor
def __call__(self, x):
x = self.load(x)
x = self.transform(x)
return self.to_tensor(x)
def trace(self, x):
loaded = self.load(x)
yield loaded
transformed = self.transform(loaded)
yield transformed
tensor = self.to_tensor(transformed)
yield tensor
def prep(self, x):
return self.transform(self.load(x))
def __repr__(self):
return (
f"{self.__class__.__name__}(\n"
f" load={str(self.load)}\n"
f" transform={str(self.transform)}\n"
f" to_tensor={str(self.to_tensor)}\n"
")")
class ImageTransformer(Transformer):
def __init__(
self,
resize=None,
rotate=None,
crop=None,
warp=None,
mirror=None,
mean=None,
std=None,
color_jitter=None,
interpolation=None,
color_space=None,
load=None,
transform=None,
to_tensor=None
):
interpolation = interpolation or Image.ANTIALIAS
self.resize = resize and tvt.Resize(resize, interpolation=interpolation)
self.rotate = rotate and tvt.RandomRotation(rotate)
self.crop = crop and (
tvt.RandomResizedCrop(crop, interpolation=interpolation)
if warp
else tvt.CenterCrop(crop)
)
self.mirror = mirror and tvt.RandomHorizontalFlip(
.5 if mirror is True else mirror)
if color_jitter is True:
color_jitter = (.4, .2, .1, .05)
self.color_jitter = color_jitter and tvt.ColorJitter(*color_jitter)
self.normalize = (mean or std) and tvt.Normalize(mean=mean, std=std)
super().__init__(
load=load or GetImage(color_space),
transform=tvt.Compose(truthy([
self.resize,
transform,
self.rotate,
self.crop,
self.mirror,
self.color_jitter,
])),
to_tensor=to_tensor or tvt.Compose(truthy([
tvt.ToTensor(),
self.normalize
]))
)
def state_dict(self):
pass
def load_state_dict(self):
pass
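# Hedged usage sketch (the mean/std below are the usual ImageNet statistics, assumed here):
#   transform = ImageTransformer(resize=256, crop=224, mirror=True,
#                                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   tensor = transform('path/to/image.jpg')  # load -> PIL transforms -> normalized tensor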
class DictTransformer:
def __init__(self, **transforms):
self.transforms = transforms
def __call__(self, data: dict):
return {
k: (self.transforms[k](v) if k in self.transforms else v)
for k, v in data.items()
}
class BatchTransformer:
def __init__(self, transform):
self.transform = transform
def __call__(self, items):
return [self.transform(x) for x in items]
class GetImage:
def __init__(self, space=None):
self.color_space = space
def __call__(self, x):
return get_image(x, self.color_space)
def get_image(x, space=None) -> Image.Image:
if isinstance(x, Image.Image):
return x.convert(space) if space else x
if isinstance(x, np.ndarray):
img = Image.fromarray(x)
return img.convert(space) if space else img
if isinstance(x, torch.Tensor):
img = to_pil_image(x)
return img.convert(space) if space else img
if isinstance(x, (str, pathlib.Path)) and os.path.exists(x):
img = Image.open(x)
return img.convert(space) if space else img
if isinstance(x, str):
if x.startswith('http') or x.startswith('www.'):
import requests
x = requests.get(x).content
elif x.startswith('data') and 'base64,' in x:
# data header for base64 encoded
x = x.split('base64,')[1]
x = base64.b64decode(x)
elif len(x) > 1024:
# assume no header base 64 image
try:
x = base64.b64decode(x)
except:
pass
if hasattr(x, 'read'):
img = Image.open(io.BytesIO(x.read()))
return img.convert(space) if space else img
if isinstance(x, bytes):
img = Image.open(io.BytesIO(x))
return img.convert(space) if space else img
def mixup(inputs, targets, alpha=1):
"""
Args:
inputs: batch of inputs
targets: hot encoded targets
Returns:
mixed up (inputs, targets)
"""
shuffled_indices = torch.randperm(inputs.shape[0], device=inputs.device)
fraction = np.random.beta(alpha, alpha)
return (
fraction * inputs + (1 - fraction) * inputs[shuffled_indices],
fraction * targets + (1 - fraction) * targets[shuffled_indices]
)
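# Hedged usage sketch for mixup (shapes illustrative): targets must already be one-hot or
# soft labels so they can be linearly interpolated alongside the inputs.
#   inputs = torch.randn(32, 3, 224, 224)
#   targets = torch.nn.functional.one_hot(torch.randint(0, 10, (32,)), 10).float()
#   mixed_inputs, mixed_targets = mixup(inputs, targets, alpha=0.4)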
def cutout(img, percent=.3, value=0):
pil_img = False
if isinstance(img, Image.Image):
img = np.array(img)
pil_img = True
height, width = img.shape[:2]
mask_height = round(height * percent)
mask_width = round(width * percent)
start_h = random.randint(0, (height - mask_height))
start_w = random.randint(0, (width - mask_width))
img[start_h:start_h + mask_height, start_w:start_w + mask_width] = value
return Image.fromarray(img) if pil_img else img
def cutmix(inputs, targets, beta):
lam = np.random.beta(beta, beta)
rand_index = torch.randperm(inputs.size()[0]).cuda()
target_a = targets
target_b = targets[rand_index]
bbx1, bby1, bbx2, bby2 = rand_bbox(inputs.size(), lam)
inputs[:, :, bbx1:bbx2, bby1:bby2] = inputs[rand_index, :, bbx1:bbx2, bby1:bby2]
# adjust lambda to exactly match pixel ratio
  lam = 1 - (
    (bbx2 - bbx1) * (bby2 - bby1) / (inputs.size()[-1] * inputs.size()[-2]))
  # the original function ended here without returning anything; return the mixed batch,
  # both label sets and the adjusted mixing coefficient (standard CutMix convention)
  return inputs, target_a, target_b, lam
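# rand_bbox is called by cutmix above but is not defined or imported in this file; the
# sketch below is the standard CutMix box sampler and only an assumed stand-in for the
# missing helper.
def rand_bbox(size, lam):
  # size is the NCHW shape of `inputs`; bbx slices dim 2 and bby slices dim 3, matching
  # the inputs[:, :, bbx1:bbx2, bby1:bby2] indexing used in cutmix above
  dim2, dim3 = size[2], size[3]
  cut_rat = np.sqrt(1. - lam)
  cut2, cut3 = int(dim2 * cut_rat), int(dim3 * cut_rat)
  c2, c3 = np.random.randint(dim2), np.random.randint(dim3)  # patch center
  bbx1 = np.clip(c2 - cut2 // 2, 0, dim2)
  bbx2 = np.clip(c2 + cut2 // 2, 0, dim2)
  bby1 = np.clip(c3 - cut3 // 2, 0, dim3)
  bby2 = np.clip(c3 + cut3 // 2, 0, dim3)
  return bbx1, bby1, bbx2, bby2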
def get_imagenet_transformers(size=224, resize=256, fixres=False):
train_transform = Transformer(
load=GetImage('RGB'),
transform=transforms.Compose([
# transforms.Resize(resize, interpolation=Image.ANTIALIAS),
transforms.RandomResizedCrop(
size,
# scale=(.4, 1),
# interpolation=Image.ANTIALIAS
),
transforms.ColorJitter(
.3, .3, .3
# brightness=.4, contrast=.2, saturation=.1, hue=.05
),
transforms.RandomHorizontalFlip(),
]),
to_tensor=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
)
test_transform = Transformer(
load=train_transform.load,
transform=transforms.Compose([
transforms.Resize(size, interpolation=Image.ANTIALIAS),
transforms.CenterCrop(size)
]),
to_tensor=train_transform.to_tensor
)
return train_transform, test_transform
| [
"torch.randperm"
] | 1.2.0 | michalwols/yann | b3c0f35ec7515ddaeb1f04d365af7b6d136f56cf |
1.5 | from torch.utils.tensorboard import SummaryWriter
import torch
class Logger(SummaryWriter):
def __init__(self, log_iter, use_any_mask, use_thumbs, use_fmap, n_featuremaps, n_thumbs, img_mean, img_std, device,
**kwargs):
self.log_iter = log_iter
self.n_featuremaps = n_featuremaps
self.n_thumbs = n_thumbs
self.use_fmap = use_fmap
self.use_thumbs = use_thumbs
self.use_any_mask = use_any_mask
self.device = device
self.img_mean = torch.tensor(img_mean).view(1, -1, 1, 1).to(device)
self.img_std = torch.tensor(img_std).view(1, -1, 1, 1).to(device)
super(Logger, self).__init__(**kwargs)
self.lists = {}
self.counts = {}
def add_to_list(self, name: str, value):
if name not in self.lists:
self.lists[name] = torch.tensor(0.)
self.counts[name] = 0.
self.lists[name] += value
self.counts[name] += 1
def send_lists(self, n_iter):
for key in self.lists:
self.add_scalar(key, self.lists[key].item() / self.counts[key], n_iter)
self.lists[key] = torch.tensor(0.)
self.counts[key] = 0.
def log_images(self, train: bool, masks: list, n_iter: int):
prefix = "train" if train else "eval"
part_i = 0
if self.use_thumbs:
size_mask = masks[part_i]
for i in range(self.n_thumbs + 1):
s_mask = size_mask[0, i]
self.add_image(f"{prefix}_mask_size:{i}", s_mask, n_iter)
part_i += 1
if self.use_fmap:
for fmap_i in range(self.n_featuremaps):
fm_mask = masks[part_i][0, fmap_i]
self.add_image(f"{prefix}_fmap:{fmap_i}", fm_mask, n_iter)
part_i += 1
if self.use_any_mask:
use_mask = 1 - masks[part_i][0, 0]
self.add_image(f"{prefix}_mask_any", use_mask, n_iter)
def log_loss_tolist(self, loss, comb_loss, qloss, qloss_parts, n_iter):
self.add_to_list("train loss", loss)
self.add_to_list("train combined loss", comb_loss)
qloss_part_i = 0
if self.use_thumbs:
self.add_to_list("train size_loss", qloss_parts[qloss_part_i])
qloss_part_i += 1
if self.use_fmap:
self.add_to_list("train fmap_loss", qloss_parts[qloss_part_i])
qloss_part_i += 1
if self.use_any_mask:
self.add_to_list("train any_loss", qloss_parts[qloss_part_i])
if len(qloss_parts) > 0:
self.add_to_list("train qloss", qloss)
if n_iter % self.log_iter == 0:
self.send_lists(n_iter)
def add_image(self, tag, img_tensor, global_step=None, walltime=None, dataformats='CHW'):
if img_tensor.shape[0] > 3:
img_tensor = img_tensor.permute(1, 0, 2).contiguous().view(1, img_tensor.size(1),
img_tensor.size(2) * img_tensor.size(0))
super().add_image(tag, img_tensor, global_step, walltime, dataformats)
def add_example_images(self, tag, img_tensor, global_step=None, walltime=None, num=3, normalize=True):
if normalize:
img_tensor *= self.img_std
img_tensor += self.img_mean
for i in range(num):
self.add_image(f"{tag}_{i}", img_tensor[i], global_step, walltime)
| [
"torch.tensor"
] | 1.5 | StefOe/selection-masks | e59487bffe3c30bdab7a6425bed01f6adeda4f67 |
1.6 | from math import sqrt
from itertools import product
from collections import namedtuple
import torch
import torch.nn.functional as F
from torch import nn, einsum
from se3_transformer_pytorch.basis import get_basis
from se3_transformer_pytorch.utils import exists, default, uniq, map_values, batched_index_select, masked_mean, to_order, fourier_encode_dist
from se3_transformer_pytorch.reversible import ReversibleSequence, SequentialSequence
from einops import rearrange, repeat
# fiber helpers
FiberEl = namedtuple('FiberEl', ['degrees', 'dim'])
class Fiber(nn.Module):
def __init__(
self,
structure
):
super().__init__()
if isinstance(structure, dict):
structure = structure.items()
self.structure = structure
@property
def dims(self):
return uniq(map(lambda t: t[1], self.structure))
@property
def degrees(self):
return map(lambda t: t[0], self.structure)
@staticmethod
def create(num_degrees, dim):
return Fiber([FiberEl(degree, dim) for degree in range(num_degrees)])
def __getitem__(self, degree):
return dict(self.structure)[degree]
def __iter__(self):
return iter(self.structure)
def __mul__(self, fiber):
return product(self.structure, fiber.structure)
def __and__(self, fiber):
out = []
degrees_out = fiber.degrees
for degree, dim in self:
if degree in fiber.degrees:
dim_out = fiber[degree]
out.append((degree, dim, dim_out))
return out
def get_tensor_device_and_dtype(features):
first_tensor = next(iter(features.items()))[1]
return first_tensor.device, first_tensor.dtype
# classes
class ResidualSE3(nn.Module):
""" only support instance where both Fibers are identical """
def forward(self, x, res):
out = {}
for degree, tensor in x.items():
degree = str(degree)
out[degree] = tensor
if degree in res:
out[degree] = out[degree] + res[degree]
return out
class LinearSE3(nn.Module):
def __init__(
self,
fiber_in,
fiber_out
):
super().__init__()
self.weights = nn.ParameterDict()
for (degree, dim_in, dim_out) in (fiber_in & fiber_out):
key = str(degree)
self.weights[key] = nn.Parameter(torch.randn(dim_in, dim_out) / sqrt(dim_in))
def forward(self, x):
out = {}
for degree, weight in self.weights.items():
out[degree] = einsum('b n d m, d e -> b n e m', x[degree], weight)
return out
class NormSE3(nn.Module):
"""Norm-based SE(3)-equivariant nonlinearity.
Nonlinearities are important in SE(3) equivariant GCNs. They are also quite
expensive to compute, so it is convenient for them to share resources with
other layers, such as normalization. The general workflow is as follows:
> for feature type in features:
> norm, phase <- feature
> output = fnc(norm) * phase
where fnc: {R+}^m -> R^m is a learnable map from m norms to m scalars.
"""
def __init__(
self,
fiber,
nonlin = nn.GELU(),
eps = 1e-12
):
super().__init__()
self.fiber = fiber
self.nonlin = nonlin
self.eps = eps
# Norm mappings: 1 per feature type
self.transform = nn.ModuleDict()
for degree, chan in fiber:
self.transform[str(degree)] = nn.Sequential(nn.LayerNorm(chan), nonlin)
def forward(self, features):
output = {}
for degree, t in features.items():
# Compute the norms and normalized features
norm = t.norm(dim = -1, keepdim = True).clamp(min = self.eps)
phase = t / norm
# Transform on norms
fn = self.transform[degree]
transformed = fn(norm.squeeze(-1))[..., None]
# Nonlinearity on norm
output[degree] = (transformed * phase).view(*t.shape)
return output
class ConvSE3(nn.Module):
"""A tensor field network layer
ConvSE3 stands for a Convolution SE(3)-equivariant layer. It is the
equivalent of a linear layer in an MLP, a conv layer in a CNN, or a graph
conv layer in a GCN.
At each node, the activations are split into different "feature types",
indexed by the SE(3) representation type: non-negative integers 0, 1, 2, ..
"""
def __init__(
self,
fiber_in,
fiber_out,
self_interaction = True,
pool = True,
edge_dim = 0,
fourier_encode_dist = False
):
super().__init__()
self.fiber_in = fiber_in
self.fiber_out = fiber_out
self.edge_dim = edge_dim
self.self_interaction = self_interaction
# Neighbor -> center weights
self.kernel_unary = nn.ModuleDict()
for (di, mi), (do, mo) in (self.fiber_in * self.fiber_out):
self.kernel_unary[f'({di},{do})'] = PairwiseConv(di, mi, do, mo, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist)
self.pool = pool
# Center -> center weights
if self_interaction:
assert self.pool, 'must pool edges if followed with self interaction'
self.self_interact = LinearSE3(fiber_in, fiber_out)
self.self_interact_sum = ResidualSE3()
def forward(
self,
inp,
edge_info,
rel_dist = None,
basis = None
):
neighbor_indices, neighbor_masks, edges = edge_info
rel_dist = rearrange(rel_dist, 'b m n -> b m n ()')
kernels = {}
outputs = {}
for (di, mi), (do, mo) in (self.fiber_in * self.fiber_out):
etype = f'({di},{do})'
kernel_fn = self.kernel_unary[etype]
edge_features = torch.cat((rel_dist, edges), dim = -1) if exists(edges) else rel_dist
kernels[etype] = kernel_fn(edge_features, basis = basis)
for degree_out in self.fiber_out.degrees:
output = 0
degree_out_key = str(degree_out)
for degree_in, m_in in self.fiber_in:
x = inp[str(degree_in)]
x = batched_index_select(x, neighbor_indices, dim = 1)
x = x.view(*x.shape[:3], to_order(degree_in) * m_in, 1)
etype = f'({degree_in},{degree_out})'
kernel = kernels[etype]
output = output + einsum('... o i, ... i c -> ... o c', kernel, x)
if self.pool:
output = masked_mean(output, neighbor_masks, dim = 2) if exists(neighbor_masks) else output.mean(dim = 2)
leading_shape = x.shape[:2] if self.pool else x.shape[:3]
output = output.view(*leading_shape, -1, to_order(degree_out))
outputs[degree_out_key] = output
if self.self_interaction:
self_interact_out = self.self_interact(inp)
outputs = self.self_interact_sum(outputs, self_interact_out)
return outputs
class RadialFunc(nn.Module):
"""NN parameterized radial profile function."""
def __init__(
self,
num_freq,
in_dim,
out_dim,
edge_dim = None,
fourier_encode_dist = False,
num_fourier_features = 4,
mid_dim = 128
):
super().__init__()
self.num_freq = num_freq
self.in_dim = in_dim
self.mid_dim = mid_dim
self.out_dim = out_dim
self.edge_dim = default(edge_dim, 0)
self.fourier_encode_dist = fourier_encode_dist
self.num_fourier_features = num_fourier_features if fourier_encode_dist else 0
input_dim = self.edge_dim + 1 + (self.num_fourier_features * 2)
self.net = nn.Sequential(
nn.Linear(input_dim, mid_dim),
nn.LayerNorm(mid_dim),
nn.ReLU(),
nn.Linear(mid_dim, mid_dim),
nn.LayerNorm(mid_dim),
nn.ReLU(),
nn.Linear(mid_dim, num_freq * in_dim * out_dim)
)
self.apply(self.init_)
def init_(self, m):
if m in {nn.Linear}:
nn.init.kaiming_uniform_(m.weight)
def forward(self, x):
if self.fourier_encode_dist:
x = fourier_encode_dist(x, num_encodings = self.num_fourier_features)
x = rearrange(x, 'b n m () d -> b n m d')
y = self.net(x)
return rearrange(y, '... (o i f) -> ... o () i () f', i = self.in_dim, o = self.out_dim)
class PairwiseConv(nn.Module):
"""SE(3)-equivariant convolution between two single-type features"""
def __init__(
self,
degree_in,
nc_in,
degree_out,
nc_out,
edge_dim = 0,
fourier_encode_dist = False
):
super().__init__()
self.degree_in = degree_in
self.degree_out = degree_out
self.nc_in = nc_in
self.nc_out = nc_out
self.num_freq = to_order(min(degree_in, degree_out))
self.d_out = to_order(degree_out)
self.edge_dim = edge_dim
self.rp = RadialFunc(self.num_freq, nc_in, nc_out, edge_dim, fourier_encode_dist)
def forward(self, feat, basis):
R = self.rp(feat)
kernel = torch.sum(R * basis[f'{self.degree_in},{self.degree_out}'], dim = -1)
out = kernel.view(*kernel.shape[:3], self.d_out * self.nc_out, -1)
return out
# feed forwards
class FeedForwardSE3(nn.Module):
def __init__(
self,
fiber,
mult = 4
):
super().__init__()
self.fiber = fiber
fiber_hidden = Fiber(list(map(lambda t: (t[0], t[1] * mult), fiber)))
self.project_in = LinearSE3(fiber, fiber_hidden)
self.nonlin = NormSE3(fiber_hidden)
self.project_out = LinearSE3(fiber_hidden, fiber)
def forward(self, features):
outputs = self.project_in(features)
outputs = self.nonlin(outputs)
outputs = self.project_out(outputs)
return outputs
class FeedForwardBlockSE3(nn.Module):
def __init__(
self,
fiber,
):
super().__init__()
self.fiber = fiber
self.prenorm = NormSE3(fiber)
self.feedforward = FeedForwardSE3(fiber)
self.residual = ResidualSE3()
def forward(self, features):
res = features
out = self.prenorm(features)
out = self.feedforward(out)
return self.residual(out, res)
# attention
class AttentionSE3(nn.Module):
def __init__(
self,
fiber,
dim_head = 64,
heads = 8,
attend_self = False,
edge_dim = None,
fourier_encode_dist = False,
use_null_kv = False
):
super().__init__()
hidden_dim = dim_head * heads
hidden_fiber = Fiber(list(map(lambda t: (t[0], hidden_dim), fiber)))
project_out = not (heads == 1 and len(fiber.dims) == 1 and dim_head == fiber.dims[0])
self.scale = dim_head ** -0.5
self.heads = heads
self.to_q = LinearSE3(fiber, hidden_fiber)
self.to_k = ConvSE3(fiber, hidden_fiber, edge_dim = edge_dim, pool = False, self_interaction = False, fourier_encode_dist = fourier_encode_dist)
self.to_v = ConvSE3(fiber, hidden_fiber, edge_dim = edge_dim, pool = False, self_interaction = False, fourier_encode_dist = fourier_encode_dist)
self.to_out = LinearSE3(hidden_fiber, fiber) if project_out else nn.Identity()
self.use_null_kv = use_null_kv
if use_null_kv:
self.null_keys = nn.ParameterDict()
self.null_values = nn.ParameterDict()
for degree in fiber.degrees:
m = to_order(degree)
degree_key = str(degree)
self.null_keys[degree_key] = nn.Parameter(torch.zeros(heads, dim_head, m))
self.null_values[degree_key] = nn.Parameter(torch.zeros(heads, dim_head, m))
self.attend_self = attend_self
if attend_self:
self.to_self_k = LinearSE3(fiber, hidden_fiber)
self.to_self_v = LinearSE3(fiber, hidden_fiber)
def forward(self, features, edge_info, rel_dist, basis):
h, attend_self = self.heads, self.attend_self
device, dtype = get_tensor_device_and_dtype(features)
neighbor_indices, neighbor_mask, edges = edge_info
max_neg_value = -torch.finfo().max
if exists(neighbor_mask):
neighbor_mask = rearrange(neighbor_mask, 'b i j -> b () i j')
neighbor_indices = rearrange(neighbor_indices, 'b i j -> b () i j')
queries = self.to_q(features)
keys, values = self.to_k(features, edge_info, rel_dist, basis), self.to_v(features, edge_info, rel_dist, basis)
if attend_self:
self_keys, self_values = self.to_self_k(features), self.to_self_v(features)
outputs = {}
for degree in features.keys():
q, k, v = map(lambda t: t[degree], (queries, keys, values))
q = rearrange(q, 'b i (h d) m -> b h i d m', h = h)
k, v = map(lambda t: rearrange(t, 'b i j (h d) m -> b h i j d m', h = h), (k, v))
if self.use_null_kv:
null_k, null_v = map(lambda t: t[degree], (self.null_keys, self.null_values))
null_k, null_v = map(lambda t: repeat(t, 'h d m -> b h i () d m', b = q.shape[0], i = q.shape[2]), (null_k, null_v))
k = torch.cat((null_k, k), dim = 3)
v = torch.cat((null_v, v), dim = 3)
if attend_self:
self_k, self_v = map(lambda t: t[degree], (self_keys, self_values))
self_k, self_v = map(lambda t: rearrange(t, 'b n (h d) m -> b h n () d m', h = h), (self_k, self_v))
k = torch.cat((self_k, k), dim = 3)
v = torch.cat((self_v, v), dim = 3)
sim = einsum('b h i d m, b h i j d m -> b h i j', q, k) * self.scale
if exists(neighbor_mask):
num_left_pad = int(attend_self) + int(self.use_null_kv)
mask = F.pad(neighbor_mask, (num_left_pad, 0), value = True)
sim.masked_fill_(~mask, max_neg_value)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h i j d m -> b h i d m', attn, v)
outputs[degree] = rearrange(out, 'b h n d m -> b n (h d) m')
return self.to_out(outputs)
class AttentionBlockSE3(nn.Module):
def __init__(
self,
fiber,
dim_head = 64,
heads = 8,
attend_self = False,
edge_dim = None,
use_null_kv = False,
fourier_encode_dist = False
):
super().__init__()
self.attn = AttentionSE3(fiber, heads = heads, dim_head = dim_head, attend_self = attend_self, edge_dim = edge_dim, use_null_kv = use_null_kv)
self.prenorm = NormSE3(fiber)
self.residual = ResidualSE3()
def forward(self, features, edge_info, rel_dist, basis):
res = features
outputs = self.prenorm(features)
outputs = self.attn(outputs, edge_info, rel_dist, basis)
return self.residual(outputs, res)
# main class
class SE3Transformer(nn.Module):
def __init__(
self,
*,
dim,
num_neighbors = 12,
heads = 8,
dim_head = 64,
depth = 2,
input_degrees = 1,
num_degrees = 2,
output_degrees = 1,
valid_radius = 1e5,
reduce_dim_out = False,
num_tokens = None,
num_edge_tokens = None,
edge_dim = None,
reversible = False,
attend_self = True,
use_null_kv = False,
differentiable_coors = False,
fourier_encode_dist = False
):
super().__init__()
assert num_neighbors > 0, 'neighbors must be at least 1'
self.dim = dim
self.valid_radius = valid_radius
self.token_emb = None
self.token_emb = nn.Embedding(num_tokens, dim) if exists(num_tokens) else None
assert not (exists(num_edge_tokens) and not exists(edge_dim)), 'edge dimension (edge_dim) must be supplied if SE3 transformer is to have edge tokens'
self.edge_emb = nn.Embedding(num_edge_tokens, edge_dim) if exists(num_edge_tokens) else None
self.input_degrees = input_degrees
self.num_degrees = num_degrees
self.output_degrees = output_degrees
self.num_neighbors = num_neighbors
fiber_in = Fiber.create(input_degrees, dim)
fiber_hidden = Fiber.create(num_degrees, dim)
fiber_out = Fiber.create(output_degrees, dim)
self.conv_in = ConvSE3(fiber_in, fiber_hidden, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist)
layers = nn.ModuleList([])
for _ in range(depth):
layers.append(nn.ModuleList([
AttentionBlockSE3(fiber_hidden, heads = heads, dim_head = dim_head, attend_self = attend_self, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist, use_null_kv = use_null_kv),
FeedForwardBlockSE3(fiber_hidden)
]))
execution_class = ReversibleSequence if reversible else SequentialSequence
self.net = execution_class(layers)
self.conv_out = ConvSE3(fiber_hidden, fiber_out, edge_dim = edge_dim, fourier_encode_dist = fourier_encode_dist)
self.norm = NormSE3(fiber_out)
self.linear_out = LinearSE3(
fiber_out,
Fiber.create(output_degrees, 1)
) if reduce_dim_out else None
self.differentiable_coors = differentiable_coors
def forward(self, feats, coors, mask = None, edges = None, return_type = None, return_pooled = False):
_mask = mask
if self.output_degrees == 1:
return_type = 0
if exists(self.token_emb):
feats = self.token_emb(feats)
assert not (exists(edges) and not exists(self.edge_emb)), 'edge embedding (num_edge_tokens & edge_dim) must be supplied if one were to train on edge types'
if exists(edges):
edges = self.edge_emb(edges)
if torch.is_tensor(feats):
feats = {'0': feats[..., None]}
b, n, d, *_, device = *feats['0'].shape, feats['0'].device
assert d == self.dim, f'feature dimension {d} must be equal to dimension given at init {self.dim}'
assert set(map(int, feats.keys())) == set(range(self.input_degrees)), f'input must have {self.input_degrees} degree'
num_degrees, neighbors = self.num_degrees, self.num_neighbors
neighbors = min(neighbors, n - 1)
# exclude edge of token to itself
exclude_self_mask = rearrange(~torch.eye(n, dtype = torch.bool, device = device), 'i j -> () i j')
indices = repeat(torch.arange(n, device = device), 'i -> b i j', b = b, j = n)
rel_pos = rearrange(coors, 'b n d -> b n () d') - rearrange(coors, 'b n d -> b () n d')
indices = indices.masked_select(exclude_self_mask).reshape(b, n, n - 1)
rel_pos = rel_pos.masked_select(exclude_self_mask[..., None]).reshape(b, n, n - 1, 3)
if exists(mask):
mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')
mask = mask.masked_select(exclude_self_mask).reshape(b, n, n - 1)
if exists(edges):
edges = edges.masked_select(exclude_self_mask[..., None]).reshape(b, n, n - 1, -1)
rel_dist = rel_pos.norm(dim = -1)
# get neighbors and neighbor mask, excluding self
neighbor_rel_dist, nearest_indices = rel_dist.topk(neighbors, dim = -1, largest = False)
neighbor_rel_pos = batched_index_select(rel_pos, nearest_indices, dim = 2)
neighbor_indices = batched_index_select(indices, nearest_indices, dim = 2)
basis = get_basis(neighbor_rel_pos, num_degrees - 1, differentiable = self.differentiable_coors)
neighbor_mask = neighbor_rel_dist <= self.valid_radius
if exists(mask):
neighbor_mask = neighbor_mask & batched_index_select(mask, nearest_indices, dim = 2)
if exists(edges):
edges = batched_index_select(edges, nearest_indices, dim = 2)
# main logic
edge_info = (neighbor_indices, neighbor_mask, edges)
x = feats
# project in
x = self.conv_in(x, edge_info, rel_dist = neighbor_rel_dist, basis = basis)
# transformer layers
x = self.net(x, edge_info = edge_info, rel_dist = neighbor_rel_dist, basis = basis)
# project out
x = self.conv_out(x, edge_info, rel_dist = neighbor_rel_dist, basis = basis)
# norm
x = self.norm(x)
# reduce dim if specified
if exists(self.linear_out):
x = self.linear_out(x)
x = map_values(lambda t: t.squeeze(dim = 2), x)
if return_pooled:
mask_fn = (lambda t: masked_mean(t, _mask, dim = 1)) if exists(_mask) else (lambda t: t.mean(dim = 1))
x = map_values(mask_fn, x)
if '0' in x:
x['0'] = x['0'].squeeze(dim = -1)
if exists(return_type):
return x[str(return_type)]
return x
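# Hedged usage sketch (dimensions illustrative; mirrors the forward signature above):
#   model = SE3Transformer(dim = 64, heads = 8, depth = 2, num_degrees = 2, num_neighbors = 12)
#   feats = torch.randn(1, 32, 64)
#   coors = torch.randn(1, 32, 3)
#   mask = torch.ones(1, 32).bool()
#   out = model(feats, coors, mask = mask, return_type = 0)  # type-0 output of shape (1, 32, 64)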
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.einsum",
"torch.nn.ModuleDict",
"torch.finfo",
"torch.eye",
"torch.nn.functional.pad",
"torch.sum",
"torch.nn.LayerNorm",
"torch.is_tensor",
"torch.nn.Embedding",
"torch.nn.ParameterDict",
"torch.zeros",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Identity",
"torch.nn.ReLU",
"torch.nn.GELU",
"torch.arange",
"torch.randn"
] | 1.6 | SuperXiang/se3-transformer-pytorch | d0db110533c0cd29a243e05e27dbef083ff232f4 |
1.8 | import json
import numpy as np
import os
import random
import time
import torch
from dataclasses import dataclass
from typing import Any
import jiant.utils.python.io as py_io
import jiant.utils.zlog as zlog
@dataclass
class QuickInitContainer:
device: Any
n_gpu: int
log_writer: Any
def quick_init(args, verbose=True) -> QuickInitContainer:
"""Sets up logging, initializes device(s) and random seed, prepares output dir, and saves args."
Args:
args (RunConfiguration): configuration carrying command line args specifying run params.
verbose (bool): whether to print the input run config and the run config as saved.
Returns:
QuickInitContainer specifying the run's device, GPU count, and logging configuration.
"""
if verbose:
print_args(args)
init_server_logging(server_ip=args.server_ip, server_port=args.server_port, verbose=verbose)
device, n_gpu = init_cuda_from_args(
no_cuda=args.no_cuda, local_rank=args.local_rank, fp16=args.fp16, verbose=verbose,
)
args.seed = init_seed(given_seed=args.seed, n_gpu=n_gpu, verbose=verbose)
init_output_dir(output_dir=args.output_dir, force_overwrite=args.force_overwrite)
log_writer = init_log_writer(output_dir=args.output_dir)
save_args(args=args, verbose=verbose)
return QuickInitContainer(device=device, n_gpu=n_gpu, log_writer=log_writer)
def init_server_logging(server_ip, server_port, verbose=True):
"""Sets ups Python Tools for Visual Studio debug (ptvsd) server.
Adapted from Hugging Face template: https://github.com/huggingface/transformers/blob/ac99217
e92c43066af7ec96554054d75532565d7/templates/adding_a_new_example_script/run_xxx.py#L569-L576
"""
if server_ip and server_port:
# Distant debugging, see:
# https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
# noinspection PyUnresolvedReferences,PyPackageRequirements
import ptvsd
if verbose:
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(server_ip, server_port), redirect_output=True)
ptvsd.wait_for_attach()
def init_cuda_from_args(no_cuda, local_rank, fp16, verbose=True):
"""Perform initial CUDA setup for DistributedDataParallel, DataParallel or w/o CUDA configs.
Adapted from Hugging Face template: https://github.com/huggingface/transformers/blob/ac99217e92
c43066af7ec96554054d75532565d7/templates/adding_a_new_example_script/run_xxx.py#L578-L586
Args:
no_cuda (bool): True to ignore CUDA devices (i.e., use CPU instead).
local_rank (int): Which GPU the script should use in DistributedDataParallel mode.
fp16 (bool): True for half-precision mode.
verbose: True to print device, device count, and whether training is distributed or FP16.
Notes:
local_rank == -1 is used to indicate that DistributedDataParallel should be disabled.
n_gpu > 1 is used to indicate that DataParallel should be used. Currently, local_rank == -1
sets n_gpu = 1 even if torch.cuda.device_count() would show more than one GPU is available.
Returns:
(tuple): tuple containing:
device (str): string handle for device.
n_gpu (int): number of GPU devices.
"""
# TODO break local_rank == -1 and no_cuda into separate cases to make the logic easier to read.
if local_rank == -1 or no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
n_gpu = 1
if verbose:
print(
"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(local_rank != -1), fp16
)
)
return device, n_gpu
def init_seed(given_seed, n_gpu, verbose=True):
"""Initializes random seeds for sources of randomness. If seed is -1, randomly select seed.
Sets the random seed for sources of randomness (numpy, torch and python random). If seed is
specified as -1, the seed will be randomly selected and used to initialize all random seeds.
The value used to initialize the random seeds is returned.
Args:
given_seed (int): random seed.
n_gpu (int): number of GPUs.
verbose: whether to print random seed.
Returns:
int: value used to initialize random seeds.
"""
used_seed = get_seed(given_seed)
random.seed(used_seed)
np.random.seed(used_seed)
torch.manual_seed(used_seed)
if verbose:
print("Using seed: {}".format(used_seed))
if n_gpu > 0:
# noinspection PyUnresolvedReferences
torch.cuda.manual_seed_all(used_seed)
# MAKE SURE THIS IS SET
return used_seed
def init_output_dir(output_dir, force_overwrite):
"""Create output directory (and all intermediate dirs on the path) if it doesn't exist.
Args:
output_dir (str): output directory path.
force_overwrite (bool): If False and output dir is complete, raise RuntimeError.
Raises:
RuntimeError if overwrite option is not enabled and output dir contains "DONE" signal file.
"""
if not force_overwrite and is_done(output_dir):
raise RuntimeError(f"'{output_dir}' run is already done, and not forcing overwrite")
os.makedirs(output_dir, exist_ok=True)
def init_log_writer(output_dir):
return zlog.ZLogger(os.path.join(output_dir, str(int(time.time()))), overwrite=True)
def print_args(args):
for k, v in vars(args).items():
print(" {}: {}".format(k, v))
def save_args(args, verbose=True):
"""Dumps RunConfiguration to a json file.
Args:
args (RunConfiguration): configuration carrying command line args specifying run params.
verbose (bool): If True, print the arg object that was written to file.
"""
formatted_args = json.dumps(vars(args), indent=2)
with open(os.path.join(args.output_dir, "args.json"), "w") as f:
f.write(formatted_args)
if verbose:
print(formatted_args)
def get_seed(seed):
"""Get random seed if seed is specified as -1, otherwise return seed.
Args:
seed (int): random seed.
Returns:
int: Random seed if seed is specified as -1, otherwise returns the provided input seed.
"""
if seed == -1:
return int(np.random.randint(0, 2 ** 32 - 1))
else:
return seed
def write_done(output_dir):
py_io.write_file("DONE", os.path.join(output_dir, "DONE"))
def is_done(output_dir):
return os.path.exists(os.path.join(output_dir, "DONE"))
| [
"torch.device",
"torch.cuda.manual_seed_all",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available"
] | 1.8.1 | Inujal/jiant | 095fd4ab7613fe270fd7b7c64b00a90b32b18b5b |
1.7 | """
To visualize the results, demo.py needs two arguments:
--root (required) - root directory of Cityscapes
--model_path (required) - path of the saved model
Press 'q' to quit the demo.
Press any key to visualize the next image.
"""
import torch
import numpy as np
import cv2
import imutils
from torch.utils.data import DataLoader
from cityscapes import CityScapes
from model import model
from arg_parser import demo
def main(args):
scale = 1
cropsize = [int(2048 * scale), int(1024 * scale)]
ds = CityScapes(args.cityscapes_path, cropsize=cropsize, mode='val', demo=True)
n_classes = ds.n_classes
dl = DataLoader(ds,
batch_size=1,
shuffle=False,
num_workers=0,
pin_memory=True,
drop_last=True)
net = model.get_network(n_classes)
saved_path = args.saved_model
loaded_model = torch.load(saved_path, map_location=torch.device('cuda') if torch.cuda.is_available() else 'cpu')
state_dict = loaded_model['state_dict']
net.load_state_dict(state_dict, strict=False)
if torch.cuda.is_available():
net.cuda()
net.eval()
for images, im, lb in dl:
with torch.no_grad():
images = images.numpy()
lb = lb.numpy()
if torch.cuda.is_available():
im = im.cuda()
preds = net(im).argmax(dim=1).cpu().numpy()
for image, pred, label in zip(images, preds, lb):
label = ds.vis_label(label)
pred = ds.vis_label(pred)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
cv2.imshow('im', imutils.resize(cv2.hconcat([image, label, pred]), width=1920))
if ord('q') == cv2.waitKey(0):
exit()
if __name__ == '__main__':
args = demo()
main(args) | [
"torch.device",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.7.1 | Chris10M/Vision-Project-Image-Segmentation | d32fe9302320c74f238bc125f1d62a4e2ddbca22 |
1.6 | import torch.nn as nn
from torch import optim
from graphgallery.nn.models import TorchKeras
from graphgallery.nn.layers.pytorch import APPNProp, PPNProp, activations
from graphgallery.nn.metrics.pytorch import Accuracy
class APPNP(TorchKeras):
def __init__(self,
in_features,
out_features,
*,
alpha=0.1,
K=10,
ppr_dropout=0.,
hids=[64],
acts=['relu'],
dropout=0.5,
weight_decay=5e-4,
lr=0.01,
bias=True,
approximated=True):
super().__init__()
lin = []
lin.append(nn.Dropout(dropout))
for hid, act in zip(hids, acts):
lin.append(nn.Linear(in_features,
hid,
bias=bias))
lin.append(activations.get(act))
lin.append(nn.Dropout(dropout))
in_features = hid
lin.append(nn.Linear(in_features, out_features, bias=bias))
lin = nn.Sequential(*lin)
self.lin = lin
if approximated:
self.propagation = APPNProp(alpha=alpha, K=K,
dropout=ppr_dropout)
else:
self.propagation = PPNProp(dropout=ppr_dropout)
self.compile(loss=nn.CrossEntropyLoss(),
optimizer=optim.Adam([dict(params=lin[1].parameters(),
weight_decay=weight_decay),
dict(params=lin[2:].parameters(),
weight_decay=0.)], lr=lr),
metrics=[Accuracy()])
self.act_fn = nn.ReLU()
def forward(self, x, adj):
x = self.lin(x)
x = self.propagation(x, adj)
return x
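    # Minimal usage sketch (feature/class sizes are hypothetical, not from this repo):
    #   model = APPNP(in_features=1433, out_features=7, hids=[64], acts=['relu'])
    #   logits = model(x, adj)  # x: [num_nodes, in_features], adj: normalized adjacency
    #                           # in whatever format APPNProp/PPNProp expects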
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss"
] | 1.6.0 | Jie-Re/GraphGallery | 37a2e807bb21e5ed986ade935ac9619b62bfdd90 |
1.4 | import torch
from torch import nn
import torch.nn.functional as F
from tianshou.data import Batch
from tianshou.policy import PGPolicy
class A2CPolicy(PGPolicy):
"""docstring for A2CPolicy"""
def __init__(self, actor, critic, optim,
dist_fn=torch.distributions.Categorical,
discount_factor=0.99, vf_coef=.5, ent_coef=.01,
max_grad_norm=None):
super().__init__(None, optim, dist_fn, discount_factor)
self.actor = actor
self.critic = critic
self._w_vf = vf_coef
self._w_ent = ent_coef
self._grad_norm = max_grad_norm
def __call__(self, batch, state=None):
logits, h = self.actor(batch.obs, state=state, info=batch.info)
dist = self.dist_fn(logits)
act = dist.sample()
return Batch(logits=logits, act=act, state=h, dist=dist)
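    # learn() below optimizes the usual A2C objective per minibatch:
    #   loss = actor_loss + vf_coef * value_loss - ent_coef * entropy
    # where actor_loss uses the advantage (return - value) with the critic term detached.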
def learn(self, batch, batch_size=None, repeat=1):
losses, actor_losses, vf_losses, ent_losses = [], [], [], []
for _ in range(repeat):
for b in batch.split(batch_size):
self.optim.zero_grad()
result = self(b)
dist = result.dist
v = self.critic(b.obs)
a = torch.tensor(b.act, device=dist.logits.device)
r = torch.tensor(b.returns, device=dist.logits.device)
a_loss = -(dist.log_prob(a) * (r - v).detach()).mean()
vf_loss = F.mse_loss(r[:, None], v)
ent_loss = dist.entropy().mean()
loss = a_loss + self._w_vf * vf_loss - self._w_ent * ent_loss
loss.backward()
                if self._grad_norm:
                    # clip gradients of both actor and critic (this class defines no self.model)
                    nn.utils.clip_grad_norm_(
                        list(self.actor.parameters()) + list(self.critic.parameters()),
                        max_norm=self._grad_norm)
self.optim.step()
actor_losses.append(a_loss.detach().cpu().numpy())
vf_losses.append(vf_loss.detach().cpu().numpy())
ent_losses.append(ent_loss.detach().cpu().numpy())
losses.append(loss.detach().cpu().numpy())
return {
'loss': losses,
'loss/actor': actor_losses,
'loss/vf': vf_losses,
'loss/ent': ent_losses,
}
| [
"torch.nn.functional.mse_loss",
"torch.tensor"
] | 1.4.0 | DZ9/tianshou | 4f843d3f51789f488169131a5b5decba8bab2b31 |
1.6 | import argparse
import math
import os
import random
import time
import logging
from pathlib import Path
import numpy as np
import torch.distributed as dist
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.yolo import Model
from utils.datasets import create_dataloader
from utils.general import (
torch_distributed_zero_first, labels_to_class_weights, plot_labels, check_anchors, labels_to_image_weights,
compute_loss, plot_images, fitness, strip_optimizer, plot_results, get_latest_run, check_dataset, check_file,
check_git_status, check_img_size, increment_dir, print_mutation, plot_evolution, set_logging)
from utils.google_utils import attempt_download
from utils.torch_utils import init_seeds, ModelEMA, select_device, intersect_dicts
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None):
logger.info(f'Hyperparameters {hyp}')
log_dir = Path(tb_writer.log_dir) if tb_writer else Path(opt.logdir) / 'evolve' # logging directory
wdir = str(log_dir / 'weights') + os.sep # weights directory
os.makedirs(wdir, exist_ok=True)
last = wdir + 'last.pt'
best = wdir + 'best.pt'
results_file = str(log_dir / 'results.txt')
epochs, batch_size, total_batch_size, weights, rank = \
opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
# Save run settings
with open(log_dir / 'hyp.yaml', 'w') as f:
yaml.dump(hyp, f, sort_keys=False)
with open(log_dir / 'opt.yaml', 'w') as f:
yaml.dump(vars(opt), f, sort_keys=False)
# Configure
cuda = device.type != 'cpu'
init_seeds(2 + rank)
with open(opt.data) as f:
data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
nc, names = (1, ['item']) if opt.single_cls else (int(data_dict['nc']), data_dict['names']) # number classes, names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
# Model
pretrained = weights.endswith('.pt')
if pretrained:
with torch_distributed_zero_first(rank):
attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create
exclude = ['anchor'] if opt.cfg else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
model = Model(opt.cfg, ch=3, nc=nc).to(device) # create
# Freeze
freeze = ['', ] # parameter names to freeze (full or partial)
if any(freeze):
for k, v in model.named_parameters():
if any(x in k for x in freeze):
print('freezing %s' % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_parameters():
v.requires_grad = True
if '.bias' in k:
pg2.append(v) # biases
elif '.weight' in k and '.bn' not in k:
pg1.append(v) # apply weight decay
else:
pg0.append(v) # all else
if opt.adam:
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.8 + 0.2 # cosine
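    # Note: lf(0) = 1.0 and lf(epochs) = 0.2, so the learning rate decays along a cosine
    # from lr0 down toward 0.2 * lr0 over the scheduled epochs.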
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# Results
if ckpt.get('training_results') is not None:
with open(results_file, 'w') as file:
file.write(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
if opt.resume:
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = int(max(model.stride)) # grid size (max stride)
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
# Exponential moving average
ema = ModelEMA(model) if rank in [-1, 0] else None
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=(opt.local_rank))
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
world_size=opt.world_size, workers=opt.workers)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
# Testloader
if rank in [-1, 0]:
ema.updates = start_epoch * nb // accumulate # set EMA updates
testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,
hyp=hyp, augment=False, cache=opt.cache_images, rect=True, rank=-1,
world_size=opt.world_size, workers=opt.workers)[0] # only runs on process 0
# Model parameters
hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
model.names = names
# Class frequency
if rank in [-1, 0]:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1.
# model._initialize_biases(cf.to(device))
plot_labels(labels, save_dir=log_dir)
if tb_writer:
# tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384
tb_writer.add_histogram('classes', c, 0)
# Check anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
# Start training
t0 = time.time()
nw = max(3 * nb, 1e3) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
logger.info('Image sizes %g train, %g test' % (imgsz, imgsz_test))
logger.info('Using %g dataloader workers' % dataloader.num_workers)
logger.info('Starting training for %g epochs...' % epochs)
# torch.autograd.set_detect_anomaly(True)
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
# Generate indices
if rank in [-1, 0]:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
dataset.indices = random.choices(range(dataset.n), weights=image_weights,
k=dataset.n) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = torch.zeros([dataset.n], dtype=torch.int)
if rank == 0:
                    indices[:] = torch.tensor(dataset.indices, dtype=torch.int)
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Autocast
with amp.autocast(enabled=cuda):
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(pred, targets.to(device), model) # scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
# if not torch.isfinite(loss):
# logger.info('WARNING: non-finite loss, ending training ', loss_items)
# return results
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.4g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if ni < 3:
f = str(log_dir / ('train_batch%g.jpg' % ni)) # filename
result = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
if tb_writer and result is not None:
tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
if ema:
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride'])
final_epoch = epoch + 1 == epochs
if not opt.notest or final_epoch: # Calculate mAP
results, maps, times = test.test(opt.data,
batch_size=total_batch_size,
imgsz=imgsz_test,
model=ema.ema.module if hasattr(ema.ema, 'module') else ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=log_dir)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 7 % results + '\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
# Tensorboard
if tb_writer:
tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
'val/giou_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
if save:
with open(results_file, 'r') as f: # create checkpoint
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': f.read(),
                        'model': ema.ema.module if hasattr(ema.ema, 'module') else ema.ema,
'optimizer': None if final_epoch else optimizer.state_dict()}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
# Strip optimizers
n = ('_' if len(opt.name) and not opt.name.isnumeric() else '') + opt.name
fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith('.pt') # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None # upload
# Finish
if not opt.evolve:
plot_results(save_dir=log_dir) # save as results.png
logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
dist.destroy_process_group() if rank not in [-1, 0] else None
torch.cuda.empty_cache()
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='yolov5s.pt', help='initial weights path')
parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path')
parser.add_argument('--hyp', type=str, default='', help='hyperparameters path, i.e. data/hyp.scratch.yaml')
parser.add_argument('--epochs', type=int, default=300)
parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='train,test sizes')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
parser.add_argument('--logdir', type=str, default='runs/', help='logging directory')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
opt = parser.parse_args()
# Set DDP variables
opt.total_batch_size = opt.batch_size
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
# Resume
if opt.resume: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader)) # replace
opt.cfg, opt.weights, opt.resume = '', ckpt, True
logger.info('Resuming training from %s' % ckpt)
else:
opt.hyp = opt.hyp or ('data/hyp.finetune.yaml' if opt.weights else 'data/hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
device = select_device(opt.device, batch_size=opt.batch_size)
# DDP mode
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
opt.batch_size = opt.total_batch_size // opt.world_size
logger.info(opt)
with open(opt.hyp) as f:
hyp = yaml.load(f, Loader=yaml.FullLoader) # load hyps
# Train
if not opt.evolve:
tb_writer = None
if opt.global_rank in [-1, 0]:
logger.info('Start Tensorboard with "tensorboard --logdir %s", view at http://localhost:6006/' % opt.logdir)
tb_writer = SummaryWriter(log_dir=increment_dir(Path(opt.logdir) / 'exp', opt.name)) # runs/exp
train(hyp, opt, device, tb_writer)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
'momentum': (0.1, 0.6, 0.98), # SGD momentum/Adam beta1
'weight_decay': (1, 0.0, 0.001), # optimizer weight decay
'giou': (1, 0.02, 0.2), # GIoU loss gain
'cls': (1, 0.2, 4.0), # cls loss gain
'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight
'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels)
'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight
'iou_t': (0, 0.1, 0.7), # IoU training threshold
'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold
'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5)
'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
'degrees': (1, 0.0, 45.0), # image rotation (+/- deg)
'translate': (1, 0.0, 0.9), # image translation (+/- fraction)
'scale': (1, 0.0, 0.9), # image scale (+/- gain)
'shear': (1, 0.0, 10.0), # image shear (+/- deg)
'perspective': (1, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001
'flipud': (0, 0.0, 1.0), # image flip up-down (probability)
'fliplr': (1, 0.0, 1.0), # image flip left-right (probability)
'mixup': (1, 0.0, 1.0)} # image mixup (probability)
assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
opt.notest, opt.nosave = True, True # only test/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
yaml_file = Path('runs/evolve/hyp_evolved.yaml') # save best result here
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
for _ in range(100): # generations to evolve
if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
mp, s = 0.9, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
ng = len(meta)
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = float(x[i + 7] * v[i]) # mutate
# Constrain to limits
for k, v in meta.items():
hyp[k] = max(hyp[k], v[1]) # lower limit
hyp[k] = min(hyp[k], v[2]) # upper limit
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
results = train(hyp.copy(), opt, device)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
print('Hyperparameter evolution complete. Best results saved as: %s\nCommand to train a new model with these '
'hyperparameters: $ python train.py --hyp %s' % (yaml_file, yaml_file))
| [
"torch.cuda.amp.autocast",
"torch.distributed.destroy_process_group",
"torch.distributed.init_process_group",
"torch.nn.functional.interpolate",
"torch.optim.SGD",
"torch.optim.Adam",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.amp.GradScaler",
"torch.optim.lr_scheduler.LambdaLR",
"torch.distributed.broadcast"
] | 1.6.0 | 1079931505/ME336-Yellow-Team-SUSTech | f4e5391d7be3f7983692457d30c2bdc697dcb76d |
1.5 | import logging
import re
import typing
from abc import ABC, abstractmethod
from collections import Counter, defaultdict
from functools import lru_cache
from operator import itemgetter
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union, cast
import torch
from deprecated import deprecated
from torch.utils.data import Dataset
from torch.utils.data.dataset import ConcatDataset, Subset
import flair
from flair.file_utils import Tqdm
log = logging.getLogger("flair")
def _iter_dataset(dataset: Optional[Dataset]) -> typing.Iterable:
if dataset is None:
return []
from flair.datasets import DataLoader
return map(lambda x: x[0], DataLoader(dataset, batch_size=1, num_workers=0))
def _len_dataset(dataset: Optional[Dataset]) -> int:
if dataset is None:
return 0
from flair.datasets import DataLoader
loader = DataLoader(dataset, batch_size=1, num_workers=0)
return len(loader)
class Dictionary:
"""
This class holds a dictionary that maps strings to IDs, used to generate one-hot encodings of strings.
"""
def __init__(self, add_unk=True):
# init dictionaries
self.item2idx: Dict[bytes, int] = {}
self.idx2item: List[bytes] = []
self.add_unk = add_unk
self.multi_label = False
self.span_labels = False
# in order to deal with unknown tokens, add <unk>
if add_unk:
self.add_item("<unk>")
def remove_item(self, item: str):
bytes_item = item.encode("utf-8")
if bytes_item in self.item2idx:
self.idx2item.remove(bytes_item)
del self.item2idx[bytes_item]
def add_item(self, item: str) -> int:
"""
add string - if already in dictionary returns its ID. if not in dictionary, it will get a new ID.
:param item: a string for which to assign an id.
:return: ID of string
"""
bytes_item = item.encode("utf-8")
if bytes_item not in self.item2idx:
self.idx2item.append(bytes_item)
self.item2idx[bytes_item] = len(self.idx2item) - 1
return self.item2idx[bytes_item]
def get_idx_for_item(self, item: str) -> int:
"""
returns the ID of the string, otherwise 0
:param item: string for which ID is requested
:return: ID of string, otherwise 0
"""
item_encoded = item.encode("utf-8")
if item_encoded in self.item2idx.keys():
return self.item2idx[item_encoded]
elif self.add_unk:
return 0
else:
log.error(f"The string '{item}' is not in dictionary! Dictionary contains only: {self.get_items()}")
log.error(
"You can create a Dictionary that handles unknown items with an <unk>-key by setting add_unk = True in the construction."
)
raise IndexError
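    # Usage sketch (strings are purely illustrative):
    #   d = Dictionary(add_unk=True)
    #   d.add_item("hello")          # -> 1 (index 0 is reserved for <unk>)
    #   d.get_idx_for_item("hello")  # -> 1
    #   d.get_idx_for_item("world")  # -> 0, falls back to <unk>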
def get_idx_for_items(self, items: List[str]) -> List[int]:
"""
returns the IDs for each item of the list of string, otherwise 0 if not found
:param items: List of string for which IDs are requested
:return: List of ID of strings
"""
if not hasattr(self, "item2idx_not_encoded"):
d = dict([(key.decode("UTF-8"), value) for key, value in self.item2idx.items()])
self.item2idx_not_encoded = defaultdict(int, d)
if not items:
return []
results = itemgetter(*items)(self.item2idx_not_encoded)
if isinstance(results, int):
return [results]
return list(results)
def get_items(self) -> List[str]:
items = []
for item in self.idx2item:
items.append(item.decode("UTF-8"))
return items
def __len__(self) -> int:
return len(self.idx2item)
def get_item_for_index(self, idx):
return self.idx2item[idx].decode("UTF-8")
def set_start_stop_tags(self):
self.add_item("<START>")
self.add_item("<STOP>")
def start_stop_tags_are_set(self):
if {"<START>".encode(), "<STOP>".encode()}.issubset(self.item2idx.keys()):
return True
else:
return False
def save(self, savefile):
import pickle
with open(savefile, "wb") as f:
mappings = {"idx2item": self.idx2item, "item2idx": self.item2idx}
pickle.dump(mappings, f)
def __setstate__(self, d):
self.__dict__ = d
# set 'add_unk' if the dictionary was created with a version of Flair older than 0.9
if "add_unk" not in self.__dict__.keys():
self.__dict__["add_unk"] = True if b"<unk>" in self.__dict__["idx2item"] else False
@classmethod
def load_from_file(cls, filename: Union[str, Path]):
import pickle
f = open(filename, "rb")
mappings = pickle.load(f, encoding="latin1")
idx2item = mappings["idx2item"]
item2idx = mappings["item2idx"]
f.close()
# set 'add_unk' depending on whether <unk> is a key
add_unk = True if b"<unk>" in idx2item else False
dictionary: Dictionary = Dictionary(add_unk=add_unk)
dictionary.item2idx = item2idx
dictionary.idx2item = idx2item
return dictionary
@classmethod
def load(cls, name: str):
from flair.file_utils import cached_path
hu_path: str = "https://flair.informatik.hu-berlin.de/resources/characters"
if name == "chars" or name == "common-chars":
char_dict = cached_path(f"{hu_path}/common_characters", cache_dir="datasets")
return Dictionary.load_from_file(char_dict)
if name == "chars-large" or name == "common-chars-large":
char_dict = cached_path(f"{hu_path}/common_characters_large", cache_dir="datasets")
return Dictionary.load_from_file(char_dict)
if name == "chars-xl" or name == "common-chars-xl":
char_dict = cached_path(f"{hu_path}/common_characters_xl", cache_dir="datasets")
return Dictionary.load_from_file(char_dict)
if name == "chars-lemmatizer" or name == "common-chars-lemmatizer":
char_dict = cached_path(f"{hu_path}/common_characters_lemmatizer", cache_dir="datasets")
return Dictionary.load_from_file(char_dict)
return Dictionary.load_from_file(name)
def __eq__(self, o: object) -> bool:
if not isinstance(o, Dictionary):
return False
return self.item2idx == o.item2idx and self.idx2item == o.idx2item and self.add_unk == o.add_unk
def __str__(self):
tags = ", ".join(self.get_item_for_index(i) for i in range(min(len(self), 50)))
return f"Dictionary with {len(self)} tags: {tags}"
class Label:
"""
This class represents a label. Each label has a value and optionally a confidence score. The
score needs to be between 0.0 and 1.0. Default value for the score is 1.0.
"""
def __init__(self, value: Optional[str], score: float = 1.0):
self._value = value
self._score = score
super().__init__()
def set_value(self, value: str, score: float = 1.0):
self.value = value
self.score = score
def spawn(self, value: str, score: float = 1.0):
return Label(value, score)
@property
def value(self):
return self._value
@value.setter
def value(self, value):
if not value and value != "":
raise ValueError("Incorrect label value provided. Label value needs to be set.")
else:
self._value = value
@property
def score(self):
return self._score
@score.setter
def score(self, score):
self._score = score
def to_dict(self):
return {"value": self.value, "confidence": self.score}
def __str__(self):
return f"{self._value} ({round(self._score, 4)})"
def __repr__(self):
return f"{self._value} ({round(self._score, 4)})"
def __eq__(self, other):
return self.value == other.value and self.score == other.score
@property
def identifier(self):
return ""
class SpanLabel(Label):
def __init__(self, span, value: Optional[str], score: float = 1.0):
super().__init__(value, score)
self.span = span
def spawn(self, value: str, score: float = 1.0):
return SpanLabel(self.span, value, score)
def to_dict(self):
return {"span": self.span, "value": self.value, "confidence": self.score}
def __str__(self):
return f"{self._value} [{self.span.id_text}] ({round(self._score, 4)})"
def __repr__(self):
return f"{self._value} [{self.span.id_text}] ({round(self._score, 4)})"
def __hash__(self):
return hash(self.__repr__())
def __len__(self):
return len(self.span)
def __eq__(self, other):
return self.value == other.value and self.score == other.score and self.span.id_text == other.span.id_text
@property
def identifier(self):
return f"{self.span.id_text}"
class RelationLabel(Label):
def __init__(self, head, tail, value: Optional[str], score: float = 1.0):
super().__init__(value, score)
self.head = head
self.tail = tail
def spawn(self, value: str, score: float = 1.0):
return RelationLabel(self.head, self.tail, value, score)
def __str__(self):
return f"{self._value} [{self.head.id_text} -> {self.tail.id_text}] ({round(self._score, 4)})"
def __repr__(self):
return f"{self._value} from {self.head.id_text} -> {self.tail.id_text} ({round(self._score, 4)})"
def __len__(self):
return len(self.head) + len(self.tail)
def __eq__(self, other):
return (
self.value == other.value
and self.score == other.score
and self.head.id_text == other.head.id_text
and self.tail.id_text == other.tail.id_text
)
@property
def identifier(self):
return f"{self.head.id_text} -> {self.tail.id_text}"
class EntityLinkingLabel(Label):
def __init__(self, span, cui, concept_name, ontology = None, score: float = 1):
super().__init__(cui, score)
self.span = span
self.ontology = ontology
self.concept_name = concept_name
    def spawn(self, value: str, score: float = 1):
        # pass arguments in the same order as __init__: (span, cui, concept_name, ontology, score)
        return EntityLinkingLabel(self.span, value, self.concept_name, self.ontology, score)
def __str__(self):
return f"{self._value} {self.concept_name} [{self.span}] ({round(self._score, 4)})"
def __repr__(self):
return f"{self._value} {self.concept_name} [{self.span}] ({round(self._score, 4)})"
def __len__(self):
return len(self.span)
def __eq__(self, other):
return (
self.value == other.value
and self.span.id_text == other.span.id_text
and self.concept_name == other.concept_name
and self.ontology == other.ontology
and self.score == other.score
)
@property
def identifier(self):
return f"{self.cui}"
class DataPoint:
"""
This is the parent class of all data points in Flair (including Token, Sentence, Image, etc.). Each DataPoint
must be embeddable (hence the abstract property embedding() and methods to() and clear_embeddings()). Also,
each DataPoint may have Labels in several layers of annotation (hence the functions add_label(), get_labels()
and the property 'label')
"""
def __init__(self):
self.annotation_layers = {}
self._embeddings: Dict[str, torch.Tensor] = {}
@property
@abstractmethod
def embedding(self):
pass
def set_embedding(self, name: str, vector: torch.Tensor):
self._embeddings[name] = vector
def get_embedding(self, names: Optional[List[str]] = None) -> torch.Tensor:
embeddings = self.get_each_embedding(names)
if embeddings:
return torch.cat(embeddings, dim=0)
return torch.tensor([], device=flair.device)
def get_each_embedding(self, embedding_names: Optional[List[str]] = None) -> List[torch.Tensor]:
embeddings = []
for embed_name in sorted(self._embeddings.keys()):
if embedding_names and embed_name not in embedding_names:
continue
embed = self._embeddings[embed_name].to(flair.device)
embeddings.append(embed)
return embeddings
def to(self, device: str, pin_memory: bool = False):
for name, vector in self._embeddings.items():
if str(vector.device) != str(device):
if pin_memory:
self._embeddings[name] = vector.to(device, non_blocking=True).pin_memory()
else:
self._embeddings[name] = vector.to(device, non_blocking=True)
def clear_embeddings(self, embedding_names: List[str] = None):
if embedding_names is None:
self._embeddings = {}
else:
for name in embedding_names:
if name in self._embeddings.keys():
del self._embeddings[name]
def add_label(self, typename: str, value: str, score: float = 1.0):
if typename not in self.annotation_layers:
self.annotation_layers[typename] = [Label(value, score)]
else:
self.annotation_layers[typename].append(Label(value, score))
return self
def add_complex_label(self, typename: str, label: Label):
if typename in self.annotation_layers and label in self.annotation_layers[typename]:
return self
if typename not in self.annotation_layers:
self.annotation_layers[typename] = [label]
else:
self.annotation_layers[typename].append(label)
return self
def set_label(self, typename: str, value: str, score: float = 1.0):
self.annotation_layers[typename] = [Label(value, score)]
return self
def remove_labels(self, typename: str):
if typename in self.annotation_layers.keys():
del self.annotation_layers[typename]
def get_labels(self, typename: str = None):
if typename is None:
return self.labels
return self.annotation_layers[typename] if typename in self.annotation_layers else []
@property
def labels(self) -> List[Label]:
all_labels = []
for key in self.annotation_layers.keys():
all_labels.extend(self.annotation_layers[key])
return all_labels
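    # Annotation sketch (layer name and values are illustrative): labels live in named layers.
    #   point.add_label("ner", "PER", score=0.9)
    #   point.get_labels("ner")   # -> [PER (0.9)]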
DT = typing.TypeVar("DT", bound=DataPoint)
DT2 = typing.TypeVar("DT2", bound=DataPoint)
class Token(DataPoint):
"""
This class represents one word in a tokenized sentence. Each token may have any number of tags. It may also point
to its head in a dependency tree.
"""
def __init__(
self,
text: str,
idx: int = None,
head_id: int = None,
whitespace_after: bool = True,
start_position: int = None,
):
super().__init__()
self.text: str = text
self.idx: Optional[int] = idx
self.head_id: Optional[int] = head_id
self.whitespace_after: bool = whitespace_after
self.start_pos = start_position
self.end_pos = start_position + len(text) if start_position is not None else None
self.sentence: Optional[Sentence] = None
self._embeddings: Dict = {}
self.tags_proba_dist: Dict[str, List[Label]] = {}
def add_tag_label(self, tag_type: str, tag: Label):
self.set_label(tag_type, tag.value, tag.score)
def add_tags_proba_dist(self, tag_type: str, tags: List[Label]):
self.tags_proba_dist[tag_type] = tags
def add_tag(self, tag_type: str, tag_value: str, confidence=1.0):
self.set_label(tag_type, tag_value, confidence)
def get_tag(self, label_type, zero_tag_value=""):
if len(self.get_labels(label_type)) == 0:
return Label(zero_tag_value)
return self.get_labels(label_type)[0]
def get_tags_proba_dist(self, tag_type: str) -> List[Label]:
if tag_type in self.tags_proba_dist:
return self.tags_proba_dist[tag_type]
return []
def get_head(self):
return self.sentence.get_token(self.head_id)
@property
def start_position(self) -> Optional[int]:
return self.start_pos
@property
def end_position(self) -> Optional[int]:
return self.end_pos
@property
def embedding(self):
return self.get_embedding()
def __str__(self) -> str:
return "Token: {} {}".format(self.idx, self.text) if self.idx is not None else "Token: {}".format(self.text)
def __repr__(self) -> str:
return "Token: {} {}".format(self.idx, self.text) if self.idx is not None else "Token: {}".format(self.text)
class Span(DataPoint):
"""
This class represents one textual span consisting of Tokens.
"""
def __init__(self, tokens: List[Token]):
super().__init__()
self.tokens = tokens
@property
def start_pos(self) -> int:
assert self.tokens[0].start_position is not None
return self.tokens[0].start_position
@property
def end_pos(self) -> int:
assert self.tokens[-1].end_position is not None
return self.tokens[-1].end_position
@property
def text(self) -> str:
return " ".join([t.text for t in self.tokens])
def to_original_text(self) -> str:
pos = self.tokens[0].start_pos
if pos is None:
return " ".join([t.text for t in self.tokens])
str = ""
for t in self.tokens:
if t.start_pos is None:
return " ".join([t.text for t in self.tokens])
while t.start_pos > pos:
str += " "
pos += 1
str += t.text
pos += len(t.text)
return str
def to_plain_string(self):
plain = ""
for token in self.tokens:
plain += token.text
if token.whitespace_after:
plain += " "
return plain.rstrip()
def __str__(self) -> str:
ids = ",".join([str(t.idx) for t in self.tokens])
label_string = " ".join([str(label) for label in self.labels])
labels = f" [− Labels: {label_string}]" if self.labels else ""
return 'Span [{}]: "{}"{}'.format(ids, self.text, labels)
@property
def id_text(self) -> str:
return f"{' '.join([t.text for t in self.tokens])} ({','.join([str(t.idx) for t in self.tokens])})"
def __repr__(self) -> str:
ids = ",".join([str(t.idx) for t in self.tokens])
return (
'<{}-span ({}): "{}">'.format(self.tag, ids, self.text)
if len(self.labels) > 0
else '<span ({}): "{}">'.format(ids, self.text)
)
def __getitem__(self, idx: int) -> Token:
return self.tokens[idx]
def __iter__(self):
return iter(self.tokens)
def __len__(self) -> int:
return len(self.tokens)
@property
def tag(self):
return self.labels[0].value
@property
def score(self):
return self.labels[0].score
@property
def position_string(self):
return "-".join([str(token.idx) for token in self])
@property
def embedding(self):
return torch.empty()
def to(self, device: str, pin_memory: bool = False):
pass
def clear_embeddings(self, embedding_names: List[str] = None):
pass
def add_tag(self, tag_type: str, tag_value: str, confidence=1.0):
assert self.tokens[0].sentence is not None
self.tokens[0].sentence.add_complex_label(tag_type, SpanLabel(self, value=tag_value, score=confidence))
class Tokenizer(ABC):
r"""An abstract class representing a :class:`Tokenizer`.
Tokenizers are used to represent algorithms and models to split plain text into
individual tokens / words. All subclasses should overwrite :meth:`tokenize`, which
splits the given plain text into tokens. Moreover, subclasses may overwrite
:meth:`name`, returning a unique identifier representing the tokenizer's
configuration.
"""
@abstractmethod
def tokenize(self, text: str) -> List[Token]:
raise NotImplementedError()
@property
def name(self) -> str:
return self.__class__.__name__
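    # Sketch of a custom subclass (flair already ships SpaceTokenizer/SegtokTokenizer;
    # this is only to illustrate the abstract interface):
    #   class WhitespaceTokenizer(Tokenizer):
    #       def tokenize(self, text: str) -> List[Token]:
    #           return [Token(word) for word in text.split()]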
class Sentence(DataPoint):
"""
A Sentence is a list of tokens and is used to represent a sentence or text fragment.
"""
def __init__(
self,
text: Union[str, List[str]] = [],
use_tokenizer: Union[bool, Tokenizer, Callable] = True,
language_code: str = None,
start_position: int = None,
):
"""
Class to hold all meta related to a text (tokens, predictions, language code, ...)
:param text: original string (sentence), or a list of string tokens (words)
        :param use_tokenizer: a custom tokenizer (the default, True, uses :class:`SegtokTokenizer`;
            False uses the plain :class:`SpaceTokenizer`). More advanced options are :class:`SegtokTokenizer`
            to use segtok or :class:`SpacyTokenizer` to use the Spacy library if available. Check the
            implementations of the abstract class Tokenizer or implement your own subclass if you need it.
            If instead of providing a Tokenizer this parameter is just set to True (deprecated),
            :class:`SegtokTokenizer` will be used.
:param language_code: Language of the sentence
:param start_position: Start char offset of the sentence in the superordinate document
"""
super().__init__()
self.tokens: List[Token] = []
self.language_code: Optional[str] = language_code
self.start_pos = start_position
self.end_pos = start_position + len(text) if start_position is not None else None
# the tokenizer used for this sentence
if isinstance(use_tokenizer, Tokenizer):
tokenizer = use_tokenizer
elif callable(use_tokenizer):
from flair.tokenization import TokenizerWrapper
tokenizer = TokenizerWrapper(use_tokenizer)
elif type(use_tokenizer) == bool:
from flair.tokenization import SegtokTokenizer, SpaceTokenizer
tokenizer = SegtokTokenizer() if use_tokenizer else SpaceTokenizer()
else:
raise AssertionError(
"Unexpected type of parameter 'use_tokenizer'. "
+ "Parameter should be bool, Callable[[str], List[Token]] (deprecated), Tokenizer"
)
# if text is passed, instantiate sentence with tokens (words)
if isinstance(text, (list, tuple)):
[self.add_token(self._restore_windows_1252_characters(token)) for token in text]
else:
text = self._restore_windows_1252_characters(text)
[self.add_token(token) for token in tokenizer.tokenize(text)]
# log a warning if the dataset is empty
if text == "":
log.warning("Warning: An empty Sentence was created! Are there empty strings in your dataset?")
self.tokenized: Optional[str] = None
# some sentences represent a document boundary (but most do not)
self.is_document_boundary: bool = False
# internal variables to denote position inside dataset
self._previous_sentence: Optional[Sentence] = None
self._next_sentence: Optional[Sentence] = None
self._position_in_dataset: Optional[typing.Tuple[Dataset, int]] = None
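        # Construction sketch (text is illustrative; use_tokenizer=False selects SpaceTokenizer):
        #   sentence = Sentence("Berlin is a city .", use_tokenizer=False)
        #   sentence.add_label("sentiment", "NEUTRAL")
        #   sentence[0].text   # -> "Berlin"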
def get_token(self, token_id: int) -> Optional[Token]:
for token in self.tokens:
if token.idx == token_id:
return token
return None
def add_token(self, token: Union[Token, str]):
if type(token) is str:
token = Token(token)
token = cast(Token, token)
token.text = token.text.replace("\u200c", "")
token.text = token.text.replace("\u200b", "")
token.text = token.text.replace("\ufe0f", "")
token.text = token.text.replace("\ufeff", "")
# data with zero-width characters cannot be handled
if token.text == "":
return
self.tokens.append(token)
# set token idx if not set
token.sentence = self
if token.idx is None:
token.idx = len(self.tokens)
def get_label_names(self):
label_names = []
for label in self.labels:
label_names.append(label.value)
return label_names
def _convert_span_labels(self, label_type: str, min_score=-1):
current_span: List[Token] = []
tags: Dict[str, float] = defaultdict(lambda: 0.0)
previous_tag_value: str = "O"
for token in self:
tag: Label = token.get_tag(label_type)
tag_value = tag.value
# non-set tags are OUT tags
if tag_value == "" or tag_value == "O" or tag_value == "_":
tag_value = "O-"
# anything that is not a BIOES tag is a SINGLE tag
if tag_value[0:2] not in ["B-", "I-", "O-", "E-", "S-"]:
tag_value = "S-" + tag_value
# anything that is not OUT is IN
in_span = False
if tag_value[0:2] not in ["O-"]:
in_span = True
# single and begin tags start a new span
starts_new_span = False
if tag_value[0:2] in ["B-", "S-"]:
starts_new_span = True
if previous_tag_value[0:2] in ["S-"] and previous_tag_value[2:] != tag_value[2:] and in_span:
starts_new_span = True
if (starts_new_span or not in_span) and len(current_span) > 0:
scores = [t.get_labels(label_type)[0].score for t in current_span]
span_score = sum(scores) / len(scores)
if span_score > min_score:
span = Span(current_span)
value = sorted(tags.items(), key=lambda k_v: k_v[1], reverse=True)[0][0]
self.add_complex_label(
typename=label_type,
label=SpanLabel(span=span, value=value, score=span_score),
)
current_span = []
tags = defaultdict(lambda: 0.0)
if in_span:
current_span.append(token)
weight = 1.1 if starts_new_span else 1.0
tags[tag_value[2:]] += weight
# remember previous tag
previous_tag_value = tag_value
if len(current_span) > 0:
scores = [t.get_labels(label_type)[0].score for t in current_span]
span_score = sum(scores) / len(scores)
if span_score > min_score:
span = Span(current_span)
value = sorted(tags.items(), key=lambda k_v: k_v[1], reverse=True)[0][0]
self.add_complex_label(
typename=label_type,
label=SpanLabel(span=span, value=value, score=span_score),
)
@property
def embedding(self):
return self.get_embedding()
def to(self, device: str, pin_memory: bool = False):
# move sentence embeddings to device
super().to(device=device, pin_memory=pin_memory)
# also move token embeddings to device
for token in self:
token.to(device, pin_memory)
def clear_embeddings(self, embedding_names: List[str] = None):
super().clear_embeddings(embedding_names)
# clear token embeddings
for token in self:
token.clear_embeddings(embedding_names)
@lru_cache(maxsize=1) # cache last context, as training repeats calls
def left_context(self, context_length: int, respect_document_boundaries: bool = True):
sentence = self
left_context: List[str] = []
while True:
sentence = sentence.previous_sentence()
if sentence is None:
break
if respect_document_boundaries and sentence.is_document_boundary:
break
left_context = [t.text for t in sentence.tokens] + left_context
if len(left_context) > context_length:
left_context = left_context[-context_length:]
break
return left_context
@lru_cache(maxsize=1) # cache last context, as training repeats calls
def right_context(self, context_length: int, respect_document_boundaries: bool = True):
sentence = self
right_context: List[str] = []
while True:
sentence = sentence.next_sentence()
if sentence is None:
break
if respect_document_boundaries and sentence.is_document_boundary:
break
right_context += [t.text for t in sentence.tokens]
if len(right_context) > context_length:
right_context = right_context[:context_length]
break
return right_context
def to_tagged_string(self, main_tag=None) -> str:
list = []
for token in self.tokens:
list.append(token.text)
tags: List[str] = []
for label_type in token.annotation_layers.keys():
if main_tag is not None and main_tag != label_type:
continue
if token.get_labels(label_type)[0].value == "O":
continue
if token.get_labels(label_type)[0].value == "_":
continue
tags.append(token.get_labels(label_type)[0].value)
all_tags = "<" + "/".join(tags) + ">"
if all_tags != "<>":
list.append(all_tags)
return " ".join(list)
def to_tokenized_string(self) -> str:
if self.tokenized is None:
self.tokenized = " ".join([t.text for t in self.tokens])
return self.tokenized
def to_plain_string(self):
plain = ""
for token in self.tokens:
plain += token.text
if token.whitespace_after:
plain += " "
return plain.rstrip()
def infer_space_after(self):
"""
Heuristics in case you wish to infer whitespace_after values for tokenized text. This is useful for some old NLP
tasks (such as CoNLL-03 and CoNLL-2000) that provide only tokenized data with no info of original whitespacing.
:return:
"""
last_token = None
quote_count: int = 0
# infer whitespace after field
for token in self.tokens:
if token.text == '"':
quote_count += 1
if quote_count % 2 != 0:
token.whitespace_after = False
elif last_token is not None:
last_token.whitespace_after = False
if last_token is not None:
if token.text in [".", ":", ",", ";", ")", "n't", "!", "?"]:
last_token.whitespace_after = False
if token.text.startswith("'"):
last_token.whitespace_after = False
if token.text in ["("]:
token.whitespace_after = False
last_token = token
return self
def to_original_text(self) -> str:
str = ""
pos = 0
for t in self.tokens:
if t.start_pos is None:
return self.to_tokenized_string()
while t.start_pos > pos:
str += " "
pos += 1
str += t.text
pos += len(t.text)
return str
def to_dict(self, tag_type: str = None):
labels = []
if tag_type:
labels = [label.to_dict() for label in self.get_labels(tag_type)]
return {"text": self.to_original_text(), tag_type: labels}
if self.labels:
labels = [label.to_dict() for label in self.labels]
return {"text": self.to_original_text(), "all labels": labels}
@typing.overload
def __getitem__(self, idx: int) -> Token:
...
@typing.overload
def __getitem__(self, s: slice) -> Span:
...
def __getitem__(self, subscript):
if isinstance(subscript, slice):
return Span(self.tokens[subscript])
else:
return self.tokens[subscript]
def __iter__(self):
return iter(self.tokens)
def __len__(self) -> int:
return len(self.tokens)
def __repr__(self):
tagged_string = self.to_tagged_string()
tokenized_string = self.to_tokenized_string()
# add Sentence labels to output if they exist
sentence_labels = f" − Sentence-Labels: {self.annotation_layers}" if self.annotation_layers != {} else ""
# add Token labels to output if they exist
token_labels = f' − Token-Labels: "{tagged_string}"' if tokenized_string != tagged_string else ""
return f'Sentence: "{tokenized_string}" [− Tokens: {len(self)}{token_labels}{sentence_labels}]'
def __copy__(self):
s = Sentence()
for token in self.tokens:
nt = Token(token.text)
for tag_type in token.tags:
nt.add_label(
tag_type,
token.get_tag(tag_type).value,
token.get_tag(tag_type).score,
)
s.add_token(nt)
return s
def __str__(self) -> str:
tagged_string = self.to_tagged_string()
tokenized_string = self.to_tokenized_string()
# add Sentence labels to output if they exist
sentence_labels = f" − Sentence-Labels: {self.annotation_layers}" if self.annotation_layers != {} else ""
# add Token labels to output if they exist
token_labels = f' − Token-Labels: "{tagged_string}"' if tokenized_string != tagged_string else ""
return f'Sentence: "{tokenized_string}" [− Tokens: {len(self)}{token_labels}{sentence_labels}]'
def get_language_code(self) -> str:
if self.language_code is None:
import langdetect
try:
self.language_code = langdetect.detect(self.to_plain_string())
except Exception:
self.language_code = "en"
return self.language_code
@staticmethod
def _restore_windows_1252_characters(text: str) -> str:
def to_windows_1252(match):
try:
return bytes([ord(match.group(0))]).decode("windows-1252")
except UnicodeDecodeError:
# No character at the corresponding code point: remove it
return ""
return re.sub(r"[\u0080-\u0099]", to_windows_1252, text)
def next_sentence(self):
"""
Get the next sentence in the document (works only if context is set through dataloader or elsewhere)
:return: next Sentence in document if set, otherwise None
"""
if self._next_sentence is not None:
return self._next_sentence
if self._position_in_dataset is not None:
dataset = self._position_in_dataset[0]
index = self._position_in_dataset[1] + 1
if index < len(dataset):
return dataset[index]
return None
def previous_sentence(self):
"""
Get the previous sentence in the document (works only if context is set through dataloader or elsewhere)
:return: previous Sentence in document if set, otherwise None
"""
if self._previous_sentence is not None:
return self._previous_sentence
if self._position_in_dataset is not None:
dataset = self._position_in_dataset[0]
index = self._position_in_dataset[1] - 1
if index >= 0:
return dataset[index]
return None
def is_context_set(self) -> bool:
"""
Return True or False depending on whether context is set (for instance in dataloader or elsewhere)
:return: True if context is set, else False
"""
return self._previous_sentence is not None or self._position_in_dataset is not None
def get_labels(self, label_type: str = None):
        # if no label type is specified, return all labels
if label_type is None:
return self.labels
# if the label type exists in the Sentence, return it
if label_type in self.annotation_layers:
return self.annotation_layers[label_type]
# otherwise check if the label exists on the token-level
# in this case, create span-labels and return those
if label_type in set().union(*(token.annotation_layers.keys() for token in self)):
return [
SpanLabel(Span([token]), token.get_tag(label_type).value, token.get_tag(label_type).score)
for token in self
if label_type in token.annotation_layers
]
# return empty list if none of the above
return []
class DataPair(DataPoint, typing.Generic[DT, DT2]):
def __init__(self, first: DT, second: DT2):
super().__init__()
self.first = first
self.second = second
def to(self, device: str, pin_memory: bool = False):
self.first.to(device, pin_memory)
self.second.to(device, pin_memory)
def clear_embeddings(self, embedding_names: List[str] = None):
self.first.clear_embeddings(embedding_names)
self.second.clear_embeddings(embedding_names)
@property
def embedding(self):
return torch.cat([self.first.embedding, self.second.embedding])
def __str__(self):
return f"DataPair:\n − First {self.first}\n − Second {self.second}\n − Labels: {self.labels}"
def to_plain_string(self):
return f"DataPair: First {self.first} || Second {self.second}"
def to_original_text(self):
return f"{self.first.to_original_text()} || {self.second.to_original_text()}"
def __len__(self):
return len(self.first) + len(self.second)
TextPair = DataPair[Sentence, Sentence]
class Image(DataPoint):
def __init__(self, data=None, imageURL=None):
super().__init__()
self.data = data
self._embeddings: Dict = {}
self.imageURL = imageURL
@property
def embedding(self):
return self.get_embedding()
def __str__(self):
image_repr = self.data.size() if self.data else ""
image_url = self.imageURL if self.imageURL else ""
return f"Image: {image_repr} {image_url}"
class FlairDataset(Dataset):
@abstractmethod
def is_in_memory(self) -> bool:
pass
class Corpus:
def __init__(
self,
train: Dataset = None,
dev: Dataset = None,
test: Dataset = None,
name: str = "corpus",
sample_missing_splits: Union[bool, str] = True,
):
# set name
self.name: str = name
# abort if no data is provided
if not train and not dev and not test:
raise RuntimeError("No data provided when initializing corpus object.")
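        # Note derived from the checks below (not an original comment): sample_missing_splits
        # may be True, False, "only_dev" or "only_test"; the string values restrict the
        # sampling to just that missing split.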
# sample test data from train if none is provided
        if test is None and sample_missing_splits and train and sample_missing_splits != "only_dev":
train_length = _len_dataset(train)
test_size: int = round(train_length / 10)
test, train = randomly_split_into_two_datasets(train, test_size)
# sample dev data from train if none is provided
        if dev is None and sample_missing_splits and train and sample_missing_splits != "only_test":
train_length = _len_dataset(train)
dev_size: int = round(train_length / 10)
dev, train = randomly_split_into_two_datasets(train, dev_size)
# set train dev and test data
self._train: Optional[Dataset] = train
self._test: Optional[Dataset] = test
self._dev: Optional[Dataset] = dev
@property
def train(self) -> Optional[Dataset]:
return self._train
@property
def dev(self) -> Optional[Dataset]:
return self._dev
@property
def test(self) -> Optional[Dataset]:
return self._test
def downsample(
self,
percentage: float = 0.1,
downsample_train=True,
downsample_dev=True,
downsample_test=True,
):
if downsample_train and self._train is not None:
self._train = self._downsample_to_proportion(self._train, percentage)
if downsample_dev and self._dev is not None:
self._dev = self._downsample_to_proportion(self._dev, percentage)
if downsample_test and self._test is not None:
self._test = self._downsample_to_proportion(self._test, percentage)
return self
def filter_empty_sentences(self):
log.info("Filtering empty sentences")
if self._train is not None:
self._train = Corpus._filter_empty_sentences(self._train)
if self._test is not None:
self._test = Corpus._filter_empty_sentences(self._test)
if self._dev is not None:
self._dev = Corpus._filter_empty_sentences(self._dev)
log.info(self)
def filter_long_sentences(self, max_charlength: int):
log.info("Filtering long sentences")
if self._train is not None:
self._train = Corpus._filter_long_sentences(self._train, max_charlength)
if self._test is not None:
self._test = Corpus._filter_long_sentences(self._test, max_charlength)
if self._dev is not None:
self._dev = Corpus._filter_long_sentences(self._dev, max_charlength)
log.info(self)
@staticmethod
def _filter_long_sentences(dataset, max_charlength: int) -> Dataset:
        # find indices of sentences that exceed the character limit
        long_sentence_indices = []
        short_sentence_indices = []
        for index, sentence in Tqdm.tqdm(enumerate(_iter_dataset(dataset))):
            if len(sentence.to_plain_string()) > max_charlength:
                long_sentence_indices.append(index)
            else:
                short_sentence_indices.append(index)
        # create subset of the sentences that are within the character limit
        subset = Subset(dataset, short_sentence_indices)
        return subset
@staticmethod
def _filter_empty_sentences(dataset) -> Dataset:
# find out empty sentence indices
empty_sentence_indices = []
non_empty_sentence_indices = []
for index, sentence in enumerate(_iter_dataset(dataset)):
if len(sentence) == 0:
empty_sentence_indices.append(index)
else:
non_empty_sentence_indices.append(index)
# create subset of non-empty sentence indices
subset = Subset(dataset, non_empty_sentence_indices)
return subset
def make_vocab_dictionary(self, max_tokens=-1, min_freq=1) -> Dictionary:
"""
Creates a dictionary of all tokens contained in the corpus.
By defining `max_tokens` you can set the maximum number of tokens that should be contained in the dictionary.
If there are more than `max_tokens` tokens in the corpus, the most frequent tokens are added first.
        If `min_freq` is set to a value greater than 1, only tokens occurring at least `min_freq` times are considered
to be added to the dictionary.
:param max_tokens: the maximum number of tokens that should be added to the dictionary (-1 = take all tokens)
:param min_freq: a token needs to occur at least `min_freq` times to be added to the dictionary (-1 = there is no limitation)
:return: dictionary of tokens
"""
tokens = self._get_most_common_tokens(max_tokens, min_freq)
vocab_dictionary: Dictionary = Dictionary()
for token in tokens:
vocab_dictionary.add_item(token)
return vocab_dictionary
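    # Hypothetical usage sketch for make_vocab_dictionary (values are illustrative only):
    #   vocab = corpus.make_vocab_dictionary(max_tokens=10000, min_freq=2)
    # keeps at most the 10000 most frequent tokens and drops tokens seen fewer than 2 times.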
def _get_most_common_tokens(self, max_tokens, min_freq) -> List[str]:
tokens_and_frequencies = Counter(self._get_all_tokens())
tokens: List[str] = []
for token, freq in tokens_and_frequencies.most_common():
if (min_freq != -1 and freq < min_freq) or (max_tokens != -1 and len(tokens) == max_tokens):
break
tokens.append(token)
return tokens
def _get_all_tokens(self) -> List[str]:
assert self.train
tokens = list(map((lambda s: s.tokens), _iter_dataset(self.train)))
tokens = [token for sublist in tokens for token in sublist]
return list(map((lambda t: t.text), tokens))
@staticmethod
def _downsample_to_proportion(dataset: Dataset, proportion: float):
sampled_size: int = round(_len_dataset(dataset) * proportion)
splits = randomly_split_into_two_datasets(dataset, sampled_size)
return splits[0]
def obtain_statistics(self, label_type: str = None, pretty_print: bool = True) -> Union[dict, str]:
"""
Print statistics about the class distribution (only labels of sentences are taken into account) and sentence
sizes.
"""
json_data = {
"TRAIN": self._obtain_statistics_for(self.train, "TRAIN", label_type),
"TEST": self._obtain_statistics_for(self.test, "TEST", label_type),
"DEV": self._obtain_statistics_for(self.dev, "DEV", label_type),
}
if pretty_print:
import json
return json.dumps(json_data, indent=4)
return json_data
@staticmethod
def _obtain_statistics_for(sentences, name, tag_type) -> dict:
if len(sentences) == 0:
return {}
classes_to_count = Corpus._count_sentence_labels(sentences)
tags_to_count = Corpus._count_token_labels(sentences, tag_type)
tokens_per_sentence = Corpus._get_tokens_per_sentence(sentences)
label_size_dict = {}
for label, c in classes_to_count.items():
label_size_dict[label] = c
tag_size_dict = {}
for tag, c in tags_to_count.items():
tag_size_dict[tag] = c
return {
"dataset": name,
"total_number_of_documents": len(sentences),
"number_of_documents_per_class": label_size_dict,
"number_of_tokens_per_tag": tag_size_dict,
"number_of_tokens": {
"total": sum(tokens_per_sentence),
"min": min(tokens_per_sentence),
"max": max(tokens_per_sentence),
"avg": sum(tokens_per_sentence) / len(sentences),
},
}
@staticmethod
def _get_tokens_per_sentence(sentences):
return list(map(lambda x: len(x.tokens), sentences))
@staticmethod
def _count_sentence_labels(sentences):
label_count = defaultdict(lambda: 0)
for sent in sentences:
for label in sent.labels:
label_count[label.value] += 1
return label_count
@staticmethod
def _count_token_labels(sentences, label_type):
label_count = defaultdict(lambda: 0)
for sent in sentences:
for token in sent.tokens:
if label_type in token.annotation_layers.keys():
label = token.get_tag(label_type)
label_count[label.value] += 1
return label_count
def __str__(self) -> str:
return "Corpus: %d train + %d dev + %d test sentences" % (
_len_dataset(self.train) if self.train else 0,
_len_dataset(self.dev) if self.dev else 0,
_len_dataset(self.test) if self.test else 0,
)
def make_label_dictionary(self, label_type: str, min_count: int = -1) -> Dictionary:
"""
        Creates a dictionary of all labels assigned to the sentences in the corpus.
        :param label_type: the label type for which the dictionary is created
        :param min_count: labels occurring fewer than `min_count` times are not added to the dictionary (-1 = add all labels)
        :return: dictionary of labels
"""
label_dictionary: Dictionary = Dictionary(add_unk=True)
label_dictionary.span_labels = False
assert self.train
datasets = [self.train]
data: ConcatDataset = ConcatDataset(datasets)
log.info("Computing label dictionary. Progress:")
all_label_types: typing.Counter[str] = Counter()
label_occurrence: typing.Counter[str] = Counter()
all_sentence_labels: List[str] = []
for sentence in Tqdm.tqdm(_iter_dataset(data)):
# check if sentence itself has labels
labels = sentence.get_labels(label_type)
all_label_types.update(sentence.annotation_layers.keys())
# go through all labels and increment count
for label in labels:
if label.value not in all_sentence_labels:
label_occurrence[label.value] += 1
# check if there are any span labels
if type(label) == SpanLabel and len(label.span) > 1:
label_dictionary.span_labels = True
if not label_dictionary.multi_label:
if len(labels) > 1:
label_dictionary.multi_label = True
erfasst_count = 0
unked_count = 0
for label, count in label_occurrence.most_common():
if count >= min_count:
label_dictionary.add_item(label)
erfasst_count += count
else:
unked_count += count
if len(label_dictionary.idx2item) == 0:
log.error(
f"Corpus contains only the labels: {', '.join([f'{label[0]} (#{label[1]})' for label in all_label_types.most_common()])}"
)
log.error(f"You specified as label_type='{label_type}' which is not in this dataset!")
raise Exception
log.info(
f"Corpus contains the labels: {', '.join([label[0] + f' (#{label[1]})' for label in all_label_types.most_common()])}"
)
log.info(f"{erfasst_count} instances in dict, {unked_count} instances are UNK'ed")
log.info(f"Most commonly observed '{label_type}'-labels are {label_occurrence.most_common(20)}")
log.info(f"Created (for label '{label_type}') {label_dictionary}")
return label_dictionary
def get_label_distribution(self):
class_to_count = defaultdict(lambda: 0)
for sent in self.train:
for label in sent.labels:
class_to_count[label.value] += 1
return class_to_count
def get_all_sentences(self) -> ConcatDataset:
parts = []
if self.train:
parts.append(self.train)
if self.dev:
parts.append(self.dev)
if self.test:
parts.append(self.test)
return ConcatDataset(parts)
@deprecated(version="0.8", reason="Use 'make_label_dictionary' instead.")
def make_tag_dictionary(self, tag_type: str) -> Dictionary:
# Make the tag dictionary
tag_dictionary: Dictionary = Dictionary(add_unk=False)
tag_dictionary.add_item("O")
for sentence in _iter_dataset(self.get_all_sentences()):
for token in sentence.tokens:
tag_dictionary.add_item(token.get_tag(tag_type).value)
tag_dictionary.add_item("<START>")
tag_dictionary.add_item("<STOP>")
return tag_dictionary
class MultiCorpus(Corpus):
def __init__(self, corpora: List[Corpus], name: str = "multicorpus", **corpusargs):
self.corpora: List[Corpus] = corpora
train_parts = []
dev_parts = []
test_parts = []
for corpus in self.corpora:
if corpus.train:
train_parts.append(corpus.train)
if corpus.dev:
dev_parts.append(corpus.dev)
if corpus.test:
test_parts.append(corpus.test)
super(MultiCorpus, self).__init__(
ConcatDataset(train_parts) if len(train_parts) > 0 else None,
ConcatDataset(dev_parts) if len(dev_parts) > 0 else None,
ConcatDataset(test_parts) if len(test_parts) > 0 else None,
name=name,
**corpusargs,
)
def __str__(self):
output = (
f"MultiCorpus: "
f"{len(self.train) if self.train else 0} train + "
f"{len(self.dev) if self.dev else 0} dev + "
f"{len(self.test) if self.test else 0} test sentences\n - "
)
output += "\n - ".join([f"{type(corpus).__name__} {str(corpus)} - {corpus.name}" for corpus in self.corpora])
return output
def iob2(tags):
"""
Check that tags have a valid IOB format.
Tags in IOB1 format are converted to IOB2.
"""
for i, tag in enumerate(tags):
if tag.value == "O":
continue
split = tag.value.split("-")
if len(split) != 2 or split[0] not in ["I", "B"]:
return False
if split[0] == "B":
continue
elif i == 0 or tags[i - 1].value == "O": # conversion IOB1 to IOB2
tags[i].value = "B" + tag.value[1:]
elif tags[i - 1].value[1:] == tag.value[1:]:
continue
else: # conversion IOB1 to IOB2
tags[i].value = "B" + tag.value[1:]
return True
def iob_iobes(tags):
"""
IOB -> IOBES
"""
for i, tag in enumerate(tags):
if tag.value == "O" or tag.value == "":
tag.value = "O"
continue
t, label = tag.value.split("-", 1)
if len(tags) == i + 1 or tags[i + 1].value == "O":
next_same = False
else:
nt, next_label = tags[i + 1].value.split("-", 1)
next_same = nt == "I" and next_label == label
if t == "B":
if not next_same:
tag.value = tag.value.replace("B-", "S-")
elif t == "I":
if not next_same:
tag.value = tag.value.replace("I-", "E-")
else:
raise Exception("Invalid IOB format!")
def randomly_split_into_two_datasets(dataset, length_of_first):
import random
indices = [i for i in range(len(dataset))]
random.shuffle(indices)
first_dataset = indices[:length_of_first]
second_dataset = indices[length_of_first:]
first_dataset.sort()
second_dataset.sort()
return Subset(dataset, first_dataset), Subset(dataset, second_dataset)
| [
"torch.cat",
"torch.utils.data.dataset.ConcatDataset",
"torch.tensor",
"torch.utils.data.dataset.Subset",
"torch.empty"
] | 1.5.0 | piamarlene/flair | 4f72d538fa49649aac88c7b5130250180ba64e43 |
1.10 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import torch
import torch.cuda.amp as amp
import torch.nn as nn
import torch.nn.functional as F
##
# version 1: use pytorch autograd
class MishV1(nn.Module):
def __init__(self):
super(MishV1, self).__init__()
def forward(self, feat):
return feat * torch.tanh(F.softplus(feat))
##
# version 2: use derived formula to compute grad
class MishFunctionV2(torch.autograd.Function):
@staticmethod
@amp.custom_fwd
def forward(ctx, feat):
# exp = torch.exp(feat)
# exp_plus = exp + 1
# exp_plus_pow = torch.pow(exp_plus, 2)
# tanhX = (exp_plus_pow - 1) / (exp_plus_pow + 1)
# out = feat * tanhX
# grad = tanhX + 4 * feat * exp * exp_plus / torch.pow(1 + exp_plus_pow, 2)
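        # Derivation sketch for the closed-form gradient used below (added note, not in the
        # original source): with mish(x) = x * tanh(softplus(x)) and softplus'(x) = sigmoid(x),
        # the chain rule gives
        #   mish'(x) = tanh(softplus(x)) + x * (1 - tanh(softplus(x))**2) * sigmoid(x),
        # which is exactly the expression assigned to `grad`.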
tanhX = torch.tanh(F.softplus(feat))
out = feat * tanhX
grad = tanhX + feat * (1 - torch.pow(tanhX, 2)) * torch.sigmoid(feat)
ctx.grad = grad
return out
@staticmethod
@amp.custom_bwd
def backward(ctx, grad_output):
grad = ctx.grad
grad *= grad_output
return grad
class MishV2(nn.Module):
def __init__(self):
super(MishV2, self).__init__()
def forward(self, feat):
return MishFunctionV2.apply(feat)
##
# version 3: implemented with a custom cuda kernel, which requires less memory and can be faster
import mish_cpp
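# Note (assumption, not from the original source): mish_cpp is a custom C++/CUDA extension
# that has to be built from the sources shipped with this repository (e.g. via
# torch.utils.cpp_extension or a setup.py build) before this import succeeds.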
class MishFunctionV3(torch.autograd.Function):
@staticmethod
@amp.custom_fwd
def forward(ctx, feat):
ctx.feat = feat
return mish_cpp.mish_forward(feat)
@staticmethod
@amp.custom_bwd
def backward(ctx, grad_output):
feat = ctx.feat
return mish_cpp.mish_backward(grad_output, feat)
class MishV3(nn.Module):
def __init__(self):
super(MishV3, self).__init__()
def forward(self, feat):
return MishFunctionV3.apply(feat)
if __name__ == "__main__":
# import torchvision
# net = torchvision.models.resnet50(pretrained=True)
# sd = {k: v for k, v in net.state_dict().items() if k.startswith('conv1.') or k.startswith('bn1.')}
class Net(nn.Module):
def __init__(self, act='mishv1'):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 64, 7, 2, 3)
self.bn1 = nn.BatchNorm2d(64)
if act == 'mishv1':
self.act1 = MishV1()
elif act == 'mishv2':
self.act1 = MishV2()
elif act == 'mishv3':
self.act1 = MishV3()
self.dense = nn.Linear(64, 10, bias=False)
self.crit = nn.CrossEntropyLoss()
# state = self.state_dict()
# state.update(sd)
# self.load_state_dict(state)
# torch.nn.init.constant_(self.dense.weight, 1)
def forward(self, feat, label):
feat = self.conv1(feat)
feat = self.bn1(feat)
feat = self.act1(feat)
feat = torch.mean(feat, dim=(2, 3))
logits = self.dense(feat)
loss = self.crit(logits, label)
return loss
net1 = Net(act='mishv1')
net2 = Net(act='mishv3')
net2.load_state_dict(net1.state_dict())
net1.cuda()
net2.cuda()
opt1 = torch.optim.SGD(net1.parameters(), lr=1e-1)
opt2 = torch.optim.SGD(net2.parameters(), lr=1e-1)
bs = 32
for i in range(2000):
inten = torch.randn(bs, 3, 224, 224).cuda().detach()
label = torch.randint(0, 10, (bs, )).cuda().detach()
loss1 = net1(inten, label)
opt1.zero_grad()
loss1.backward()
opt1.step()
loss2 = net2(inten, label)
opt2.zero_grad()
loss2.backward()
opt2.step()
if i % 200 == 0:
print('====')
print('loss diff: ', loss1.item() - loss2.item())
print('weight diff: ', torch.sum(torch.abs(net1.conv1.weight - net2.conv1.weight)).item())
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.functional.softplus",
"torch.nn.BatchNorm2d",
"torch.abs",
"torch.randn",
"torch.nn.Conv2d",
"torch.randint",
"torch.mean",
"torch.nn.CrossEntropyLoss",
"torch.pow"
] | 1.10.1 | napoler/pytorch-loss | 36a599d868844491633f3e0091f73759922a4557 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import namedtuple
import numpy as np
import pytest
import torch
from flash.core.data.utilities.classification import (
CommaDelimitedMultiLabelTargetFormatter,
get_target_formatter,
MultiBinaryTargetFormatter,
MultiLabelTargetFormatter,
MultiNumericTargetFormatter,
SingleBinaryTargetFormatter,
SingleLabelTargetFormatter,
SingleNumericTargetFormatter,
SpaceDelimitedTargetFormatter,
)
Case = namedtuple("Case", ["target", "formatted_target", "target_formatter_type", "labels", "num_classes"])
cases = [
# Single
Case([0, 1, 2], [0, 1, 2], SingleNumericTargetFormatter, None, 3),
Case([[1, 0, 0], [0, 1, 0], [0, 0, 1]], [0, 1, 2], SingleBinaryTargetFormatter, None, 3),
Case(["blue", "green", "red"], [0, 1, 2], SingleLabelTargetFormatter, ["blue", "green", "red"], 3),
# Multi
Case([[0, 1], [1, 2], [2, 0]], [[1, 1, 0], [0, 1, 1], [1, 0, 1]], MultiNumericTargetFormatter, None, 3),
Case([[1, 1, 0], [0, 1, 1], [1, 0, 1]], [[1, 1, 0], [0, 1, 1], [1, 0, 1]], MultiBinaryTargetFormatter, None, 3),
Case(
[["blue", "green"], ["green", "red"], ["red", "blue"]],
[[1, 1, 0], [0, 1, 1], [1, 0, 1]],
MultiLabelTargetFormatter,
["blue", "green", "red"],
3,
),
Case(
["blue,green", "green,red", "red,blue"],
[[1, 1, 0], [0, 1, 1], [1, 0, 1]],
CommaDelimitedMultiLabelTargetFormatter,
["blue", "green", "red"],
3,
),
Case(
["blue green", "green red", "red blue"],
[[1, 1, 0], [0, 1, 1], [1, 0, 1]],
SpaceDelimitedTargetFormatter,
["blue", "green", "red"],
3,
),
# Ambiguous
Case([[0], [1, 2], [2, 0]], [[1, 0, 0], [0, 1, 1], [1, 0, 1]], MultiNumericTargetFormatter, None, 3),
Case([[1, 0, 0], [0, 1, 1], [1, 0, 1]], [[1, 0, 0], [0, 1, 1], [1, 0, 1]], MultiBinaryTargetFormatter, None, 3),
Case(
[["blue"], ["green", "red"], ["red", "blue"]],
[[1, 0, 0], [0, 1, 1], [1, 0, 1]],
MultiLabelTargetFormatter,
["blue", "green", "red"],
3,
),
Case(
["blue", "green,red", "red,blue"],
[[1, 0, 0], [0, 1, 1], [1, 0, 1]],
CommaDelimitedMultiLabelTargetFormatter,
["blue", "green", "red"],
3,
),
Case(
["blue", "green red", "red blue"],
[[1, 0, 0], [0, 1, 1], [1, 0, 1]],
SpaceDelimitedTargetFormatter,
["blue", "green", "red"],
3,
),
# Special cases
Case(["blue ", " green", "red"], [0, 1, 2], SingleLabelTargetFormatter, ["blue", "green", "red"], 3),
Case(
["blue", "green, red", "red, blue"],
[[1, 0, 0], [0, 1, 1], [1, 0, 1]],
CommaDelimitedMultiLabelTargetFormatter,
["blue", "green", "red"],
3,
),
Case(
["blue", "green ,red", "red ,blue"],
[[1, 0, 0], [0, 1, 1], [1, 0, 1]],
CommaDelimitedMultiLabelTargetFormatter,
["blue", "green", "red"],
3,
),
Case(
[f"class_{i}" for i in range(10000)],
list(range(10000)),
SingleLabelTargetFormatter,
[f"class_{i}" for i in range(10000)],
10000,
),
# Array types
Case(torch.tensor([[0], [1]]), [0, 1], SingleNumericTargetFormatter, None, 2),
Case(torch.tensor([0, 1, 2]), [0, 1, 2], SingleNumericTargetFormatter, None, 3),
Case(np.array([0, 1, 2]), [0, 1, 2], SingleNumericTargetFormatter, None, 3),
]
@pytest.mark.parametrize("case", cases)
def test_case(case):
formatter = get_target_formatter(case.target)
assert isinstance(formatter, case.target_formatter_type)
assert formatter.labels == case.labels
assert formatter.num_classes == case.num_classes
assert [formatter(t) for t in case.target] == case.formatted_target
@pytest.mark.parametrize("case", cases)
def test_speed(case):
repeats = int(1e5 / len(case.target)) # Approx. a hundred thousand targets
if torch.is_tensor(case.target):
targets = case.target.repeat(repeats, *(1 for _ in range(case.target.ndim - 1)))
elif isinstance(case.target, np.ndarray):
targets = np.repeat(case.target, repeats)
else:
targets = case.target * repeats
start = time.perf_counter()
formatter = get_target_formatter(targets)
end = time.perf_counter()
assert (end - start) / len(targets) < 1e-5 # 0.01ms per target
start = time.perf_counter()
_ = [formatter(t) for t in targets]
end = time.perf_counter()
assert (end - start) / len(targets) < 1e-5 # 0.01ms per target
| [
"torch.is_tensor",
"torch.tensor"
] | 1.7.1 | AjinkyaIndulkar/lightning-flash | e65020c7e5bd779d477a198865b0a84ac4f39720 |
0.6 |
import glob, torch
from haven import haven_utils as hu
import numpy as np
import os
from PIL import Image
import torch.utils.data as data
import torchvision.transforms as transforms
import kornia.augmentation as K
import PIL
class COVIDDataset(data.Dataset):
def __init__(self, split, datadir, exp_dict):
if split in ['train', 'val']:
path = os.path.join(datadir, "Dataset", "TrainingSet", "LungInfection-Train", "Doctor-label")
elif split == 'test':
path = os.path.join(datadir, "Dataset", "TestingSet", "LungInfection-Test")
self.n_classes = exp_dict['dataset']['n_classes']
self.images = glob.glob(os.path.join(path, 'Imgs', '*'))
self.gts = glob.glob(os.path.join(path, 'GT', '*'))
self.size = 352
self.split = split
self.images = sorted(self.images)
self.gts = sorted(self.gts)
if split == 'train':
s = 0
e = int(0.9*len(self.images))
elif split == 'val':
s = int(0.9*len(self.images))
e = len(self.images)
elif split == 'test':
s = 0
e = len(self.images)
self.images = self.images[s:e]
self.dataset_size = len(self.images)
self.img_transform = transforms.Compose([
transforms.Resize((self.size, self.size)),
# transforms.rotate(-90),
# transforms.CenterCrop((384, 385)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
if split == 'train':
self.gt_transform = transforms.Compose([
transforms.Resize((self.size, self.size), interpolation=PIL.Image.NEAREST),
# transforms.rotate(-90),
# transforms.CenterCrop((384, 385)),
# transforms.ToTensor()
])
else:
self.gt_transform = transforms.Compose([
# transforms.Resize((self.size, self.size), interpolation=PIL.Image.NEAREST),
# transforms.rotate(-90),
# transforms.CenterCrop((384, 385)),
# transforms.ToTensor()
])
def __getitem__(self, index):
image = rgb_loader(self.images[index])
gt = binary_loader(self.gts[index])
image = self.img_transform(image)
gt = self.gt_transform(gt)
tgt_mask = np.array(gt)
        assert len(np.setdiff1d(np.unique(tgt_mask), [0, 127, 255])) == 0
mask = np.zeros(tgt_mask.shape)
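        # Note (assumption based on the assert above, not an original comment): the GT pngs
        # encode labels as the gray levels {0, 127, 255}; below they are remapped either to a
        # binary mask (n_classes == 2) or to the class ids {0, 1, 2} (n_classes == 3).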
if self.n_classes == 2:
            mask[tgt_mask != 0] = 1
elif self.n_classes == 3:
            mask[tgt_mask == 127] = 1
            mask[tgt_mask == 255] = 2
mask = torch.LongTensor(mask)
# gt = self.gt_transform(gt)
# cc = K.CenterCrop((384, 385))
# image = cc(image)
# mask = cc(mask[None].float()).long()
from src.modules.lcfcn import lcfcn_loss
points = lcfcn_loss.get_points_from_mask(mask.numpy().squeeze(), bg_points=-1)
points = torch.LongTensor(points)
# hu.save_image('tmp.png', hu.denormalize(image, 'rgb'), points=points, radius=2)
# hu.save_image('tmp.png', hu.denormalize(image, 'rgb'), mask=gt.numpy(), radius=2)
if self.n_classes == 2:
assert (len(np.setdiff1d(np.unique(mask), [0, 1])) == 0)
if self.n_classes == 3:
assert (len(np.setdiff1d(np.unique(mask), [0, 1, 2])) == 0)
# points = cc(torch.LongTensor(points)[None].float()).long()[0]
batch = {'images':image,
'masks': mask[None],
'points':points,
'meta':{'name':index,
'hash':hu.hash_dict({'id':self.images[index]}),
# 'hash':self.images[index],
'shape':mask.squeeze().shape,
'index':index,
'split':self.split,
# 'size':self.size
}}
# return image, gt, name, np.array(F.interpolate(image, gt.size, mode='bilinear'))
return batch
def __len__(self):
return self.dataset_size
def rgb_loader(path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def binary_loader(path):
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('L') | [
"torch.LongTensor"
] | 0.6.3 | JanAlexanderPersonal/covid19_weak_supervision | 5599e48c9945f1e08a2731740bc8f6e44a031703 |
0.6 | import sys; sys.path.append("../../_EXTRAS"); import misc as ms
import socket
import timeit
from datetime import datetime
import scipy.misc as sm
from collections import OrderedDict
import glob
# PyTorch includes
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.nn.functional import upsample
# Tensorboard include
# from tensorboardX import SummaryWriter
# Custom includes
from dataloaders.combine_dbs import CombineDBs as combine_dbs
import dataloaders.pascal as pascal
import dataloaders.sbd as sbd
from dataloaders import custom_transforms as tr
import networks.deeplab_resnet as resnet
from layers.loss import class_balanced_cross_entropy_loss
from dataloaders.helpers import *
# Set gpu_id to -1 to run in CPU mode, otherwise set the id of the corresponding gpu
gpu_id = 0
device = torch.device("cuda:"+str(gpu_id) if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
print('Using GPU: {} '.format(gpu_id))
# Setting parameters
use_sbd = False
nEpochs = 100 # Number of epochs for training
resume_epoch = 0  # Default is 0, change if you want to resume
p = OrderedDict() # Parameters to include in report
classifier = 'psp' # Head classifier to use
p['trainBatch'] = 5 # Training batch size
testBatch = 5 # Testing batch size
useTest = 1 # See evolution of the test set when training?
nTestInterval = 10 # Run on test set every nTestInterval epochs
snapshot = 20 # Store a model every snapshot epochs
relax_crop = 50 # Enlarge the bounding box by relax_crop pixels
nInputChannels = 4 # Number of input channels (RGB + heatmap of extreme points)
zero_pad_crop = True # Insert zero padding when cropping the image
p['nAveGrad'] = 1 # Average the gradient of several iterations
p['lr'] = 1e-8 # Learning rate
p['wd'] = 0.0005 # Weight decay
p['momentum'] = 0.9 # Momentum
# Results and model directories (a new directory is generated for every run)
save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if resume_epoch == 0:
runs = sorted(glob.glob(os.path.join(save_dir_root, 'run_*')))
run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0
else:
run_id = 0
save_dir = os.path.join(save_dir_root, 'run_' + str(run_id))
if not os.path.exists(os.path.join(save_dir, 'models')):
os.makedirs(os.path.join(save_dir, 'models'))
# Network definition
modelName = 'dextr_pascal'
net = resnet.resnet101(1, pretrained=True, nInputChannels=nInputChannels, classifier=classifier)
if resume_epoch == 0:
print("Initializing from pretrained Deeplab-v2 model")
else:
print("Initializing weights from: {}".format(
os.path.join(save_dir, 'models', modelName + '_epoch-' + str(resume_epoch - 1) + '.pth')))
net.load_state_dict(
torch.load(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(resume_epoch - 1) + '.pth'),
map_location=lambda storage, loc: storage))
train_params = [{'params': resnet.get_1x_lr_params(net), 'lr': p['lr']},
{'params': resnet.get_10x_lr_params(net), 'lr': p['lr'] * 10}]
net.to(device)
# Training the network
if resume_epoch != nEpochs:
# Logging into Tensorboard
log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
# writer = SummaryWriter(log_dir=log_dir)
# Use the following optimizer
optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd'])
p['optimizer'] = str(optimizer)
# Preparation of the data loaders
composed_transforms_tr = transforms.Compose([
tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25)),
tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
tr.FixedResize(resolutions={'crop_image': (512, 512), 'crop_gt': (512, 512)}),
tr.ExtremePoints(sigma=10, pert=5, elem='crop_gt'),
tr.ToImage(norm_elem='extreme_points'),
tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
tr.ToTensor()])
composed_transforms_ts = transforms.Compose([
tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
tr.FixedResize(resolutions={'crop_image': (512, 512), 'crop_gt': (512, 512)}),
tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
tr.ToImage(norm_elem='extreme_points'),
tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
tr.ToTensor()])
voc_train = pascal.VOCSegmentation(split='train', transform=composed_transforms_tr)
voc_val = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts)
if use_sbd:
sbd = sbd.SBDSegmentation(split=['train', 'val'], transform=composed_transforms_tr, retname=True)
db_train = combine_dbs([voc_train, sbd], excluded=[voc_val])
else:
db_train = voc_train
p['dataset_train'] = str(db_train)
p['transformations_train'] = [str(tran) for tran in composed_transforms_tr.transforms]
p['dataset_test'] = str(db_train)
p['transformations_test'] = [str(tran) for tran in composed_transforms_ts.transforms]
trainloader = DataLoader(db_train, batch_size=p['trainBatch'], shuffle=True, num_workers=2)
testloader = DataLoader(voc_val, batch_size=testBatch, shuffle=False, num_workers=2)
generate_param_report(os.path.join(save_dir, exp_name + '.txt'), p)
# Train variables
num_img_tr = len(trainloader)
num_img_ts = len(testloader)
running_loss_tr = 0.0
running_loss_ts = 0.0
aveGrad = 0
print("Training Network")
# Main Training and Testing Loop
for epoch in range(resume_epoch, nEpochs):
start_time = timeit.default_timer()
net.train()
for ii, sample_batched in enumerate(trainloader):
            # import ipdb; ipdb.set_trace()  # leftover debugging breakpoint (disabled so training does not halt every iteration)
inputs, gts = sample_batched['concat'], sample_batched['crop_gt']
# Forward-Backward of the mini-batch
inputs.requires_grad_()
inputs, gts = inputs.to(device), gts.to(device)
output = net.forward(inputs)
output = upsample(output, size=(512, 512), mode='bilinear', align_corners=True)
# Compute the losses, side outputs and fuse
loss = class_balanced_cross_entropy_loss(output, gts, size_average=False, batch_average=True)
running_loss_tr += loss.item()
# Print stuff
if ii % num_img_tr == num_img_tr - 1:
running_loss_tr = running_loss_tr / num_img_tr
                # writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch)  # disabled: the SummaryWriter above is commented out
print('[Epoch: %d, numImages: %5d]' % (epoch, ii*p['trainBatch']+inputs.data.shape[0]))
print('Loss: %f' % running_loss_tr)
running_loss_tr = 0
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time)+"\n")
# Backward the averaged gradient
loss /= p['nAveGrad']
loss.backward()
aveGrad += 1
# Update the weights once in p['nAveGrad'] forward passes
if aveGrad % p['nAveGrad'] == 0:
                # writer.add_scalar('data/total_loss_iter', loss.item(), ii + num_img_tr * epoch)  # disabled: the SummaryWriter is commented out
optimizer.step()
optimizer.zero_grad()
aveGrad = 0
# Save the model
if (epoch % snapshot) == snapshot - 1 and epoch != 0:
torch.save(net.state_dict(), os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth'))
# One testing epoch
if useTest and epoch % nTestInterval == (nTestInterval - 1):
net.eval()
with torch.no_grad():
for ii, sample_batched in enumerate(testloader):
inputs, gts = sample_batched['concat'], sample_batched['crop_gt']
# Forward pass of the mini-batch
inputs, gts = inputs.to(device), gts.to(device)
output = net.forward(inputs)
output = upsample(output, size=(512, 512), mode='bilinear', align_corners=True)
# Compute the losses, side outputs and fuse
loss = class_balanced_cross_entropy_loss(output, gts, size_average=False)
running_loss_ts += loss.item()
# Print stuff
if ii % num_img_ts == num_img_ts - 1:
running_loss_ts = running_loss_ts / num_img_ts
print('[Epoch: %d, numImages: %5d]' % (epoch, ii*testBatch+inputs.data.shape[0]))
                        # writer.add_scalar('data/test_loss_epoch', running_loss_ts, epoch)  # disabled: the SummaryWriter is commented out
print('Loss: %f' % running_loss_ts)
running_loss_ts = 0
    # writer.close()  # disabled: the SummaryWriter is commented out
# Generate result of the validation images
net.eval()
composed_transforms_ts = transforms.Compose([
tr.CropFromMask(crop_elems=('image', 'gt'), relax=relax_crop, zero_pad=zero_pad_crop),
tr.FixedResize(resolutions={'gt': None, 'crop_image': (512, 512), 'crop_gt': (512, 512)}),
tr.ExtremePoints(sigma=10, pert=0, elem='crop_gt'),
tr.ToImage(norm_elem='extreme_points'),
tr.ConcatInputs(elems=('crop_image', 'extreme_points')),
tr.ToTensor()])
db_test = pascal.VOCSegmentation(split='val', transform=composed_transforms_ts, retname=True)
testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
save_dir_res = os.path.join(save_dir, 'Results')
if not os.path.exists(save_dir_res):
os.makedirs(save_dir_res)
print('Testing Network')
with torch.no_grad():
# Main Testing Loop
for ii, sample_batched in enumerate(testloader):
inputs, gts, metas = sample_batched['concat'], sample_batched['gt'], sample_batched['meta']
# Forward of the mini-batch
inputs = inputs.to(device)
outputs = net.forward(inputs)
outputs = upsample(outputs, size=(512, 512), mode='bilinear', align_corners=True)
outputs = outputs.to(torch.device('cpu'))
for jj in range(int(inputs.size()[0])):
pred = np.transpose(outputs.data.numpy()[jj, :, :, :], (1, 2, 0))
pred = 1 / (1 + np.exp(-pred))
pred = np.squeeze(pred)
gt = tens2image(gts[jj, :, :, :])
bbox = get_bbox(gt, pad=relax_crop, zero_pad=zero_pad_crop)
result = crop2fullmask(pred, bbox, gt, zero_pad=zero_pad_crop, relax=relax_crop)
# Save the result, attention to the index jj
sm.imsave(os.path.join(save_dir_res, metas['image'][jj] + '-' + metas['object'][jj] + '.png'), result)
| [
"torch.optim.SGD",
"torch.utils.data.DataLoader",
"torch.nn.functional.upsample"
] | 0.6.3 | JanAlexanderPersonal/covid19_weak_supervision | 5599e48c9945f1e08a2731740bc8f6e44a031703 |
1.7 | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
"""Dataset and evaluator for CLEVR-Ref+"""
import copy
from collections import defaultdict
from pathlib import Path
import torch
import torch.utils.data
from transformers import AutoTokenizer
import mdetr.util.dist as dist
from mdetr.datasets.clevr import make_clevr_transforms
from mdetr.util.box_ops import generalized_box_iou
from .coco import ModulatedDetection, make_coco_transforms
class ClevrRefDetection(ModulatedDetection):
pass
class ClevrRefEvaluator(object):
def __init__(self, refexp_gt, iou_types, k=(1, 5, 10), thresh_iou=0.5):
assert isinstance(k, (list, tuple))
refexp_gt = copy.deepcopy(refexp_gt)
self.refexp_gt = refexp_gt
self.iou_types = iou_types
self.img_ids = self.refexp_gt.imgs.keys()
self.predictions = {}
self.k = k
self.thresh_iou = thresh_iou
def accumulate(self):
pass
def update(self, predictions):
self.predictions.update(predictions)
def synchronize_between_processes(self):
all_predictions = dist.all_gather(self.predictions)
merged_predictions = {}
for p in all_predictions:
merged_predictions.update(p)
self.predictions = merged_predictions
def summarize(self):
if dist.is_main_process():
dataset2score = {
"clevrref": {k: 0.0 for k in self.k},
}
dataset2count = {"clevrref": 0.0}
for image_id in self.img_ids:
ann_ids = self.refexp_gt.getAnnIds(imgIds=image_id)
if len(ann_ids) != 1:
continue
img_info = self.refexp_gt.loadImgs(image_id)[0]
target = self.refexp_gt.loadAnns(ann_ids[0])
prediction = self.predictions[image_id]
assert prediction is not None
sorted_scores_boxes = sorted(
zip(prediction["scores_refexp"].tolist(), prediction["boxes"].tolist()), reverse=True
)
sorted_scores, sorted_boxes = zip(*sorted_scores_boxes)
sorted_boxes = torch.cat([torch.as_tensor(x).view(1, 4) for x in sorted_boxes])
target_bbox = target[0]["bbox"]
converted_bbox = [
target_bbox[0],
target_bbox[1],
target_bbox[2] + target_bbox[0],
target_bbox[3] + target_bbox[1],
]
giou = generalized_box_iou(sorted_boxes, torch.as_tensor(converted_bbox).view(-1, 4))
for k in self.k:
if max(giou[:k]) >= self.thresh_iou:
dataset2score["clevrref"][k] += 1.0
dataset2count["clevrref"] += 1.0
for key, value in dataset2score.items():
for k in self.k:
try:
value[k] /= dataset2count[key]
                    except ZeroDivisionError:
pass
results = {}
for key, value in dataset2score.items():
results[key] = sorted([v for k, v in value.items()])
print(f" Dataset: {key} - Precision @ 1, 5, 10: {results[key]} \n")
return results
return None
def build(image_set, args):
tokenizer = AutoTokenizer.from_pretrained(
args.text_encoder_type, local_files_only=True
)
img_dir = Path(args.clevr_img_path) / f"{image_set}"
ann_file = Path(args.clevr_ann_path) / f"{image_set}.json"
dataset = ClevrRefDetection(
img_dir,
ann_file,
transforms=make_clevr_transforms(image_set, cautious=True),
return_masks=args.masks,
return_tokens=True,
tokenizer=tokenizer,
)
return dataset
| [
"torch.as_tensor"
] | 1.7.0 | rstrudel/mdetr | 177724cc60c7d63628dd14a5f26b21ea2cea45e3 |
1.7 | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
"""Postprocessors class to transform MDETR output according to the downstream task"""
from typing import Dict
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from mdetr.util import box_ops
class PostProcessFlickr(nn.Module):
"""This module converts the model's output for Flickr30k entities evaluation.
This processor is intended for recall@k evaluation with respect to each phrase in the sentence.
It requires a description of each phrase (as a binary mask), and returns a sorted list of boxes for each phrase.
"""
@torch.no_grad()
def forward(self, outputs, target_sizes, positive_map, items_per_batch_element):
"""Perform the computation.
Args:
outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                For evaluation, this must be the original image size (before any data augmentation)
                For visualization, this should be the image size after data augmentation, but before padding
positive_map: tensor [total_nbr_phrases x max_seq_len] for each phrase in the batch, contains a binary
mask of the tokens that correspond to that sentence. Note that this is a "collapsed" batch,
meaning that all the phrases of all the batch elements are stored sequentially.
items_per_batch_element: list[int] number of phrases corresponding to each batch element.
"""
out_logits, out_bbox = outputs["pred_logits"], outputs["pred_boxes"]
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
batch_size = target_sizes.shape[0]
prob = F.softmax(out_logits, -1)
# convert to [x0, y0, x1, y1] format
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
# and from relative [0, 1] to absolute [0, height] coordinates
boxes = boxes * scale_fct[:, None, :]
cum_sum = np.cumsum(items_per_batch_element)
curr_batch_index = 0
# binarize the map if not already binary
pos = positive_map > 1e-6
predicted_boxes = [[] for _ in range(batch_size)]
# The collapsed batch dimension must match the number of items
assert len(pos) == cum_sum[-1]
if len(pos) == 0:
return predicted_boxes
# if the first batch elements don't contain elements, skip them.
while cum_sum[curr_batch_index] == 0:
curr_batch_index += 1
for i in range(len(pos)):
# scores are computed by taking the max over the scores assigned to the positive tokens
scores, _ = torch.max(pos[i].unsqueeze(0) * prob[curr_batch_index, :, :], dim=-1)
_, indices = torch.sort(scores, descending=True)
assert items_per_batch_element[curr_batch_index] > 0
predicted_boxes[curr_batch_index].append(boxes[curr_batch_index][indices].to("cpu").tolist())
if i == len(pos) - 1:
break
# check if we need to move to the next batch element
while i >= cum_sum[curr_batch_index] - 1:
curr_batch_index += 1
assert curr_batch_index < len(cum_sum)
return predicted_boxes
class PostProcessPhrasecut(nn.Module):
"""This module converts the model's output for Phrasecut evaluation.
    Contrary to most postprocessors, it doesn't operate directly on the model's output, but must be called
after the regular box processor, and optionally the segmentation post-processor (if masks are being computed).
Args:
score_thresh(float): minimum score at which the box/mask will be kept
"""
def __init__(self, score_thresh=0.7):
super().__init__()
self.score_thresh = score_thresh
@torch.no_grad()
def forward(self, results):
"""Perform the computation
Args:
            results: Dictionary that comes from PostProcess and optionally PostProcessSegm
"""
final_results = []
for elem in results:
keep = elem["scores"] > self.score_thresh
# convert boxes to [x0, y0, w, h]
boxes = elem["boxes"][keep].view(-1, 4)
boxes[..., 2:] -= boxes[..., :2]
res = {"boxes": boxes.tolist()}
if "masks" in elem:
res["masks"] = elem["masks"][keep].any(0).squeeze(0).cpu().numpy()
final_results.append(res)
return final_results
class PostProcess(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
@torch.no_grad()
def forward(self, outputs, target_sizes):
"""Perform the computation
Parameters:
outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                For evaluation, this must be the original image size (before any data augmentation)
                For visualization, this should be the image size after data augmentation, but before padding
"""
out_logits, out_bbox = outputs["pred_logits"], outputs["pred_boxes"]
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = F.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
labels = torch.ones_like(labels)
scores = 1 - prob[:, :, -1]
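        # Added note (following the usual DETR convention): the last logit is assumed to be
        # the "no object" class, so the box score is 1 minus that probability and every box
        # is given the dummy label 1.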
# convert to [x0, y0, x1, y1] format
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
assert len(scores) == len(labels) == len(boxes)
results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
if "pred_isfinal" in outputs:
is_final = outputs["pred_isfinal"].sigmoid()
scores_refexp = scores * is_final.view_as(scores)
assert len(results) == len(scores_refexp)
for i in range(len(results)):
results[i]["scores_refexp"] = scores_refexp[i]
return results
class PostProcessSegm(nn.Module):
"""Similar to PostProcess but for segmentation masks.
This processor is to be called sequentially after PostProcess.
Args:
threshold: threshold that will be applied to binarize the segmentation masks.
"""
def __init__(self, threshold=0.5):
super().__init__()
self.threshold = threshold
@torch.no_grad()
def forward(self, results, outputs, orig_target_sizes, max_target_sizes):
"""Perform the computation
Parameters:
results: already pre-processed boxes (output of PostProcess)
outputs: raw outputs of the model
            orig_target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
                For evaluation, this must be the original image size (before any data augmentation)
                For visualization, this should be the image size after data augmentation, but before padding
            max_target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch
after data augmentation.
"""
assert len(orig_target_sizes) == len(max_target_sizes)
max_h, max_w = max_target_sizes.max(0)[0].tolist()
outputs_masks = outputs["pred_masks"].squeeze(2)
outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False)
# Check if all sizes are the same, in which case we can do the interpolation more efficiently
min_h, min_w = max_target_sizes.min(0)[0].tolist()
min_orig_h, min_orig_w = orig_target_sizes.min(0)[0].tolist()
max_orig_h, max_orig_w = orig_target_sizes.max(0)[0].tolist()
if min_h == max_h and min_w == max_w and min_orig_h == max_orig_h and min_orig_w == max_orig_w:
outputs_masks = (
F.interpolate(outputs_masks, size=(min_orig_h, min_orig_w), mode="bilinear").sigmoid() > self.threshold
).cpu()
for i, cur_mask in enumerate(outputs_masks):
results[i]["masks"] = cur_mask.unsqueeze(1)
return results
for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):
img_h, img_w = t[0], t[1]
results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)
results[i]["masks"] = (
F.interpolate(results[i]["masks"].float(), size=tuple(tt.tolist()), mode="bilinear").sigmoid()
> self.threshold
).cpu()
return results
def build_postprocessors(args, dataset_name) -> Dict[str, nn.Module]:
postprocessors: Dict[str, nn.Module] = {"bbox": PostProcess()}
if args.masks:
postprocessors["segm"] = PostProcessSegm()
if dataset_name == "flickr":
postprocessors["flickr_bbox"] = PostProcessFlickr()
if dataset_name == "phrasecut":
postprocessors["phrasecut"] = PostProcessPhrasecut()
return postprocessors
| [
"torch.stack",
"torch.nn.functional.softmax",
"torch.no_grad",
"torch.nn.functional.interpolate",
"torch.ones_like",
"torch.sort"
] | 1.7.0 | rstrudel/mdetr | 177724cc60c7d63628dd14a5f26b21ea2cea45e3 |
1.7 | # Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import datetime
import json
import os
from collections import OrderedDict, defaultdict
import numpy as np
import pycocotools.mask as mask_util
import torch
import torch._six
import mdetr.util.dist as dist
from .coco_eval import merge
from .lvis import LVIS
#################################################################
# From LVIS, with following changes:
# * fixed LVISEval constructor to accept empty dt
# * Removed logger
# * LVIS results supports numpy inputs
#################################################################
class Params:
def __init__(self, iou_type):
"""Params for LVIS evaluation API."""
self.img_ids = []
self.cat_ids = []
# np.arange causes trouble. the data point on arange is slightly
# larger than the true value
self.iou_thrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
self.rec_thrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
self.max_dets = 300
self.area_rng = [
[0 ** 2, 1e5 ** 2],
[0 ** 2, 32 ** 2],
[32 ** 2, 96 ** 2],
[96 ** 2, 1e5 ** 2],
]
self.area_rng_lbl = ["all", "small", "medium", "large"]
self.use_cats = 1
# We bin categories in three bins based how many images of the training
# set the category is present in.
# r: Rare : < 10
# c: Common : >= 10 and < 100
# f: Frequent: >= 100
self.img_count_lbl = ["r", "c", "f"]
self.iou_type = iou_type
class LVISResults(LVIS):
def __init__(self, lvis_gt, results, max_dets=300):
"""Constructor for LVIS results.
Args:
lvis_gt (LVIS class instance, or str containing path of
annotation file)
results (str containing path of result file or a list of dicts)
max_dets (int): max number of detections per image. The official
value of max_dets for LVIS is 300.
"""
super(LVISResults, self).__init__()
assert isinstance(lvis_gt, LVIS)
self.dataset["images"] = [img for img in lvis_gt.dataset["images"]]
if isinstance(results, str):
result_anns = self._load_json(results)
elif type(results) == np.ndarray:
result_anns = self.loadNumpyAnnotations(results)
else:
result_anns = results
if max_dets >= 0:
result_anns = self.limit_dets_per_image(result_anns, max_dets)
if len(result_anns) > 0 and "bbox" in result_anns[0]:
self.dataset["categories"] = copy.deepcopy(lvis_gt.dataset["categories"])
for id, ann in enumerate(result_anns):
x1, y1, w, h = ann["bbox"]
x2 = x1 + w
y2 = y1 + h
if "segmentation" not in ann:
ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann["area"] = w * h
ann["id"] = id + 1
elif len(result_anns) > 0 and "segmentation" in result_anns[0]:
self.dataset["categories"] = copy.deepcopy(lvis_gt.dataset["categories"])
for id, ann in enumerate(result_anns):
# Only support compressed RLE format as segmentation results
ann["area"] = mask_util.area(ann["segmentation"])
if "bbox" not in ann:
ann["bbox"] = mask_util.toBbox(ann["segmentation"])
ann["id"] = id + 1
self.dataset["annotations"] = result_anns
self._create_index()
# #FIXME: disabling this check for now
# img_ids_in_result = [ann["image_id"] for ann in result_anns]
# assert set(img_ids_in_result) == (
# set(img_ids_in_result) & set(self.get_img_ids())
# ), "Results do not correspond to current LVIS set."
def limit_dets_per_image(self, anns, max_dets):
img_ann = defaultdict(list)
for ann in anns:
img_ann[ann["image_id"]].append(ann)
for img_id, _anns in img_ann.items():
if len(_anns) <= max_dets:
continue
_anns = sorted(_anns, key=lambda ann: ann["score"], reverse=True)
img_ann[img_id] = _anns[:max_dets]
return [ann for anns in img_ann.values() for ann in anns]
def get_top_results(self, img_id, score_thrs):
ann_ids = self.get_ann_ids(img_ids=[img_id])
anns = self.load_anns(ann_ids)
return list(filter(lambda ann: ann["score"] > score_thrs, anns))
class LVISEval:
def __init__(self, lvis_gt, lvis_dt=None, iou_type="segm"):
"""Constructor for LVISEval.
Args:
lvis_gt (LVIS class instance, or str containing path of annotation file)
lvis_dt (LVISResult class instance, or str containing path of result file,
or list of dict)
iou_type (str): segm or bbox evaluation
"""
if iou_type not in ["bbox", "segm"]:
raise ValueError("iou_type: {} is not supported.".format(iou_type))
if isinstance(lvis_gt, LVIS):
self.lvis_gt = lvis_gt
elif isinstance(lvis_gt, str):
self.lvis_gt = LVIS(lvis_gt)
else:
raise TypeError("Unsupported type {} of lvis_gt.".format(lvis_gt))
if isinstance(lvis_dt, LVISResults):
self.lvis_dt = lvis_dt
elif isinstance(lvis_dt, (str, list)):
self.lvis_dt = LVISResults(self.lvis_gt, lvis_dt)
elif lvis_dt is not None:
raise TypeError("Unsupported type {} of lvis_dt.".format(lvis_dt))
# per-image per-category evaluation results
self.eval_imgs = defaultdict(list)
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iou_type=iou_type) # parameters
self.results = OrderedDict()
self.stats = []
self.ious = {} # ious between all gts and dts
self.params.img_ids = sorted(self.lvis_gt.get_img_ids())
self.params.cat_ids = sorted(self.lvis_gt.get_cat_ids())
def _to_mask(self, anns, lvis):
for ann in anns:
rle = lvis.ann_to_rle(ann)
ann["segmentation"] = rle
def _prepare(self):
"""Prepare self._gts and self._dts for evaluation based on params."""
cat_ids = self.params.cat_ids if self.params.cat_ids else None
gts = self.lvis_gt.load_anns(self.lvis_gt.get_ann_ids(img_ids=self.params.img_ids, cat_ids=cat_ids))
dts = self.lvis_dt.load_anns(self.lvis_dt.get_ann_ids(img_ids=self.params.img_ids, cat_ids=cat_ids))
# convert ground truth to mask if iou_type == 'segm'
if self.params.iou_type == "segm":
self._to_mask(gts, self.lvis_gt)
self._to_mask(dts, self.lvis_dt)
# set ignore flag
for gt in gts:
if "ignore" not in gt:
gt["ignore"] = 0
for gt in gts:
self._gts[gt["image_id"], gt["category_id"]].append(gt)
# For federated dataset evaluation we will filter out all dt for an
# image which belong to categories not present in gt and not present in
        # the negative list for an image. In other words, the detector is not
        # penalized for categories for which we have no ground-truth information
        # about their presence or absence in an image.
img_data = self.lvis_gt.load_imgs(ids=self.params.img_ids)
# per image map of categories not present in image
img_nl = {d["id"]: d["neg_category_ids"] for d in img_data}
# per image list of categories present in image
img_pl = defaultdict(set)
for ann in gts:
img_pl[ann["image_id"]].add(ann["category_id"])
        # per image map of categories which have missing gt. For these
        # categories we don't penalize the detector for false positives.
self.img_nel = {d["id"]: d["not_exhaustive_category_ids"] for d in img_data}
for dt in dts:
img_id, cat_id = dt["image_id"], dt["category_id"]
if cat_id not in img_nl[img_id] and cat_id not in img_pl[img_id]:
continue
self._dts[img_id, cat_id].append(dt)
self.freq_groups = self._prepare_freq_group()
def _prepare_freq_group(self):
freq_groups = [[] for _ in self.params.img_count_lbl]
cat_data = self.lvis_gt.load_cats(self.params.cat_ids)
for idx, _cat_data in enumerate(cat_data):
frequency = _cat_data["frequency"]
freq_groups[self.params.img_count_lbl.index(frequency)].append(idx)
return freq_groups
def evaluate(self):
"""
Run per image evaluation on given images and store results
(a list of dict) in self.eval_imgs.
"""
self.params.img_ids = list(np.unique(self.params.img_ids))
if self.params.use_cats:
cat_ids = self.params.cat_ids
else:
cat_ids = [-1]
self._prepare()
self.ious = {
(img_id, cat_id): self.compute_iou(img_id, cat_id) for img_id in self.params.img_ids for cat_id in cat_ids
}
# loop through images, area range, max detection number
self.eval_imgs = [
self.evaluate_img(img_id, cat_id, area_rng)
for cat_id in cat_ids
for area_rng in self.params.area_rng
for img_id in self.params.img_ids
]
def _get_gt_dt(self, img_id, cat_id):
"""Create gt, dt which are list of anns/dets. If use_cats is true
only anns/dets corresponding to tuple (img_id, cat_id) will be
used. Else, all anns/dets in image are used and cat_id is not used.
"""
if self.params.use_cats:
gt = self._gts[img_id, cat_id]
dt = self._dts[img_id, cat_id]
else:
gt = [_ann for _cat_id in self.params.cat_ids for _ann in self._gts[img_id, cat_id]]
dt = [_ann for _cat_id in self.params.cat_ids for _ann in self._dts[img_id, cat_id]]
return gt, dt
def compute_iou(self, img_id, cat_id):
gt, dt = self._get_gt_dt(img_id, cat_id)
if len(gt) == 0 and len(dt) == 0:
return []
# Sort detections in decreasing order of score.
idx = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in idx]
iscrowd = [int(False)] * len(gt)
if self.params.iou_type == "segm":
ann_type = "segmentation"
elif self.params.iou_type == "bbox":
ann_type = "bbox"
else:
raise ValueError("Unknown iou_type for iou computation.")
gt = [g[ann_type] for g in gt]
dt = [d[ann_type] for d in dt]
# compute iou between each dt and gt region
# will return array of shape len(dt), len(gt)
ious = mask_util.iou(dt, gt, iscrowd)
return ious
def evaluate_img(self, img_id, cat_id, area_rng):
"""Perform evaluation for single category and image."""
gt, dt = self._get_gt_dt(img_id, cat_id)
if len(gt) == 0 and len(dt) == 0:
return None
# Add another filed _ignore to only consider anns based on area range.
for g in gt:
if g["ignore"] or (g["area"] < area_rng[0] or g["area"] > area_rng[1]):
g["_ignore"] = 1
else:
g["_ignore"] = 0
# Sort gt ignore last
gt_idx = np.argsort([g["_ignore"] for g in gt], kind="mergesort")
gt = [gt[i] for i in gt_idx]
# Sort dt highest score first
dt_idx = np.argsort([-d["score"] for d in dt], kind="mergesort")
dt = [dt[i] for i in dt_idx]
# load computed ious
ious = self.ious[img_id, cat_id][:, gt_idx] if len(self.ious[img_id, cat_id]) > 0 else self.ious[img_id, cat_id]
num_thrs = len(self.params.iou_thrs)
num_gt = len(gt)
num_dt = len(dt)
# Array to store the "id" of the matched dt/gt
gt_m = np.zeros((num_thrs, num_gt))
dt_m = np.zeros((num_thrs, num_dt))
gt_ig = np.array([g["_ignore"] for g in gt])
dt_ig = np.zeros((num_thrs, num_dt))
for iou_thr_idx, iou_thr in enumerate(self.params.iou_thrs):
if len(ious) == 0:
break
for dt_idx, _dt in enumerate(dt):
iou = min([iou_thr, 1 - 1e-10])
# information about best match so far (m=-1 -> unmatched)
# store the gt_idx which matched for _dt
m = -1
for gt_idx, _ in enumerate(gt):
# if this gt already matched continue
if gt_m[iou_thr_idx, gt_idx] > 0:
continue
# if _dt matched to reg gt, and on ignore gt, stop
if m > -1 and gt_ig[m] == 0 and gt_ig[gt_idx] == 1:
break
# continue to next gt unless better match made
if ious[dt_idx, gt_idx] < iou:
continue
# if match successful and best so far, store appropriately
iou = ious[dt_idx, gt_idx]
m = gt_idx
# No match found for _dt, go to next _dt
if m == -1:
continue
# if gt to ignore for some reason update dt_ig.
# Should not be used in evaluation.
dt_ig[iou_thr_idx, dt_idx] = gt_ig[m]
# _dt match found, update gt_m, and dt_m with "id"
dt_m[iou_thr_idx, dt_idx] = gt[m]["id"]
gt_m[iou_thr_idx, m] = _dt["id"]
# For LVIS we will ignore any unmatched detection if that category was
# not exhaustively annotated in gt.
dt_ig_mask = [
d["area"] < area_rng[0] or d["area"] > area_rng[1] or d["category_id"] in self.img_nel[d["image_id"]]
for d in dt
]
dt_ig_mask = np.array(dt_ig_mask).reshape((1, num_dt)) # 1 X num_dt
dt_ig_mask = np.repeat(dt_ig_mask, num_thrs, 0) # num_thrs X num_dt
# Based on dt_ig_mask ignore any unmatched detection by updating dt_ig
dt_ig = np.logical_or(dt_ig, np.logical_and(dt_m == 0, dt_ig_mask))
# store results for given image and category
return {
"image_id": img_id,
"category_id": cat_id,
"area_rng": area_rng,
"dt_ids": [d["id"] for d in dt],
"gt_ids": [g["id"] for g in gt],
"dt_matches": dt_m,
"gt_matches": gt_m,
"dt_scores": [d["score"] for d in dt],
"gt_ignore": gt_ig,
"dt_ignore": dt_ig,
}
def accumulate(self):
"""Accumulate per image evaluation results and store the result in
self.eval.
"""
if not self.eval_imgs:
print("Warning: Please run evaluate first.")
if self.params.use_cats:
cat_ids = self.params.cat_ids
else:
cat_ids = [-1]
num_thrs = len(self.params.iou_thrs)
num_recalls = len(self.params.rec_thrs)
num_cats = len(cat_ids)
num_area_rngs = len(self.params.area_rng)
num_imgs = len(self.params.img_ids)
# -1 for absent categories
precision = -np.ones((num_thrs, num_recalls, num_cats, num_area_rngs))
recall = -np.ones((num_thrs, num_cats, num_area_rngs))
# Initialize dt_pointers
dt_pointers = {}
for cat_idx in range(num_cats):
dt_pointers[cat_idx] = {}
for area_idx in range(num_area_rngs):
dt_pointers[cat_idx][area_idx] = {}
# Per category evaluation
for cat_idx in range(num_cats):
Nk = cat_idx * num_area_rngs * num_imgs
for area_idx in range(num_area_rngs):
Na = area_idx * num_imgs
E = [self.eval_imgs[Nk + Na + img_idx] for img_idx in range(num_imgs)]
# Remove elements which are None
E = [e for e in E if e is not None]
if len(E) == 0:
continue
# Append all scores: shape (N,)
dt_scores = np.concatenate([e["dt_scores"] for e in E], axis=0)
dt_ids = np.concatenate([e["dt_ids"] for e in E], axis=0)
dt_idx = np.argsort(-dt_scores, kind="mergesort")
dt_scores = dt_scores[dt_idx]
dt_ids = dt_ids[dt_idx]
dt_m = np.concatenate([e["dt_matches"] for e in E], axis=1)[:, dt_idx]
dt_ig = np.concatenate([e["dt_ignore"] for e in E], axis=1)[:, dt_idx]
gt_ig = np.concatenate([e["gt_ignore"] for e in E])
# num gt anns to consider
num_gt = np.count_nonzero(gt_ig == 0)
if num_gt == 0:
continue
tps = np.logical_and(dt_m, np.logical_not(dt_ig))
fps = np.logical_and(np.logical_not(dt_m), np.logical_not(dt_ig))
tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
dt_pointers[cat_idx][area_idx] = {
"dt_ids": dt_ids,
"tps": tps,
"fps": fps,
}
for iou_thr_idx, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
num_tp = len(tp)
rc = tp / num_gt
if num_tp:
recall[iou_thr_idx, cat_idx, area_idx] = rc[-1]
else:
recall[iou_thr_idx, cat_idx, area_idx] = 0
# np.spacing(1) ~= eps
pr = tp / (fp + tp + np.spacing(1))
pr = pr.tolist()
# Replace each precision value with the maximum precision
# value to the right of that recall level. This ensures
# that the calculated AP value will be less susceptible
# to small variations in the ranking.
for i in range(num_tp - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
rec_thrs_insert_idx = np.searchsorted(rc, self.params.rec_thrs, side="left")
pr_at_recall = [0.0] * num_recalls
try:
for _idx, pr_idx in enumerate(rec_thrs_insert_idx):
pr_at_recall[_idx] = pr[pr_idx]
except Exception:
pass
precision[iou_thr_idx, :, cat_idx, area_idx] = np.array(pr_at_recall)
self.eval = {
"params": self.params,
"counts": [num_thrs, num_recalls, num_cats, num_area_rngs],
"date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"precision": precision,
"recall": recall,
"dt_pointers": dt_pointers,
}
def _summarize(self, summary_type, iou_thr=None, area_rng="all", freq_group_idx=None):
aidx = [idx for idx, _area_rng in enumerate(self.params.area_rng_lbl) if _area_rng == area_rng]
if summary_type == "ap":
s = self.eval["precision"]
if iou_thr is not None:
tidx = np.where(iou_thr == self.params.iou_thrs)[0]
s = s[tidx]
if freq_group_idx is not None:
s = s[:, :, self.freq_groups[freq_group_idx], aidx]
else:
s = s[:, :, :, aidx]
else:
s = self.eval["recall"]
if iou_thr is not None:
tidx = np.where(iou_thr == self.params.iou_thrs)[0]
s = s[tidx]
s = s[:, :, aidx]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
return mean_s
def summarize(self):
"""Compute and display summary metrics for evaluation results."""
if not self.eval:
raise RuntimeError("Please run accumulate() first.")
max_dets = self.params.max_dets
self.results["AP"] = self._summarize("ap")
self.results["AP50"] = self._summarize("ap", iou_thr=0.50)
self.results["AP75"] = self._summarize("ap", iou_thr=0.75)
self.results["APs"] = self._summarize("ap", area_rng="small")
self.results["APm"] = self._summarize("ap", area_rng="medium")
self.results["APl"] = self._summarize("ap", area_rng="large")
self.results["APr"] = self._summarize("ap", freq_group_idx=0)
self.results["APc"] = self._summarize("ap", freq_group_idx=1)
self.results["APf"] = self._summarize("ap", freq_group_idx=2)
self.stats = np.zeros((9,))
self.stats[0] = self._summarize("ap")
self.stats[1] = self._summarize("ap", iou_thr=0.50)
self.stats[2] = self._summarize("ap", iou_thr=0.75)
self.stats[3] = self._summarize("ap", area_rng="small")
self.stats[4] = self._summarize("ap", area_rng="medium")
self.stats[5] = self._summarize("ap", area_rng="large")
self.stats[6] = self._summarize("ap", freq_group_idx=0)
self.stats[7] = self._summarize("ap", freq_group_idx=1)
self.stats[8] = self._summarize("ap", freq_group_idx=2)
key = "AR@{}".format(max_dets)
self.results[key] = self._summarize("ar")
for area_rng in ["small", "medium", "large"]:
key = "AR{}@{}".format(area_rng[0], max_dets)
self.results[key] = self._summarize("ar", area_rng=area_rng)
self.print_results()
def run(self):
"""Wrapper function which calculates the results."""
self.evaluate()
self.accumulate()
self.summarize()
def print_results(self):
template = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} catIds={:>3s}] = {:0.3f}"
for key, value in self.results.items():
max_dets = self.params.max_dets
if "AP" in key:
title = "Average Precision"
_type = "(AP)"
else:
title = "Average Recall"
_type = "(AR)"
if len(key) > 2 and key[2].isdigit():
iou_thr = float(key[2:]) / 100
iou = "{:0.2f}".format(iou_thr)
else:
iou = "{:0.2f}:{:0.2f}".format(self.params.iou_thrs[0], self.params.iou_thrs[-1])
if len(key) > 2 and key[2] in ["r", "c", "f"]:
cat_group_name = key[2]
else:
cat_group_name = "all"
if len(key) > 2 and key[2] in ["s", "m", "l"]:
area_rng = key[2]
else:
area_rng = "all"
print(template.format(title, _type, iou, area_rng, max_dets, cat_group_name, value))
def get_results(self):
if not self.results:
print("Warning: results is empty. Call run().")
return self.results
#################################################################
# end of straight copy from lvis, just fixing constructor
#################################################################
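# Minimal usage sketch (hypothetical helper, not part of the lvis copy above): ties
# together the run() flow of the evaluator, mirroring the calls made later in
# _summarize_standard/_summarize_fixed. `gt` is an LVIS ground-truth object and
# `detections` is a list of result dicts with image_id/category_id/bbox/score keys.
def _demo_lvis_eval(gt, detections):
    results = LVISResults(gt, detections)
    lvis_eval = LVISEval(gt, results, iou_type="bbox")
    lvis_eval.run()  # evaluate() -> accumulate() -> summarize() -> print_results()
    return lvis_eval.get_results()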
class LvisEvaluator(object):
def __init__(self, lvis_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
# lvis_gt = copy.deepcopy(lvis_gt)
self.lvis_gt = lvis_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = LVISEval(lvis_gt, iou_type=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
lvis_dt = LVISResults(self.lvis_gt, results)
lvis_eval = self.coco_eval[iou_type]
lvis_eval.lvis_dt = lvis_dt
lvis_eval.params.img_ids = list(img_ids)
lvis_eval.evaluate()
eval_imgs = lvis_eval.eval_imgs
eval_imgs = np.asarray(eval_imgs).reshape(
len(lvis_eval.params.cat_ids), len(lvis_eval.params.area_rng), len(lvis_eval.params.img_ids)
)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_lvis_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def accumulate(self):
for lvis_eval in self.coco_eval.values():
lvis_eval.accumulate()
def summarize(self):
for iou_type, lvis_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
lvis_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_lvis_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_lvis_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_lvis_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_lvis_detection(self, predictions):
lvis_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
lvis_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return lvis_results
def prepare_for_lvis_segmentation(self, predictions):
lvis_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
lvis_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return lvis_results
def _merge_lists(listA, listB, maxN, key):
result = []
indA, indB = 0, 0
while (indA < len(listA) or indB < len(listB)) and len(result) < maxN:
if (indB < len(listB)) and (indA >= len(listA) or key(listA[indA]) < key(listB[indB])):
result.append(listB[indB])
indB += 1
else:
result.append(listA[indA])
indA += 1
return result
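# Minimal sketch of how _merge_lists behaves (hypothetical inputs): both lists are
# assumed to be sorted by descending score, as in the update() methods below, and the
# merge keeps at most maxN entries, highest scores first.
def _demo_merge_lists():
    a = [{"score": 0.9}, {"score": 0.5}]
    b = [{"score": 0.7}, {"score": 0.4}]
    merged = _merge_lists(a, b, maxN=3, key=lambda x: x["score"])
    # merged == [{"score": 0.9}, {"score": 0.7}, {"score": 0.5}]
    return merged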
# Adapted from https://github.com/achalddave/large-vocab-devil/blob/9aaddc15b00e6e0d370b16743233e40d973cd53f/scripts/evaluate_ap_fixed.py
class LvisEvaluatorFixedAP(object):
def __init__(self, gt: LVIS, topk=10000, fixed_ap=True):
self.results = []
self.by_cat = {}
self.gt = gt
self.topk = topk
self.fixed_ap = fixed_ap
def update(self, predictions):
cur_results = self.prepare(predictions)
if self.fixed_ap:
by_cat = defaultdict(list)
for ann in cur_results:
by_cat[ann["category_id"]].append(ann)
for cat, cat_anns in by_cat.items():
if cat not in self.by_cat:
self.by_cat[cat] = []
cur = sorted(cat_anns, key=lambda x: x["score"], reverse=True)[: self.topk]
self.by_cat[cat] = _merge_lists(self.by_cat[cat], cur, self.topk, key=lambda x: x["score"])
else:
by_id = defaultdict(list)
for ann in cur_results:
by_id[ann["image_id"]].append(ann)
for id_anns in by_id.values():
self.results.extend(sorted(id_anns, key=lambda x: x["score"], reverse=True)[:300])
def synchronize_between_processes(self):
if self.fixed_ap:
all_cats = dist.all_gather(self.by_cat)
self.by_cat = defaultdict(list)
for cats in all_cats:
for cat, cat_anns in cats.items():
self.by_cat[cat].extend(cat_anns)
else:
self.results = sum(dist.all_gather(self.results), [])
def prepare(self, predictions):
lvis_results = []
for original_id, prediction in predictions:
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
lvis_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return lvis_results
def summarize(self):
if not dist.is_main_process():
return
if self.fixed_ap:
self._summarize_fixed()
else:
self._summarize_standard()
def _summarize_standard(self):
results = LVISResults(self.gt, self.results)
lvis_eval = LVISEval(self.gt, results, iou_type="bbox")
lvis_eval.run()
lvis_eval.print_results()
def _summarize_fixed(self):
results = []
missing_dets_cats = set()
for cat, cat_anns in self.by_cat.items():
if len(cat_anns) < self.topk:
missing_dets_cats.add(cat)
results.extend(sorted(cat_anns, key=lambda x: x["score"], reverse=True)[: self.topk])
if missing_dets_cats:
print(
f"\n===\n"
f"{len(missing_dets_cats)} classes had less than {self.topk} detections!\n"
f"Outputting {self.topk} detections for each class will improve AP further.\n"
f"If using detectron2, please use the lvdevil/infer_topk.py script to "
f"output a results file with {self.topk} detections for each class.\n"
f"==="
)
results = LVISResults(self.gt, results, max_dets=-1)
lvis_eval = LVISEval(self.gt, results, iou_type="bbox")
params = lvis_eval.params
params.max_dets = -1 # No limit on detections per image.
lvis_eval.run()
lvis_eval.print_results()
metrics = {k: v for k, v in lvis_eval.results.items() if k.startswith("AP")}
print("copypaste: %s,%s", ",".join(map(str, metrics.keys())), "path")
class LvisDumper(object):
def __init__(self, topk=10000, fixed_ap=True, out_path="lvis_eval"):
self.results = []
self.by_cat = {}
self.topk = topk
self.fixed_ap = fixed_ap
self.out_path = out_path
if dist.is_main_process():
if not os.path.exists(self.out_path):
os.mkdir(self.out_path)
def update(self, predictions):
cur_results = self.prepare(predictions)
if self.fixed_ap:
by_cat = defaultdict(list)
for ann in cur_results:
by_cat[ann["category_id"]].append(ann)
for cat, cat_anns in by_cat.items():
if cat not in self.by_cat:
self.by_cat[cat] = []
cur = sorted(cat_anns, key=lambda x: x["score"], reverse=True)[: self.topk]
self.by_cat[cat] = _merge_lists(self.by_cat[cat], cur, self.topk, key=lambda x: x["score"])
else:
by_id = defaultdict(list)
for ann in cur_results:
by_id[ann["image_id"]].append(ann)
for id_anns in by_id.values():
self.results.extend(sorted(id_anns, key=lambda x: x["score"], reverse=True)[:300])
def synchronize_between_processes(self):
if self.fixed_ap:
all_cats = dist.all_gather(self.by_cat)
self.by_cat = defaultdict(list)
for cats in all_cats:
for cat, cat_anns in cats.items():
self.by_cat[cat].extend(cat_anns)
else:
self.results = sum(dist.all_gather(self.results), [])
def prepare(self, predictions):
lvis_results = []
for original_id, prediction in predictions:
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
lvis_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return lvis_results
def summarize(self):
if not dist.is_main_process():
return
if self.fixed_ap:
self._summarize_fixed()
else:
self._summarize_standard()
def _summarize_standard(self):
json_path = os.path.join(self.out_path, "results.json")
print("dumping to ", json_path)
with open(json_path, "w") as f:
json.dump(self.results, f)
print("dumped")
def _summarize_fixed(self):
results = []
missing_dets_cats = set()
for cat, cat_anns in self.by_cat.items():
if len(cat_anns) < self.topk:
missing_dets_cats.add(cat)
results.extend(sorted(cat_anns, key=lambda x: x["score"], reverse=True)[: self.topk])
if missing_dets_cats:
print(
f"\n===\n"
f"{len(missing_dets_cats)} classes had less than {self.topk} detections!\n"
f"Outputting {self.topk} detections for each class will improve AP further.\n"
f"If using detectron2, please use the lvdevil/infer_topk.py script to "
f"output a results file with {self.topk} detections for each class.\n"
f"==="
)
json_path = os.path.join(self.out_path, "results.json")
print("dumping to ", json_path)
with open(json_path, "w") as f:
json.dump(results, f)
print("dumped")
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
def create_common_lvis_eval(lvis_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
lvis_eval.eval_imgs = eval_imgs
lvis_eval.params.img_ids = img_ids
| [
"torch.stack"
] | 1.7.0 | rstrudel/mdetr | 177724cc60c7d63628dd14a5f26b21ea2cea45e3 |
1.7 | import torch
from torch import nn
from .operation import Operation
class Conv2d(Operation):
"""
module.weight: c_out x c_in x k_h x k_w
module.bias: c_out x 1
Argument shapes
in_data: n x (c_in)(kernel_size) x out_size
out_grads: n x c_out x out_size
kernel_size = (k_h)(k_w)
out_size = output feature map size
"""
@staticmethod
def batch_grads_weight(
module: nn.Module, in_data: torch.Tensor, out_grads: torch.Tensor
):
grads = torch.bmm(
out_grads, in_data.transpose(2, 1)
) # n x c_out x (c_in)(kernel_size)
return grads.view(
-1, *module.weight.size()
) # n x c_out x c_in x k_h x k_w
@staticmethod
def batch_grads_bias(module: nn.Module, out_grads: torch.Tensor):
return out_grads.sum(axis=2) # n x c_out
@staticmethod
def cov_diag_weight(module, in_data, out_grads):
grads = torch.bmm(
out_grads, in_data.transpose(2, 1)
) # n x c_out x (c_in)(kernel_size)
rst = grads.mul(grads).sum(dim=0) # c_out x (c_in)(kernel_size)
return rst.view_as(module.weight) # c_out x c_in x k_h x k_w
@staticmethod
def cov_diag_bias(module, out_grads):
grads = out_grads.sum(axis=2) # n x c_out
return grads.mul(grads).sum(axis=0) # c_out x 1
@staticmethod
def cov_kron_A(module, in_data):
m = in_data.transpose(0, 1).flatten(start_dim=1)  # (c_in)(kernel_size) x n(out_size)
return torch.matmul(
m, m.T
) # (c_in)(kernel_size) x (c_in)(kernel_size)
@staticmethod
def cov_kron_B(module, out_grads):
out_size = out_grads.shape[-1]
m = out_grads.transpose(0, 1).flatten(start_dim=1)  # c_out x n(out_size)
return torch.matmul(m, m.T).div(out_size) # c_out x c_out
@staticmethod
def gram_A(module, in_data1, in_data2):
# n x (c_in)(kernel_size)(out_size)
m1 = in_data1.flatten(start_dim=1)
m2 = in_data2.flatten(start_dim=1)
return torch.matmul(m1, m2.T) # n x n
@staticmethod
def gram_B(module, out_grads1, out_grads2):
out_size = out_grads1.shape[-1]
# n x (c_out)(out_size)
m1 = out_grads1.flatten(start_dim=1)
m2 = out_grads2.flatten(start_dim=1)
return torch.matmul(m1, m2.T).div(out_size) # n x n
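# Minimal sketch (hypothetical helper; an assumption, not part of this module) of how
# the documented `in_data` layout -- n x (c_in)(kernel_size) x out_size -- can be
# produced with torch.nn.functional.unfold and fed to the static methods above.
def _demo_conv2d_batch_grads():
    import torch.nn.functional as F
    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)
    x = torch.randn(4, 3, 16, 16)                               # n x c_in x h x w
    in_data = F.unfold(x, kernel_size=3, padding=1)             # 4 x 27 x 256
    out_grads = torch.randn(4, 8, 16, 16).flatten(start_dim=2)  # 4 x c_out x out_size
    grads = Conv2d.batch_grads_weight(conv, in_data, out_grads)
    return grads.shape                                          # (4, 8, 3, 3, 3)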
| [
"torch.matmul"
] | 1.7.0 | rioyokotalab/asdfghjkl | f435c1e2527162fb07512b4ce5058460aab238b9 |
1.6 | from argparse import ArgumentParser
import torch
from lasaft.source_separation.conditioned.cunet.dcun_base import Dense_CUNet, Dense_CUNet_Framework
from lasaft.source_separation.sub_modules.control_models import pocm_control_model, dense_control_block
from lasaft.utils.PoCM_utils import Pocm_Matmul, Pocm_naive
class DenseCUNet_GPoCM(Dense_CUNet):
def __init__(self,
n_fft,
input_channels, internal_channels,
n_blocks, n_internal_layers,
mk_block_f, mk_ds_f, mk_us_f,
first_conv_activation, last_activation,
t_down_layers, f_down_layers,
# Conditional Mechanism #
control_vector_type, control_input_dim, embedding_dim, condition_to,
control_type, control_n_layer, pocm_type, pocm_norm
):
super(DenseCUNet_GPoCM, self).__init__(
n_fft,
input_channels, internal_channels,
n_blocks, n_internal_layers,
mk_block_f, mk_ds_f, mk_us_f,
first_conv_activation, last_activation,
t_down_layers, f_down_layers,
# Conditional Mechanism #
control_vector_type, control_input_dim, embedding_dim, condition_to
)
# Select the PoCM implementation:
# both yield the same outputs, but 'matmul' is faster on GPUs since it avoids Python loops.
assert pocm_type in ['naive', 'matmul']
self.pocm = Pocm_naive if pocm_type == 'naive' else Pocm_Matmul
# Select normalization methods for PoCM
assert pocm_norm in [None, 'batch_norm']
# Make condition generator
if control_type == "dense":
self.condition_generator = pocm_control_model(
dense_control_block(embedding_dim, control_n_layer),
n_blocks, internal_channels,
pocm_to=condition_to,
pocm_norm=pocm_norm
)
else:
raise NotImplementedError
self.activation = self.last_conv[-1]
def forward(self, input_spec, input_condition):
condition_embedding = self.embedding(input_condition)
gammas, betas = self.condition_generator(condition_embedding)
x = self.first_conv(input_spec)
encoding_outputs = []
gammas_encoder, gammas_middle, gammas_decoder = gammas
betas_encoder, betas_middle, betas_decoder = betas
for i in range(self.n):
x = self.encoders[i](x)
if self.is_encoder_conditioned:
g = self.pocm(x, gammas_encoder[i], betas_encoder[i]).sigmoid()
x = g * x
encoding_outputs.append(x)
x = self.downsamplings[i](x)
x = self.mid_block(x)
if self.is_middle_conditioned:
g = self.pocm(x, gammas_middle, betas_middle).sigmoid()
x = g * x
for i in range(self.n):
x = self.upsamplings[i](x)
x = torch.cat((x, encoding_outputs[-i - 1]), 1)
x = self.decoders[i](x)
if self.is_decoder_conditioned:
g = self.pocm(x, gammas_decoder[i], betas_decoder[i]).sigmoid()
x = g * x
return self.last_conv(x)
class DenseCUNet_GPoCM_Framework(Dense_CUNet_Framework):
def __init__(self, n_fft, hop_length, num_frame,
spec_type, spec_est_mode,
conditional_spec2spec,
optimizer, lr, dev_mode, train_loss, val_loss
):
super(DenseCUNet_GPoCM_Framework, self).__init__(
n_fft, hop_length, num_frame,
spec_type, spec_est_mode,
conditional_spec2spec,
optimizer, lr, dev_mode, train_loss, val_loss
)
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--control_n_layer', type=int, default=4)
parser.add_argument('--control_type', type=str, default='dense')
parser.add_argument('--pocm_type', type=str, default='matmul')
parser.add_argument('--pocm_norm', type=str, default='batch_norm')
return Dense_CUNet_Framework.add_model_specific_args(parser)
| [
"torch.cat"
] | 1.6.0 | alswhdgus10/Conditioned-Source-Separation-LaSAFT | f8d8de82391fa04622bbe93e232bb627a9367feb |
1.4 | from typing import Optional, List, Tuple, Callable, Union, Dict, Type, Any
from functools import partial
import gym
import torch as th
import torch.nn as nn
import numpy as np
from stable_baselines3.common.policies import (BasePolicy, register_policy, MlpExtractor,
create_sde_features_extractor, NatureCNN,
BaseFeaturesExtractor, FlattenExtractor)
from stable_baselines3.common.distributions import (make_proba_distribution, Distribution,
DiagGaussianDistribution, CategoricalDistribution,
MultiCategoricalDistribution, BernoulliDistribution,
StateDependentNoiseDistribution)
class PPOPolicy(BasePolicy):
"""
Policy class (with both actor and critic) for A2C and its derivatives (PPO).
:param observation_space: (gym.spaces.Space) Observation space
:param action_space: (gym.spaces.Space) Action space
:param lr_schedule: (Callable) Learning rate schedule (could be constant)
:param net_arch: ([int or dict]) The specification of the policy and value networks.
:param device: (str or th.device) Device on which the code should run.
:param activation_fn: (Type[nn.Module]) Activation function
:param ortho_init: (bool) Whether to use or not orthogonal initialization
:param use_sde: (bool) Whether to use State Dependent Exploration or not
:param log_std_init: (float) Initial value for the log standard deviation
:param full_std: (bool) Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: ([int]) Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: (bool) Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It keeps the variance above zero
and prevents it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: (bool) Whether to squash the output using a tanh function;
this ensures the action bounds are respected when using gSDE.
:param features_extractor_class: (Type[BaseFeaturesExtractor]) Features extractor to use.
:param features_extractor_kwargs: (Optional[Dict[str, Any]]) Keyword arguments
to pass to the feature extractor.
:param normalize_images: (bool) Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: (Type[th.optim.Optimizer]) The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: (Optional[Dict[str, Any]]) Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Callable,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
device: Union[th.device, str] = 'auto',
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in ADAM optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs['eps'] = 1e-5
super(PPOPolicy, self).__init__(observation_space, action_space,
device,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == FlattenExtractor:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
else:
net_arch = []
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space,
**self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
'full_std': full_std,
'squash_output': squash_output,
'use_expln': use_expln,
'learn_features': sde_net_arch is not None
}
self.sde_features_extractor = None
self.sde_net_arch = sde_net_arch
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
data.update(dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=self.dist_kwargs['squash_output'] if self.dist_kwargs else None,
full_std=self.dist_kwargs['full_std'] if self.dist_kwargs else None,
sde_net_arch=self.sde_net_arch,
use_expln=self.dist_kwargs['use_expln'] if self.dist_kwargs else None,
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs
))
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs: (int)
"""
assert isinstance(self.action_dist,
StateDependentNoiseDistribution), 'reset_noise() is only available when using gSDE'
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build(self, lr_schedule: Callable) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: (Callable) Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self.mlp_extractor = MlpExtractor(self.features_dim, net_arch=self.net_arch,
activation_fn=self.activation_fn, device=self.device)
latent_dim_pi = self.mlp_extractor.latent_dim_pi
# Separate feature extractor for gSDE
if self.sde_net_arch is not None:
self.sde_features_extractor, latent_sde_dim = create_sde_features_extractor(self.features_dim,
self.sde_net_arch,
self.activation_fn)
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi,
log_std_init=self.log_std_init)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
latent_sde_dim = latent_dim_pi if self.sde_net_arch is None else latent_sde_dim
self.action_net, self.log_std = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi,
latent_sde_dim=latent_sde_dim,
log_std_init=self.log_std_init)
elif isinstance(self.action_dist, CategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, BernoulliDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
for module in [self.features_extractor, self.mlp_extractor,
self.action_net, self.value_net]:
# Values from stable-baselines, TODO: check why
gain = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1
}[module]
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor,
deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: (th.Tensor) Observation
:param deterministic: (bool) Whether to sample or use deterministic actions
:return: (Tuple[th.Tensor, th.Tensor, th.Tensor]) action, value and log probability of the action
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Get the latent code (i.e., activations of the last layer of each network)
for the different networks.
:param obs: (th.Tensor) Observation
:return: (Tuple[th.Tensor, th.Tensor, th.Tensor]) Latent codes
for the actor, the value function and for gSDE function
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Features for sde
latent_sde = latent_pi
if self.sde_features_extractor is not None:
latent_sde = self.sde_features_extractor(features)
return latent_pi, latent_vf, latent_sde
def _get_action_dist_from_latent(self, latent_pi: th.Tensor,
latent_sde: Optional[th.Tensor] = None) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: (th.Tensor) Latent code for the actor
:param latent_sde: (Optional[th.Tensor]) Latent code for the gSDE exploration function
:return: (Distribution) Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_sde)
else:
raise ValueError('Invalid action distribution')
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation: (th.Tensor)
:param deterministic: (bool) Whether to use stochastic or deterministic actions
:return: (th.Tensor) Taken action according to the policy
"""
latent_pi, _, latent_sde = self._get_latent(observation)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
return distribution.get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor,
actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs: (th.Tensor)
:param actions: (th.Tensor)
:return: (th.Tensor, th.Tensor, th.Tensor) estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
MlpPolicy = PPOPolicy
class CnnPolicy(PPOPolicy):
"""
CnnPolicy class (with both actor and critic) for A2C and its derivatives (PPO).
:param observation_space: (gym.spaces.Space) Observation space
:param action_space: (gym.spaces.Space) Action space
:param lr_schedule: (Callable) Learning rate schedule (could be constant)
:param net_arch: ([int or dict]) The specification of the policy and value networks.
:param device: (str or th.device) Device on which the code should run.
:param activation_fn: (Type[nn.Module]) Activation function
:param ortho_init: (bool) Whether to use or not orthogonal initialization
:param use_sde: (bool) Whether to use State Dependent Exploration or not
:param log_std_init: (float) Initial value for the log standard deviation
:param full_std: (bool) Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: ([int]) Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: (bool) Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It keeps the variance above zero
and prevents it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: (bool) Whether to squash the output using a tanh function;
this ensures the action bounds are respected when using gSDE.
:param features_extractor_class: (Type[BaseFeaturesExtractor]) Features extractor to use.
:param features_extractor_kwargs: (Optional[Dict[str, Any]]) Keyword arguments
to pass to the feature extractor.
:param normalize_images: (bool) Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: (Type[th.optim.Optimizer]) The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: (Optional[Dict[str, Any]]) Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Callable,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
device: Union[th.device, str] = 'auto',
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None):
super(CnnPolicy, self).__init__(observation_space,
action_space,
lr_schedule,
net_arch,
device,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs)
register_policy("MlpPolicy", MlpPolicy)
register_policy("CnnPolicy", CnnPolicy)
| [
"torch.nn.Linear"
] | 1.4.0 | rolandgvc/stable-baselines3 | 805a87ed8b340c6a1a2e674468d5769a8cda38b0 |
1.8 | import torch
import random
import argparse
import numpy as np
import ipdb as pdb
import os, pwd, yaml
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
import warnings
warnings.filterwarnings('ignore')
from train_spline import pretrain_spline
from leap.tools.utils import load_yaml
from leap.datasets.kitti import KittiMasksTwoSample
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
def main(args):
assert args.exp is not None, "FATAL: "+__file__+": You must specify an exp config file (e.g., *.yaml)"
current_user = pwd.getpwuid(os.getuid()).pw_name
script_dir = os.path.dirname(__file__)
rel_path = os.path.join('../leap/configs',
'%s.yaml'%args.exp)
abs_file_path = os.path.join(script_dir, rel_path)
cfg = load_yaml(abs_file_path)
print("######### Configuration #########")
print(yaml.dump(cfg, default_flow_style=False))
print("#################################")
pl.seed_everything(args.seed)
data = KittiMasksTwoSample(path = os.path.join(cfg['ROOT'], cfg['DATASET']),
transform = cfg['TRANSFORM'],
max_delta_t = cfg['DT'])
num_validation_samples = cfg['VAE']['N_VAL_SAMPLES']
train_data, val_data = random_split(data, [len(data)-num_validation_samples, num_validation_samples])
# Disable augmentation in validation set
val_data.dataset.transform = None
train_loader = DataLoader(train_data,
batch_size=cfg['VAE']['TRAIN_BS'],
pin_memory=cfg['VAE']['PIN'],
num_workers=cfg['VAE']['CPU'],
drop_last=True,
shuffle=True)
val_loader = DataLoader(val_data,
batch_size=cfg['VAE']['VAL_BS'],
pin_memory=cfg['VAE']['PIN'],
num_workers=cfg['VAE']['CPU'],
shuffle=False)
if cfg['MODEL'] == "BetaVAE":
from leap.baselines.BetaVAE.model import BetaKittiConv
model = BetaKittiConv(nc=cfg['VAE']['NC'],
z_dim=cfg['VAE']['LATENT_DIM'],
hidden_dim=cfg['VAE']['ENC']['HIDDEN_DIM'],
beta=cfg['BetaVAE']['BETA'],
lr=cfg['BetaVAE']['LR'],
correlation=cfg['MCC']['CORR'],
decoder_dist=cfg['VAE']['DEC']['DIST'])
elif cfg['MODEL'] == "FactorVAE":
from leap.baselines.FactorVAE.model import FactorVAEKitti
model = FactorVAEKitti(nc=cfg['VAE']['NC'],
z_dim=cfg['VAE']['LATENT_DIM'],
hidden_dim=cfg['VAE']['ENC']['HIDDEN_DIM'],
gamma=cfg['FactorVAE']['GAMMA'],
lr_VAE=cfg['FactorVAE']['LR_VAE'],
lr_D=cfg['FactorVAE']['LR_D'],
correlation=cfg['MCC']['CORR'],
decoder_dist=cfg['VAE']['DEC']['DIST'])
log_dir = os.path.join(cfg["LOG"], current_user, args.exp)
checkpoint_callback = ModelCheckpoint(monitor='val_mcc',
save_top_k=1,
mode='max')
trainer = pl.Trainer(default_root_dir=log_dir,
gpus=cfg['VAE']['GPU'],
val_check_interval = cfg['MCC']['FREQ'],
max_epochs=cfg['VAE']['EPOCHS'],
deterministic=True,
callbacks=[checkpoint_callback])
# Train the model
trainer.fit(model, train_loader, val_loader)
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-e',
'--exp',
type=str
)
argparser.add_argument(
'-s',
'--seed',
type=int,
default=770
)
args = argparser.parse_args()
main(args)
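# Hypothetical sketch of the minimal config structure this script expects from the YAML
# file (keys taken from the cfg[...] accesses above; all values here are made up for
# illustration only).
EXAMPLE_CFG = {
    "ROOT": "/path/to/data", "DATASET": "kitti_masks", "TRANSFORM": True, "DT": 5,
    "MODEL": "BetaVAE", "LOG": "/path/to/logs",
    "VAE": {"N_VAL_SAMPLES": 1024, "TRAIN_BS": 64, "VAL_BS": 256, "PIN": True,
            "CPU": 4, "NC": 3, "LATENT_DIM": 10, "GPU": 1, "EPOCHS": 100,
            "ENC": {"HIDDEN_DIM": 256}, "DEC": {"DIST": "gaussian"}},
    "BetaVAE": {"BETA": 4.0, "LR": 1e-4},
    "FactorVAE": {"GAMMA": 6.4, "LR_VAE": 1e-4, "LR_D": 1e-4},
    "MCC": {"CORR": "Pearson", "FREQ": 0.2},
}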
| [
"torch.utils.data.DataLoader"
] | 1.8.1 | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da |
1.5 | import inspect
import logging
from abc import abstractmethod
from typing import Dict, Generic, List, Sequence, Union
import torch
from torch.nn import Parameter, ParameterList
import flair
from flair.data import DT
log = logging.getLogger("flair")
class Embeddings(torch.nn.Module, Generic[DT]):
"""Abstract base class for all embeddings. Every new type of embedding must implement these methods."""
def __init__(self):
"""Set some attributes that would otherwise result in errors. Overwrite these in your embedding class."""
if not hasattr(self, "name"):
self.name: str = "unnamed_embedding"
if not hasattr(self, "static_embeddings"):
# if the embeddings for a sentence are the same in each epoch, set this to True for improved efficiency
self.static_embeddings = False
super().__init__()
@property
@abstractmethod
def embedding_length(self) -> int:
"""Returns the length of the embedding vector."""
raise NotImplementedError
@property
@abstractmethod
def embedding_type(self) -> str:
raise NotImplementedError
def embed(self, data_points: Union[DT, List[DT]]) -> List[DT]:
"""Add embeddings to all words in a list of sentences. If embeddings are already added, updates only if embeddings
are non-static."""
# if only one sentence is passed, convert it to a list of sentences
if not isinstance(data_points, list):
data_points = [data_points]
if not self._everything_embedded(data_points) or not self.static_embeddings:
self._add_embeddings_internal(data_points)
return data_points
def _everything_embedded(self, data_points: Sequence[DT]) -> bool:
for data_point in data_points:
if self.name not in data_point._embeddings.keys():
return False
return True
@abstractmethod
def _add_embeddings_internal(self, sentences: List[DT]):
"""Private method for adding embeddings to all words in a list of sentences."""
pass
def get_names(self) -> List[str]:
"""Returns a list of embedding names. In most cases, it is just a list with one item, namely the name of
this embedding. But in some cases, the embedding is made up of different embeddings (StackedEmbedding).
Then, the list contains the names of all embeddings in the stack."""
return [self.name]
def get_named_embeddings_dict(self) -> Dict:
return {self.name: self}
@staticmethod
def get_instance_parameters(locals: dict) -> dict:
class_definition = locals.get("__class__")
instance_parameter_names = set(inspect.signature(class_definition.__init__).parameters) # type: ignore
instance_parameter_names.remove("self")
instance_parameter_names.add("__class__")
instance_parameters = {
class_attribute: attribute_value
for class_attribute, attribute_value in locals.items()
if class_attribute in instance_parameter_names
}
return instance_parameters
class ScalarMix(torch.nn.Module):
"""
Computes a parameterised scalar mixture of N tensors.
This method was proposed by Liu et al. (2019) in the paper:
"Linguistic Knowledge and Transferability of Contextual Representations" (https://arxiv.org/abs/1903.08855)
The implementation is copied and slightly modified from the allennlp repository and is licensed under Apache 2.0.
It can be found under:
https://github.com/allenai/allennlp/blob/master/allennlp/modules/scalar_mix.py.
"""
def __init__(self, mixture_size: int, trainable: bool = False) -> None:
"""
Inits scalar mix implementation.
``mixture = gamma * sum(s_k * tensor_k)`` where ``s = softmax(w)``, with ``w`` and ``gamma`` scalar parameters.
:param mixture_size: size of mixtures (usually the number of layers)
"""
super(ScalarMix, self).__init__()
self.mixture_size = mixture_size
initial_scalar_parameters = [0.0] * mixture_size
self.scalar_parameters = ParameterList(
[
Parameter(
torch.tensor(
[initial_scalar_parameters[i]],
dtype=torch.float,
device=flair.device,
),
requires_grad=trainable,
)
for i in range(mixture_size)
]
)
self.gamma = Parameter(
torch.tensor(
[1.0],
dtype=torch.float,
device=flair.device,
),
requires_grad=trainable,
)
def forward(self, tensors: List[torch.Tensor]) -> torch.Tensor:
"""
Computes a weighted average of the ``tensors``. The input tensors can be any shape
with at least two dimensions, but must all be the same shape.
:param tensors: list of input tensors
:return: computed weighted average of input tensors
"""
if len(tensors) != self.mixture_size:
log.error(
"{} tensors were passed, but the module was initialized to mix {} tensors.".format(
len(tensors), self.mixture_size
)
)
normed_weights = torch.nn.functional.softmax(
torch.cat([parameter for parameter in self.scalar_parameters]), dim=0
)
normed_weights = torch.split(normed_weights, split_size_or_sections=1)
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(weight * tensor)
return self.gamma * sum(pieces)
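# Minimal sketch (hypothetical helper; assumes flair.device is the CPU so the freshly
# created tensors live on the same device as the mixture parameters): mixes three
# equally shaped per-layer representations as described in the ScalarMix docstring.
def _demo_scalar_mix():
    layer_outputs = [torch.randn(2, 5, 16) for _ in range(3)]  # 3 layers, same shape
    mix = ScalarMix(mixture_size=3, trainable=True)
    mixed = mix(layer_outputs)
    return mixed.shape  # torch.Size([2, 5, 16])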
| [
"torch.split",
"torch.cat",
"torch.tensor"
] | 1.5.0 | lukasgarbas/flair | 041c85cf3d45940dccd453fc350767c1c85aad49 |
1.2 | from overrides import overrides
import torch
from torch.nn import Linear
from allennlp.common.checks import ConfigurationError
from allennlp.modules.matrix_attention.legacy_matrix_attention import LegacyMatrixAttention
from allennlp.modules.seq2seq_encoders.seq2seq_encoder import Seq2SeqEncoder
from allennlp.modules.similarity_functions import DotProductSimilarity, SimilarityFunction
from allennlp.modules.similarity_functions import MultiHeadedSimilarity
from allennlp.nn import util
@Seq2SeqEncoder.register("intra_sentence_attention")
class IntraSentenceAttentionEncoder(Seq2SeqEncoder):
"""
An ``IntraSentenceAttentionEncoder`` is a :class:`Seq2SeqEncoder` that merges the original word
representations with an attention (for each word) over other words in the sentence. As a
:class:`Seq2SeqEncoder`, the input to this module is of shape ``(batch_size, num_tokens,
input_dim)``, and the output is of shape ``(batch_size, num_tokens, output_dim)``.
We compute the attention using a configurable :class:`SimilarityFunction`, which could have
multiple attention heads. The operation for merging the original representations with the
attended representations is also configurable (e.g., you can concatenate them, add them,
multiply them, etc.).
Parameters
----------
input_dim : ``int`` required
The dimension of the vector for each element in the input sequence;
``input_tensor.size(-1)``.
projection_dim : ``int``, optional
If given, we will do a linear projection of the input sequence to this dimension before
performing the attention-weighted sum.
similarity_function : ``SimilarityFunction``, optional
The similarity function to use when computing attentions. Default is to use a dot product.
num_attention_heads : ``int``, optional
If this is greater than one (default is 1), we will split the input into several "heads" to
compute multi-headed weighted sums. Must be used with a multi-headed similarity function,
and you almost certainly want to do a projection in conjunction with the multiple heads.
combination : ``str``, optional
This string defines how we merge the original word representations with the result of the
intra-sentence attention. This will be passed to
:func:`~allennlp.nn.util.combine_tensors`; see that function for more detail on exactly how
this works, but some simple examples are ``"1,2"`` for concatenation (the default),
``"1+2"`` for adding the two, or ``"2"`` for only keeping the attention representation.
output_dim : ``int``, optional (default = None)
The dimension of an optional output projection.
"""
def __init__(
self,
input_dim: int,
projection_dim: int = None,
similarity_function: SimilarityFunction = DotProductSimilarity(),
num_attention_heads: int = 1,
combination: str = "1,2",
output_dim: int = None,
) -> None:
super().__init__()
self._input_dim = input_dim
if projection_dim:
self._projection = torch.nn.Linear(input_dim, projection_dim)
else:
self._projection = lambda x: x
projection_dim = input_dim
self._matrix_attention = LegacyMatrixAttention(similarity_function)
self._num_attention_heads = num_attention_heads
if isinstance(similarity_function, MultiHeadedSimilarity):
if num_attention_heads == 1:
raise ConfigurationError(
"Similarity function has multiple heads but encoder doesn't"
)
if num_attention_heads != similarity_function.num_heads:
raise ConfigurationError(
"Number of heads don't match between similarity function "
"and encoder: %d, %d" % (num_attention_heads, similarity_function.num_heads)
)
elif num_attention_heads > 1:
raise ConfigurationError("Encoder has multiple heads but similarity function doesn't")
self._combination = combination
combined_dim = util.get_combined_dim(combination, [input_dim, projection_dim])
if output_dim:
self._output_projection = Linear(combined_dim, output_dim)
self._output_dim = output_dim
else:
self._output_projection = lambda x: x
self._output_dim = combined_dim
@overrides
def get_input_dim(self) -> int:
return self._input_dim
@overrides
def get_output_dim(self) -> int:
return self._output_dim
@overrides
def is_bidirectional(self):
return False
@overrides
def forward(self, tokens: torch.Tensor, mask: torch.Tensor):
batch_size, sequence_length, _ = tokens.size()
# Shape: (batch_size, sequence_length, sequence_length)
similarity_matrix = self._matrix_attention(tokens, tokens)
if self._num_attention_heads > 1:
# In this case, the similarity matrix actually has shape
# (batch_size, sequence_length, sequence_length, num_heads). To make the rest of the
# logic below easier, we'll permute this to
# (batch_size, sequence_length, num_heads, sequence_length).
similarity_matrix = similarity_matrix.permute(0, 1, 3, 2)
# Shape: (batch_size, sequence_length, [num_heads,] sequence_length)
intra_sentence_attention = util.masked_softmax(similarity_matrix.contiguous(), mask)
# Shape: (batch_size, sequence_length, projection_dim)
output_token_representation = self._projection(tokens)
if self._num_attention_heads > 1:
# We need to split and permute the output representation to be
# (batch_size, num_heads, sequence_length, projection_dim / num_heads), so that we can
# do a proper weighted sum with `intra_sentence_attention`.
shape = list(output_token_representation.size())
new_shape = shape[:-1] + [self._num_attention_heads, -1]
# Shape: (batch_size, sequence_length, num_heads, projection_dim / num_heads)
output_token_representation = output_token_representation.view(*new_shape)
# Shape: (batch_size, num_heads, sequence_length, projection_dim / num_heads)
output_token_representation = output_token_representation.permute(0, 2, 1, 3)
# Shape: (batch_size, sequence_length, [num_heads,] projection_dim [/ num_heads])
attended_sentence = util.weighted_sum(output_token_representation, intra_sentence_attention)
if self._num_attention_heads > 1:
# Here we concatenate the weighted representation for each head. We'll accomplish this
# just with a resize.
# Shape: (batch_size, sequence_length, projection_dim)
attended_sentence = attended_sentence.view(batch_size, sequence_length, -1)
# Shape: (batch_size, sequence_length, combination_dim)
combined_tensors = util.combine_tensors(self._combination, [tokens, attended_sentence])
return self._output_projection(combined_tensors)
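# Minimal usage sketch (hypothetical shapes; single-head dot-product attention by
# default): with the default combination "1,2" the output concatenates the original and
# the attended representations, so the output dim is 2 * input_dim.
def _demo_intra_sentence_attention():
    encoder = IntraSentenceAttentionEncoder(input_dim=8)
    tokens = torch.randn(2, 5, 8)
    mask = torch.ones(2, 5)
    out = encoder(tokens, mask)
    return out.shape  # torch.Size([2, 5, 16]), matching encoder.get_output_dim()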
| [
"torch.nn.Linear"
] | 1.2.0 | mhagiwara/allennlp | a05add5293f091e9dbcaa9db0783e782d77714cf |
1.2 | from typing import Dict, List, Tuple, Optional
from overrides import overrides
import numpy
import torch
import torch.nn.functional as F
from torch.nn import Linear
from allennlp.common.checks import ConfigurationError
from allennlp.common.util import END_SYMBOL, START_SYMBOL
from allennlp.modules.seq2seq_decoders.seq_decoder import SeqDecoder
from allennlp.data import Vocabulary
from allennlp.modules import Embedding
from allennlp.modules.seq2seq_decoders.decoder_net import DecoderNet
from allennlp.nn import util
from allennlp.nn.beam_search import BeamSearch
from allennlp.training.metrics import Metric
@SeqDecoder.register("auto_regressive_seq_decoder")
class AutoRegressiveSeqDecoder(SeqDecoder):
"""
An autoregressive decoder that can be used for most seq2seq tasks.
Parameters
----------
vocab : ``Vocabulary``, required
Vocabulary containing source and target vocabularies. They may be under the same namespace
(`tokens`) or the target tokens can have a different namespace, in which case it needs to
be specified as `target_namespace`.
decoder_net : ``DecoderNet``, required
Module that contains implementation of neural network for decoding output elements
max_decoding_steps : ``int``, required
Maximum length of decoded sequences.
target_embedder : ``Embedding``, required
Embedder for target tokens.
target_namespace : ``str``, optional (default = 'tokens')
If the target side vocabulary is different from the source side's, you need to specify the
target's namespace here. If not, we'll assume it is "tokens", which is also the default
choice for the source side, and this might cause them to share vocabularies.
beam_size : ``int``, optional (default = 4)
Width of the beam for beam search.
tensor_based_metric : ``Metric``, optional (default = None)
A metric to track on validation data that takes raw tensors when it is called.
This metric must accept two arguments when called: a batched tensor
of predicted token indices, and a batched tensor of gold token indices.
token_based_metric : ``Metric``, optional (default = None)
A metric to track on validation data that takes lists of lists of tokens
as input. This metric must accept two arguments when called, both
of type `List[List[str]]`. The first is a predicted sequence for each item
in the batch and the second is a gold sequence for each item in the batch.
scheduled_sampling_ratio : ``float`` optional (default = 0)
Defines the ratio between teacher-forced training and real output usage. If it is zero
(teacher forcing only) and ``decoder_net`` supports parallel decoding, we get the output
predictions in a single forward pass of the `decoder_net`.
"""
def __init__(
self,
vocab: Vocabulary,
decoder_net: DecoderNet,
max_decoding_steps: int,
target_embedder: Embedding,
target_namespace: str = "tokens",
tie_output_embedding: bool = False,
scheduled_sampling_ratio: float = 0,
label_smoothing_ratio: Optional[float] = None,
beam_size: int = 4,
tensor_based_metric: Metric = None,
token_based_metric: Metric = None,
) -> None:
super().__init__(target_embedder)
self._vocab = vocab
# Decodes the sequence of encoded hidden states into a new sequence of hidden states.
self._decoder_net = decoder_net
self._max_decoding_steps = max_decoding_steps
self._target_namespace = target_namespace
self._label_smoothing_ratio = label_smoothing_ratio
# At prediction time, we use a beam search to find the most likely sequence of target tokens.
# We need the start symbol to provide as the input at the first timestep of decoding, and
# end symbol as a way to indicate the end of the decoded sequence.
self._start_index = self._vocab.get_token_index(START_SYMBOL, self._target_namespace)
self._end_index = self._vocab.get_token_index(END_SYMBOL, self._target_namespace)
self._beam_search = BeamSearch(
self._end_index, max_steps=max_decoding_steps, beam_size=beam_size
)
target_vocab_size = self._vocab.get_vocab_size(self._target_namespace)
if self.target_embedder.get_output_dim() != self._decoder_net.target_embedding_dim:
raise ConfigurationError(
"Target Embedder output_dim doesn't match decoder module's input."
)
# We project the hidden state from the decoder into the output vocabulary space
# in order to get log probabilities of each target token, at each time step.
self._output_projection_layer = Linear(
self._decoder_net.get_output_dim(), target_vocab_size
)
if tie_output_embedding:
if self._output_projection_layer.weight.shape != self.target_embedder.weight.shape:
raise ConfigurationError(
"Can't tie embeddings with output linear layer, due to shape mismatch"
)
self._output_projection_layer.weight = self.target_embedder.weight
# These metrics will be updated during training and validation
self._tensor_based_metric = tensor_based_metric
self._token_based_metric = token_based_metric
self._scheduled_sampling_ratio = scheduled_sampling_ratio
def _forward_beam_search(self, state: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Prepare inputs for the beam search, does beam search and returns beam search results.
"""
batch_size = state["source_mask"].size()[0]
start_predictions = state["source_mask"].new_full(
(batch_size,), fill_value=self._start_index
)
# shape (all_top_k_predictions): (batch_size, beam_size, num_decoding_steps)
# shape (log_probabilities): (batch_size, beam_size)
all_top_k_predictions, log_probabilities = self._beam_search.search(
start_predictions, state, self.take_step
)
output_dict = {
"class_log_probabilities": log_probabilities,
"predictions": all_top_k_predictions,
}
return output_dict
def _forward_loss(
self, state: Dict[str, torch.Tensor], target_tokens: Dict[str, torch.LongTensor]
) -> Dict[str, torch.Tensor]:
"""
Make forward pass during training or do greedy search during prediction.
Notes
-----
We really only use the predictions from the method to test that beam search
with a beam size of 1 gives the same results.
"""
# shape: (batch_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (batch_size, max_input_sequence_length)
source_mask = state["source_mask"]
# shape: (batch_size, max_target_sequence_length)
targets = target_tokens["tokens"]
# Prepare embeddings for targets. They will be used as gold embeddings during decoder training
# shape: (batch_size, max_target_sequence_length, embedding_dim)
target_embedding = self.target_embedder(targets)
        # shape: (batch_size, max_target_sequence_length)
target_mask = util.get_text_field_mask(target_tokens)
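        # Two decoding paths: if no scheduled sampling is used and the decoder supports
        # parallel decoding (e.g. a self-attention decoder), all timesteps are decoded in a
        # single forward pass; otherwise we decode step by step in the loop below.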
if self._scheduled_sampling_ratio == 0 and self._decoder_net.decodes_parallel:
_, decoder_output = self._decoder_net(
previous_state=state,
previous_steps_predictions=target_embedding[:, :-1, :],
encoder_outputs=encoder_outputs,
source_mask=source_mask,
previous_steps_mask=target_mask[:, :-1],
)
# shape: (group_size, max_target_sequence_length, num_classes)
logits = self._output_projection_layer(decoder_output)
else:
batch_size = source_mask.size()[0]
_, target_sequence_length = targets.size()
# The last input from the target is either padding or the end symbol.
# Either way, we don't have to process it.
num_decoding_steps = target_sequence_length - 1
# Initialize target predictions with the start index.
# shape: (batch_size,)
last_predictions = source_mask.new_full((batch_size,), fill_value=self._start_index)
# shape: (steps, batch_size, target_embedding_dim)
steps_embeddings = torch.Tensor([])
step_logits: List[torch.Tensor] = []
for timestep in range(num_decoding_steps):
if self.training and torch.rand(1).item() < self._scheduled_sampling_ratio:
                    # Use the model's own predictions (collected in ``steps_embeddings``) with
                    # probability _scheduled_sampling_ratio during training; gold tokens are used
                    # otherwise, and always at test time.
# shape: (batch_size, steps, target_embedding_dim)
state["previous_steps_predictions"] = steps_embeddings
# shape: (batch_size, )
effective_last_prediction = last_predictions
else:
# shape: (batch_size, )
effective_last_prediction = targets[:, timestep]
if timestep == 0:
state["previous_steps_predictions"] = torch.Tensor([])
else:
# shape: (batch_size, steps, target_embedding_dim)
state["previous_steps_predictions"] = target_embedding[:, :timestep]
# shape: (batch_size, num_classes)
output_projections, state = self._prepare_output_projections(
effective_last_prediction, state
)
# list of tensors, shape: (batch_size, 1, num_classes)
step_logits.append(output_projections.unsqueeze(1))
# shape (predicted_classes): (batch_size,)
_, predicted_classes = torch.max(output_projections, 1)
# shape (predicted_classes): (batch_size,)
last_predictions = predicted_classes
# shape: (batch_size, 1, target_embedding_dim)
last_predictions_embeddings = self.target_embedder(last_predictions).unsqueeze(1)
                # This step is required, since we want to keep two different prediction histories: gold and real
if steps_embeddings.shape[-1] == 0:
                    # There are no previous steps, except for the start vectors in ``last_predictions``
# shape: (group_size, 1, target_embedding_dim)
steps_embeddings = last_predictions_embeddings
else:
# shape: (group_size, steps_count, target_embedding_dim)
steps_embeddings = torch.cat([steps_embeddings, last_predictions_embeddings], 1)
# shape: (batch_size, num_decoding_steps, num_classes)
logits = torch.cat(step_logits, 1)
# Compute loss.
target_mask = util.get_text_field_mask(target_tokens)
loss = self._get_loss(logits, targets, target_mask)
        # TODO: We will be using beam search to get predictions for validation, but if beam size is 1
        # we could consider taking the last_predictions here and building step_predictions,
        # and use those instead of running beam search again, if validation performance is taking a hit.
output_dict = {"loss": loss}
return output_dict
def _prepare_output_projections(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
        Decode current state and last prediction to produce projections
into the target space, which can then be used to get probabilities of
each target token for the next step.
Inputs are the same as for `take_step()`.
"""
# shape: (group_size, max_input_sequence_length, encoder_output_dim)
encoder_outputs = state["encoder_outputs"]
# shape: (group_size, max_input_sequence_length)
source_mask = state["source_mask"]
# shape: (group_size, steps_count, decoder_output_dim)
previous_steps_predictions = state.get("previous_steps_predictions")
# shape: (batch_size, 1, target_embedding_dim)
last_predictions_embeddings = self.target_embedder(last_predictions).unsqueeze(1)
if previous_steps_predictions is None or previous_steps_predictions.shape[-1] == 0:
            # There are no previous steps, except for the start vectors in ``last_predictions``
# shape: (group_size, 1, target_embedding_dim)
previous_steps_predictions = last_predictions_embeddings
else:
# shape: (group_size, steps_count, target_embedding_dim)
previous_steps_predictions = torch.cat(
[previous_steps_predictions, last_predictions_embeddings], 1
)
decoder_state, decoder_output = self._decoder_net(
previous_state=state,
encoder_outputs=encoder_outputs,
source_mask=source_mask,
previous_steps_predictions=previous_steps_predictions,
)
state["previous_steps_predictions"] = previous_steps_predictions
# Update state with new decoder state, override previous state
state.update(decoder_state)
if self._decoder_net.decodes_parallel:
decoder_output = decoder_output[:, -1, :]
# shape: (group_size, num_classes)
output_projections = self._output_projection_layer(decoder_output)
return output_projections, state
def _get_loss(
self, logits: torch.LongTensor, targets: torch.LongTensor, target_mask: torch.LongTensor
) -> torch.Tensor:
"""
Compute loss.
Takes logits (unnormalized outputs from the decoder) of size (batch_size,
num_decoding_steps, num_classes), target indices of size (batch_size, num_decoding_steps+1)
and corresponding masks of size (batch_size, num_decoding_steps+1) steps and computes cross
entropy loss while taking the mask into account.
The length of ``targets`` is expected to be greater than that of ``logits`` because the
decoder does not need to compute the output corresponding to the last timestep of
``targets``. This method aligns the inputs appropriately to compute the loss.
During training, we want the logit corresponding to timestep i to be similar to the target
token from timestep i + 1. That is, the targets should be shifted by one timestep for
appropriate comparison. Consider a single example where the target has 3 words, and
padding is to 7 tokens.
The complete sequence would correspond to <S> w1 w2 w3 <E> <P> <P>
and the mask would be 1 1 1 1 1 0 0
and let the logits be l1 l2 l3 l4 l5 l6
We actually need to compare:
the sequence w1 w2 w3 <E> <P> <P>
with masks 1 1 1 1 0 0
against l1 l2 l3 l4 l5 l6
(where the input was) <S> w1 w2 w3 <E> <P>
"""
# shape: (batch_size, num_decoding_steps)
relevant_targets = targets[:, 1:].contiguous()
# shape: (batch_size, num_decoding_steps)
relevant_mask = target_mask[:, 1:].contiguous()
return util.sequence_cross_entropy_with_logits(
logits, relevant_targets, relevant_mask, label_smoothing=self._label_smoothing_ratio
)
def get_output_dim(self):
return self._decoder_net.get_output_dim()
def take_step(
self, last_predictions: torch.Tensor, state: Dict[str, torch.Tensor]
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""
Take a decoding step. This is called by the beam search class.
Parameters
----------
last_predictions : ``torch.Tensor``
A tensor of shape ``(group_size,)``, which gives the indices of the predictions
during the last time step.
state : ``Dict[str, torch.Tensor]``
A dictionary of tensors that contain the current state information
needed to predict the next step, which includes the encoder outputs,
the source mask, and the decoder hidden state and context. Each of these
tensors has shape ``(group_size, *)``, where ``*`` can be any other number
of dimensions.
Returns
-------
Tuple[torch.Tensor, Dict[str, torch.Tensor]]
A tuple of ``(log_probabilities, updated_state)``, where ``log_probabilities``
is a tensor of shape ``(group_size, num_classes)`` containing the predicted
log probability of each class for the next step, for each item in the group,
while ``updated_state`` is a dictionary of tensors containing the encoder outputs,
source mask, and updated decoder hidden state and context.
Notes
-----
We treat the inputs as a batch, even though ``group_size`` is not necessarily
equal to ``batch_size``, since the group may contain multiple states
for each source sentence in the batch.
"""
# shape: (group_size, num_classes)
output_projections, state = self._prepare_output_projections(last_predictions, state)
# shape: (group_size, num_classes)
class_log_probabilities = F.log_softmax(output_projections, dim=-1)
return class_log_probabilities, state
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
all_metrics: Dict[str, float] = {}
if not self.training:
if self._tensor_based_metric is not None:
all_metrics.update(
self._tensor_based_metric.get_metric(reset=reset) # type: ignore
)
if self._token_based_metric is not None:
all_metrics.update(self._token_based_metric.get_metric(reset=reset)) # type: ignore
return all_metrics
@overrides
def forward(
self,
encoder_out: Dict[str, torch.LongTensor],
target_tokens: Dict[str, torch.LongTensor] = None,
) -> Dict[str, torch.Tensor]:
state = encoder_out
decoder_init_state = self._decoder_net.init_decoder_state(state)
state.update(decoder_init_state)
if target_tokens:
output_dict = self._forward_loss(state, target_tokens)
else:
output_dict = {}
if not self.training:
predictions = self._forward_beam_search(state)
output_dict.update(predictions)
if target_tokens:
if self._tensor_based_metric is not None:
# shape: (batch_size, beam_size, max_sequence_length)
top_k_predictions = output_dict["predictions"]
# shape: (batch_size, max_predicted_sequence_length)
best_predictions = top_k_predictions[:, 0, :]
self._tensor_based_metric( # type: ignore
best_predictions, target_tokens["tokens"]
)
if self._token_based_metric is not None:
output_dict = self.post_process(output_dict)
predicted_tokens = output_dict["predicted_tokens"]
self._token_based_metric( # type: ignore
predicted_tokens, self.indices_to_tokens(target_tokens["tokens"][:, 1:])
)
return output_dict
@overrides
def post_process(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_tokens`` to the ``output_dict``.
"""
predicted_indices = output_dict["predictions"]
all_predicted_tokens = self.indices_to_tokens(predicted_indices)
output_dict["predicted_tokens"] = all_predicted_tokens
return output_dict
    def indices_to_tokens(self, batch_indices: numpy.ndarray) -> List[List[str]]:
        if not isinstance(batch_indices, numpy.ndarray):
            batch_indices = batch_indices.detach().cpu().numpy()
        all_tokens = []
        for indices in batch_indices:
# Beam search gives us the top k results for each source sentence in the batch
# but we just want the single best.
if len(indices.shape) > 1:
indices = indices[0]
indices = list(indices)
# Collect indices till the first end_symbol
if self._end_index in indices:
indices = indices[: indices.index(self._end_index)]
tokens = [
self._vocab.get_token_from_index(x, namespace=self._target_namespace)
for x in indices
]
all_tokens.append(tokens)
return all_tokens
| [
"torch.rand",
"torch.cat",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.Tensor"
] | 1.2.0 | mhagiwara/allennlp | a05add5293f091e9dbcaa9db0783e782d77714cf |
1.5 | """
Author: Navid Shervani-Tabar
"""
import torch
from torch import nn
from torch.autograd import Variable
from filter import scattering
class VAEmod(nn.Module):
def __init__(self, args):
super(VAEmod, self).__init__()
# -- training parameters
self.device = args.device
# -- graph parameters
self.n_max_atom = args.n_node
self.n_type_bond = args.n_bond_type
self.n_atom_features = args.n_atom_type
self.n_scat_atom_features = args.n_scat_atom_features
# -- scattering parameters
self.scat = scattering(args).to(self.device)
self.sdim = args.sdim
# -- network parameters
self.leaky = nn.LeakyReLU(0.01, inplace=False)
self.relu = nn.ReLU()
self.dim_interm = 8
self.z_dim = args.z_dim
scat_dim = self.sdim * self.n_scat_atom_features
enc_dim = 400
h_1_dim = 2 * self.z_dim
h_2_dim = 4 * self.z_dim
h_3_dim = 8 * self.z_dim
h_4_dim = self.n_max_atom * self.n_type_bond * self.dim_interm
h_6_dim = self.n_max_atom * self.n_atom_features
# -- encoder
self.bn_1 = nn.BatchNorm1d(scat_dim)
self.enc_fc_1 = nn.Linear(scat_dim, enc_dim)
self.bn_2 = nn.BatchNorm1d(enc_dim)
self.enc_fc_2 = nn.Linear(enc_dim, self.z_dim)
self.enc_fc_3 = nn.Linear(enc_dim, self.z_dim)
# -- weight network
if bool(args.y_target):
self.dec_fc_1 = nn.Linear(self.z_dim + 3, h_1_dim)
else:
self.dec_fc_1 = nn.Linear(self.z_dim, h_1_dim)
self.dec_fc_2 = nn.Linear(h_1_dim, h_2_dim)
self.dec_fc_3 = nn.Linear(h_2_dim, h_3_dim)
self.dec_fc_4 = nn.Linear(h_3_dim, h_4_dim)
# -- signal network
self.SM = nn.Softmax(dim=3)
if bool(args.y_target):
self.dec_fc_5 = nn.Linear(self.n_max_atom * self.n_type_bond * self.n_max_atom + self.z_dim + 3, h_6_dim)
else:
self.dec_fc_5 = nn.Linear(self.n_max_atom * self.n_type_bond * self.n_max_atom + self.z_dim, h_6_dim)
def encode(self, x):
h_1 = self.bn_1(x)
h_2 = self.relu(self.bn_2(self.enc_fc_1(h_1)))
return self.enc_fc_2(h_2), self.enc_fc_3(h_2)
def reparameterize(self, mu, logvar):
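        # Reparameterization trick: during training, sample z = mu + sigma * eps with
        # eps ~ N(0, I), so gradients can flow through mu and logvar; at eval time return mu.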
if self.training:
std = logvar.mul(0.5).exp_()
eps = Variable(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return mu
def decode_W(self, z):
# -- adjacency network (shared)
h_1 = self.leaky((self.dec_fc_1(z)))
h_2 = self.leaky((self.dec_fc_2(h_1)))
h_3 = self.leaky((self.dec_fc_3(h_2)))
h_4 = self.leaky((self.dec_fc_4(h_3)))
h_4 = h_4.view(-1, self.n_max_atom, self.n_type_bond, self.dim_interm)
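        # Batched outer product X @ X^T per bond type yields a symmetric
        # (n_max_atom x n_max_atom) score matrix, so the decoded adjacency is symmetric.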
h_4 = self.leaky(torch.matmul(h_4.permute(0, 2, 1, 3), h_4.permute(0, 2, 3, 1)))
h_4 = h_4.permute(0, 2, 3, 1)
return h_4
def decode_f(self, z, W):
W = self.SM(W)
# -- node network
h_5 = W.reshape(-1, self.n_max_atom * self.n_max_atom * self.n_type_bond)
h_5 = torch.cat((z, h_5), dim=1)
h_5 = self.leaky((self.dec_fc_5(h_5)))
h_5 = h_5.view(-1, self.n_max_atom, self.n_atom_features)
return h_5
def decode(self, z):
W = self.decode_W(z)
f = self.decode_f(z, W)
return [f, W]
def forward(self, signal, adjacency, props):
signal_in = torch.transpose(signal.reshape(-1, self.n_max_atom, self.n_atom_features), 2, 1)
if props is not None:
signal_in = torch.cat((signal_in, props.unsqueeze(2).repeat(1, 1, 9)), dim=1)
mu, logvar = self.encode(self.scat(adjacency, signal_in).reshape(-1, self.sdim * self.n_scat_atom_features))
z = self.reparameterize(mu, logvar)
# -- for constraint regularization
z_prior = self.reparameterize(torch.zeros(mu.size(), device=self.device), torch.zeros(mu.size(), device=self.device))
if props is not None:
return self.decode(torch.cat((z, props), dim=1)), mu, logvar, self.decode(torch.cat((z_prior, props), dim=1))
else:
return self.decode(z), mu, logvar, self.decode(z_prior)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Softmax",
"torch.nn.LeakyReLU",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d"
] | 1.5.0 | nshervt/GSVAE | 6a7771a32634e39644be5549f1c24ee7507518b0 |
1.0 | """Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import configargparse as cfargparse
import os
import torch
import onmt.opts as opts
from onmt.utils.logging import logger
class ArgumentParser(cfargparse.ArgumentParser):
def __init__(
self,
config_file_parser_class=cfargparse.YAMLConfigFileParser,
formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,
**kwargs):
super(ArgumentParser, self).__init__(
config_file_parser_class=config_file_parser_class,
formatter_class=formatter_class,
**kwargs)
@classmethod
def defaults(cls, *args):
"""Get default arguments added to a parser by all ``*args``."""
dummy_parser = cls()
for callback in args:
callback(dummy_parser)
defaults = dummy_parser.parse_known_args([])[0]
return defaults
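    # e.g. cls.defaults(opts.model_opts) returns a Namespace holding only the default
    # model options (used by ckpt_model_opts below to backfill missing checkpoint opts).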
@classmethod
def update_model_opts(cls, model_opt):
if model_opt.word_vec_size > 0:
model_opt.src_word_vec_size = model_opt.word_vec_size
model_opt.tgt_word_vec_size = model_opt.word_vec_size
if model_opt.layers > 0:
model_opt.enc_layers = model_opt.layers
model_opt.dec_layers = model_opt.layers
if model_opt.rnn_size > 0:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
model_opt.brnn = model_opt.encoder_type == "brnn"
if model_opt.copy_attn_type is None:
model_opt.copy_attn_type = model_opt.global_attention
@classmethod
def validate_model_opts(cls, model_opt):
assert model_opt.model_type in ["text", "img", "audio"], \
"Unsupported model type %s" % model_opt.model_type
# this check is here because audio allows the encoder and decoder to
# be different sizes, but other model types do not yet
same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size
assert model_opt.model_type == 'audio' or same_size, \
"The encoder and decoder rnns must be the same size for now"
assert model_opt.rnn_type != "SRU" or model_opt.gpu_ranks, \
"Using SRU requires -gpu_ranks set."
if model_opt.share_embeddings:
if model_opt.model_type != "text":
raise AssertionError(
"--share_embeddings requires --model_type text.")
if model_opt.model_dtype == "fp16":
logger.warning(
"FP16 is experimental, the generated checkpoints may "
"be incompatible with a future version")
@classmethod
def ckpt_model_opts(cls, ckpt_opt):
# Load default opt values, then overwrite with the opts in
# the checkpoint. That way, if there are new options added,
# the defaults are used.
opt = cls.defaults(opts.model_opts)
opt.__dict__.update(ckpt_opt.__dict__)
return opt
@classmethod
def validate_train_opts(cls, opt):
if opt.epochs:
raise AssertionError(
"-epochs is deprecated please use -train_steps.")
if opt.truncated_decoder > 0 and opt.accum_count > 1:
raise AssertionError("BPTT is not compatible with -accum > 1")
if opt.gpuid:
raise AssertionError("gpuid is deprecated \
see world_size and gpu_ranks")
if torch.cuda.is_available() and not opt.gpu_ranks:
logger.info("WARNING: You have a CUDA device, \
should run with -gpu_ranks")
@classmethod
def validate_translate_opts(cls, opt):
if opt.beam_size != 1 and opt.random_sampling_topk != 1:
raise ValueError('Can either do beam search OR random sampling.')
@classmethod
def validate_preprocess_args(cls, opt):
assert opt.max_shard_size == 0, \
"-max_shard_size is deprecated. Please use \
-shard_size (number of examples) instead."
assert opt.shuffle == 0, \
"-shuffle is not implemented. Please shuffle \
your data before pre-processing."
assert os.path.isfile(opt.train_src) \
and os.path.isfile(opt.train_tgt), \
"Please check path of your train src and tgt files!"
assert not opt.valid_src or os.path.isfile(opt.valid_src), \
"Please check path of your valid src file!"
assert not opt.valid_tgt or os.path.isfile(opt.valid_tgt), \
"Please check path of your valid tgt file!"
| [
"torch.cuda.is_available"
] | 1.0 | brainsharks-fyp17/ZEST | 036d5b92ebde6053ad789b95a257bda9db296926 |
1.1 | ########################################################################################################################
"""
Description : Contains the utility functions for the module.
Python version : 3.7.3
"""
########################################################################################################################
################################################ Importing libraries ###################################################
import re
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.autograd as autograd
from torch.autograd import Variable
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
import torch.optim as optim
import matplotlib.pyplot as plt
########################################################################################################################
def plot_loss_and_perplexity_for_language_model(data_list1, data_list2, epoch_list, figure_size=(7,1), dpi_value=300, figure_name=''):
"""
:param data_list1: loss list(dtype: list)
:param data_list2: perplexity list(dtype: list)
:param epoch_list: epoch list(dtype: list)
:param figure_name: Name of the figure to be saved(dtype: string)
:return: Plots the Loss and perplexity of the language model.
"""
fig1 = plt.figure(figsize=figure_size, dpi=dpi_value)
plt.plot(epoch_list, data_list1, 'b', label='val_loss')
plt.plot(epoch_list, data_list2, 'r', label='perplexity')
plt.xlabel("Epochs")
plt.ylabel("Loss and Perplexity")
plt.title("Loss-Perplexity curve for " + figure_name + " data" )
fig1.savefig(figure_name + "_loss_curve.png", bbox_inches='tight')
def batchify(data, bsz, device):
"""
:param data: data corpus(could be train, test, or validation dataset)
:param bsz: Batch size(dtype: int32)
:param device: GPU/CPU(dtype: torch.device)
:return: dataset divided into batches
"""
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
def repackage_hidden(h):
"""
:param h: hidden state(dtype: torch.tensor)
:return: Wraps hidden states in new Tensors, to detach them from their history.
"""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def make_batches(data, bptt, i):
"""
:param data: data corpus(could be train, test, or validation dataset)
    :param bptt: Backpropagation through time, i.e. the sequence length(dtype: int32)
    :param i: starting index of the current chunk(dtype: int32)
    :return: subdivides the input data into chunks of length bptt and generates source and targets for the model to train on
"""
seq_len = min(bptt, len(data) - 1 - i)
inputs = data[i:i+seq_len]
targets = data[i+1:i+1+seq_len].view(-1)
return inputs, targets
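# Example usage (sketch; `corpus`, `bsz`, `bptt`, and `device` are placeholders):
#   data = batchify(corpus, bsz, device)
#   for i in range(0, data.size(0) - 1, bptt):
#       inputs, targets = make_batches(data, bptt, i)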
def tag_sent_tsv_line_sup(sents, tagger, tag_type):
"""
:param sents: sentences in paragraphs(dtype:list of strings)
:param tagger: POS/NER tagger from flair/nltk
:param tag_type: tag type('pos'/'ner')
:return: array of tagged sentences with their associated 'pos'/'ner' for supervised corpus
"""
tagger.predict(sents)
tags = []
    for s in sents: # tag a batch of sentences and pipe out tsv lines
temp_tags = [str(t.get_tag(tag_type)) for t in s] # throws error for wrong tag type
tags.append([re.sub(r'\([^)]*\)', '', tag) for tag in temp_tags])
return tags[0]
def tag_sent_tsv_line_unsup(sents, tagger, tag_type):
"""
:param sents: sentences in paragraphs(dtype:list of strings)
:param tagger: POS/NER tagger from flair/nltk
:param tag_type: tag type('pos'/'ner')
:return: array of tagged sentences with their associated 'pos'/'ner' for unsupervised corpus
"""
tagger.predict(sents)
tags = []
    for s in sents: # tag a batch of sentences and pipe out tsv lines
temp_tags = [str(t.get_tag(tag_type)) for t in s] # throws error for wrong tag type
tags.append([re.sub(r'\([^)]*\)', '', tag) for tag in temp_tags])
return tags
def sent_2_index(seq, to_ix, cuda=False):
"""
    :param seq: sequence list with paragraphs denoted by list of tokens(dtype:list).
:param to_ix: word to index mappings(dtype:dict)
:return: Long tensor for all the tokens converted to their respective ids
"""
var = autograd.Variable(torch.LongTensor([to_ix[w.lower()] if w.lower() in to_ix.keys() else to_ix["unk"] for w in seq]))
return var
def label_2_index(label, label_to_ix, cuda=False):
"""
:param label: sequence list of labels(dtype:list).
:param label_to_ix: labels to index mappings(dtype:dict)
:return: Long tensor for all the labels converted to their respective ids(negative being zero and positive being one)
"""
var = autograd.Variable(torch.LongTensor([label_to_ix[label]]))
return var
def evaluate_supervised_model(model, data, loss_function, word_to_ix, label_to_ix, data_acc_list, data_roc_list,
data_loss_list, name ='valid'):
"""
:param model: trained model
:param data: data to evaluated on(dtype: pandas.core.frame.DataFrame)
:param loss_function: loss function used for evaluation
:param word_to_ix: word to index mappings(dtype: dict)
:param label_to_ix: label to index mappings(dtype: dict)
:param data_acc_list: a list to collect accuracy at every epoch(dtype: list)
:param data_roc_list: a list to collect roc score at every epoch(dtype: list)
:param data_loss_list: a list to collect loss at every epoch(dtype: list)
:param name: type of data(Could be 'train','test',or 'valid'. dtype: string)
:return: evaluated accuracy and roc on the entire dataset, data_acc_list, data_roc_list, data_loss_list
"""
model.eval()
avg_loss = 0.0
truth_res, pred_res = ([] for i in range(2))
with torch.no_grad():
for (para, label) in zip(data["review"],data["label"]):
truth_res.append(label_to_ix[label])
para = sent_2_index(para, word_to_ix)
label = label_2_index(label, label_to_ix)
y_pred, value_dict = model(para, True)
            # The model outputs raw logits (trained with BCEWithLogitsLoss), hence the outputs are
            # passed through a sigmoid layer to turn them into probabilities.
pred_res.append(float(torch.sigmoid(torch.FloatTensor(y_pred))))
# Since the loss_function already has a sigmoid layer attached to it, we don't need to pass the predictions
# again through another sigmoid layer.
loss = loss_function(y_pred, label.type(torch.FloatTensor))
avg_loss += loss.item()
avg_loss /= len(data)
roc = roc_auc_score(np.array(truth_res), np.array(pred_res).round(), sample_weight=None)
pred_res = [0 if values > 0.5 else 1 for values in pred_res]
acc = accuracy_score(np.array(truth_res), np.array(pred_res).round(), sample_weight=None)
data_roc_list.append(roc)
data_loss_list.append(avg_loss)
data_acc_list.append(acc)
print(' '*16 + name + ':|avg_loss:%g|ROC:%g|Accuracy:%g|' % (avg_loss, roc, acc))
return acc, roc, data_acc_list, data_roc_list, data_loss_list
def train_supervised_model(model, train_data, loss_function, optimizer, word_to_ix, label_to_ix, i, train_acc_list,
train_roc_list, train_loss_list, batch_size=32, clip=5):
"""
:param model: the model to be trained
:param train_data: Training data(dtype: pandas.core.frame.DataFrame)
:param loss_function: loss function used for evaluation
:param optimizer: Optimizer used while training
:param word_to_ix: word to index mappings(dtype: dict)
:param label_to_ix: label to index mappings(dtype: dict)
:param i: number of steps passed(dtype: int)
:param train_acc_list: a list to collect accuracy at every epoch(dtype: list)
:param train_roc_list: a list to collect roc score at every epoch(dtype: list)
:param train_loss_list: a list to collect loss at every epoch(dtype: list)
:param batch_size: batch size(dtype: int)
:param clip: clip rate(dtype: int)
:return: train_acc_list, train_roc_list, train_loss_list
"""
model.train()
truth_res, pred_res = ([] for i in range(2))
avg_loss = 0.0
count = 0
for (para, label) in zip(train_data["review"],train_data["label"]):
truth_res.append(label_to_ix[label])
para = sent_2_index(para, word_to_ix)
label = label_2_index(label, label_to_ix)
y_pred, value_dict = model(para)
        # The model outputs raw logits (trained with BCEWithLogitsLoss), hence the outputs are
        # passed through a sigmoid layer to turn them into probabilities.
pred_res.append(float(torch.sigmoid(torch.FloatTensor(y_pred))))
# Since the loss_function already has a sigmoid layer attached to it, we don't need to pass the predictions
# again through another sigmoid layer.
loss = loss_function(y_pred, label.type(torch.FloatTensor))
loss.backward()
current_loss = loss.item()
avg_loss += current_loss
count += 1
if count % 10000 == 0:
print('|paragraphs: %d|loss :%g|' % (count, current_loss))
if count % batch_size == 0:
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
#raise Exception("Arrange clip as per your batch size")
#raise Exception("Try with and without clipping")
            if clip is not None:
nn.utils.clip_grad_norm_(model.parameters(), clip)
optimizer.step()
optimizer.zero_grad()
avg_loss /= len(train_data)
print('-' * 100)
train_loss_list.append(avg_loss)
roc = roc_auc_score(np.array(truth_res), np.array(pred_res).round(), sample_weight=None)
pred_res = [0 if values > 0.5 else 1 for values in pred_res]
acc = accuracy_score(np.array(truth_res), np.array(pred_res).round(), sample_weight=None)
train_roc_list.append(roc)
train_acc_list.append(acc)
print('|End of Epoch:%d|Training data:|avg_loss:%g|ROC:%g|Accuracy:%g|'%(int(i+1), avg_loss, roc, acc))
return train_acc_list, train_roc_list, train_loss_list
def train_model_and_generate_plots(model, train_data, test_data, val_data, unsup_word_to_idx, label_to_idx, learning_rate, batch_size,
nb_epochs, save_dir, description, early_stopping=5):
"""
:param model: the model to be trained
:param train_data: Training data(dtype: pandas.core.frame.DataFrame)
:param test_data: Test data(dtype: pandas.core.frame.DataFrame)
:param val_data: Validation data(dtype: pandas.core.frame.DataFrame)
:param unsup_word_to_idx: word to index mappings(dtype: dict)
:param label_to_idx: label to index mappings(dtype: dict)
:param learning_rate: Learning rate(dtype:float)
:param nb_epochs: number of Epochs(dtype:int)
:param save_dir: directory for the model to be saved(dtype:string)
:param batch_size: Batch size(dtype:int)
:param early_stopping: After how many steps should the model stop training if the val_roc doesn't change(dtype:int)
:param description: Data desciption(Train,test, or validation; dtype:string)
"""
loss_function = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
optimizer.zero_grad()
train_acc_list, train_roc_list, train_loss_list, test_acc_list, test_roc_list, test_loss_list, val_acc_list, val_roc_list, val_loss_list, epoch_list = ([] for i in range(10))
#optimizer = torch.optim.SGD(model.parameters(), lr = 1e-2)
no_up = 0
best_val_roc = 0.0
for i in range(nb_epochs):
epoch_list.append(int(i+1))
print('epoch: %d start!' % int(i+1))
# Training the model
optimizer.zero_grad()
# pbar = tqdm(total=train_data.shape[0])
train_acc_list, train_roc_list, train_loss_list = train_supervised_model(model, train_data, loss_function, optimizer, unsup_word_to_idx, label_to_idx, i, train_acc_list, train_roc_list, train_loss_list, batch_size)
# Hyper-tuning the model
optimizer.zero_grad()
val_acc, val_roc, val_acc_list, val_roc_list, val_loss_list = evaluate_supervised_model(model, val_data, loss_function, unsup_word_to_idx, label_to_idx, val_acc_list, val_roc_list, val_loss_list, 'validation data')
# Testing the model
optimizer.zero_grad()
test_acc, test_roc, test_acc_list, test_roc_list, test_loss_list = evaluate_supervised_model(model,test_data, loss_function, unsup_word_to_idx, label_to_idx, test_acc_list, test_roc_list, test_loss_list, 'test data')
# Un-comment the below lines if you want to save models with smallest change in val_acc and val_roc
"""if (val_acc > best_val_acc) and (val_roc <= best_val_roc):
# Saving models on the basis of validation accuracy
best_val_acc = val_acc
torch.save(model.state_dict(), 'best_models_lstm_1500/acc_' + str(int(test_acc*100))
+ "_roc_" + str(int(test_roc*100)) + '.pt')
no_up = 0
elif (val_roc > best_val_roc) and (val_acc <= best_val_acc):
# Saving models on the basis of validation roc
best_val_roc = val_roc
torch.save(model.state_dict(), 'best_models_lstm_1500/roc_' + str(int(test_roc*100))
+ "_acc_" + str(int(test_acc*100)) + '.pt')
no_up = 0
elif (val_roc > best_val_roc) and (val_acc > best_val_acc):
# Saving models on the basis of validation roc and validation accuracy
best_val_roc = val_roc
best_val_acc = val_acc
torch.save(model.state_dict(), 'best_models_lstm_1500/combined_roc_' + str(int(test_roc*100))
+ "_acc_" + str(int(test_acc*100)) + '.pt')
no_up = 0"""
        if val_roc > best_val_roc:
            torch.save(model.state_dict(), save_dir)
            best_val_roc = val_roc
            no_up = 0
        else:
            # early stopping
            no_up += 1
            if no_up >= early_stopping:
                break
# Un-comment the below lines to generate training, test, and validation plots
"""
# Saving the lists in a dataframe so that it can be used to plot the variations wrt epochs.
df = pd.DataFrame({"epochs":epoch_list, "train_acc": train_acc_list, "train_roc": train_roc_list,
"train_loss":train_loss_list, "val_acc" : val_acc_list, "val_roc": val_roc_list, "val_loss" :
val_loss_list, "test_acc" : test_acc_list, "test_roc": test_roc_list, "test_loss" : test_loss_list})
plot = df.plot(x="epochs",y=["train_acc","test_acc","val_acc"],title= "Accuracy curve")
fig = plot.get_figure()
fig.savefig(description + "_acc.png")
plot = df.plot(x="epochs",y=["train_loss","test_loss","val_loss"],title="Loss curve")
fig = plot.get_figure()
fig.savefig(description + "_loss.png")
plot = df.plot(x="epochs",y=["train_roc","test_roc","val_roc"],title="ROC curve")
fig = plot.get_figure()
fig.savefig(description + "_roc.png")
"""
    return model
| [
"torch.no_grad",
"torch.FloatTensor",
"torch.LongTensor",
"torch.nn.BCEWithLogitsLoss"
] | 1.1.0 | vageeshSaxena/TX-Ray | 80f96012bd7ab4c789b037bbfa996fa26c160701 |
1.9 | # setting device on GPU if available, else CPU
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
# Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0) / 1024 ** 3, 1), 'GB')
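    # Note: torch.cuda.memory_cached() is deprecated in recent PyTorch releases
    # in favor of torch.cuda.memory_reserved(); it still works here with a warning.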
print('Cached: ', round(torch.cuda.memory_cached(0) / 1024 ** 3, 1), 'GB')
| [
"torch.cuda.memory_cached",
"torch.cuda.is_available",
"torch.cuda.memory_allocated",
"torch.cuda.get_device_name"
] | 1.9.0 | KirillPushkarev/valuenet | 54ff6351e55d0b7c74b3d9db9ea8f686e3d855d9 |
1.2 | import math
import torch
from torch import optim
from models import BaseVAE
from models.types_ import *
from utils import data_loader
import pytorch_lightning as pl
from torchvision import transforms
import torchvision.utils as vutils
from torchvision.datasets import CelebA
from torch.utils.data import DataLoader
from cesm import *
from Hurricane import *
from exaalt import EXAALT
from aramco import ARAMCO
from exafel import *
from nyx import *
from qmcpack import *
from miranda import *
from turbulence import *
class VAEXperiment(pl.LightningModule):
def __init__(self,
vae_model: BaseVAE,
params: dict) -> None:
super(VAEXperiment, self).__init__()
self.model = vae_model
self.params = params
self.curr_device = None
self.hold_graph = False
if 'epsilon' not in self.params.keys():
self.params['epsilon']=-1
try:
self.hold_graph = self.params['retain_first_backpass']
except:
pass
def forward(self, input: Tensor, **kwargs) -> Tensor:
return self.model(input, **kwargs)
def training_step(self, batch, batch_idx, optimizer_idx = 0,scalar=False):
real_img, labels = batch
self.curr_device = real_img.device
results = self.forward(real_img, labels = labels)
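        # M_N is passed to the model's loss_function as the KL weight, i.e. the
        # minibatch-to-dataset ratio.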
train_loss = self.model.loss_function(*results,
M_N = self.params['batch_size']/ self.num_train_imgs,
optimizer_idx=optimizer_idx,
batch_idx = batch_idx)
try:
self.logger.experiment.log({key: val.item() for key, val in train_loss.items()})
except:
pass
if scalar:
return train_loss['loss']
else:
return train_loss
def validation_step(self, batch, batch_idx, optimizer_idx = 0):
real_img, labels = batch
self.curr_device = real_img.device
results = self.forward(real_img, labels = labels)
val_loss = self.model.loss_function(*results,
M_N = self.params['batch_size']/ self.num_val_imgs,
optimizer_idx = optimizer_idx,
batch_idx = batch_idx)
return val_loss
def validation_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
tensorboard_logs = {'avg_val_loss': avg_loss}
self.sample_images()
return {'val_loss': avg_loss, 'log': tensorboard_logs}
def sample_images(self):
if self.params['dataset'] != 'celeba':
return
# Get sample reconstruction image
test_input, test_label = next(iter(self.sample_dataloader))
test_input = test_input.to(self.curr_device)
test_label = test_label.to(self.curr_device)
recons = self.model.generate(test_input, labels = test_label)
vutils.save_image(recons.data,
f"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/"
f"recons_{self.logger.name}_{self.current_epoch}.png",
normalize=True,
nrow=12)
# vutils.save_image(test_input.data,
# f"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/"
# f"real_img_{self.logger.name}_{self.current_epoch}.png",
# normalize=True,
# nrow=12)
try:
samples = self.model.sample(144,
self.curr_device,
labels = test_label)
vutils.save_image(samples.cpu().data,
f"{self.logger.save_dir}{self.logger.name}/version_{self.logger.version}/"
f"{self.logger.name}_{self.current_epoch}.png",
normalize=True,
nrow=12)
except:
pass
del test_input, recons #, samples
def configure_optimizers(self):
optims = []
scheds = []
optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
lr=self.params['LR'],
weight_decay=self.params['weight_decay'])
optims.append(optimizer)
# Check if more than 1 optimizer is required (Used for adversarial training)
try:
if self.params['LR_2'] is not None:
optimizer2 = optim.Adam(getattr(self.model,self.params['submodel']).parameters(),
lr=self.params['LR_2'])
optims.append(optimizer2)
except:
pass
try:
if self.params['scheduler_gamma'] is not None:
scheduler = optim.lr_scheduler.ExponentialLR(optims[0],
gamma = self.params['scheduler_gamma'])
scheds.append(scheduler)
# Check if another scheduler is required for the second optimizer
try:
if self.params['scheduler_gamma_2'] is not None:
scheduler2 = optim.lr_scheduler.ExponentialLR(optims[1],
gamma = self.params['scheduler_gamma_2'])
scheds.append(scheduler2)
except:
pass
return optims, scheds
except:
return optims
@data_loader
def train_dataloader(self):
transform = self.data_transforms()
self.params['epsilon']=float(self.params['epsilon'])
if self.params['dataset'] == 'celeba':
dataset = CelebA(root = self.params['data_path'],
split = "train",
transform=transform,
download=True)
elif self.params['dataset'] == 'cesm':
dataset=CLDHGH(path=self.params['data_path'],start=0,end=50,size=self.params['img_size'],normalize=True,epsilon=self.params['epsilon'])
elif self.params['dataset'] =='cesm_new':
dataset=CESM(path=self.params['data_path'],start=0,end=50,size=self.params['img_size'],field=self.params['field'],global_max=self.params['max'],global_min=self.params['min'],epsilon=self.params['epsilon'])
elif self.params['dataset'] =='nyx':
dataset=NYX(path=self.params['data_path'],start=self.params['start'],end=self.params['end'],size=self.params['img_size'],field=self.params['field'],log=self.params['log'],global_max=self.params['max'],global_min=self.params['min'],epsilon=self.params['epsilon'])
elif self.params['dataset'] =='exafel':
dataset=EXAFEL(path=self.params['data_path'],start=0,end=300,size=self.params['img_size'],global_max=self.params['max'],global_min=self.params['min'],epsilon=self.params['epsilon'])
elif self.params['dataset'] =='hurricane':
dataset=Hurricane(path=self.params['data_path'],start=1,end=41,size=self.params['img_size'],field=self.params['field'],global_max=self.params['max'],global_min=self.params['min'],epsilon=self.params['epsilon'])
elif self.params['dataset'] == 'exaalt':
dataset=EXAALT(path=self.params['data_path'],start=0,end=4000)
elif self.params['dataset'] == 'aramco':
dataset=ARAMCO(path=self.params['data_path'],start=self.params['start'],end=self.params['end'],size=self.params['img_size'],global_max=0.0386,global_min=-0.0512,cache_size=self.params['cache_size'],epsilon=self.params['epsilon'])
elif self.params['dataset'] == 'qmcpack':
dataset=QMCPACK(path=self.params['data_path'],start=self.params['start'],end=self.params['end'],size=self.params['img_size'],global_max=20.368572,global_min=-21.25822,epsilon=self.params['epsilon'])
elif self.params['dataset'] == 'miranda':
dataset=MIRANDA(path=self.params['data_path'],start=self.params['start'],end=self.params['end'],size=self.params['img_size'],global_max=3,global_min=0.99,epsilon=self.params['epsilon'])
elif self.params['dataset'] =='turbulence':
dataset=Turbulence(path=self.params['data_path'],field=self.params['field'],side_length=self.params['side_length'],start=self.params['start'],end=self.params['end'],size=self.params['img_size'],global_max=self.params['max'],global_min=self.params['min'],epsilon=self.params['epsilon'])
else:
raise ValueError('Undefined dataset type')
self.num_train_imgs = len(dataset)
return DataLoader(dataset,
batch_size= self.params['batch_size'],
shuffle = True,
drop_last=True,num_workers=0)
@data_loader
def val_dataloader(self):
transform = self.data_transforms()
self.params['epsilon']=float(self.params['epsilon'])
if self.params['dataset'] == 'celeba':
celeba=CelebA(root = self.params['data_path'],split = "test",transform=transform,download=True)
self.sample_dataloader = DataLoader(celeba,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] == 'cesm':
#print(self.params['img_size'])
dataset=CLDHGH(path=self.params['data_path'],start=50,end=52,size=self.params['img_size'],normalize=True)
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] =='cesm_new':
dataset=CESM(path=self.params['data_path'],start=50,end=52,size=self.params['img_size'],field=self.params['field'],global_max=self.params['max'],global_min=self.params['min'])
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] =='nyx':
dataset=NYX(path=self.params['data_path'],start=3,end=4,size=self.params['img_size'],field=self.params['field'],log=self.params['log'],global_max=self.params['max'],global_min=self.params['min'],epsilon=self.params['epsilon'])
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] =='exafel':
dataset=EXAFEL(path=self.params['data_path'],start=300,end=310,size=self.params['img_size'],global_max=self.params['max'],global_min=self.params['min'])
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] =='hurricane':
dataset=Hurricane(path=self.params['data_path'],start=41,end=42,size=self.params['img_size'],field=self.params['field'],global_max=self.params['max'],global_min=self.params['min'])
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] == 'exaalt':
dataset=EXAALT(path=self.params['data_path'],start=4000,end=4400)
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] == 'aramco':
dataset=ARAMCO(path=self.params['data_path'],start=1500,end=1503,size=self.params['img_size'],global_max=0.0386,global_min=-0.0512,cache_size=self.params['cache_size'])
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] == 'qmcpack':
dataset=QMCPACK(path=self.params['data_path'],start=0,end=2,size=self.params['img_size'],global_max=20.368572,global_min=-21.25822,epsilon=self.params['epsilon'])
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] == 'miranda':
dataset=MIRANDA(path=self.params['data_path'],start=0,end=1,size=self.params['img_size'],global_max=3,global_min=0.99,epsilon=self.params['epsilon'])
self.sample_dataloader = DataLoader(dataset,
batch_size= 144,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
elif self.params['dataset'] == 'turbulence':
dataset=Turbulence_Val(self.params['img_size'])
self.sample_dataloader = DataLoader(dataset,
batch_size= 1,
shuffle = True,
drop_last=True)
self.num_val_imgs = len(self.sample_dataloader)
else:
raise ValueError('Undefined dataset type')
return self.sample_dataloader
def data_transforms(self):
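        # SetRange rescales inputs from [0, 1] to [-1, 1]; SetScale normalizes each column
        # to sum to one (defined here, but only SetRange is applied below).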
SetRange = transforms.Lambda(lambda X: 2 * X - 1.)
SetScale = transforms.Lambda(lambda X: X/X.sum(0).expand_as(X))
if self.params['dataset'] == 'celeba':
transform = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.CenterCrop(148),
transforms.Resize(self.params['img_size']),
transforms.ToTensor(),
SetRange])
else:
transform = SetRange
#raise ValueError('Undefined dataset type')
return transform
| [
"torch.optim.lr_scheduler.ExponentialLR",
"torch.stack",
"torch.utils.data.DataLoader"
] | 1.2.0 | Meso272/PyTorch-VAE | b1f80082a92c706969a63162ae083b9f7d15d9aa |
0.4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""An example of how to pretrain a transformer encoder with BERT."""
import collections
import itertools
import typing
import gensim.models.word2vec as word2vec
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import transformer
import transformer.bert as bert
__author__ = "Patrick Hohenecker"
__copyright__ = (
"Copyright (c) 2019, Patrick Hohenecker\n"
"All rights reserved.\n"
"\n"
"Redistribution and use in source and binary forms, with or without\n"
"modification, are permitted provided that the following conditions are met:\n"
"\n"
"1. Redistributions of source code must retain the above copyright notice, this\n"
" list of conditions and the following disclaimer.\n"
"2. Redistributions in binary form must reproduce the above copyright notice,\n"
" this list of conditions and the following disclaimer in the documentation\n"
" and/or other materials provided with the distribution.\n"
"\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n"
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n"
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n"
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n"
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n"
"(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n"
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n"
"ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n"
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n"
"SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
)
__license__ = "BSD-2-Clause"
__version__ = "2019.1"
__date__ = "23 Apr 2019"
__maintainer__ = "Patrick Hohenecker"
__email__ = "[email protected]"
__status__ = "Development"
# ==================================================================================================================== #
# C O N S T A N T S #
# ==================================================================================================================== #
Token = collections.namedtuple("Token", ["index", "word"])
"""This is used to store index-word pairs."""
DATA = [
"where the streets have no name",
"we ' re still building then burning down love",
"burning down love",
"and when i go there , i go there with you",
"it ' s all i can do"
]
"""list[str]: The already preprocessed training data."""
# SPECIAL TOKENS #####################################################################################################
SOS = Token(0, "<sos>")
"""The start-of-sequence token."""
EOS = Token(1, "<eos>")
"""The end-of-sequence token."""
PAD = Token(2, "<pad>")
"""The padding token."""
MASK = Token(3, "<mask>")
"""The mask token."""
# MODEL CONFIG #######################################################################################################
DIMENSIONS = (256, 32, 32)
"""tuple[int]: A tuple of d_model, d_k, d_v."""
DROPOUT_RATE = 0.1
"""float: The used dropout rate."""
EMBEDDING_SIZE = DIMENSIONS[0]
"""int: The used embedding size."""
NUM_LAYERS = 6
"""int: The number of layers in the trained transformer encoder."""
# TRAINING DETAILS ###################################################################################################
GPU = False # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< SET THIS TO True, IF YOU ARE USING A MACHINE WITH A GPU!
"""bool: Indicates whether to make use of a GPU."""
LEARNING_RATE = 0.0001
"""float: The used learning rate."""
NUM_EPOCHS = 500
"""int: The total number of training epochs."""
NUM_HEADS = 6
"""int: The number of attention heads to use."""
# ==================================================================================================================== #
# H E L P E R F U N C T I O N S #
# ==================================================================================================================== #
def prepare_data() -> typing.Tuple[typing.List[typing.List[str]], collections.OrderedDict]:
"""Preprocesses the training data, and creates the vocabulary.
Returns:
list[list[str]]: The training data as list of samples, each of which is a list of words.
collections.OrderedDict: The vocabulary as an ``OrderedDict`` from words to indices.
"""
# gather all words that appear in the data
all_words = set()
for sample in DATA:
all_words.update(sample.split(" "))
# create the vocabulary
vocab = collections.OrderedDict(
[
(SOS.word, SOS.index),
(EOS.word, EOS.index),
(PAD.word, PAD.index),
(MASK.word, MASK.index)
]
)
for idx, word in enumerate(sorted(all_words)):
vocab[word] = idx + 4
# split, add <sos>...<eos>, and pad the dataset
data = [[SOS.word] + sample.split(" ") + [EOS.word] for sample in DATA]
max_len = max(len(sample) for sample in data)
data = [sample + ([PAD.word] * (max_len - len(sample))) for sample in data]
return data, vocab
# ==================================================================================================================== #
# M A I N #
# ==================================================================================================================== #
def main():
# fetch the training data
data, vocab = prepare_data()
# create the word embeddings with word2vec and positional embeddings
emb_model = word2vec.Word2Vec(
sentences=data,
size=EMBEDDING_SIZE,
min_count=1
)
for word in vocab.keys():
if word not in emb_model.wv:
emb_model.wv[word] = np.zeros((EMBEDDING_SIZE,))
word_emb_mat = nn.Parameter(
data=torch.FloatTensor([emb_model[word] for word in vocab.keys()]),
requires_grad=False
)
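    # The pretrained word2vec embeddings are kept frozen (requires_grad=False), while the
    # positional embeddings created below remain trainable during pretraining.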
word_emb = nn.Embedding(len(vocab), EMBEDDING_SIZE)
word_emb.weight = word_emb_mat
pos_emb = nn.Embedding(len(data[0]), EMBEDDING_SIZE)
    pos_emb.weight.requires_grad = True
# turn the dataset into a tensor of word indices
data = torch.LongTensor([[vocab[word] for word in sample] for sample in data])
# create the encoder, the pretraining loss, and the optimizer
encoder = transformer.Encoder(
NUM_LAYERS, # num_layers
NUM_HEADS, # num_heads
*DIMENSIONS, # dim_model / dim_keys / dim_values
DROPOUT_RATE, # residual_dropout
DROPOUT_RATE, # attention_dropout
PAD.index # pad_index
)
loss = bert.MLMLoss(
encoder,
word_emb,
pos_emb,
MASK.index
)
optimizer = optim.Adam(
itertools.chain(encoder.parameters(), loss.parameters()),
lr=LEARNING_RATE
)
# move to GPU, if possible
if GPU:
data = data.cuda()
encoder.cuda()
loss.cuda() # -> also moves embeddings to the GPU
# pretrain the encoder
for epoch in range(NUM_EPOCHS):
# compute the loss
optimizer.zero_grad()
current_loss = loss(data)
print("EPOCH", epoch + 1, ": LOSS =", current_loss.item())
# update the model
current_loss.backward()
optimizer.step()
if __name__ == "__main__":
main()
| [
"torch.LongTensor"
] | 0.4.1 | phohenecker/pytorch-transformer | 211406d82ac04a7b473bcdebda77cc3c2e9af0cf |
1.8 | from __future__ import division
import re
from collections import OrderedDict, defaultdict
from functools import partial
try:
import apex
except:
print("No APEX!")
import numpy as np
import torch
from det3d.builder import _create_learning_rate_scheduler
# from det3d.datasets.kitti.eval_hooks import KittiDistEvalmAPHook, KittiEvalmAPHookV2
from det3d.core import DistOptimizerHook
from det3d.datasets import DATASETS, build_dataloader
from det3d.solver.fastai_optim import OptimWrapper
from det3d.torchie.trainer import DistSamplerSeedHook, Trainer, obj_from_dict
from det3d.utils.print_utils import metric_to_str
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from .env import get_root_logger
def example_to_device(example, device=None, non_blocking=False) -> dict:
assert device is not None
example_torch = {}
float_names = ["voxels", "bev_map"]
for k, v in example.items():
if k in ["anchors", "anchors_mask", "reg_targets", "reg_weights", "labels"]:
example_torch[k] = [res.to(device, non_blocking=non_blocking) for res in v]
elif k in [
"voxels",
"bev_map",
"coordinates",
"num_points",
"points",
"num_voxels",
"cyv_voxels",
"cyv_num_voxels",
"cyv_coordinates",
"cyv_num_points"
]:
example_torch[k] = v.to(device, non_blocking=non_blocking)
elif k == "calib":
calib = {}
for k1, v1 in v.items():
# calib[k1] = torch.tensor(v1, dtype=dtype, device=device)
calib[k1] = torch.tensor(v1).to(device, non_blocking=non_blocking)
example_torch[k] = calib
else:
example_torch[k] = v
return example_torch
def parse_losses(losses):
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError("{} is not a tensor or list of tensors".format(loss_name))
loss = sum(_value for _key, _value in log_vars.items() if "loss" in _key)
log_vars["loss"] = loss
for name in log_vars:
log_vars[name] = log_vars[name].item()
return loss, log_vars
def parse_second_losses(losses):
log_vars = OrderedDict()
loss = sum(losses["loss"])
for loss_name, loss_value in losses.items():
if loss_name == "loc_loss_elem":
log_vars[loss_name] = [[i.item() for i in j] for j in loss_value]
else:
log_vars[loss_name] = [i.item() for i in loss_value]
return loss, log_vars
def batch_processor(model, data, train_mode, **kwargs):
if "local_rank" in kwargs:
device = torch.device(kwargs["local_rank"])
else:
device = None
# data = example_convert_to_torch(data, device=device)
example = example_to_device(data, device, non_blocking=False)
del data
if train_mode:
losses = model(example, return_loss=True)
loss, log_vars = parse_second_losses(losses)
outputs = dict(
loss=loss, log_vars=log_vars, num_samples=len(example["anchors"][0])
)
return outputs
else:
return model(example, return_loss=False)
def batch_processor_ensemble(model1, model2, data, train_mode, **kwargs):
assert 0, 'deprecated'
if "local_rank" in kwargs:
device = torch.device(kwargs["local_rank"])
else:
device = None
assert train_mode is False
example = example_to_device(data, device, non_blocking=False)
del data
preds_dicts1 = model1.pred_hm(example)
preds_dicts2 = model2.pred_hm(example)
num_task = len(preds_dicts1)
merge_list = []
# take the average
for task_id in range(num_task):
preds_dict1 = preds_dicts1[task_id]
preds_dict2 = preds_dicts2[task_id]
for key in preds_dict1.keys():
preds_dict1[key] = (preds_dict1[key] + preds_dict2[key]) / 2
merge_list.append(preds_dict1)
    # now get the final prediction
return model1.pred_result(example, merge_list)
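# Flatten the module tree into its leaf modules so that the fastai-style OptimWrapper
# below can treat the whole model as a single layer group.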
def flatten_model(m):
return sum(map(flatten_model, m.children()), []) if len(list(m.children())) else [m]
def get_layer_groups(m):
return [nn.Sequential(*flatten_model(m))]
def build_one_cycle_optimizer(model, optimizer_config):
if optimizer_config.fixed_wd:
optimizer_func = partial(
torch.optim.Adam, betas=(0.9, 0.99), amsgrad=optimizer_config.amsgrad
)
else:
        optimizer_func = partial(torch.optim.Adam, amsgrad=optimizer_config.amsgrad)
optimizer = OptimWrapper.create(
optimizer_func,
3e-3, # TODO: CHECKING LR HERE !!!
get_layer_groups(model),
wd=optimizer_config.wd,
true_wd=optimizer_config.fixed_wd,
bn_wd=True,
)
return optimizer
def build_optimizer(model, optimizer_cfg):
"""Build optimizer from configs.
Args:
model (:obj:`nn.Module`): The model with parameters to be optimized.
optimizer_cfg (dict): The config dict of the optimizer.
Positional fields are:
- type: class name of the optimizer.
- lr: base learning rate.
Optional fields are:
- any arguments of the corresponding optimizer type, e.g.,
weight_decay, momentum, etc.
- paramwise_options: a dict with 3 accepted fields
(bias_lr_mult, bias_decay_mult, norm_decay_mult).
`bias_lr_mult` and `bias_decay_mult` will be multiplied to
the lr and weight decay respectively for all bias parameters
(except for the normalization layers), and
`norm_decay_mult` will be multiplied to the weight decay
for all weight and bias parameters of normalization layers.
Returns:
torch.optim.Optimizer: The initialized optimizer.
"""
if hasattr(model, "module"):
model = model.module
optimizer_cfg = optimizer_cfg.copy()
paramwise_options = optimizer_cfg.pop("paramwise_options", None)
# if no paramwise option is specified, just use the global setting
if paramwise_options is None:
return obj_from_dict(
optimizer_cfg, torch.optim, dict(params=model.parameters())
)
else:
assert isinstance(paramwise_options, dict)
# get base lr and weight decay
base_lr = optimizer_cfg["lr"]
base_wd = optimizer_cfg.get("weight_decay", None)
# weight_decay must be explicitly specified if mult is specified
if (
"bias_decay_mult" in paramwise_options
or "norm_decay_mult" in paramwise_options
):
assert base_wd is not None
# get param-wise options
bias_lr_mult = paramwise_options.get("bias_lr_mult", 1.0)
bias_decay_mult = paramwise_options.get("bias_decay_mult", 1.0)
norm_decay_mult = paramwise_options.get("norm_decay_mult", 1.0)
# set param-wise lr and weight decay
params = []
for name, param in model.named_parameters():
param_group = {"params": [param]}
if not param.requires_grad:
# FP16 training needs to copy gradient/weight between master
# weight copy and model weight, it is convenient to keep all
# parameters here to align with model.parameters()
params.append(param_group)
continue
# for norm layers, overwrite the weight decay of weight and bias
# TODO: obtain the norm layer prefixes dynamically
if re.search(r"(bn|gn)(\d+)?.(weight|bias)", name):
if base_wd is not None:
param_group["weight_decay"] = base_wd * norm_decay_mult
# for other layers, overwrite both lr and weight decay of bias
elif name.endswith(".bias"):
param_group["lr"] = base_lr * bias_lr_mult
if base_wd is not None:
param_group["weight_decay"] = base_wd * bias_decay_mult
# otherwise use the global settings
params.append(param_group)
optimizer_cls = getattr(torch.optim, optimizer_cfg.pop("type"))
return optimizer_cls(params, **optimizer_cfg)
def train_detector(model, dataset, cfg, distributed=False, validate=False, logger=None, tb_writer=None):
if logger is None:
logger = get_root_logger(cfg.log_level)
mix_prec = cfg.data.mix_prec
print("==============================Mix Precision ? {}====================================".format(mix_prec))
# start training
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, dist=distributed
)
for ds in dataset
]
total_steps = cfg.total_epochs * len(data_loaders[0])
print(f"total_steps: {total_steps}")
if distributed:
model = apex.parallel.convert_syncbn_model(model)
if cfg.lr_config.type == "one_cycle":
# build trainer
optimizer = build_one_cycle_optimizer(model, cfg.optimizer)
lr_scheduler = _create_learning_rate_scheduler(
optimizer, cfg.lr_config, total_steps
)
cfg.lr_config = None
else:
optimizer = build_optimizer(model, cfg.optimizer)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=cfg.drop_step, gamma=.1)
# lr_scheduler = None
cfg.lr_config = None
# put model on gpus
if distributed:
model = DistributedDataParallel(
model.cuda(cfg.local_rank),
device_ids=[cfg.local_rank],
output_device=cfg.local_rank,
# broadcast_buffers=False,
find_unused_parameters=True,
)
else:
model = model.cuda()
logger.info(f"model structure: {model}")
trainer = Trainer(
model, batch_processor, optimizer, lr_scheduler, cfg.work_dir, cfg.log_level, mix_prec=mix_prec, tb_writer=tb_writer,
)
if distributed:
optimizer_config = DistOptimizerHook(**cfg.optimizer_config)
else:
optimizer_config = cfg.optimizer_config
# register hooks
trainer.register_training_hooks(
cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config
)
if distributed:
trainer.register_hook(DistSamplerSeedHook())
# # register eval hooks
# if validate:
# val_dataset_cfg = cfg.data.val
# eval_cfg = cfg.get('evaluation', {})
# dataset_type = DATASETS.get(val_dataset_cfg.type)
# trainer.register_hook(
# KittiEvalmAPHookV2(val_dataset_cfg, **eval_cfg))
if cfg.resume_from:
trainer.resume(cfg.resume_from, resume_optimizer=cfg.resume_optimizer)
elif cfg.load_from:
trainer.load_checkpoint(cfg.load_from)
trainer.run(data_loaders, cfg.workflow, cfg.total_epochs, local_rank=cfg.local_rank)
| [
"torch.device",
"torch.tensor",
"torch.optim.lr_scheduler.MultiStepLR"
] | 1.8.0 | yukke42/CenterPointTensorRT | c06ec5da881b4f44f22f9e4b67bebbd35b7d1ed3 |
1.8 | # ------------------------------------------------------------------------------
# Portions of this code are from
# det3d (https://github.com/poodarchu/Det3D/tree/56402d4761a5b73acd23080f537599b0888cce07)
# Copyright (c) 2019 朱本金
# Licensed under the MIT License
# ------------------------------------------------------------------------------
import logging
from collections import defaultdict
from det3d.core import box_torch_ops
import torch
from det3d.torchie.cnn import kaiming_init
from torch import double, nn
from det3d.models.losses.centernet_loss import FastFocalLoss, RegLoss, BinRotLoss
from det3d.models.utils import Sequential, get_binrot_alpha, get_binrot_target
from ..registry import HEADS
from . import SepHead,DCNSepHead
import copy
try:
from det3d.ops.dcn import DeformConv
except:
print("Deformable Convolution not built!")
from det3d.core.utils.circle_nms_jit import circle_nms
import numpy as np
def _circle_nms(boxes, min_radius, post_max_size=83):
"""
NMS according to center distance
"""
keep = np.array(circle_nms(boxes.cpu().numpy(), thresh=min_radius))[:post_max_size]
keep = torch.from_numpy(keep).long().to(boxes.device)
return keep
class FeatureAdaption(nn.Module):
"""Feature Adaption Module.
Feature Adaption Module is implemented based on DCN v1.
It uses anchor shape prediction rather than feature map to
predict offsets of deformable conv layer.
Args:
in_channels (int): Number of channels in the input feature map.
out_channels (int): Number of channels in the output feature map.
kernel_size (int): Deformable conv kernel size.
deformable_groups (int): Deformable conv group size.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
deformable_groups=4):
super(FeatureAdaption, self).__init__()
offset_channels = kernel_size * kernel_size * 2
self.conv_offset = nn.Conv2d(
in_channels, deformable_groups * offset_channels, 1, bias=True)
self.conv_adaption = DeformConv(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
deformable_groups=deformable_groups)
self.relu = nn.ReLU(inplace=True)
self.init_offset()
def init_offset(self):
self.conv_offset.weight.data.zero_()
def forward(self, x,):
offset = self.conv_offset(x)
x = self.relu(self.conv_adaption(x, offset))
return x
@HEADS.register_module
class CenterHeadBinRot(nn.Module):
def __init__(
self,
in_channels=[128,],
tasks=[],
dataset='nuscenes',
weight=0.25,
code_weights=[],
common_heads=dict(),
logger=None,
init_bias=-2.19,
share_conv_channel=64,
num_hm_conv=2,
dcn_head=False,
is_bin_rot = False,
is_iou_aux = False,
):
super(CenterHeadBinRot, self).__init__()
# tasks = [ dict(num_class=3, class_names=['VEHICLE', 'PEDESTRIAN', 'CYCLIST']), ]
# TODO num_classes : [3]
num_classes = [len(t["class_names"]) for t in tasks]
# TODO class_names : ['VEHICLE', 'PEDESTRIAN', 'CYCLIST']
self.class_names = [t["class_names"] for t in tasks]
self.code_weights = code_weights
self.weight = weight # weight between hm loss and loc loss
self.dataset = dataset
self.in_channels = in_channels
self.num_classes = num_classes
self.is_bin_rot = is_bin_rot
self.is_iou_aux = is_iou_aux
self.crit = FastFocalLoss()
self.crit_reg = RegLoss()
self.crit_rot = BinRotLoss()
if is_bin_rot :
assert common_heads['rot'][0] == 8, "BinRot head needs to set 8 channels!"
# TODO common_heads={'reg': (2, 2), 'height': (1, 2), 'dim':(3, 2), 'rot':(8, 2)}
# TODO box_n_dim = 7
self.box_n_dim = 9 if 'vel' in common_heads else 7
self.use_direction_classifier = False
if not logger:
logger = logging.getLogger("CenterHeadBinRot")
self.logger = logger
logger.info(
f"num_classes: {num_classes}"
)
# a shared convolution
self.shared_conv = nn.Sequential(
nn.Conv2d(in_channels, share_conv_channel,
kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(share_conv_channel),
nn.ReLU(inplace=True)
)
self.tasks = nn.ModuleList()
print("Use HM Bias: ", init_bias)
if dcn_head:
print("Use Deformable Convolution in the CenterHead!")
for num_cls in num_classes:
heads = copy.deepcopy(common_heads)
if not dcn_head:
# num_cls = 3, num_hm_conv = 2
heads.update(dict(hm=(num_cls, num_hm_conv)))
# share_conv_channel = 64 , init_bias = -2.19
self.tasks.append(
SepHead(share_conv_channel, heads, bn=True, init_bias=init_bias, final_kernel=3))
else:
self.tasks.append(
DCNSepHead(share_conv_channel, num_cls, heads, bn=True, init_bias=init_bias, final_kernel=3))
logger.info("Finish CenterHead Initialization")
def forward(self, x, **kwargs):
ret_dicts = []
x = self.shared_conv(x)
for task in self.tasks:
ret_dicts.append(task(x))
return ret_dicts
def _sigmoid(self, x):
y = torch.clamp(x.sigmoid_(), min=1e-4, max=1-1e-4)
return y
def _compute_binrot_loc_loss(self,preds_dict,target_box,example,task_id):
# Regression loss (SmoothL1 Loss ) for dimension, offset, height, rotation
box_loss = self.crit_reg(preds_dict['anno_box'][:,:6],\
example['mask'][task_id], example['ind'][task_id], \
target_box[...,:-2])
# Specifically for the bin rot loss
target_bin, target_res = get_binrot_target(target_box[...,-2],target_box[...,-1])
rot_loss = self.crit_rot(preds_dict['anno_box'][:,6:14],\
example['mask'][task_id], example['ind'][task_id],
target_bin,target_res)
box_loss *= box_loss.new_tensor(self.code_weights[:6])
rot_loss = rot_loss * self.code_weights[6]
# loc_loss = (box_loss*box_loss.new_tensor(self.code_weights[:-8])).sum()
loc_loss = box_loss.sum() + rot_loss
return box_loss, rot_loss, loc_loss
# TODO : For Training
def loss(self, example, preds_dicts, **kwargs):
rets = []
for task_id, preds_dict in enumerate(preds_dicts):
# heatmap focal loss
# heads = {'reg': (2, 2), 'height': (1, 2), 'dim': (3, 2), 'rot': (8, 2), 'hm': (3, 2)}
preds_dict['hm'] = self._sigmoid(preds_dict['hm'])
# TODO : FastFocalLoss ,defined in CornerNet, see in file models/losses/centernet_loss.py FastFocalLoss
hm_loss = self.crit(preds_dict['hm'], example['hm'][task_id], example['ind'][task_id], example['mask'][task_id], example['cat'][task_id])
target_box = example['anno_box'][task_id]
# reconstruct the anno_box from multiple reg heads
if 'vel' in preds_dict:
preds_dict['anno_box'] = torch.cat((preds_dict['reg'], preds_dict['height'], preds_dict['dim'],
preds_dict['vel'], preds_dict['rot']), dim=1)
else:
preds_dict['anno_box'] = torch.cat((preds_dict['reg'], preds_dict['height'], preds_dict['dim'],
preds_dict['rot']), dim=1)
target_box = target_box[..., [0, 1, 2, 3, 4, 5, -2,-1]] # remove vel target
ret = {}
if self.is_bin_rot :
box_loss, rot_loss, loc_loss = self._compute_binrot_loc_loss(preds_dict,target_box,example,task_id)
else :
box_loss = self.crit_reg(preds_dict['anno_box'], example['mask'][task_id], example['ind'][task_id], target_box)
loc_loss = (box_loss*box_loss.new_tensor(self.code_weights)).sum()
rot_loss = box_loss[6:8]
loss = hm_loss + self.weight*loc_loss
ret.update({'loss': loss, 'hm_loss': hm_loss.detach().cpu(), 'loc_loss':loc_loss, 'loc_loss_elem': box_loss.detach().cpu()[:6],\
'rot_loss' : rot_loss.detach().cpu(), 'num_positive': example['mask'][task_id].float().sum()})
rets.append(ret)
"""convert batch-key to key-batch
"""
rets_merged = defaultdict(list)
for ret in rets:
for k, v in ret.items():
rets_merged[k].append(v)
return rets_merged
# TODO : For Inference
@torch.no_grad()
def predict(self, example, preds_dicts, test_cfg, **kwargs):
"""decode, nms, then return the detection result. Additionaly support double flip testing
"""
# gather decoded outputs from each task head
rets = []
metas = []
double_flip = test_cfg.get('double_flip', False)
post_center_range = test_cfg.post_center_limit_range
if len(post_center_range) > 0:
post_center_range = torch.tensor(
post_center_range,
dtype=preds_dicts[0]['hm'].dtype,
device=preds_dicts[0]['hm'].device,
)
for task_id, preds_dict in enumerate(preds_dicts):
# convert N C H W to N H W C
for key, val in preds_dict.items():
preds_dict[key] = val.permute(0, 2, 3, 1).contiguous()
batch_size = preds_dict['hm'].shape[0]
if "metadata" not in example or len(example["metadata"]) == 0:
meta_list = [None] * batch_size
else:
meta_list = example["metadata"]
batch_hm = torch.sigmoid(preds_dict['hm'])
batch_dim = torch.exp(preds_dict['dim'])
if self.is_bin_rot:
batch_rot = get_binrot_alpha(preds_dict['rot'])
else:
batch_rots = preds_dict['rot'][..., 0:1]
batch_rotc = preds_dict['rot'][..., 1:2]
batch_rot = torch.atan2(batch_rots, batch_rotc)
batch_reg = preds_dict['reg']
batch_hei = preds_dict['height']
batch, H, W, num_cls = batch_hm.size()
batch_reg = batch_reg.reshape(batch, H*W, 2)
batch_hei = batch_hei.reshape(batch, H*W, 1)
batch_rot = batch_rot.reshape(batch, H*W, 1)
batch_dim = batch_dim.reshape(batch, H*W, 3)
batch_hm = batch_hm.reshape(batch, H*W, num_cls)
ys, xs = torch.meshgrid([torch.arange(0, H), torch.arange(0, W)])
ys = ys.view(1, H, W).repeat(batch, 1, 1).to(batch_hm)
xs = xs.view(1, H, W).repeat(batch, 1, 1).to(batch_hm)
xs = xs.view(batch, -1, 1) + batch_reg[:, :, 0:1]
ys = ys.view(batch, -1, 1) + batch_reg[:, :, 1:2]
xs = xs * test_cfg.out_size_factor * test_cfg.voxel_size[0] + test_cfg.pc_range[0]
ys = ys * test_cfg.out_size_factor * test_cfg.voxel_size[1] + test_cfg.pc_range[1]
if 'vel' in preds_dict:
batch_vel = preds_dict['vel']
batch_vel = batch_vel.reshape(batch, H*W, 2)
batch_box_preds = torch.cat([xs, ys, batch_hei, batch_dim, batch_vel, batch_rot], dim=2)
else:
batch_box_preds = torch.cat([xs, ys, batch_hei, batch_dim, batch_rot], dim=2)
metas.append(meta_list)
if test_cfg.get('per_class_nms', False):
pass # TODO TODO TODO : NEED TO ADD HERE CLS_SPECIFIC NMS
else:
rets.append(self.post_processing(batch_box_preds, batch_hm, test_cfg, post_center_range, task_id))
# Merge branches results
ret_list = []
num_samples = len(rets[0])
for i in range(num_samples):
ret = {}
for k in rets[0][i].keys():
if k in ["label_preds"]:
flag = 0
for j, num_class in enumerate(self.num_classes):
rets[j][i][k] += flag
flag += num_class
ret[k] = torch.cat([ret[i][k] for ret in rets])
else:
ret[k] = torch.cat([ret[i][k] for ret in rets])
ret['metadata'] = metas[0][i]
ret_list.append(ret)
return ret_list
@torch.no_grad()
def post_processing(self, batch_box_preds, batch_hm, test_cfg, post_center_range, task_id):
batch_size = len(batch_hm)
prediction_dicts = []
for i in range(batch_size):
box_preds = batch_box_preds[i]
# batch_hm : (batch, H*W, 3 )
hm_preds = batch_hm[i]
scores, labels = torch.max(hm_preds, dim=-1)
score_mask = scores > test_cfg.score_threshold
distance_mask = (box_preds[..., :3] >= post_center_range[:3]).all(1) \
& (box_preds[..., :3] <= post_center_range[3:]).all(1)
mask = distance_mask & score_mask
box_preds = box_preds[mask]
scores = scores[mask]
labels = labels[mask]
boxes_for_nms = box_preds[:, [0, 1, 2, 3, 4, 5, -1]]
if test_cfg.get('circular_nms', False):
centers = boxes_for_nms[:, [0, 1]]
boxes = torch.cat([centers, scores.view(-1, 1)], dim=1)
selected = _circle_nms(boxes, min_radius=test_cfg.min_radius[task_id], post_max_size=test_cfg.nms.nms_post_max_size)
else:
selected = box_torch_ops.rotate_nms_pcdet(boxes_for_nms.float(), scores.float(),
thresh=test_cfg.nms.nms_iou_threshold,
pre_maxsize=test_cfg.nms.nms_pre_max_size,
post_max_size=test_cfg.nms.nms_post_max_size)
selected_boxes = box_preds[selected]
selected_scores = scores[selected]
selected_labels = labels[selected]
prediction_dict = {
'box3d_lidar': selected_boxes,
'scores': selected_scores,
'label_preds': selected_labels,
}
prediction_dicts.append(prediction_dict)
return prediction_dicts
| [
"torch.sigmoid",
"torch.cat",
"torch.nn.ModuleList",
"torch.max",
"torch.arange",
"torch.no_grad",
"torch.nn.BatchNorm2d",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.tensor",
"torch.atan2",
"torch.exp"
] | 1.8.0 | yukke42/CenterPointTensorRT | c06ec5da881b4f44f22f9e4b67bebbd35b7d1ed3 |
1.7 | import argparse
import glob
import logging
import os
import random
import timeit
import numpy as np
import torch
from transformers import (MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME,
AdamW, AutoConfig, AutoModelForQuestionAnswering,
AutoTokenizer, get_linear_schedule_with_warmup,
squad_convert_examples_to_features)
from evaluate import do_eval
from training import do_train
from utils import set_seed
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def setting(args):
if args.doc_stride >= args.max_seq_length - args.max_query_length:
logger.warning(
"WARNING - You've set a doc stride which may be superior to the document length in some "
"examples. This could result in errors when building features from the examples. Please reduce the doc "
"stride or increase the maximum length to ensure the features are correctly built."
)
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s\n",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\n",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
args.model_type = args.model_type.lower()
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = AutoModelForQuestionAnswering.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
# Make sure only the first process in distributed training will download model & vocab
torch.distributed.barrier()
model.to(args.device)
logger.info("Training/evaluation parameters %s\n", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set.
# Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
# remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, "einsum")
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
return args, tokenizer, model
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_TYPES),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
help="The input data dir. Should contain the .json files for the task."
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--train_file",
default=None,
type=str,
help="The input training file. If a data dir is specified, will look for the file there"
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--predict_file",
default=None,
type=str,
help="The input evaluation file. If a data dir is specified, will look for the file there"
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.",
)
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.",
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument(
"--max_query_length",
default=64,
type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.",
)
parser.add_argument("--do_train", action="store_true",
help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true",
help="Whether to run eval on the dev set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=8,
type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument("--learning_rate", default=5e-5,
type=float, help="The initial learning rate for Adam.")
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--weight_decay", default=0.0,
type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8,
type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0,
type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.",
)
parser.add_argument(
"--verbose_logging",
action="store_true",
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.",
)
parser.add_argument(
"--lang_id",
default=0,
type=int,
help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)",
)
parser.add_argument("--logging_steps", type=int,
default=500, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500,
help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true",
help="Whether not to use CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42,
help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--server_ip", type=str, default="",
help="Can be used for distant debugging.")
parser.add_argument("--server_port", type=str, default="",
help="Can be used for distant debugging.")
parser.add_argument("--threads", type=int, default=1,
help="multiple threads for converting example to features")
args = parser.parse_args()
# ===========================================================================
args, tokenizer, model = setting(args)
# Training
do_train(args, model, tokenizer)
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
do_eval(args, tokenizer)
if __name__ == "__main__":
main()
| [
"torch.device",
"torch.distributed.init_process_group",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.distributed.barrier"
] | 1.7.0 | jojowither/Question-Answer-Project | f44ca52acc784e13295cb977cedb513854fac814 |
1.10 | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from datetime import date
import logging
import numpy as np
import os
# A hack to get OpenMM and PyTorch to peacefully coexist
os.environ["OPENMM_DEFAULT_PLATFORM"] = "OpenCL"
import pickle
import random
import sys
import time
import torch
from openfold.config import model_config
from openfold.data import templates, feature_pipeline, data_pipeline
from openfold.model.model import AlphaFold
from openfold.model.primitives import Attention, GlobalAttention
from openfold.np import residue_constants, protein
import openfold.np.relax.relax as relax
from openfold.utils.import_weights import (
import_jax_weights_,
)
from openfold.utils.torchscript_utils import script_submodules_
from openfold.utils.tensor_utils import (
tensor_tree_map,
)
from scripts.utils import add_data_args
def script_primitives_(model):
script_submodules_(model, [Attention, GlobalAttention])
def main(args):
config = model_config(args.model_name)
model = AlphaFold(config)
model = model.eval()
import_jax_weights_(model, args.param_path)
script_primitives_(model)
model = model.to(args.model_device)
template_featurizer = templates.TemplateHitFeaturizer(
mmcif_dir=args.template_mmcif_dir,
max_template_date=args.max_template_date,
max_hits=config.data.predict.max_templates,
kalign_binary_path=args.kalign_binary_path,
release_dates_path=None,
obsolete_pdbs_path=args.obsolete_pdbs_path
)
use_small_bfd=(args.bfd_database_path is None)
data_processor = data_pipeline.DataPipeline(
template_featurizer=template_featurizer,
)
output_dir_base = args.output_dir
random_seed = args.data_random_seed
if random_seed is None:
random_seed = random.randrange(sys.maxsize)
feature_processor = feature_pipeline.FeaturePipeline(config.data)
if not os.path.exists(output_dir_base):
os.makedirs(output_dir_base)
if(args.use_precomputed_alignments is None):
alignment_dir = os.path.join(output_dir_base, "alignments")
else:
alignment_dir = args.use_precomputed_alignments
# Gather input sequences
with open(args.fasta_path, "r") as fp:
lines = [l.strip() for l in fp.readlines()]
tags, seqs = lines[::2], lines[1::2]
tags = [l[1:] for l in tags]
for tag, seq in zip(tags, seqs):
fasta_path = os.path.join(args.output_dir, "tmp.fasta")
with open(fasta_path, "w") as fp:
fp.write(f">{tag}\n{seq}")
logging.info("Generating features...")
local_alignment_dir = os.path.join(alignment_dir, tag)
if(args.use_precomputed_alignments is None):
if not os.path.exists(local_alignment_dir):
os.makedirs(local_alignment_dir)
alignment_runner = data_pipeline.AlignmentRunner(
jackhmmer_binary_path=args.jackhmmer_binary_path,
hhblits_binary_path=args.hhblits_binary_path,
hhsearch_binary_path=args.hhsearch_binary_path,
uniref90_database_path=args.uniref90_database_path,
mgnify_database_path=args.mgnify_database_path,
bfd_database_path=args.bfd_database_path,
uniclust30_database_path=args.uniclust30_database_path,
small_bfd_database_path=args.small_bfd_database_path,
pdb70_database_path=args.pdb70_database_path,
use_small_bfd=use_small_bfd,
no_cpus=args.cpus,
)
alignment_runner.run(
fasta_path, local_alignment_dir
)
feature_dict = data_processor.process_fasta(
fasta_path=fasta_path, alignment_dir=local_alignment_dir
)
# Remove temporary FASTA file
os.remove(fasta_path)
processed_feature_dict = feature_processor.process_features(
feature_dict, mode='predict',
)
logging.info("Executing model...")
batch = processed_feature_dict
with torch.no_grad():
batch = {
k:torch.as_tensor(v, device=args.model_device)
for k,v in batch.items()
}
t = time.time()
out = model(batch)
logging.info(f"Inference time: {time.time() - t}")
# Toss out the recycling dimensions --- we don't need them anymore
batch = tensor_tree_map(lambda x: np.array(x[..., -1].cpu()), batch)
out = tensor_tree_map(lambda x: np.array(x.cpu()), out)
plddt = out["plddt"]
mean_plddt = np.mean(plddt)
plddt_b_factors = np.repeat(
plddt[..., None], residue_constants.atom_type_num, axis=-1
)
unrelaxed_protein = protein.from_prediction(
features=batch,
result=out,
b_factors=plddt_b_factors
)
amber_relaxer = relax.AmberRelaxation(
**config.relax
)
# Relax the prediction.
t = time.time()
relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)
logging.info(f"Relaxation time: {time.time() - t}")
# Save the relaxed PDB.
relaxed_output_path = os.path.join(
args.output_dir, f'{tag}_{args.model_name}.pdb'
)
with open(relaxed_output_path, 'w') as f:
f.write(relaxed_pdb_str)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"fasta_path", type=str,
)
add_data_args(parser)
parser.add_argument(
"--use_precomputed_alignments", type=str, default=None,
help="""Path to alignment directory. If provided, alignment computation
is skipped and database path arguments are ignored."""
)
parser.add_argument(
"--output_dir", type=str, default=os.getcwd(),
help="""Name of the directory in which to output the prediction""",
required=True
)
parser.add_argument(
"--model_device", type=str, default="cpu",
help="""Name of the device on which to run the model. Any valid torch
device name is accepted (e.g. "cpu", "cuda:0")"""
)
parser.add_argument(
"--model_name", type=str, default="model_1",
help="""Name of a model config. Choose one of model_{1-5} or
model_{1-5}_ptm, as defined on the AlphaFold GitHub."""
)
parser.add_argument(
"--param_path", type=str, default=None,
help="""Path to model parameters. If None, parameters are selected
automatically according to the model name from
openfold/resources/params"""
)
parser.add_argument(
"--cpus", type=int, default=4,
help="""Number of CPUs with which to run alignment tools"""
)
parser.add_argument(
'--preset', type=str, default='full_dbs',
choices=('reduced_dbs', 'full_dbs')
)
parser.add_argument(
'--data_random_seed', type=str, default=None
)
args = parser.parse_args()
if(args.param_path is None):
args.param_path = os.path.join(
"openfold", "resources", "params",
"params_" + args.model_name + ".npz"
)
if(args.model_device == "cpu" and torch.cuda.is_available()):
logging.warning(
"""The model is being run on CPU. Consider specifying
--model_device for better performance"""
)
if(args.bfd_database_path is None and
args.small_bfd_database_path is None):
raise ValueError(
"At least one of --bfd_database_path or --small_bfd_database_path"
"must be specified"
)
main(args)
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.as_tensor"
] | 1.10.0 | cclauss/openfold | a933bc7479a13e4fcb95f7c7d7ffb9a6b55b0d4f |
1.4 | from easydict import EasyDict
import torch
from functools import partial
from core.envs import SimpleCarlaEnv
from core.policy import CILRSPolicy
from core.eval import CarlaBenchmarkEvaluator
from core.utils.others.tcp_helper import parse_carla_tcp
from ding.utils import set_pkg_seed, deep_merge_dicts
from ding.envs import AsyncSubprocessEnvManager
from demo.cilrs.cilrs_env_wrapper import CILRSEnvWrapper
cilrs_config = dict(
env=dict(
env_num=5,
simulator=dict(
town='Town01',
disable_two_wheels=True,
verbose=False,
planner=dict(
type='behavior',
resolution=1,
),
obs=(
dict(
name='rgb',
type='rgb',
size=[400, 300],
position=[1.3, 0.0, 2.3],
fov=100,
),
),
),
wrapper=dict(),
col_is_failure=True,
stuck_is_failure=True,
manager=dict(
auto_reset=False,
shared_memory=False,
context='spawn',
max_retry=1,
),
),
server=[dict(carla_host='localhost', carla_ports=[9000, 9010, 2])],
policy=dict(
ckpt_path=None,
model=dict(
num_branch=4,
pretrained=False,
),
eval=dict(
evaluator=dict(
suite=['FullTown01-v1'],
transform_obs=True,
),
)
),
)
main_config = EasyDict(cilrs_config)
def wrapped_env(env_cfg, host, port, tm_port=None):
return CILRSEnvWrapper(SimpleCarlaEnv(env_cfg, host, port))
def main(cfg, seed=0):
cfg.env.manager = deep_merge_dicts(AsyncSubprocessEnvManager.default_config(), cfg.env.manager)
tcp_list = parse_carla_tcp(cfg.server)
env_num = cfg.env.env_num
assert len(tcp_list) >= env_num, \
"Carla server not enough! Need {} servers but only found {}.".format(env_num, len(tcp_list))
carla_env = AsyncSubprocessEnvManager(
env_fn=[partial(wrapped_env, cfg.env, *tcp_list[i]) for i in range(env_num)],
cfg=cfg.env.manager,
)
carla_env.seed(seed)
set_pkg_seed(seed)
cilrs_policy = CILRSPolicy(cfg.policy, ['eval']).eval_mode
if cfg.policy.ckpt_path is not None:
state_dict = torch.load(cfg.policy.ckpt_path)
cilrs_policy.load_state_dict(state_dict)
evaluator = CarlaBenchmarkEvaluator(cfg.policy.eval.evaluator, carla_env, cilrs_policy)
success_rate = evaluator.eval()
evaluator.close()
if __name__ == '__main__':
main(main_config)
| [
"torch.load"
] | 1.4 | L-Net-1992/DI-drive | cc7f47bedbf60922acbcf3a5f77fc8e274df62cf |
1.4 | # third party
import pytest
import torch as th
# syft absolute
import syft as sy
@pytest.mark.parametrize("with_verify_key", [True, False])
def test_make_searchable(with_verify_key: bool) -> None:
bob = sy.VirtualMachine(name="Bob")
root_client = bob.get_root_client()
client = bob.get_client()
ten = th.tensor([1, 2])
ptr = ten.send(root_client)
assert len(client.store) == 0
if with_verify_key:
ptr.update_searchability(target_verify_key=client.verify_key)
else:
ptr.update_searchability()
assert len(client.store) == 1
@pytest.mark.parametrize("with_verify_key", [True, False])
def test_make_unsearchable(with_verify_key: bool) -> None:
bob = sy.VirtualMachine(name="Bob")
root_client = bob.get_root_client()
client = bob.get_client()
ten = th.tensor([1, 2])
ptr = ten.send(root_client)
if with_verify_key:
ptr.update_searchability(target_verify_key=client.verify_key)
else:
ptr.update_searchability()
assert len(client.store) == 1
if with_verify_key:
ptr.update_searchability(searchable=False, target_verify_key=client.verify_key)
else:
ptr.update_searchability(searchable=False)
assert len(client.store) == 0
def test_searchable_property() -> None:
bob = sy.VirtualMachine(name="Bob")
root_client = bob.get_root_client()
client = bob.get_client()
ten = th.tensor([1, 2])
ptr = ten.send(root_client)
assert len(client.store) == 0
ptr.searchable = False
assert len(client.store) == 0
ptr.searchable = True
assert len(client.store) == 1
ptr.searchable = True
assert len(client.store) == 1
ptr.searchable = False
assert len(client.store) == 0
def test_tags() -> None:
bob = sy.VirtualMachine(name="Bob")
root_client = bob.get_root_client()
ten = th.tensor([1, 2])
ten = ten.tag("tag1", "tag1", "other")
assert ten.tags == ["tag1", "other"]
# .send without `tags` passed in
ptr = ten.send(root_client)
assert ptr.tags == ["tag1", "other"]
# .send with `tags` passed in
ptr = ten.send(root_client, tags=["tag2", "tag2", "other"])
assert ten.tags == ["tag2", "other"]
assert ptr.tags == ["tag2", "other"]
def test_description() -> None:
bob = sy.VirtualMachine(name="Bob")
root_client = bob.get_root_client()
ten = th.tensor([1, 2])
ten = ten.describe("description 1")
assert ten.description == "description 1"
# .send without `description` passed in
ptr = ten.send(root_client)
assert ptr.description == "description 1"
# .send with `description` passed in
ptr = ten.send(root_client, description="description 2")
assert ten.description == "description 2"
assert ptr.description == "description 2"
| [
"torch.tensor"
] | 1.4 | aeroaks/PySyft | 88220c38faf3cd72ddc63c73f3c0533695df53c9 |
1.10 | from argparse import ArgumentParser
from config_parser import get_config
from utils.loss import LabelSmoothingLoss
from utils.opt import get_optimizer
from utils.scheduler import WarmUpLR, get_scheduler
from utils.trainer import train
from utils.load_SHREC import get_loaders
from utils.misc import seed_everything, count_params, get_model
import torch
from torch import nn
import numpy as np
import wandb
import os
import yaml
import random
import time
def training_pipeline(config):
"""Initiates and executes all the steps involved with model training.
Args:
config (dict) - Dict containing various settings for the training run.
"""
config["exp"]["save_dir"] = os.path.join(config["exp"]["exp_dir"], config["exp"]["exp_name"])
os.makedirs(config["exp"]["save_dir"], exist_ok=True)
######################################
# save hyperparameters for current run
######################################
config_str = yaml.dump(config)
print("Using settings:\n", config_str)
with open(os.path.join(config["exp"]["save_dir"], "settings.txt"), "w+") as f:
f.write(config_str)
#####################################
# initialize training items
#####################################
# data
loaders = get_loaders(config)
# model
model = get_model(config["hparams"]["model"])
model = model.to(config["hparams"]["device"])
print(f"Created model with {count_params(model)} parameters.")
# loss
if config["hparams"]["l_smooth"]:
criterion = LabelSmoothingLoss(num_classes=config["hparams"]["model"]["num_classes"], smoothing=config["hparams"]["l_smooth"])
else:
criterion = nn.CrossEntropyLoss()
# optimizer
optimizer = get_optimizer(model, config["hparams"])
# lr scheduler
schedulers = {
"warmup": None,
"scheduler": None
}
if config["hparams"]["scheduler"]["n_warmup"]:
schedulers["warmup"] = WarmUpLR(optimizer, total_iters=len(loaders["train"]) * config["hparams"]["scheduler"]["n_warmup"])
if config["hparams"]["scheduler"]["scheduler_type"] is not None:
total_iters = len(loaders["train"]) * max(1, (config["hparams"]["n_epochs"] - config["hparams"]["scheduler"]["n_warmup"]))
schedulers["scheduler"] = get_scheduler(optimizer, config["hparams"]["scheduler"]["scheduler_type"], total_iters)
#####################################
# Training Run
#####################################
print("Initiating training.")
train(model, optimizer, criterion, loaders["train"], loaders["test"], schedulers, config)
def main(args):
config = get_config(args.conf)
seed_everything(config["hparams"]["seed"])
if config["exp"]["wandb"]:
if config["exp"]["wandb_api_key"] is not None:
with open(config["exp"]["wandb_api_key"], "r") as f:
os.environ["WANDB_API_KEY"] = f.read()
else:
wandb.login()
with wandb.init(project=config["exp"]["proj_name"], name=config["exp"]["exp_name"], config=config["hparams"]):
training_pipeline(config)
else:
training_pipeline(config)
if __name__ == "__main__":
parser = ArgumentParser("Driver code.")
parser.add_argument("--conf", type=str, required=True, help="Path to config.yaml file.")
args = parser.parse_args()
main(args) | [
"torch.nn.CrossEntropyLoss"
] | 1.10.0 | ID56/Multimodal-Fusion-CRNN | 1775ec0cb9d0878c2635860c291b343130296797 |