Dataset Viewer

version (string, 25 classes) | code (string, 75–178k chars) | apis (sequence) | full_version (string, 1–6 chars) | repo_name (string, 9–78 chars) | hexsha (string, 40 chars)
---|---|---|---|---|---
1.6 | # -*- coding: utf-8 -*-
"""
@Time : 2021/1/14 5:34 PM
@FileName: bert.py
@author: 王炳宁
@contact: [email protected]
"""
import sys
import time
import apex
import torch
import torch.distributed as dist
from apex import amp
sys.path.append('..')
from modules.BERT import Bert
from train.parser import get_argument_parser
from utils import *
np.random.seed(1000)
torch.manual_seed(1024)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args = get_argument_parser()
print(args.local_rank, dist.get_rank(), dist.get_world_size())
torch.cuda.set_device(args.local_rank)
vocab_size = 50000
n_embedding = 128
n_hidden = 768
n_layer = 12
n_head = 12
batch_size = 8
max_learning_rate = 4e-5
doc_max_length_size = 1024
train_data = load_file(args.train_file_path)
dev_data = load_file(args.dev_file_path)
dev_data = sorted(dev_data, key=lambda x: len(x[0]))
remove_data_size = len(dev_data) % dist.get_world_size()
thread_dev_data = [dev_data[x + args.local_rank] for x in
range(0, len(dev_data) - remove_data_size, dist.get_world_size())]
print('train data size is {} test size {}'.format(len(train_data), len(dev_data)))
model = Bert(vocab_size, n_embedding, n_hidden, n_layer, n_head)
filename = args.pretrain_model
state_dict = load_file(filename)
for name, para in model.named_parameters():
if name not in state_dict:
if dist.get_rank() == 0:
print('{} not load'.format(name))
continue
para.data = torch.FloatTensor(state_dict[name])
print('model size {}'.format(get_model_parameters(model)))
model.cuda()
if args.optimizer.lower() == 'adam':
optimizer = apex.optimizers.FusedAdam
elif args.optimizer.lower() == 'lamb':
optimizer = apex.optimizers.FusedLAMB
else:
optimizer = apex.optimizers.FusedSGD
optim = optimizer(
model.parameters(),
eps=2.0e-7,
lr=1.0e-7,
)
model, optim = amp.initialize(model, optim, opt_level="O2", verbosity=0)
model = apex.parallel.DistributedDataParallel(model)
warm_up_steps = 500
lr_opt_steps = max_learning_rate / 1000000
warm_up_lr_opt_steps = max_learning_rate / warm_up_steps
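# Schedule: the learning rate ramps up linearly to max_learning_rate over
# warm_up_steps updates, then decays linearly by max_learning_rate / 1e6 per
# update (see the per-step adjustment inside train()).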
def metric_sum(val):
tensor = torch.tensor(val).cuda()
dist.reduce(tensor, 0)
return tensor.item()
def metric_mean(val):
tensor = torch.tensor(val).cuda()
dist.reduce(tensor, 0)
return tensor.item() / dist.get_world_size()
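# Bucket the training data by sequence length (len // 5) and shuffle within
# and across buckets, so each batch holds similarly sized sequences; each
# rank then takes a strided shard of the result.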
def get_shuffle_train_data():
pool = {}
for one in train_data:
length = len(one[0]) // 5
if length not in pool:
pool[length] = []
pool[length].append(one)
for one in pool:
np.random.shuffle(pool[one])
length_lst = list(pool.keys())
np.random.shuffle(length_lst)
whole_data = [x for y in length_lst for x in pool[y]]
remove_data_size = len(whole_data) % dist.get_world_size()
thread_data = [whole_data[x + args.local_rank] for x in
range(0, len(whole_data) - remove_data_size, dist.get_world_size())]
return thread_data
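# The -1 markers denote real answer-end positions; they are replaced with the
# vocab_size sentinel token, and a binary label marks which sentinel positions
# in the flattened batch are genuine ends.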
def get_train_data(batch, max_len=doc_max_length_size):
batch, _ = padding(batch, max_len=max_len)
seq = batch.flatten()
real_end_pos = np.where(seq == -1)[0]
np.put(seq, real_end_pos, vocab_size)
all_end_pos_seq = np.where(seq == vocab_size)[0]
label = np.zeros(shape=len(all_end_pos_seq), dtype='float32')
for i, j in enumerate(all_end_pos_seq):
if j in real_end_pos:
label[i] = 1
batch = seq.reshape(batch.shape)
return batch, label
current_number = 0
update_number = 0
def evaluation(epo):
results = []
for i in range(dist.get_world_size()):
results.extend(load_file('{}.tmp.obj'.format(i)))
os.remove('{}.tmp.obj'.format(i))
print('epoch:{},total:{}'.format(epo, len(results)))
threshold = 0.5
precision, recall, f1, macro_f1, accuracy = evaluate_comqa(results, threshold)
print('threshold:{}\nprecision:{}\nrecall:{}\nf1:{}\nmacro_f1:{}\naccuracy:{}\n{}'.format(
threshold, precision,
recall, f1,
macro_f1, accuracy,
'===' * 10))
return [precision, recall, macro_f1, f1, accuracy]
def dev(epo):
model.eval()
total = len(thread_dev_data)
results = []
with torch.no_grad():
for i in tqdm(range(0, total, batch_size)):
sample = thread_dev_data[i:i + batch_size]
context_raw = [x[0] for x in sample]
paras = [x[1] for x in sample]
batch, label = get_train_data(context_raw, 1024)
batch = torch.LongTensor(batch)
mask_idx = torch.eq(batch, vocab_size)
answer_logits = model([batch.cuda(), None])
end_num = mask_idx.sum(1).data.numpy().tolist()
answer_logits = answer_logits.cpu().data.numpy().tolist()
start = 0
for one_sent_end_num, para in zip(end_num, paras):
pred = answer_logits[start:start + one_sent_end_num]
results.append([pred, para])
start += one_sent_end_num
dump_file(results, '{}.tmp.obj'.format(dist.get_rank()))
dist.barrier()
if dist.get_rank() == 0:
return evaluation(epo)
return None
def train(epo):
global current_number, update_number
model.train()
data = get_shuffle_train_data()
total = len(data)
total_loss = 0
num = 0
pre_time = None
instance_number = 0
for i in range(0, total, batch_size):
context = [x[0] for x in data[i:i + batch_size]]
batch, label = get_train_data(context)
batch = torch.LongTensor(batch)
loss = model([batch.cuda(), torch.FloatTensor(label).cuda()])
with amp.scale_loss(loss, optim) as scaled_loss:
scaled_loss.backward()
total_loss += loss.item() * len(context)
instance_number += len(context)
optim.step()
optim.zero_grad()
update_number += 1
for param_group in optim.param_groups:
if update_number > warm_up_steps:
param_group['lr'] -= lr_opt_steps
else:
param_group['lr'] += warm_up_lr_opt_steps
num += 1
if num % args.log_interval == 0:
if pre_time is None:
elapsed = 0
else:
elapsed = time.time() - pre_time
total_loss = metric_sum(total_loss)
instance_number = metric_sum(instance_number)
if dist.get_rank() == 0:
print('epoch {}, mask loss is {:5.4f}, ms per instance is {:7.4f}, progress {:4.3f}% lr={:e}'.format(
epo, total_loss / instance_number, 1000 * elapsed / instance_number,
i * 100 / total, optim.param_groups[0]['lr']))
pre_time = time.time()
total_loss = 0
instance_number = 0
if __name__ == '__main__':
results = []
best_f1 = 0
for i in range(args.epoch):
train(i)
results = dev(i)
output = {}
if dist.get_rank() == 0:
print('epoch {} done!! result is {}'.format(i, results))
if results[2] > best_f1:
best_f1 = results[2]
for name, param in model.module.named_parameters():
output[name] = param.data.cpu().numpy()
dump_file(output, args.model_save_path)
| [
"torch.distributed.get_world_size",
"torch.eq",
"torch.distributed.init_process_group",
"torch.FloatTensor",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.LongTensor",
"torch.tensor",
"torch.distributed.reduce",
"torch.distributed.get_rank",
"torch.distributed.barrier"
] | 1.6.0 | benywon/ComQA | 6731d63d16b731d6c3654b2dc7d2503cf333127f |
1.1 | import torch.nn as nn
from .gen_resblock import GenBlock
class Generator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), n_classes=0):
super(Generator, self).__init__()
self.bottom_width = args.bottom_width
self.activation = activation
self.n_classes = n_classes
self.ch = args.gf_dim
self.l1 = nn.Linear(args.latent_dim, (self.bottom_width ** 2) * self.ch)
self.block2 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block3 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.block4 = GenBlock(self.ch, self.ch, activation=activation, upsample=True, n_classes=n_classes)
self.b5 = nn.BatchNorm2d(self.ch)
self.c5 = nn.Conv2d(self.ch, 3, kernel_size=3, stride=1, padding=1)
def forward(self, z):
h = z
h = self.l1(h).view(-1, self.ch, self.bottom_width, self.bottom_width)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.b5(h)
h = self.activation(h)
h = nn.Tanh()(self.c5(h))
return h
"""Discriminator"""
def _downsample(x):
# Downsample by 2x2 average pooling
return nn.AvgPool2d(kernel_size=2)(x)
class OptimizedDisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()):
super(OptimizedDisBlock, self).__init__()
self.activation = activation
self.c1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(out_channels, out_channels, kernel_size=ksize, padding=pad)
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
h = _downsample(h)
return h
def shortcut(self, x):
return self.c_sc(_downsample(x))
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class DisBlock(nn.Module):
def __init__(self, args, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1,
activation=nn.ReLU(), downsample=False):
super(DisBlock, self).__init__()
self.activation = activation
self.downsample = downsample
self.learnable_sc = (in_channels != out_channels) or downsample
hidden_channels = in_channels if hidden_channels is None else hidden_channels
self.c1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=ksize, padding=pad)
self.c2 = nn.Conv2d(hidden_channels, out_channels, kernel_size=ksize, padding=pad)
if args.d_spectral_norm:
self.c1 = nn.utils.spectral_norm(self.c1)
self.c2 = nn.utils.spectral_norm(self.c2)
if self.learnable_sc:
self.c_sc = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0)
if args.d_spectral_norm:
self.c_sc = nn.utils.spectral_norm(self.c_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
h = self.activation(h)
h = self.c2(h)
if self.downsample:
h = _downsample(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.downsample:
return _downsample(x)
else:
return x
else:
return x
def forward(self, x):
return self.residual(x) + self.shortcut(x)
class Discriminator(nn.Module):
def __init__(self, args, activation=nn.ReLU(), ssup=False):
super(Discriminator, self).__init__()
self.ch = args.df_dim
self.activation = activation
self.ssup = ssup
self.block1 = OptimizedDisBlock(args, 3, self.ch)
self.block2 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=True)
self.block3 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.block4 = DisBlock(args, self.ch, self.ch, activation=activation, downsample=False)
self.softmax = nn.Softmax(dim=1)
if ssup:
self.fully_connect_rot = nn.Linear(self.ch, 4, bias=False)
self.fully_connect_gan = nn.Linear(self.ch, 1, bias=False)
if args.d_spectral_norm:
self.fully_connect_gan = nn.utils.spectral_norm(self.fully_connect_gan)
if ssup:
self.fully_connect_rot = nn.utils.spectral_norm(self.fully_connect_rot)
def forward(self, x):
h = x
h = self.block1(h)
h = self.block2(h)
h = self.block3(h)
h = self.block4(h)
h = self.activation(h)
# Global sum pooling over the spatial dimensions
h = h.sum(2).sum(2)
# GAN logits
gan_logits = self.fully_connect_gan(h)
rot_logits, rot_prob = -1, -1
if self.ssup:
rot_logits = self.fully_connect_rot(h)
rot_prob = self.softmax(rot_logits)
return gan_logits, rot_logits, rot_prob
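# A minimal shape smoke test (hypothetical args values, not from the original repo):
# import argparse, torch
# args = argparse.Namespace(bottom_width=4, latent_dim=128, gf_dim=256, df_dim=128, d_spectral_norm=True)
# gen, dis = Generator(args), Discriminator(args, ssup=True)
# imgs = gen(torch.randn(2, args.latent_dim))  # (2, 3, 32, 32): three 2x upsamples of a 4x4 map
# gan_logits, rot_logits, rot_prob = dis(imgs)  # (2, 1), (2, 4), (2, 4)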
| [
"torch.nn.Linear",
"torch.nn.Softmax",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.utils.spectral_norm"
] | 1.1.0 | sudarshanregmi/ICRGAN-and-SSGAN | c9e7b01d89cba19505e566892a678932717b8039 |
1.8 | from typing import Iterable, Optional, Sequence
import numpy as np
import torch
from torch.distributions import Categorical, Normal
from torch.distributions import kl_divergence as kl
from torch.nn import functional as F
from scvi import _CONSTANTS
from scvi._compat import Literal
from scvi.module.base import LossRecorder, auto_move_data
from scvi.nn import Decoder, Encoder
from ._classifier import Classifier
from ._utils import broadcast_labels
from ._vae import VAE
class SCANVAE(VAE):
"""
Single-cell annotation using variational inference.
This is an implementation of the scANVI model described in [Xu21]_,
inspired by the M1 + M2 model described in (https://arxiv.org/pdf/1406.5298.pdf).
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer
n_latent
Dimensionality of the latent space
n_layers
Number of hidden layers used for encoder and decoder NNs
n_continuous_cov
Number of continuous covariates
n_cats_per_cov
Number of categories for each extra categorical covariate
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
gene_likelihood
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
y_prior
If None, initialized to uniform probability over cell types
labels_groups
Label group designations
use_labels_groups
Whether to use the label groups
use_batch_norm
Whether to use batch norm in layers
use_layer_norm
Whether to use layer norm in layers
**vae_kwargs
Keyword args for :class:`~scvi.module.VAE`
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
n_continuous_cov: int = 0,
n_cats_per_cov: Optional[Iterable[int]] = None,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
gene_likelihood: str = "zinb",
y_prior=None,
labels_groups: Sequence[int] = None,
use_labels_groups: bool = False,
classifier_parameters: dict = dict(),
use_batch_norm: Literal["encoder", "decoder", "none", "both"] = "both",
use_layer_norm: Literal["encoder", "decoder", "none", "both"] = "none",
**vae_kwargs
):
super().__init__(
n_input,
n_hidden=n_hidden,
n_latent=n_latent,
n_layers=n_layers,
n_continuous_cov=n_continuous_cov,
n_cats_per_cov=n_cats_per_cov,
dropout_rate=dropout_rate,
n_batch=n_batch,
dispersion=dispersion,
log_variational=log_variational,
gene_likelihood=gene_likelihood,
use_batch_norm=use_batch_norm,
use_layer_norm=use_layer_norm,
**vae_kwargs
)
use_batch_norm_encoder = use_batch_norm == "encoder" or use_batch_norm == "both"
use_batch_norm_decoder = use_batch_norm == "decoder" or use_batch_norm == "both"
use_layer_norm_encoder = use_layer_norm == "encoder" or use_layer_norm == "both"
use_layer_norm_decoder = use_layer_norm == "decoder" or use_layer_norm == "both"
self.n_labels = n_labels
# Classifier takes n_latent as input
cls_parameters = {
"n_layers": n_layers,
"n_hidden": n_hidden,
"dropout_rate": dropout_rate,
}
cls_parameters.update(classifier_parameters)
self.classifier = Classifier(
n_latent,
n_labels=n_labels,
use_batch_norm=use_batch_norm_encoder,
use_layer_norm=use_layer_norm_encoder,
**cls_parameters
)
self.encoder_z2_z1 = Encoder(
n_latent,
n_latent,
n_cat_list=[self.n_labels],
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
use_batch_norm=use_batch_norm_encoder,
use_layer_norm=use_layer_norm_encoder,
)
self.decoder_z1_z2 = Decoder(
n_latent,
n_latent,
n_cat_list=[self.n_labels],
n_layers=n_layers,
n_hidden=n_hidden,
use_batch_norm=use_batch_norm_decoder,
use_layer_norm=use_layer_norm_decoder,
)
self.y_prior = torch.nn.Parameter(
y_prior
if y_prior is not None
else (1 / n_labels) * torch.ones(1, n_labels),
requires_grad=False,
)
self.use_labels_groups = use_labels_groups
self.labels_groups = (
np.array(labels_groups) if labels_groups is not None else None
)
if self.use_labels_groups:
if labels_groups is None:
raise ValueError("Specify label groups")
unique_groups = np.unique(self.labels_groups)
self.n_groups = len(unique_groups)
if not (unique_groups == np.arange(self.n_groups)).all():
raise ValueError("labels_groups must be consecutive integers starting at 0")
self.classifier_groups = Classifier(
n_latent, n_hidden, self.n_groups, n_layers, dropout_rate
)
self.groups_index = torch.nn.ParameterList(
[
torch.nn.Parameter(
torch.tensor(
(self.labels_groups == i).astype(np.uint8),
dtype=torch.uint8,
),
requires_grad=False,
)
for i in range(self.n_groups)
]
)
@auto_move_data
def classify(self, x, batch_index=None):
if self.log_variational:
x = torch.log(1 + x)
qz_m, _, z = self.z_encoder(x, batch_index)
# We classify using the inferred mean parameter of z_1 in the latent space
z = qz_m
if self.use_labels_groups:
w_g = self.classifier_groups(z)
unw_y = self.classifier(z)
w_y = torch.zeros_like(unw_y)
for i, group_index in enumerate(self.groups_index):
unw_y_g = unw_y[:, group_index]
w_y[:, group_index] = unw_y_g / (
unw_y_g.sum(dim=-1, keepdim=True) + 1e-8
)
w_y[:, group_index] *= w_g[:, [i]]
else:
w_y = self.classifier(z)
return w_y
@auto_move_data
def classification_loss(self, labelled_dataset):
x = labelled_dataset[_CONSTANTS.X_KEY]
y = labelled_dataset[_CONSTANTS.LABELS_KEY]
batch_idx = labelled_dataset[_CONSTANTS.BATCH_KEY]
classification_loss = F.cross_entropy(
self.classify(x, batch_idx), y.view(-1).long()
)
return classification_loss
def loss(
self,
tensors,
inference_outputs,
generative_outputs,
feed_labels=False,
kl_weight=1,
labelled_tensors=None,
classification_ratio=None,
):
px_r = generative_outputs["px_r"]
px_rate = generative_outputs["px_rate"]
px_dropout = generative_outputs["px_dropout"]
qz1_m = inference_outputs["qz_m"]
qz1_v = inference_outputs["qz_v"]
z1 = inference_outputs["z"]
x = tensors[_CONSTANTS.X_KEY]
batch_index = tensors[_CONSTANTS.BATCH_KEY]
if feed_labels:
y = tensors[_CONSTANTS.LABELS_KEY]
else:
y = None
is_labelled = y is not None
# Enumerate choices of label
ys, z1s = broadcast_labels(y, z1, n_broadcast=self.n_labels)
qz2_m, qz2_v, z2 = self.encoder_z2_z1(z1s, ys)
pz1_m, pz1_v = self.decoder_z1_z2(z2, ys)
reconst_loss = self.get_reconstruction_loss(x, px_rate, px_r, px_dropout)
# KL Divergence
mean = torch.zeros_like(qz2_m)
scale = torch.ones_like(qz2_v)
kl_divergence_z2 = kl(
Normal(qz2_m, torch.sqrt(qz2_v)), Normal(mean, scale)
).sum(dim=1)
loss_z1_unweight = -Normal(pz1_m, torch.sqrt(pz1_v)).log_prob(z1s).sum(dim=-1)
loss_z1_weight = Normal(qz1_m, torch.sqrt(qz1_v)).log_prob(z1).sum(dim=-1)
if not self.use_observed_lib_size:
ql_m = inference_outputs["ql_m"]
ql_v = inference_outputs["ql_v"]
(
local_library_log_means,
local_library_log_vars,
) = self._compute_local_library_params(batch_index)
kl_divergence_l = kl(
Normal(ql_m, torch.sqrt(ql_v)),
Normal(local_library_log_means, torch.sqrt(local_library_log_vars)),
).sum(dim=1)
else:
kl_divergence_l = 0.0
if is_labelled:
loss = reconst_loss + loss_z1_weight + loss_z1_unweight
kl_locals = {
"kl_divergence_z2": kl_divergence_z2,
"kl_divergence_l": kl_divergence_l,
}
if labelled_tensors is not None:
classifier_loss = self.classification_loss(labelled_tensors)
loss += classifier_loss * classification_ratio
return LossRecorder(
loss,
reconst_loss,
kl_locals,
kl_global=torch.tensor(0.0),
classification_loss=classifier_loss,
n_labelled_tensors=labelled_tensors[_CONSTANTS.X_KEY].shape[0],
)
return LossRecorder(
loss,
reconst_loss,
kl_locals,
kl_global=torch.tensor(0.0),
)
probs = self.classifier(z1)
reconst_loss += loss_z1_weight + (
(loss_z1_unweight).view(self.n_labels, -1).t() * probs
).sum(dim=1)
kl_divergence = (kl_divergence_z2.view(self.n_labels, -1).t() * probs).sum(
dim=1
)
kl_divergence += kl(
Categorical(probs=probs),
Categorical(probs=self.y_prior.repeat(probs.size(0), 1)),
)
kl_divergence += kl_divergence_l
loss = torch.mean(reconst_loss + kl_divergence * kl_weight)
if labelled_tensors is not None:
classifier_loss = self.classification_loss(labelled_tensors)
loss += classifier_loss * classification_ratio
return LossRecorder(
loss,
reconst_loss,
kl_divergence,
kl_global=torch.tensor(0.0),
classification_loss=classifier_loss,
n_labelled_tensors=labelled_tensors[_CONSTANTS.X_KEY].shape[0],
)
return LossRecorder(
loss, reconst_loss, kl_divergence, kl_global=torch.tensor(0.0)
)
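# A minimal construction sketch (hypothetical sizes, not from the scvi test suite):
# module = SCANVAE(n_input=2000, n_batch=2, n_labels=10, n_latent=10)
# With y_prior left as None, module.y_prior is a fixed uniform (1, 10) prior
# over the 10 cell-type labels.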
| [
"torch.distributions.Categorical",
"torch.sqrt",
"torch.distributions.Normal",
"torch.ones",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like",
"torch.log",
"torch.mean"
] | 1.8.0 | jules-samaran/scvi-tools | 7dcbb819cdc6a7991469fdca6b292276c59a946d |
2.0 | #!/usr/bin/env python3
import argparse
import datetime
import os
import pickle
import pprint
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from examples.atari.atari_network import QRDQN
from examples.atari.atari_wrapper import make_atari_env
from examples.offline.utils import load_buffer
from tianshou.data import Collector, VectorReplayBuffer
from tianshou.policy import DiscreteCQLPolicy
from tianshou.trainer import offline_trainer
from tianshou.utils import TensorboardLogger, WandbLogger
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--task", type=str, default="PongNoFrameskip-v4")
parser.add_argument("--seed", type=int, default=1626)
parser.add_argument("--eps-test", type=float, default=0.001)
parser.add_argument("--lr", type=float, default=0.0001)
parser.add_argument("--gamma", type=float, default=0.99)
parser.add_argument("--num-quantiles", type=int, default=200)
parser.add_argument("--n-step", type=int, default=1)
parser.add_argument("--target-update-freq", type=int, default=500)
parser.add_argument("--min-q-weight", type=float, default=10.)
parser.add_argument("--epoch", type=int, default=100)
parser.add_argument("--update-per-epoch", type=int, default=10000)
parser.add_argument("--batch-size", type=int, default=32)
parser.add_argument("--hidden-sizes", type=int, nargs="*", default=[512])
parser.add_argument("--test-num", type=int, default=10)
parser.add_argument("--frames-stack", type=int, default=4)
parser.add_argument("--scale-obs", type=int, default=0)
parser.add_argument("--logdir", type=str, default="log")
parser.add_argument("--render", type=float, default=0.)
parser.add_argument("--resume-path", type=str, default=None)
parser.add_argument("--resume-id", type=str, default=None)
parser.add_argument(
"--logger",
type=str,
default="tensorboard",
choices=["tensorboard", "wandb"],
)
parser.add_argument("--wandb-project", type=str, default="offline_atari.benchmark")
parser.add_argument(
"--watch",
default=False,
action="store_true",
help="watch the play of pre-trained policy only"
)
parser.add_argument("--log-interval", type=int, default=100)
parser.add_argument(
"--load-buffer-name", type=str, default="./expert_DQN_PongNoFrameskip-v4.hdf5"
)
parser.add_argument(
"--buffer-from-rl-unplugged", action="store_true", default=False
)
parser.add_argument(
"--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu"
)
args = parser.parse_known_args()[0]
return args
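# Example invocation (script filename assumed; the buffer path is the
# parser's placeholder default):
# python atari_cql.py --task PongNoFrameskip-v4 --load-buffer-name ./expert_DQN_PongNoFrameskip-v4.hdf5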
def test_discrete_cql(args=get_args()):
# envs
env, _, test_envs = make_atari_env(
args.task,
args.seed,
1,
args.test_num,
scale=args.scale_obs,
frame_stack=args.frames_stack,
)
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.action_space.shape or env.action_space.n
# should be N_FRAMES x H x W
print("Observations shape:", args.state_shape)
print("Actions shape:", args.action_shape)
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# model
net = QRDQN(*args.state_shape, args.action_shape, args.num_quantiles, args.device)
optim = torch.optim.Adam(net.parameters(), lr=args.lr)
# define policy
policy = DiscreteCQLPolicy(
net,
optim,
args.gamma,
args.num_quantiles,
args.n_step,
args.target_update_freq,
min_q_weight=args.min_q_weight,
).to(args.device)
# load a previous policy
if args.resume_path:
policy.load_state_dict(torch.load(args.resume_path, map_location=args.device))
print("Loaded agent from: ", args.resume_path)
# buffer
if args.buffer_from_rl_unplugged:
buffer = load_buffer(args.load_buffer_name)
else:
assert os.path.exists(args.load_buffer_name), \
"Please run atari_dqn.py first to get expert's data buffer."
if args.load_buffer_name.endswith(".pkl"):
buffer = pickle.load(open(args.load_buffer_name, "rb"))
elif args.load_buffer_name.endswith(".hdf5"):
buffer = VectorReplayBuffer.load_hdf5(args.load_buffer_name)
else:
print(f"Unknown buffer format: {args.load_buffer_name}")
exit(0)
print("Replay buffer size:", len(buffer), flush=True)
# collector
test_collector = Collector(policy, test_envs, exploration_noise=True)
# log
now = datetime.datetime.now().strftime("%y%m%d-%H%M%S")
args.algo_name = "cql"
log_name = os.path.join(args.task, args.algo_name, str(args.seed), now)
log_path = os.path.join(args.logdir, log_name)
# logger
if args.logger == "wandb":
logger = WandbLogger(
save_interval=1,
name=log_name.replace(os.path.sep, "__"),
run_id=args.resume_id,
config=args,
project=args.wandb_project,
)
writer = SummaryWriter(log_path)
writer.add_text("args", str(args))
if args.logger == "tensorboard":
logger = TensorboardLogger(writer)
else: # wandb
logger.load(writer)
def save_best_fn(policy):
torch.save(policy.state_dict(), os.path.join(log_path, "policy.pth"))
def stop_fn(mean_rewards):
return False
# watch agent's performance
def watch():
print("Setup test envs ...")
policy.eval()
policy.set_eps(args.eps_test)
test_envs.seed(args.seed)
print("Testing agent ...")
test_collector.reset()
result = test_collector.collect(n_episode=args.test_num, render=args.render)
pprint.pprint(result)
rew = result["rews"].mean()
print(f'Mean reward (over {result["n/ep"]} episodes): {rew}')
if args.watch:
watch()
exit(0)
result = offline_trainer(
policy,
buffer,
test_collector,
args.epoch,
args.update_per_epoch,
args.test_num,
args.batch_size,
stop_fn=stop_fn,
save_best_fn=save_best_fn,
logger=logger,
)
pprint.pprint(result)
watch()
if __name__ == "__main__":
test_discrete_cql(get_args())
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load",
"torch.utils.tensorboard.SummaryWriter"
] | 2.0.0 | BFAnas/tianshou | 6e86a0bed7d1117c5ad6a421b483b45a6adfe336 |
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
from convs.dyres_conv import *
from convs.condconv import *
__all__ = ['DyResA_ResNet18']
class DyRes_BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, stride=1, num_experts=3):
super().__init__()
self.conv1 = DyResConv(in_channels, channels, kernel_size=3, stride=stride, padding=1,
num_experts=num_experts, mode='A')
self.bn1 = nn.BatchNorm2d(channels)
self.conv2 = DyResConv(channels, channels, kernel_size=3, stride=1, padding=1,
num_experts=num_experts, mode='A')
self.bn2 = nn.BatchNorm2d(channels)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion*channels,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*channels)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
# Addition
out += self.shortcut(x)
out = F.relu(out)
return out
class CondConv_BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, stride=1, num_experts=3):
super().__init__()
self.conv1 = CondConv(in_channels, channels, kernel_size=3, stride=stride, padding=1, num_experts=num_experts)
self.bn1 = nn.BatchNorm2d(channels)
self.conv2 = CondConv(channels, channels, kernel_size=3, stride=1, padding=1, num_experts=num_experts)
self.bn2 = nn.BatchNorm2d(channels)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion*channels,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*channels)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
# Addition
out += self.shortcut(x)
out = F.relu(out)
return out
class DyResA_ResNet(nn.Module):
def __init__(self, block1, block2, num_blocks, num_classes=100, num_experts=3):
super().__init__()
self.in_channels = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block1, 64, num_blocks[0], stride=1, num_experts=num_experts)
self.layer2 = self._make_layer(block1, 128, num_blocks[1], stride=2, num_experts=num_experts)
self.layer3 = self._make_layer(block2, 256, num_blocks[2], stride=2, num_experts=num_experts)
self.layer4 = self._make_layer(block2, 512, num_blocks[3], stride=2, num_experts=num_experts)
self.linear = nn.Linear(512*block2.expansion, num_classes)
def _make_layer(self, block, channels, num_blocks, stride, num_experts):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, channels, stride, num_experts))
self.in_channels = channels * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DyResA_ResNet18(num_experts=3):
return DyResA_ResNet(DyRes_BasicBlock, CondConv_BasicBlock, [2, 2, 2, 2], num_experts=num_experts)
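# Hybrid design: the first two stages use DyResConv blocks (mode 'A') and the
# last two use CondConv blocks, each holding num_experts candidate kernels.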
def test():
x = torch.randn(128, 3, 32, 32)
net1 = DyResA_ResNet18()
y1 = net1(x); print(y1.size())
# test() | [
"torch.nn.Linear",
"torch.nn.functional.avg_pool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.randn"
] | 1.4.0 | Nyquixt/DyConv | 255193068424aaa83352bee258d34cb8b32b6ee6 |
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['CondConv_Inf']
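# route_func produces per-example expert weights: global average pooling
# squeezes each input to a channel vector, a linear layer maps it to
# num_experts scores, and a sigmoid bounds each weight to (0, 1).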
class route_func(nn.Module):
def __init__(self, in_channels, num_experts):
super().__init__()
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(in_channels, num_experts)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = self.sigmoid(x)
return x
class CondConv_Inf(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, num_experts=3, stride=1, padding=0, groups=1, reduction=16, mode='A'):
super().__init__()
self.num_experts = num_experts
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.groups = groups
# routing function
self.routing_func = route_func(in_channels, num_experts)
# convs: one candidate kernel per expert, registered via ParameterList so
# they are tracked as module parameters, and explicitly initialized
self.convs = nn.ParameterList(
[nn.Parameter(torch.Tensor(out_channels, in_channels, kernel_size, kernel_size)) for _ in range(num_experts)]
)
for w in self.convs:
nn.init.kaiming_normal_(w)
def forward(self, x):
# Fuse the experts into one kernel; the per-example routing weights are
# only meaningful here for batch size 1, as in the test below.
routing_weight = self.routing_func(x) # N x num_experts
convs = []
for i in range(self.num_experts):
route = routing_weight[:, i]
weight = self.convs[i]
weight = weight * route
convs.append(weight)
conv = sum(convs)
output = F.conv2d(x, weight=conv, stride=self.stride, padding=self.padding, groups=self.groups)
return output
def test():
x = torch.randn(1, 16, 32, 32)
conv = CondConv_Inf(16, 64, 3, padding=1)
y = conv(x)
print(y.shape)
conv = CondConv_Inf(16, 64, 3, padding=1)
y = conv(x)
print(y.shape)
# test() | [
"torch.nn.Linear",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool2d",
"torch.Tensor",
"torch.nn.functional.conv2d",
"torch.randn"
] | 1.4.0 | Nyquixt/DyConv | 255193068424aaa83352bee258d34cb8b32b6ee6 |
1.9 | import torch
import torch.nn as nn
class CosineSimilarity:
"""
Cosine similarity between the two vector.
Given two vector v1 and v2, the cosine similarity between the two vector
is the cosine of theta, where the theta is the angle between the two vector on therir inner product space.
The cosine of the theta can be derived from Euclidean dot product of the two vectors.
"""
def __init__(
self,
**kwargs
) -> None:
super(CosineSimilarity, self).__init__()
def __call__(
self,
v1: torch.Tensor,
v2: torch.Tensor,
) -> torch.Tensor:
if v1.dim() == 1:
v1 = v1.unsqueeze(0)
if v2.dim() == 1:
v2 = v2.unsqueeze(0)
v1 = v1.unsqueeze(1)
v2 = v2.unsqueeze(-1)
# per-sample norms (summing over all elements would mix samples in a batch)
return v1.matmul(v2).squeeze(1).squeeze(1).div(
v1.pow(2).sum(dim=(-2, -1)).sqrt() * v2.pow(2).sum(dim=(-2, -1)).sqrt()
)
class AsymmertricSimilarity(nn.Module):
def __init__(
self,
n_dim: int,
**kwargs,
) -> None:
super(AsymmertricSimilarity, self).__init__()
self.Phi_src = nn.Linear(n_dim, n_dim, bias=False)
self.Phi_dst = nn.Linear(n_dim, n_dim, bias=False)
nn.init.xavier_normal_(self.Phi_src.weight)
nn.init.xavier_normal_(self.Phi_dst.weight)
def forward(
self,
z_src: torch.Tensor,
z_dst: torch.Tensor,
) -> torch.Tensor:
return self.Phi_src(z_src).unsqueeze(-2).matmul(self.Phi_dst(z_dst).unsqueeze(-1)).squeeze()
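# The score is s(src, dst) = <Phi_src(z_src), Phi_dst(z_dst)>; it is asymmetric
# because swapping the arguments swaps the two learned projections. Minimal
# check (hypothetical usage):
# sim = AsymmertricSimilarity(n_dim=8)
# sim(torch.randn(4, 8), torch.randn(4, 8)).shape  # torch.Size([4])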
| [
"torch.nn.Linear",
"torch.nn.init.xavier_normal_"
] | 1.9.1 | helloybz/CLANE | 60e6f0503642ac63d3bcde136885e47954067c17 |
1.6 | import os
from typing import Text
import torch
import unittest
import torch.nn as nn
import torch.optim as optim
from allennlp.models import Model
from allennlp.data.vocabulary import Vocabulary
from zsl_kg.class_encoders.auto_gnn import AutoGNN
from zsl_kg.example_encoders.text_encoder import TextEncoder
from zsl_kg.data.snips import SnipsDataset
from allennlp.data.iterators import BasicIterator
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from zsl_kg.common.graph import NeighSampler
from zsl_kg.knowledge_graph.conceptnet import ConceptNetKG
from allennlp.common.tqdm import Tqdm
class BiLinearModel(Model):
def __init__(
self,
vocab: Vocabulary,
example_encoder: object,
class_encoder: object,
joint_dim: int,
bias: bool = False,
):
super().__init__(vocab)
self.example_encoder = example_encoder
self.class_encoder = class_encoder
self.text_joint = nn.Linear(
self.example_encoder.output_dim, joint_dim, bias=bias
)
self.class_joint = nn.Linear(
self.class_encoder.output_dim, joint_dim, bias=bias
)
def forward(self, batch, node_idx, kg):
encoder_out = self.example_encoder(batch)
text_rep = self.text_joint(encoder_out)
# get label representation
class_out = self.class_encoder(node_idx, kg)
class_rep = self.class_joint(class_out)
logits = torch.matmul(text_rep, class_rep.t())
return logits
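# text_rep is (batch, joint_dim) and class_rep is (n_classes, joint_dim), so
# the matmul above yields a (batch, n_classes) matrix of compatibility logits.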
class TestIntentClassification(unittest.TestCase):
def setUp(
self,
):
label_maps = {
"train": ["weather", "music", "restaurant"],
"dev": ["search", "movie"],
"test": ["book", "playlist"],
}
data_path = "tests/test_data/datasets/snips/"
datasets = []
for split in ["train", "dev", "test"]:
labels = label_maps[split]
label_to_idx = dict(
[(label, idx) for idx, label in enumerate(labels)]
)
reader = SnipsDataset(label_to_idx)
path = os.path.join(data_path, f"{split}.txt")
_dataset = reader.read(path)
datasets.append(_dataset)
self.train_dataset, self.dev_dataset, self.test_dataset = datasets
vocab = Vocabulary.from_instances(
self.train_dataset + self.dev_dataset + self.test_dataset
)
# create the iterator
self.iterator = BasicIterator(batch_size=32)
self.iterator.index_with(vocab)
print("Loading GloVe...")
# token embed
token_embed_path = os.path.join(data_path, "word_emb.pt")
token_embedding = torch.load(token_embed_path)
print("word embeddings created...")
word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding})
# create the text encoder
print("Loading the text encoder...")
self.example_encoder = TextEncoder(word_embeddings, 300, 32, 20)
trgcn = {
"input_dim": 300,
"output_dim": 64,
"type": "trgcn",
"gnn": [
{
"input_dim": 300,
"output_dim": 64,
"activation": nn.ReLU(),
"normalize": True,
"sampler": NeighSampler(100, mode="topk"),
"fh": 100,
},
{
"input_dim": 64,
"output_dim": 64,
"activation": nn.ReLU(),
"normalize": True,
"sampler": NeighSampler(50, mode="topk"),
},
],
}
self.class_encoder = AutoGNN(trgcn)
self.train_graph = ConceptNetKG.load_from_disk(
"tests/test_data/subgraphs/snips/train_graph"
)
node_to_idx = dict(
[(node, idx) for idx, node in enumerate(self.train_graph.nodes)]
)
#
self.train_nodes = torch.tensor(
[
node_to_idx[node]
for node in [
"/c/en/weather",
"/c/en/music",
"/c/en/restaurant",
]
]
)
self.model = BiLinearModel(
vocab, self.example_encoder, self.class_encoder, joint_dim=20
)
self.optimizer = optim.Adam(
self.model.parameters(), lr=1e-03, weight_decay=5e-04
)
self.loss_function = nn.CrossEntropyLoss()
def test_intent_classification_train(self):
self.model.train()
total_batch_loss = 0.0
generator_tqdm = Tqdm.tqdm(
self.iterator(self.train_dataset, num_epochs=1, shuffle=False),
total=self.iterator.get_num_batches(self.train_dataset),
)
for batch in generator_tqdm:
self.optimizer.zero_grad()
logits = self.model(
batch["sentence"], self.train_nodes, self.train_graph
)
loss = self.loss_function(logits, batch["labels"])
total_batch_loss += loss.item()
loss.backward()
self.optimizer.step()
self.assertLessEqual(total_batch_loss, 100.0)
| [
"torch.nn.Linear",
"torch.nn.ReLU",
"torch.tensor",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.6.0 | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 |
1.6 | import unittest
from zsl_kg.common.graph import NeighSampler
import torch
from allennlp.common.params import Params
from zsl_kg.knowledge_graph.conceptnet import ConceptNetKG
from zsl_kg.gnn.attention_agg import AttnAggregator
class TestAttnAggregator(unittest.TestCase):
def setUp(self) -> None:
params = Params({"bidirectional": True})
nodes = [
"/c/en/cat",
"/c/en/dog",
"/c/en/elephant",
]
relations = [
"/r/IsA",
"/r/RelatedTo",
]
# (u, r, v)
edges = [
(0, 0, 1),
(0, 1, 2),
(1, 0, 2),
]
features = torch.randn((3, 10))
self.kg_obj = ConceptNetKG(
nodes,
features,
edges,
relations,
params,
)
self.kg_obj.run_random_walk()
attn_args = {
"features": None,
"input_dim": 10,
"output_dim": 20,
"sampler": NeighSampler(-1, "none"),
"feature_dropout": 0.1,
"leaky_relu_neg_slope": 0.4,
"self_loop": True,
}
self.graph_agg = AttnAggregator(**attn_args)
def test_forward(self):
"""testing forward function from the attention aggregator"""
features = self.graph_agg(torch.tensor([0, 1]), self.kg_obj)
self.assertEqual(features.size(0), 2)
self.assertEqual(features.size(1), 20)
| [
"torch.tensor",
"torch.randn"
] | 1.6.0 | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 |
1.3 | #!/h/haoran/anaconda3/bin/python
import sys
import os
sys.path.append(os.getcwd())
import pandas as pd
import numpy as np
import argparse
import Constants
import torch
import torch.nn as nn
from torch.utils import data
import pickle
from pytorch_pretrained_bert import BertTokenizer, BertModel
from run_classifier_dataset_utils import InputExample, convert_examples_to_features
from pathlib import Path
from tqdm import tqdm
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from gradient_reversal import GradientReversal
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, log_loss, mean_squared_error, classification_report
import random
import json
from pytorch_pretrained_bert.file_utils import WEIGHTS_NAME, CONFIG_NAME
from utils import create_hdf_key, Classifier, get_emb_size, MIMICDataset, extract_embeddings, EarlyStopping, load_checkpoint
from sklearn.model_selection import ParameterGrid
parser = argparse.ArgumentParser('Fine-tunes a pre-trained BERT model on a certain target for one fold. Outputs fine-tuned BERT model and classifier, ' +
'as well as a pickled dictionary mapping id: predicted probability')
parser.add_argument("--df_path",help = 'must have the following columns: seqs, num_seqs, fold, with note_id as index', type=str)
parser.add_argument("--model_path", type=str)
parser.add_argument('--fold_id', help = 'what fold to use as the DEV fold. Dataframe must have a "fold" column',nargs = '+', type=str, dest = 'fold_id', default = [])
parser.add_argument('--target_col_name', help = 'name of target to train on. Must be a column in the dataframe', type=str)
parser.add_argument("--output_dir", help = 'folder to output model/results', type=str)
parser.add_argument('--use_adversary', help = "whether or not to use an adversary. If True, must not have --freeze_bert", action = 'store_true')
parser.add_argument('--lm', help = 'lambda value for the adversary', type = float, default = 1.0)
parser.add_argument('--protected_group', help = 'name of protected group, must be a column in the dataframe', type = str, default = 'insurance')
parser.add_argument('--adv_layers', help = 'number of layers in adversary', type = int, default = 2)
parser.add_argument('--freeze_bert', help = 'freeze all BERT layers and only use pre-trained representation', action = 'store_true')
parser.add_argument('--train_batch_size', help = 'batch size to use for training', type = int)
parser.add_argument('--max_num_epochs', help = 'maximum number of epochs to train for', type = int, default = 20)
parser.add_argument('--es_patience', help = 'patience for the early stopping', type = int, default = 3)
parser.add_argument('--other_fields', help = 'other fields to add, must be columns in df', nargs = '+', type = str, dest = 'other_fields', default = [])
parser.add_argument('--seed', type = int, default = 42, help = 'random seed for initialization')
parser.add_argument('--dropout', type = float, default = 0, help = 'dropout probability for classifier')
parser.add_argument('--lr', type = float, default = 5e-4, help = 'learning rate for BertAdam optimizer')
parser.add_argument('--predictor_layers', type = int, default = 2, help = 'number of layers for classifier, ignored if gridsearch_classifier')
parser.add_argument('--emb_method', default = 'last', const = 'last', nargs = '?', choices = ['last', 'sum4', 'cat4'], help = 'what embedding layer to take')
parser.add_argument('--fairness_def', default = 'demo', const = 'demo', nargs = '?', choices = ['demo', 'odds'], help = 'what fairness definition to use: demographic parity, equality of odds')
parser.add_argument('--task_type', default = 'binary', const = 'binary', nargs = '?', choices = ['binary', 'multiclass', 'regression'], help = 'what type of data the target_col_name is')
parser.add_argument('--save_embs', help = 'save computed embeddings at the end', action = 'store_true')
parser.add_argument('--output_train_stats', help = 'export training set predictions into the dataframe', action = 'store_true')
parser.add_argument('--gridsearch_classifier', help = 'whether to run a grid search over the classifier parameters, using AUPRC as metric', action = 'store_true')
parser.add_argument('--average', help = 'whether to aggregate sequences to a single prediction by simple average, or by using the NYU agg function', action = 'store_true')
parser.add_argument('--gridsearch_c', help = 'whether to run a grid search over the NYU agg c parameter, using AUPRC as metric, only valid if not --average, and --gridsearch_classifier', action = 'store_true')
parser.add_argument('--use_new_mapping', help = 'whether to use new mapping for adversarial training', action = 'store_true')
parser.add_argument('--pregen_emb_path', help = '''if embeddings have been precomputed, can provide a path here (as a pickled dictionary mapping note_id:numpy array).
Will only be used if freeze_bert. note_ids in this dictionary must a be a superset of the note_ids in df_path''', type = str)
parser.add_argument('--overwrite', help = 'whether to overwrite existing model/predictions', action = 'store_true')
args = parser.parse_args()
if os.path.isfile(os.path.join(args.output_dir, 'preds.pkl')) and not args.overwrite:
print("File already exists; exiting.")
sys.exit()
print('Reading dataframe...', flush = True)
df = pd.read_pickle(args.df_path)
if 'note_id' in df.columns:
df = df.set_index('note_id')
tokenizer = BertTokenizer.from_pretrained(args.model_path)
model = BertModel.from_pretrained(args.model_path)
target = args.target_col_name
assert(target in df.columns)
#even if no adversary, must have valid protected group column for code to work
if args.use_adversary:
protected_group = args.protected_group
assert(protected_group in df.columns)
if args.use_new_mapping:
mapping = Constants.newmapping
for i in Constants.drop_groups[protected_group]:
df = df[df[protected_group] != i]
else:
mapping = Constants.mapping
other_fields_to_include = args.other_fields
if args.freeze_bert:
for param in model.parameters():
param.requires_grad = False
assert('fold' in df.columns)
for i in args.fold_id:
assert(i in df['fold'].unique())
assert('test' in df['fold'].unique())
fold_id = args.fold_id
if args.gridsearch_c:
assert(args.task_type == 'binary')
c_grid = [0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.5, 0.7, 1, 1.2, 1.5, 2, 3, 5, 10, 20, 50, 100, 1000]
else:
c_grid = [2]
Path(args.output_dir).mkdir(parents = True, exist_ok = True)
EMB_SIZE = get_emb_size(args.emb_method)
train_df = df[~df.fold.isin(['test', 'NA', *fold_id])]
val_df = df[df.fold.isin(fold_id)]
test_df = df[df.fold == 'test']
def convert_input_example(note_id, text, seqIdx, target, group, other_fields = []):
return InputExample(guid = '%s-%s'%(note_id,seqIdx), text_a = text, text_b = None, label = target, group = mapping[protected_group][group] if args.use_adversary else 0, other_fields = other_fields)
# in training generator, return all folds except this.
# in validation generator, return only this fold
print('Converting input examples to appropriate format...', flush = True)
examples_train = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in train_df.iterrows()
for c, i in enumerate(row.seqs)]
examples_eval = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in val_df.iterrows()
for c, i in enumerate(row.seqs)]
examples_test = [convert_input_example(idx, i, c, row[target], row[protected_group] if args.use_adversary else 0,
[] if len(other_fields_to_include) ==0 else row[other_fields_to_include].values.tolist())
for idx, row in test_df.iterrows()
for c, i in enumerate(row.seqs)]
def convert_examples_to_features_emb(examples, embs):
features = []
for i in examples:
note_id, seq_id = i.guid.split('-')
emb = embs[note_id][int(seq_id), :]
features.append(EmbFeature(emb, y = i.label, guid = i.guid, group = i.group, other_fields = i.other_fields))
return features
class EmbFeature():
def __init__(self, emb, y, guid, group, other_fields):
self.emb = emb
self.y = y
self.guid = guid
self.group = group
self.other_fields = other_fields
class Embdataset(data.Dataset):
def __init__(self, features, gen_type):
self.features = features #list of EmbFeatures
self.gen_type = gen_type
self.length = len(features)
def __len__(self):
return self.length
def __getitem__(self, index):
emb = torch.tensor(self.features[index].emb, dtype = torch.float32)
if args.task_type in ['binary', 'regression']:
y = torch.tensor(self.features[index].y, dtype = torch.float32)
else:
y = torch.tensor(self.features[index].y, dtype = torch.long)
other_fields = self.features[index].other_fields
guid = self.features[index].guid
return emb, y, guid, other_fields
class Discriminator(nn.Module):
def __init__(self, input_dim, num_layers, num_categories, lm):
super(Discriminator, self).__init__()
self.num_layers = num_layers
assert(num_layers >= 1)
self.input_dim = input_dim
self.num_categories = num_categories
self.lm = lm
self.layers = [GradientReversal(lambda_ = lm)]
for c in range(num_layers):
if c != num_layers-1:
self.layers.append(nn.Linear(input_dim // (2**c), input_dim // (2**(c+1))))
self.layers.append(nn.ReLU())
else:
self.layers.append(nn.Linear(input_dim // (2**c), num_categories))
self.layers.append(nn.Softmax(dim = 1)) # normalize over classes, not across the batch
self.layers = nn.ModuleList(self.layers)
def forward(self, x):
for i in range(len(self.layers)):
x = self.layers[i](x)
return x
if args.gridsearch_classifier:
assert(args.freeze_bert)
grid = list(ParameterGrid({
'num_layers': [2,3,4],
'dropout_prob': [0, 0.2],
'decay_rate': [2,4,6]
}))
grid.append({
'num_layers': 1,
'dropout_prob': 0,
'decay_rate': 2
})
for i in grid: # adds extra fields to input arguments
i['input_dim'] = EMB_SIZE + len(other_fields_to_include)
i['task_type'] = args.task_type
else:
grid = [{ # only one parameter combination
'input_dim': EMB_SIZE + len(other_fields_to_include),
'num_layers': args.predictor_layers,
'dropout_prob': args.dropout,
'task_type': args.task_type
}]
if args.task_type == 'multiclass':
for i in grid:
i['multiclass_nclasses'] = len(df[target].unique())
if args.use_adversary:
discriminator = Discriminator(EMB_SIZE + int(args.fairness_def == 'odds'), args.adv_layers, len(mapping[protected_group]), args.lm)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
model.to(device)
if args.use_adversary:
discriminator.to(device)
seed = args.seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed)
if args.task_type == 'binary':
criterion = nn.BCELoss()
elif args.task_type == 'multiclass':
criterion = nn.CrossEntropyLoss()
elif args.task_type == 'regression':
criterion = nn.MSELoss()
criterion_adv = nn.CrossEntropyLoss()
if n_gpu > 1:
model = torch.nn.DataParallel(model)
criterion = torch.nn.DataParallel(criterion)
if args.use_adversary:
discriminator = torch.nn.DataParallel(discriminator)
criterion_adv = torch.nn.DataParallel(criterion_adv)
def get_embs(generator):
'''
given a generator, runs all the data through one pass of the model to calculate embeddings
used when BERT weights are frozen, calculates embeddings first to save compute
'''
features = []
model.eval()
with torch.no_grad():
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
emb = bert_out[c,:].detach().cpu().numpy()
features.append(EmbFeature(emb = emb, y = y[c], guid = i, group = group, other_fields= [i[c] for i in other_vars]))
return features
print('Featurizing examples...', flush = True)
if not args.pregen_emb_path:
features_train = convert_examples_to_features(examples_train,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
features_eval = convert_examples_to_features(examples_eval,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
features_test = convert_examples_to_features(examples_test,
Constants.MAX_SEQ_LEN, tokenizer, output_mode = ('regression' if args.task_type == 'regression' else 'classification'))
training_set = MIMICDataset(features_train, 'train' ,args.task_type)
training_generator = data.DataLoader(training_set, shuffle = True, batch_size = args.train_batch_size, drop_last = True)
val_set = MIMICDataset(features_eval, 'val', args.task_type)
val_generator = data.DataLoader(val_set, shuffle = False, batch_size = args.train_batch_size)
test_set = MIMICDataset(features_test, 'test', args.task_type)
test_generator = data.DataLoader(test_set, shuffle = False, batch_size = args.train_batch_size)
if args.freeze_bert: #only need to precalculate for training and val set
if args.pregen_emb_path:
pregen_embs = pickle.load(open(args.pregen_emb_path, 'rb'))
features_train_embs = convert_examples_to_features_emb(examples_train, pregen_embs)
features_val_embs = convert_examples_to_features_emb(examples_eval, pregen_embs)
features_test_embs = convert_examples_to_features_emb(examples_test, pregen_embs)
else:
features_train_embs = get_embs(training_generator)
features_val_embs = get_embs(val_generator)
features_test_embs = get_embs(test_generator)
training_generator = data.DataLoader(Embdataset(features_train_embs, 'train'), shuffle = True, batch_size = args.train_batch_size, drop_last = True)
val_generator = data.DataLoader(Embdataset(features_val_embs, 'val'), shuffle = False, batch_size = args.train_batch_size)
test_generator= data.DataLoader(Embdataset(features_test_embs, 'test'), shuffle = False, batch_size = args.train_batch_size)
num_train_epochs = args.max_num_epochs
learning_rate = args.lr
num_train_optimization_steps = len(training_generator) * num_train_epochs
warmup_proportion = 0.1
PREDICTOR_CHECKPOINT_PATH = os.path.join(args.output_dir, 'predictor.chkpt')
MODEL_CHECKPOINT_PATH = os.path.join(args.output_dir, 'model.chkpt')
grid_auprcs = []
es_models = []
optimal_cs = []
actual_val = val_df[target]
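# Aggregation of per-sequence probabilities into one note-level score: a blend
# of the max and the mean weighted by the sequence count relative to c, so
# large c approaches the plain max and small c approaches the mean.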
def merge_probs(probs, c):
return (np.max(probs) + np.mean(probs)*len(probs)/float(c))/(1+len(probs)/float(c))
def avg_probs(probs):
return np.mean(probs)
def avg_probs_multiclass(probs):
return np.argmax(np.mean(probs, axis = 0))
def merge_regression(preds):
return np.mean(preds)
def evaluate_on_set(generator, predictor, emb_gen = False, c_val=2):
'''
Input: a pytorch data loader, whether the generator is an embedding or text generator
Outputs:
prediction_dict: a dictionary mapping note_id (str) to list of predicted probabilities
merged_preds: a dictionary mapping note_id (str) to a single merged probability
embs: a dictionary mapping note_id (str) to a numpy 2d array (shape num_seq * 768)
'''
model.eval()
predictor.eval()
if generator.dataset.gen_type == 'val':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in val_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in val_df.iterrows()}
elif generator.dataset.gen_type == 'test':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in test_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in test_df.iterrows()}
elif generator.dataset.gen_type == 'train':
prediction_dict = {str(idx): [0]*row['num_seqs'] for idx, row in train_df.iterrows()}
embs = {str(idx):np.zeros(shape = (row['num_seqs'], EMB_SIZE)) for idx, row in train_df.iterrows()}
if emb_gen:
with torch.no_grad():
for emb_batch, y, guid, other_vars in generator: # renamed so the embs dict above is not clobbered
emb_batch = emb_batch.to(device)
y = y.to(device)
for i in other_vars:
emb_batch = torch.cat([emb_batch, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(emb_batch).detach().cpu()
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
if args.task_type in ['binary', 'regression']:
prediction_dict[note_id][int(seq_id)] = preds[c].item()
else:
prediction_dict[note_id][int(seq_id)] = preds[c,:].numpy()
else:
with torch.no_grad():
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out).detach().cpu()
for c,i in enumerate(guid):
note_id, seq_id = i.split('-')
if args.task_type in ['binary', 'regression']:
prediction_dict[note_id][int(seq_id)] = preds[c].item()
else:
prediction_dict[note_id][int(seq_id)] = preds[c,:].numpy()
embs[note_id][int(seq_id), :] = bert_out[c,:EMB_SIZE].detach().cpu().numpy()
merged_preds = merge_preds(prediction_dict, c_val)
return (prediction_dict, merged_preds, embs)
def merge_preds(prediction_dict, c=2):
merged_preds = {}
for i in prediction_dict:
if args.task_type == 'binary':
if args.average:
merged_preds[i] = avg_probs(prediction_dict[i])
else:
merged_preds[i] = merge_probs(prediction_dict[i], c)
elif args.task_type == 'regression':
merged_preds[i] = merge_regression(prediction_dict[i])
elif args.task_type == 'multiclass':
merged_preds[i] = avg_probs_multiclass(np.array(prediction_dict[i]))
return merged_preds
for predictor_params in grid:
print(predictor_params, flush = True)
predictor = Classifier(**predictor_params).to(device)
if n_gpu > 1:
predictor = torch.nn.DataParallel(predictor)
if not(args.freeze_bert) and not(args.use_adversary):
param_optimizer = list(model.named_parameters()) + list(predictor.named_parameters())
elif args.freeze_bert and not(args.use_adversary):
param_optimizer = list(predictor.named_parameters())
elif args.freeze_bert and args.use_adversary:
raise Exception('No purpose in using an adversary if BERT layers are frozen')
else:
param_optimizer = list(model.named_parameters()) + list(predictor.named_parameters()) + list(discriminator.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
es = EarlyStopping(patience = args.es_patience)
optimizer = BertAdam(optimizer_grouped_parameters,
lr=learning_rate,
warmup=warmup_proportion,
t_total=num_train_optimization_steps)
warmup_linear = WarmupLinearSchedule(warmup=warmup_proportion,
t_total=num_train_optimization_steps)
for epoch in range(1, num_train_epochs+1):
# training
if not args.freeze_bert:
model.train()
else:
model.eval()
predictor.train()
if args.use_adversary:
discriminator.train()
running_loss = 0.0
num_steps = 0
with tqdm(total=len(training_generator), desc="Epoch %s"%epoch) as pbar:
if not args.freeze_bert:
for input_ids, input_mask, segment_ids, y, group, _, other_vars in training_generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out)
loss = criterion(preds, y)
if args.use_adversary:
adv_input = bert_out[:, :-len(other_vars)]
if args.fairness_def == 'odds':
adv_input = torch.cat([adv_input, y.unsqueeze(dim = 1)], 1)
adv_pred = discriminator(adv_input)
adv_loss = criterion_adv(adv_pred, group)
if n_gpu > 1:
loss = loss.mean()
if args.use_adversary:
adv_loss = adv_loss.mean()
if args.use_adversary:
loss += adv_loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
num_steps += 1
running_loss += loss.item()
mean_loss = running_loss/num_steps
pbar.update(1)
pbar.set_postfix_str("Running Training Loss: %.5f" % mean_loss)
else: # if frozen, use precomputed embeddings to save time
for embs, y,_, other_vars in training_generator:
embs = embs.to(device)
y = y.to(device)
for i in other_vars:
embs = torch.cat([embs, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(embs)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
loss.backward()
optimizer.step()
optimizer.zero_grad()
num_steps += 1
running_loss += loss.item()
mean_loss = running_loss/num_steps
pbar.update(1)
pbar.set_postfix_str("Running Training Loss: %.5f" % mean_loss)
# evaluate here
model.eval()
predictor.eval()
val_loss = 0
with torch.no_grad():
if args.freeze_bert:
checkpoints = {PREDICTOR_CHECKPOINT_PATH: predictor}
for embs, y, guid, other_vars in val_generator:
embs = embs.to(device)
y = y.to(device)
for i in other_vars:
embs = torch.cat([embs, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(embs)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
val_loss += loss.item()
val_loss /= len(val_generator)
# early stopping uses val loss as metric
# model selection/c selection uses AUPRC as metric
else:
checkpoints = {PREDICTOR_CHECKPOINT_PATH: predictor,
MODEL_CHECKPOINT_PATH: model}
for input_ids, input_mask, segment_ids, y, group, guid, other_vars in val_generator:
input_ids = input_ids.to(device)
segment_ids = segment_ids.to(device)
input_mask = input_mask.to(device)
y = y.to(device)
group = group.to(device)
hidden_states, _ = model(input_ids, token_type_ids = segment_ids, attention_mask = input_mask)
bert_out = extract_embeddings(hidden_states, args.emb_method)
for i in other_vars:
bert_out = torch.cat([bert_out, i.float().unsqueeze(dim = 1).to(device)], 1)
preds = predictor(bert_out)
loss = criterion(preds, y)
if n_gpu > 1:
loss = loss.mean()
if args.use_adversary:
adv_loss = adv_loss.mean()
if args.use_adversary:
loss += adv_loss
val_loss += loss.item()
val_loss /= len(val_generator)
print('Val loss: %s'%val_loss, flush = True)
es(val_loss, checkpoints)
if es.early_stop:
break
print('Trained for %s epochs' % epoch)
predictor.load_state_dict(load_checkpoint(PREDICTOR_CHECKPOINT_PATH))
os.remove(PREDICTOR_CHECKPOINT_PATH)
if not args.freeze_bert:
model.load_state_dict(load_checkpoint(MODEL_CHECKPOINT_PATH))
os.remove(MODEL_CHECKPOINT_PATH)
if args.gridsearch_classifier:
        auprcs = []  # one AUPRC value for each c in c_grid
prediction_dict, _, _ = evaluate_on_set(val_generator, predictor, emb_gen = args.freeze_bert)
for c_val in c_grid:
merged_preds_val = merge_preds(prediction_dict, c_val)
merged_preds_val_list = [merged_preds_val[str(i)] for i in actual_val.index]
auprcs.append(average_precision_score(actual_val.values.astype(int), merged_preds_val_list))
print(auprcs, flush = True)
print(c_grid, flush = True)
idx_max = np.argmax(auprcs)
grid_auprcs.append(auprcs[idx_max])
es_models.append(predictor.cpu())
optimal_cs.append(c_grid[idx_max])
print('val AUPRC:%.5f optimal c: %s' %(auprcs[idx_max], c_grid[idx_max] ))
# find best predictor here, move back to cpu
if args.gridsearch_classifier:
idx_max = np.argmax(grid_auprcs)
predictor = es_models[idx_max].to(device)
opt_c = optimal_cs[idx_max]
else:
opt_c = 2.0
# evaluate on val set
prediction_dict_val, merged_preds_val, embs_val = evaluate_on_set(val_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
merged_preds_val_list = [merged_preds_val[str(i)] for i in actual_val.index]
if args.task_type == 'binary':
acc = accuracy_score(actual_val.values.astype(int), np.array(merged_preds_val_list).round())
auprc = average_precision_score(actual_val.values.astype(int), merged_preds_val_list)
ll = log_loss(actual_val.values.astype(int), merged_preds_val_list)
roc = roc_auc_score(actual_val.values.astype(int), merged_preds_val_list)
print('Accuracy: %.5f' % acc)
print('AUPRC: %.5f' % auprc)
print('Log Loss: %.5f' % ll)
print('AUROC: %.5f' % roc)
elif args.task_type == 'regression':
mse = mean_squared_error(actual_val, merged_preds_val_list)
print('MSE: %.5f' % mse)
elif args.task_type == 'multiclass':
report = classification_report(actual_val.values.astype(int), np.array(merged_preds_val_list))
print(report)
prediction_dict_test, merged_preds_test, embs_test = evaluate_on_set(test_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
if args.output_train_stats:
prediction_dict_train, merged_preds_train, embs_train = evaluate_on_set(training_generator, predictor, emb_gen = args.freeze_bert, c_val = opt_c)
else:
merged_preds_train, embs_train = {}, {}
# save predictor
json.dump(predictor_params, open(os.path.join(args.output_dir, 'predictor_params.json'), 'w'))
torch.save(predictor.state_dict(), os.path.join(args.output_dir, 'predictor.pt'))
# save model
if not args.freeze_bert:
    model_to_save = model.module if hasattr(model, 'module') else model  # only save the model itself
output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(args.output_dir)
# save args
json.dump(vars(args), open(os.path.join(args.output_dir, 'argparse_args.json'), 'w'))
#saves embeddings
if args.save_embs:
embs = {**embs_val, **embs_test, **embs_train}
pickle.dump(embs, open(os.path.join(args.output_dir, 'embs.pkl'), 'wb'))
rough_preds = {**merged_preds_val, **merged_preds_test, **merged_preds_train}
pickle.dump(rough_preds, open(os.path.join(args.output_dir, 'preds.pkl'), 'wb'))
# saves gridsearch info
pickle.dump({
'grid_auprcs':grid_auprcs,
'optimal_cs': optimal_cs,
'opt_c': opt_c
}, open(os.path.join(args.output_dir, 'gs_info.pkl'), 'wb'))
| [
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss",
"torch.nn.DataParallel",
"torch.nn.Softmax",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.tensor",
"torch.cuda.manual_seed_all",
"torch.cuda.device_count",
"torch.nn.ReLU",
"torch.nn.MSELoss",
"torch.no_grad"
] | 1.3.0 | MLforHealth/HurtfulWords | b59181585aa70152f0fbe79fa2611ded928bf9f1 |
1.4 | # Copyright (c) 2020, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import platform
import numpy as np
from torch import Tensor, FloatTensor
class Spectrogram(object):
"""
    Create a spectrogram from an audio signal.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
feature_extract_by (str): which library to use for feature extraction (default: torch)
"""
def __init__(
self,
sample_rate: int = 16000,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'torch'
) -> None:
self.sample_rate = sample_rate
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'kaldi':
            # torchaudio is only supported on Linux and macOS
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
try:
import torchaudio
except ImportError:
raise ImportError("Please install torchaudio: `pip install torchaudio`")
self.transforms = torchaudio.compliance.kaldi.spectrogram
self.frame_length = frame_length
self.frame_shift = frame_shift
else:
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
def __call__(self, signal):
if self.feature_extract_by == 'kaldi':
spectrogram = self.transforms(
Tensor(signal).unsqueeze(0),
frame_length=self.frame_length,
frame_shift=self.frame_shift,
sample_frequency=self.sample_rate,
).transpose(0, 1)
else:
spectrogram = torch.stft(
Tensor(signal), self.n_fft, hop_length=self.hop_length,
win_length=self.n_fft, window=torch.hamming_window(self.n_fft),
center=False, normalized=False, onesided=True
)
spectrogram = (spectrogram[:, :, 0].pow(2) + spectrogram[:, :, 1].pow(2)).pow(0.5)
spectrogram = np.log1p(spectrogram.numpy())
return spectrogram
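# Hedged usage sketch (not part of the original file): `signal` is assumed to be
# a 1-D numpy waveform sampled at 16 kHz.
#
#   transform = Spectrogram(sample_rate=16000, frame_length=20, frame_shift=10)
#   spec = transform(signal)  # log1p magnitude spectrogram, a numpy array of
#                             # shape (n_fft//2 + 1, n_frames) with the default
#                             # 'torch' backend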
class MelSpectrogram(object):
"""
Create MelSpectrogram for a raw audio signal. This is a composition of Spectrogram and MelScale.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
        n_mels (int): Number of mel filterbanks. (Default: 80)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
feature_extract_by (str): which library to use for feature extraction (default: librosa)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mels: int = 80,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'librosa'
) -> None:
self.sample_rate = sample_rate
self.n_mels = n_mels
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'torchaudio':
            # torchaudio is only supported on Linux and macOS
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
import torchaudio
self.amplitude_to_db = torchaudio.transforms.AmplitudeToDB()
self.transforms = torchaudio.transforms.MelSpectrogram(
sample_rate=sample_rate,
win_length=frame_length,
hop_length=self.hop_length,
n_fft=self.n_fft,
n_mels=n_mels,
)
else:
import librosa
self.transforms = librosa.feature.melspectrogram
self.amplitude_to_db = librosa.amplitude_to_db
def __call__(self, signal):
if self.feature_extract_by == 'torchaudio':
melspectrogram = self.transforms(Tensor(signal))
melspectrogram = self.amplitude_to_db(melspectrogram)
melspectrogram = melspectrogram.numpy()
elif self.feature_extract_by == 'librosa':
melspectrogram = self.transforms(
signal,
sr=self.sample_rate,
n_mels=self.n_mels,
n_fft=self.n_fft,
hop_length=self.hop_length,
)
melspectrogram = self.amplitude_to_db(melspectrogram, ref=np.max)
else:
raise ValueError("Unsupported library : {0}".format(self.feature_extract_by))
return melspectrogram
class MFCC(object):
"""
Create the Mel-frequency cepstrum coefficients (MFCCs) from an audio signal.
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
n_mfcc (int): Number of mfc coefficients to retain. (Default: 40)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
        feature_extract_by (str): which library to use for feature extraction (default: librosa)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mfcc: int = 40,
frame_length: int = 20,
frame_shift: int = 10,
feature_extract_by: str = 'librosa'
) -> None:
self.sample_rate = sample_rate
self.n_mfcc = n_mfcc
self.n_fft = int(round(sample_rate * 0.001 * frame_length))
self.hop_length = int(round(sample_rate * 0.001 * frame_shift))
self.feature_extract_by = feature_extract_by.lower()
if self.feature_extract_by == 'torchaudio':
            # torchaudio is only supported on Linux and macOS
assert platform.system().lower() == 'linux' or platform.system().lower() == 'darwin'
import torchaudio
self.transforms = torchaudio.transforms.MFCC(
sample_rate=sample_rate,
n_mfcc=n_mfcc,
log_mels=True,
win_length=frame_length,
hop_length=self.hop_length,
n_fft=self.n_fft,
)
else:
import librosa
self.transforms = librosa.feature.mfcc
def __call__(self, signal):
if self.feature_extract_by == 'torchaudio':
mfcc = self.transforms(FloatTensor(signal))
mfcc = mfcc.numpy()
elif self.feature_extract_by == 'librosa':
mfcc = self.transforms(
y=signal,
sr=self.sample_rate,
n_mfcc=self.n_mfcc,
n_fft=self.n_fft,
hop_length=self.hop_length,
)
else:
raise ValueError("Unsupported library : {0}".format(self.feature_extract_by))
return mfcc
class FilterBank(object):
"""
Create a fbank from a raw audio signal. This matches the input/output of Kaldi’s compute-fbank-feats
Args:
sample_rate (int): Sample rate of audio signal. (Default: 16000)
        n_mels (int): Number of mel filterbanks. (Default: 80)
frame_length (int): frame length for spectrogram (ms) (Default : 20)
frame_shift (int): Length of hop between STFT windows. (ms) (Default: 10)
"""
def __init__(
self,
sample_rate: int = 16000,
n_mels: int = 80,
frame_length: int = 20,
frame_shift: int = 10
) -> None:
import torchaudio
self.transforms = torchaudio.compliance.kaldi.fbank
self.sample_rate = sample_rate
self.n_mels = n_mels
self.frame_length = frame_length
self.frame_shift = frame_shift
def __call__(self, signal):
return self.transforms(
Tensor(signal).unsqueeze(0),
num_mel_bins=self.n_mels,
frame_length=self.frame_length,
frame_shift=self.frame_shift,
).transpose(0, 1).numpy()
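# All four feature transforms above share one interface: construct once, then
# call with a 1-D waveform to get a (feature_dim, n_frames) array (numpy for
# the default backends). A hedged sketch:
#
#   fbank = FilterBank(sample_rate=16000, n_mels=80)
#   feats = fbank(signal)  # shape (80, n_frames)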
| [
"torch.hamming_window",
"torch.FloatTensor",
"torch.Tensor"
] | 1.4.0 | jungwook518/KoSpeech | 77b8daf2f821c8fa755e937096fdbc3536cafd81 |
1.4 | import torch
import numpy as np
from hipo_rank import Embeddings, SentenceEmbeddings, SectionEmbedding, \
PairIndices, SentenceSimilarities, SectionSimilarities, Similarities
from typing import List, Tuple
from numpy import ndarray
class CosSimilarity:
def __init__(self, threshold = 0):
self.threshold = threshold
def _compute_similarities(self, embeds1: ndarray, embeds2: ndarray) -> ndarray:
embeds1 = torch.from_numpy(embeds1)
embeds2 = torch.from_numpy(embeds2)
similarities = torch.cosine_similarity(embeds1, embeds2).numpy()
similarities = similarities / 2 + 0.5 # normalize to a range [0,1]
similarities = np.clip(similarities, self.threshold, 1)
return similarities
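    # e.g. raw cosine values [-1.0, 0.0, 1.0] map to [0.0, 0.5, 1.0] before
    # being clipped to [self.threshold, 1]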
def _get_pairwise_similarities(self, embeds: ndarray) -> Tuple[ndarray, PairIndices]:
pair_indices = self._get_pair_indices(len(embeds))
pair_indices_i = [x[0] for x in pair_indices]
pair_indices_j = [x[1] for x in pair_indices]
similarities = self._compute_similarities(embeds[pair_indices_i], embeds[pair_indices_j])
return similarities, pair_indices
def _get_pair_indices(self, num_nodes: int) -> PairIndices:
pair_indices = []
for i in range(num_nodes):
for j in range(i+1, num_nodes):
pair_indices += [(i, j)]
return pair_indices
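    # example: _get_pair_indices(3) enumerates the strict upper triangle,
    # yielding [(0, 1), (0, 2), (1, 2)]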
def get_similarities(self, embeds: Embeddings):
sent_to_sent = []
for sent_embeds in embeds.sentence:
id = sent_embeds.id
e = sent_embeds.embeddings
similarities, pair_indices = self._get_pairwise_similarities(e)
directions = ["undirected" for _ in pair_indices]
sent_to_sent += [SentenceSimilarities(id, similarities, pair_indices, directions)]
sent_to_sect = []
sect_embeds = np.stack([s.embedding for s in embeds.section])
num_sect = len(sect_embeds)
for sent_embeds in embeds.sentence:
# TODO: factor out pair indices for one and two matrices
pair_indices = []
num_sent = len(sent_embeds.embeddings)
for i in range(num_sent):
for j in range(num_sect):
pair_indices += [(i,j)]
pair_indices_i = [x[0] for x in pair_indices]
pair_indices_j = [x[1] for x in pair_indices]
embeds1 = sent_embeds.embeddings[pair_indices_i]
embeds2 = sect_embeds[pair_indices_j]
similarities = self._compute_similarities(embeds1, embeds2)
id = sent_embeds.id
directions = ["undirected" for _ in pair_indices]
sent_to_sect += [SentenceSimilarities(id, similarities, pair_indices, directions)]
similarities, pair_indices = self._get_pairwise_similarities(sect_embeds)
directions = ["undirected" for _ in pair_indices]
sect_to_sect = SectionSimilarities(similarities, pair_indices, directions)
return Similarities(sent_to_sent, sect_to_sect, sent_to_sect)
| [
"torch.cosine_similarity",
"torch.from_numpy"
] | 1.4 | mukul-mehta/HipoRank | b44490c4f1f3e0ff8015e3eb0f2b1955947dfe80 |
1.9 | import torch
import torch.nn as nn
from vformer.functional import PatchMerging
from vformer.utils import ENCODER_REGISTRY
encoder_modules = ENCODER_REGISTRY.get_list()
def test_VanillaEncoder():
test_tensor = torch.randn(2, 65, 1024)
encoder = ENCODER_REGISTRY.get("VanillaEncoder")(
embedding_dim=1024, depth=6, num_heads=16, head_dim=64, mlp_dim=2048
)
out = encoder(test_tensor)
assert out.shape == test_tensor.shape # shape remains same
del encoder, test_tensor
def test_SwinEncoder():
test_tensor = torch.randn(3, 3136, 96)
# when downsampled
encoder = ENCODER_REGISTRY.get("SwinEncoder")(
dim=96,
input_resolution=(224 // 4, 224 // 4),
depth=2,
num_heads=3,
window_size=7,
downsample=PatchMerging,
)
out = encoder(test_tensor)
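    # PatchMerging halves each spatial side (56x56 -> 28x28, i.e. 3136 -> 784
    # tokens) and doubles the channel dim (96 -> 192), hence (3, 784, 192)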
assert out.shape == (3, 784, 192)
del encoder
# when not downsampled
encoder = ENCODER_REGISTRY.get("SwinEncoder")(
dim=96,
input_resolution=(224 // 4, 224 // 4),
depth=2,
num_heads=3,
window_size=7,
downsample=None,
use_checkpoint=True,
)
out = encoder(test_tensor)
assert out.shape == (3, 3136, 96)
del encoder
encoder_block = ENCODER_REGISTRY.get("SwinEncoderBlock")(
dim=96, input_resolution=(224 // 4, 224 // 4), num_heads=3, window_size=7
)
out = encoder_block(test_tensor)
assert out.shape == test_tensor.shape
del encoder_block
def test_PVTEncoder():
test_tensor = torch.randn(4, 3136, 64)
encoder = ENCODER_REGISTRY.get("PVTEncoder")(
dim=64,
depth=3,
qkv_bias=True,
qk_scale=0.0,
p_dropout=0.0,
attn_dropout=0.1,
drop_path=[0.0] * 3,
act_layer=nn.GELU,
sr_ratio=1,
linear=False,
use_dwconv=False,
num_heads=1,
mlp_ratio=4,
)
out = encoder(test_tensor, H=56, W=56)
assert out.shape == test_tensor.shape
del encoder
def test_CrossEncoder():
test_tensor1 = torch.randn(3, 5, 128)
test_tensor2 = torch.randn(3, 5, 256)
encoder = ENCODER_REGISTRY.get("CrossEncoder")(128, 256)
out = encoder(test_tensor1, test_tensor2)
assert out[0].shape == test_tensor1.shape
assert out[1].shape == test_tensor2.shape
del encoder
| [
"torch.randn"
] | 1.9.0 | aditya-agrawal-30502/vformer | e1f4950f980238442ff1dc39a8f0791e4fbc9dac |
1.1 | import glob
import os
import torch
import tqdm
import time
from torch.nn.utils import clip_grad_norm_
from pcdet.utils import common_utils, commu_utils
def train_one_epoch(cur_epoch,model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
if total_it_each_epoch == len(train_loader):
dataloader_iter = iter(train_loader)
if rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
data_time = common_utils.AverageMeter()
batch_time = common_utils.AverageMeter()
forward_time = common_utils.AverageMeter()
for cur_it in range(total_it_each_epoch):
end = time.time()
try:
batch = next(dataloader_iter)
except StopIteration:
dataloader_iter = iter(train_loader)
batch = next(dataloader_iter)
print('new iters')
data_timer = time.time()
cur_data_time = data_timer - end
lr_scheduler.step(accumulated_iter)
try:
cur_lr = float(optimizer.lr)
except:
cur_lr = optimizer.param_groups[0]['lr']
if tb_log is not None:
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
model.train()
optimizer.zero_grad()
loss, tb_dict, disp_dict = model_func(model, batch)
forward_timer = time.time()
cur_forward_time = forward_timer - data_timer
loss.backward()
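        # clip the global gradient norm before stepping to stabilize training;
        # the threshold comes from optim_cfg.GRAD_NORM_CLIP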
clip_grad_norm_(model.parameters(), optim_cfg.GRAD_NORM_CLIP)
optimizer.step()
accumulated_iter += 1
cur_batch_time = time.time() - end
# average reduce
avg_data_time = commu_utils.average_reduce_value(cur_data_time)
avg_forward_time = commu_utils.average_reduce_value(cur_forward_time)
avg_batch_time = commu_utils.average_reduce_value(cur_batch_time)
# log to console and tensorboard
if rank == 0:
data_time.update(avg_data_time)
forward_time.update(avg_forward_time)
batch_time.update(avg_batch_time)
disp_dict.update({
'loss': loss.item(), 'lr': cur_lr, 'd_time': f'{data_time.val:.2f}({data_time.avg:.2f})',
'f_time': f'{forward_time.val:.2f}({forward_time.avg:.2f})', 'b_time': f'{batch_time.val:.2f}({batch_time.avg:.2f})'
})
pbar.update()
pbar.set_postfix(dict(total_it=accumulated_iter))
tbar.set_postfix(disp_dict)
tbar.refresh()
if tb_log is not None:
tb_log.add_scalar('train/loss', loss, cur_epoch)
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
for key, val in tb_dict.items():
tb_log.add_scalar('train/' + key, val, accumulated_iter)
if rank == 0:
pbar.close()
return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None,
lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
merge_all_iters_to_one_epoch=False):
accumulated_iter = start_iter
with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
total_it_each_epoch = len(train_loader)
if merge_all_iters_to_one_epoch:
assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
dataloader_iter = iter(train_loader)
for cur_epoch in tbar:
if train_sampler is not None:
train_sampler.set_epoch(cur_epoch)
# train one epoch
if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
cur_scheduler = lr_warmup_scheduler
else:
cur_scheduler = lr_scheduler
accumulated_iter = train_one_epoch(cur_epoch,
model, optimizer, train_loader, model_func,
lr_scheduler=cur_scheduler,
accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
rank=rank, tbar=tbar, tb_log=tb_log,
leave_pbar=(cur_epoch + 1 == total_epochs),
total_it_each_epoch=total_it_each_epoch,
dataloader_iter=dataloader_iter
)
# save trained model
trained_epoch = cur_epoch + 1
if trained_epoch % ckpt_save_interval == 0 and rank == 0:
ckpt_list = glob.glob(str(ckpt_save_dir / 'checkpoint_epoch_*.pth'))
ckpt_list.sort(key=os.path.getmtime)
if ckpt_list.__len__() >= max_ckpt_save_num:
for cur_file_idx in range(0, len(ckpt_list) - max_ckpt_save_num + 1):
os.remove(ckpt_list[cur_file_idx])
ckpt_name = ckpt_save_dir / ('checkpoint_epoch_%d' % trained_epoch)
save_checkpoint(
checkpoint_state(model, optimizer, trained_epoch, accumulated_iter), filename=ckpt_name,
)
def model_state_to_cpu(model_state):
model_state_cpu = type(model_state)() # ordered dict
for key, val in model_state.items():
model_state_cpu[key] = val.cpu()
return model_state_cpu
def checkpoint_state(model=None, optimizer=None, epoch=None, it=None):
optim_state = optimizer.state_dict() if optimizer is not None else None
if model is not None:
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_state = model_state_to_cpu(model.module.state_dict())
else:
model_state = model.state_dict()
else:
model_state = None
try:
import pcdet
version = 'pcdet+' + pcdet.__version__
except:
version = 'none'
return {'epoch': epoch, 'it': it, 'model_state': model_state, 'optimizer_state': optim_state, 'version': version}
def save_checkpoint(state, filename='checkpoint'):
if False and 'optimizer_state' in state:
optimizer_state = state['optimizer_state']
state.pop('optimizer_state', None)
optimizer_filename = '{}_optim.pth'.format(filename)
torch.save({'optimizer_state': optimizer_state}, optimizer_filename)
filename = '{}.pth'.format(filename)
torch.save(state, filename)
| [
"torch.save"
] | 1.1 | Bilal-A-Qureshi/OpenPCDet | 633c6026e56fc3fb2112f2a9f7ce08a21619e78f |
1.9 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 5, 2)
self.conv2 = nn.Conv2d(32, 64, 7, 3)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
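        # 36864 = 64 feature maps x 24 x 24: the size this conv stack produces
        # for a roughly 300x300 single-channel input (inferred from the layer
        # arithmetic; the expected input size is not stated in the file)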
self.fc1 = nn.Linear(36864, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
| [
"torch.nn.Linear",
"torch.flatten",
"torch.nn.functional.log_softmax",
"torch.nn.Conv2d",
"torch.nn.functional.relu",
"torch.nn.functional.max_pool2d",
"torch.nn.Dropout2d"
] | 1.9.0 | evanaze/captcha | 62d226742be7f4091e54a7ea960703812bd44fd5 |
1.6 | import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
# helpers
def pair(t):
return t if isinstance(t, tuple) else (t, t)
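# e.g. pair(224) -> (224, 224); pair((224, 112)) -> (224, 112)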
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(hidden_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
) if project_out else nn.Identity()
def forward(self, x):
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout = 0.):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, heads = heads, dim_head = dim_head, dropout = dropout)),
PreNorm(dim, FeedForward(dim, mlp_dim, dropout = dropout))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x) + x
x = ff(x) + x
return x
class ViT(nn.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim,
pool = 'cls', channels = 3, dim_head = 64, dropout = 0., emb_dropout = 0.):
super().__init__()
image_height, image_width = pair(image_size)
patch_height, patch_width = pair(patch_size)
assert image_height % patch_height == 0 and image_width % patch_width == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_height // patch_height) * (image_width // patch_width)
patch_dim = channels * patch_height * patch_width
assert pool in {'cls', 'mean'}, 'pool type must be either cls (cls token) or mean (mean pooling)'
self.to_patch_embedding = nn.Sequential(
Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = patch_height, p2 = patch_width),
nn.Linear(patch_dim, dim),
)
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.dropout = nn.Dropout(emb_dropout)
self.transformer = Transformer(dim, depth, heads, dim_head, mlp_dim, dropout)
self.pool = pool
self.to_latent = nn.Identity()
self.mlp_head = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, img):
x = self.to_patch_embedding(img)
b, n, _ = x.shape
cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
x = torch.cat((cls_tokens, x), dim=1)
# TODO maybe no need :(n+1), just self.pos_embedding is OK.
x += self.pos_embedding[:, :(n + 1)]
x = self.dropout(x)
# x.shape, b, n+1, d
x = self.transformer(x)
x = x.mean(dim = 1) if self.pool == 'mean' else x[:, 0]
x = self.to_latent(x)
return self.mlp_head(x)
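# Hedged usage sketch (assumed, not from the original file):
#
#   v = ViT(image_size=224, patch_size=32, num_classes=10, dim=128,
#           depth=2, heads=4, mlp_dim=256)
#   logits = v(torch.randn(2, 3, 224, 224))  # -> shape (2, 10)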
class AttentionWithMask(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads # 64 x 8
self.heads = heads # 8
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, mask = None):
        # n is patch_num + 1, where patch_num = (img_size / patch_size) ** 2.
        # Assuming img_size 224 and patch_size 32: 224 / 32 = 7, so n = 7*7 + 1 = 50.
        # (YOLOv1 also uses a 7x7 grid.)
b, n, _, h = *x.shape, self.heads # n=50,h=8,
        # self.to_qkv(x) has shape [b, 50, 64x8x3]; chunk splits it into 3 parts,
        # so qkv is a 3-tuple whose elements each have shape [b, 50, 64x8]
qkv = self.to_qkv(x).chunk(3, dim = -1)
        # reshape each part from [b, 50, 64x8] to [b, 8, 50, 64]
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
        # q and k are both [b, 8, 50, 64]: read 50 as the token count and 64 as
        # the feature dim, so dots.shape = [b, 8, 50, 50]
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
        # the mask branch below can be ignored for the unmasked case
mask_value = -torch.finfo(dots.dtype).max
if mask is not None:
mask = F.pad(mask.flatten(1), (1, 0), value = True)
assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'
mask = mask[:, None, :] * mask[:, :, None]
dots.masked_fill_(~mask, mask_value)
del mask
        # softmax over the last dimension of the [b, 8, 50, 50] scores
attn = dots.softmax(dim=-1)
        # attn holds the self-attention weights; dotting it with v gives out of
        # shape [b, 8, 50, 64]
out = torch.einsum('bhij,bhjd->bhid', attn, v)
        # out becomes [b, 50, 8x64]
out = rearrange(out, 'b h n d -> b n (h d)')
        # the output projection maps out back to [b, 50, dim]
out = self.to_out(out)
return out
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.nn.Identity",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.Softmax",
"torch.einsum",
"torch.finfo",
"torch.nn.GELU",
"torch.randn"
] | 1.6 | rocke2020/vit-pytorch | a1f828da0c952fa56a90a71f7c88c8e0025c1d42 |
1.4 | import torch
import os
import numpy as np
import cv2
from PIL import Image
from collections import defaultdict
from tqdm import tqdm
import mcubes
import open3d as o3d
from plyfile import PlyData, PlyElement
from argparse import ArgumentParser
from models.rendering import *
from models.nerf import *
from utils import load_ckpt
from datasets import dataset_dict
torch.backends.cudnn.benchmark = True
def get_opts():
parser = ArgumentParser()
parser.add_argument('--root_dir', type=str,
default='/home/ubuntu/data/nerf_example_data/nerf_synthetic/lego',
help='root directory of dataset')
parser.add_argument('--dataset_name', type=str, default='blender',
choices=['blender', 'llff'],
help='which dataset to validate')
parser.add_argument('--scene_name', type=str, default='test',
help='scene name, used as output ply filename')
parser.add_argument('--img_wh', nargs="+", type=int, default=[800, 800],
help='resolution (img_w, img_h) of the image')
parser.add_argument('--N_samples', type=int, default=64,
                        help='number of samples to infer the accumulated opacity')
parser.add_argument('--chunk', type=int, default=32*1024,
help='chunk size to split the input to avoid OOM')
parser.add_argument('--ckpt_path', type=str, required=True,
help='pretrained checkpoint path to load')
parser.add_argument('--N_grid', type=int, default=256,
help='size of the grid on 1 side, larger=higher resolution')
parser.add_argument('--x_range', nargs="+", type=float, default=[-1.0, 1.0],
help='x range of the object')
parser.add_argument('--y_range', nargs="+", type=float, default=[-1.0, 1.0],
                        help='y range of the object')
parser.add_argument('--z_range', nargs="+", type=float, default=[-1.0, 1.0],
                        help='z range of the object')
parser.add_argument('--sigma_threshold', type=float, default=20.0,
                        help='threshold to consider a location occupied')
parser.add_argument('--occ_threshold', type=float, default=0.2,
help='''threshold to consider a vertex is occluded.
larger=fewer occluded pixels''')
#### method using vertex normals ####
parser.add_argument('--use_vertex_normal', action="store_true",
help='use vertex normals to compute color')
parser.add_argument('--N_importance', type=int, default=64,
                        help='number of fine samples to infer the accumulated opacity')
parser.add_argument('--near_t', type=float, default=1.0,
help='the near bound factor to start the ray')
return parser.parse_args()
@torch.no_grad()
def f(models, embeddings, rays, N_samples, N_importance, chunk, white_back):
"""Do batched inference on rays using chunk."""
B = rays.shape[0]
results = defaultdict(list)
for i in range(0, B, chunk):
rendered_ray_chunks = \
render_rays(models,
embeddings,
rays[i:i+chunk],
N_samples,
False,
0,
0,
N_importance,
chunk,
white_back,
test_time=True)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
for k, v in results.items():
results[k] = torch.cat(v, 0)
return results
if __name__ == "__main__":
args = get_opts()
kwargs = {'root_dir': args.root_dir,
'img_wh': tuple(args.img_wh)}
if args.dataset_name == 'llff':
kwargs['spheric_poses'] = True
kwargs['split'] = 'test'
else:
kwargs['split'] = 'train'
dataset = dataset_dict[args.dataset_name](**kwargs)
embedding_xyz = Embedding(3, 10)
embedding_dir = Embedding(3, 4)
embeddings = [embedding_xyz, embedding_dir]
nerf_fine = NeRF()
load_ckpt(nerf_fine, args.ckpt_path, model_name='nerf_fine')
nerf_fine.cuda().eval()
# define the dense grid for query
N = args.N_grid
xmin, xmax = args.x_range
ymin, ymax = args.y_range
zmin, zmax = args.z_range
# assert xmax-xmin == ymax-ymin == zmax-zmin, 'the ranges must have the same length!'
x = np.linspace(xmin, xmax, N)
y = np.linspace(ymin, ymax, N)
z = np.linspace(zmin, zmax, N)
xyz_ = torch.FloatTensor(np.stack(np.meshgrid(x, y, z), -1).reshape(-1, 3)).cuda()
dir_ = torch.zeros_like(xyz_).cuda()
# sigma is independent of direction, so any value here will produce the same result
# predict sigma (occupancy) for each grid location
print('Predicting occupancy ...')
with torch.no_grad():
B = xyz_.shape[0]
out_chunks = []
for i in tqdm(range(0, B, args.chunk)):
xyz_embedded = embedding_xyz(xyz_[i:i+args.chunk]) # (N, embed_xyz_channels)
dir_embedded = embedding_dir(dir_[i:i+args.chunk]) # (N, embed_dir_channels)
xyzdir_embedded = torch.cat([xyz_embedded, dir_embedded], 1)
out_chunks += [nerf_fine(xyzdir_embedded)]
rgbsigma = torch.cat(out_chunks, 0)
sigma = rgbsigma[:, -1].cpu().numpy()
sigma = np.maximum(sigma, 0).reshape(N, N, N)
# perform marching cube algorithm to retrieve vertices and triangle mesh
print('Extracting mesh ...')
vertices, triangles = mcubes.marching_cubes(sigma, args.sigma_threshold)
##### Until mesh extraction here, it is the same as the original repo. ######
vertices_ = (vertices/N).astype(np.float32)
    ## swap x and y: np.meshgrid defaults to indexing='xy', which puts y on the
    ## first grid axis, so marching-cubes vertex axis 0 corresponds to y
x_ = (ymax-ymin) * vertices_[:, 1] + ymin
y_ = (xmax-xmin) * vertices_[:, 0] + xmin
vertices_[:, 0] = x_
vertices_[:, 1] = y_
vertices_[:, 2] = (zmax-zmin) * vertices_[:, 2] + zmin
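    # vertices_ are now in world coordinates inside the user-specified
    # [xmin, xmax] x [ymin, ymax] x [zmin, zmax] bounding box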
vertices_.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
face = np.empty(len(triangles), dtype=[('vertex_indices', 'i4', (3,))])
face['vertex_indices'] = triangles
PlyData([PlyElement.describe(vertices_[:, 0], 'vertex'),
PlyElement.describe(face, 'face')]).write(f'{args.scene_name}.ply')
# remove noise in the mesh by keeping only the biggest cluster
print('Removing noise ...')
mesh = o3d.io.read_triangle_mesh(f"{args.scene_name}.ply")
idxs, count, _ = mesh.cluster_connected_triangles()
max_cluster_idx = np.argmax(count)
triangles_to_remove = [i for i in range(len(face)) if idxs[i] != max_cluster_idx]
mesh.remove_triangles_by_index(triangles_to_remove)
mesh.remove_unreferenced_vertices()
print(f'Mesh has {len(mesh.vertices)/1e6:.2f} M vertices and {len(mesh.triangles)/1e6:.2f} M faces.')
vertices_ = np.asarray(mesh.vertices).astype(np.float32)
triangles = np.asarray(mesh.triangles)
# perform color prediction
# Step 0. define constants (image width, height and intrinsics)
W, H = args.img_wh
K = np.array([[dataset.focal, 0, W/2],
[0, dataset.focal, H/2],
[0, 0, 1]]).astype(np.float32)
# Step 1. transform vertices into world coordinate
N_vertices = len(vertices_)
vertices_homo = np.concatenate([vertices_, np.ones((N_vertices, 1))], 1) # (N, 4)
if args.use_vertex_normal: ## use normal vector method as suggested by the author.
## see https://github.com/bmild/nerf/issues/44
mesh.compute_vertex_normals()
rays_d = torch.FloatTensor(np.asarray(mesh.vertex_normals))
near = dataset.bounds.min() * torch.ones_like(rays_d[:, :1])
far = dataset.bounds.max() * torch.ones_like(rays_d[:, :1])
rays_o = torch.FloatTensor(vertices_) - rays_d * near * args.near_t
nerf_coarse = NeRF()
load_ckpt(nerf_coarse, args.ckpt_path, model_name='nerf_coarse')
nerf_coarse.cuda().eval()
results = f([nerf_coarse, nerf_fine], embeddings,
torch.cat([rays_o, rays_d, near, far], 1).cuda(),
args.N_samples,
args.N_importance,
args.chunk,
dataset.white_back)
else: ## use my color average method. see README_mesh.md
## buffers to store the final averaged color
non_occluded_sum = np.zeros((N_vertices, 1))
v_color_sum = np.zeros((N_vertices, 3))
# Step 2. project the vertices onto each training image to infer the color
print('Fusing colors ...')
for idx in tqdm(range(len(dataset.image_paths))):
## read image of this pose
image = Image.open(dataset.image_paths[idx]).convert('RGB')
image = image.resize(tuple(args.img_wh), Image.LANCZOS)
image = np.array(image)
## read the camera to world relative pose
P_c2w = np.concatenate([dataset.poses[idx], np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
P_w2c = np.linalg.inv(P_c2w)[:3] # (3, 4)
## project vertices from world coordinate to camera coordinate
vertices_cam = (P_w2c @ vertices_homo.T) # (3, N) in "right up back"
vertices_cam[1:] *= -1 # (3, N) in "right down forward"
## project vertices from camera coordinate to pixel coordinate
vertices_image = (K @ vertices_cam).T # (N, 3)
depth = vertices_image[:, -1:]+1e-5 # the depth of the vertices, used as far plane
vertices_image = vertices_image[:, :2]/depth
vertices_image = vertices_image.astype(np.float32)
vertices_image[:, 0] = np.clip(vertices_image[:, 0], 0, W-1)
vertices_image[:, 1] = np.clip(vertices_image[:, 1], 0, H-1)
## compute the color on these projected pixel coordinates
## using bilinear interpolation.
## NOTE: opencv's implementation has a size limit of 32768 pixels per side,
## so we split the input into chunks.
colors = []
remap_chunk = int(3e4)
for i in range(0, N_vertices, remap_chunk):
colors += [cv2.remap(image,
vertices_image[i:i+remap_chunk, 0],
vertices_image[i:i+remap_chunk, 1],
interpolation=cv2.INTER_LINEAR)[:, 0]]
colors = np.vstack(colors) # (N_vertices, 3)
## predict occlusion of each vertex
## we leverage the concept of NeRF by constructing rays coming out from the camera
## and hitting each vertex; by computing the accumulated opacity along this path,
## we can know if the vertex is occluded or not.
## for vertices that appear to be occluded from every input view, we make the
## assumption that its color is the same as its neighbors that are facing our side.
## (think of a surface with one side facing us: we assume the other side has the same color)
## ray's origin is camera origin
rays_o = torch.FloatTensor(dataset.poses[idx][:, -1]).expand(N_vertices, 3)
## ray's direction is the vector pointing from camera origin to the vertices
rays_d = torch.FloatTensor(vertices_) - rays_o # (N_vertices, 3)
rays_d = rays_d / torch.norm(rays_d, dim=-1, keepdim=True)
near = dataset.bounds.min() * torch.ones_like(rays_o[:, :1])
## the far plane is the depth of the vertices, since what we want is the accumulated
## opacity along the path from camera origin to the vertices
far = torch.FloatTensor(depth) * torch.ones_like(rays_o[:, :1])
results = f([nerf_fine], embeddings,
torch.cat([rays_o, rays_d, near, far], 1).cuda(),
args.N_samples,
0,
args.chunk,
dataset.white_back)
opacity = results['opacity_coarse'].cpu().numpy()[:, np.newaxis] # (N_vertices, 1)
opacity = np.nan_to_num(opacity, 1)
non_occluded = np.ones_like(non_occluded_sum) * 0.1/depth # weight by inverse depth
# near=more confident in color
non_occluded += opacity < args.occ_threshold
v_color_sum += colors * non_occluded
non_occluded_sum += non_occluded
# Step 3. combine the output and write to file
if args.use_vertex_normal:
v_colors = results['rgb_fine'].cpu().numpy() * 255.0
else: ## the combined color is the average color among all views
v_colors = v_color_sum/non_occluded_sum
v_colors = v_colors.astype(np.uint8)
v_colors.dtype = [('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
vertices_.dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4')]
vertex_all = np.empty(N_vertices, vertices_.dtype.descr+v_colors.dtype.descr)
for prop in vertices_.dtype.names:
vertex_all[prop] = vertices_[prop][:, 0]
for prop in v_colors.dtype.names:
vertex_all[prop] = v_colors[prop][:, 0]
face = np.empty(len(triangles), dtype=[('vertex_indices', 'i4', (3,))])
face['vertex_indices'] = triangles
PlyData([PlyElement.describe(vertex_all, 'vertex'),
PlyElement.describe(face, 'face')]).write(f'{args.scene_name}.ply')
print('Done!')
| [
"torch.cat",
"torch.norm",
"torch.FloatTensor",
"torch.zeros_like",
"torch.no_grad",
"torch.ones_like"
] | 1.4.0 | U-sepSick/NeRF | c5910f84321eb5f72e3332507b0384f1b23f51f7 |