version | code | apis | full_version | repo_name | hexsha |
---|---|---|---|---|---|
1.1 | import time
import torch
from hpc_rll.origin.rnn import get_lstm
from hpc_rll.torch_utils.network.rnn import LSTM
from testbase import mean_relative_error, times
assert torch.cuda.is_available()
use_cuda = True
seq_len = 64
batch_size = 3
input_size = 1792
hidden_size = 384
num_layers = 3
norm_type = 'LN'
dropout = 0 # 0.1
# Note: load_params must be enabled for hpc_lstm validation
# Note: only applies to the case of num_layers = 3
def lstm_val():
ori_lstm = get_lstm('normal', input_size, hidden_size, num_layers, norm_type, dropout)
hpc_lstm = LSTM(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)
ori_x = torch.randn(seq_len, batch_size, input_size)
ori_h0 = torch.randn(num_layers, batch_size, hidden_size)
ori_c0 = torch.randn(num_layers, batch_size, hidden_size)
if use_cuda:
ori_x = ori_x.cuda()
ori_h0 = ori_h0.cuda()
ori_c0 = ori_c0.cuda()
ori_lstm = ori_lstm.cuda()
hpc_lstm = hpc_lstm.cuda()
ori_x.requires_grad_(True)
ori_output, ori_next_state = ori_lstm(ori_x, [ori_h0, ori_c0])
ori_loss = ori_output.mean()
ori_loss.backward()
hpc_x = ori_x.clone().detach()
hpc_h0 = ori_h0.clone().detach()
hpc_c0 = ori_c0.clone().detach()
hpc_x.requires_grad_(True)
hpc_output, hpc_next_state = hpc_lstm(hpc_x, [hpc_h0, hpc_c0])
hpc_loss = hpc_output.mean()
hpc_loss.backward()
torch.cuda.synchronize()
mre = mean_relative_error(
torch.flatten(ori_loss).cpu().detach().numpy(),
torch.flatten(hpc_loss).cpu().detach().numpy()
)
print("lstm fp mean_relative_error: " + str(mre))
mre = mean_relative_error(
torch.flatten(ori_x.grad).cpu().detach().numpy(),
torch.flatten(hpc_x.grad).cpu().detach().numpy()
)
print("lstm bp mean_relative_error: " + str(mre))
ori_wx_grad = torch.cat((ori_lstm.wx[0].grad, ori_lstm.wx[1].grad, ori_lstm.wx[2].grad))
hpc_wx_grad = hpc_lstm.wx.grad
mre = mean_relative_error(torch.flatten(ori_wx_grad).cpu().numpy(), torch.flatten(hpc_wx_grad).cpu().numpy())
print("wx grad mean_relative_error: " + str(mre))
ori_wh_grad = torch.cat((ori_lstm.wh[0].grad, ori_lstm.wh[1].grad, ori_lstm.wh[2].grad))
hpc_wh_grad = hpc_lstm.wh.grad
mre = mean_relative_error(torch.flatten(ori_wh_grad).cpu().numpy(), torch.flatten(hpc_wh_grad).cpu().numpy())
print("wh grad mean_relative_error: " + str(mre))
ori_bias_grad = ori_lstm.bias.grad
hpc_bias_grad = hpc_lstm.bias.grad
mre = mean_relative_error(torch.flatten(ori_bias_grad).cpu().numpy(), torch.flatten(hpc_bias_grad).cpu().numpy())
print("bias grad mean_relative_error: " + str(mre))
params = list(ori_lstm.parameters())
gamma_0_x = params[1]
beta_0_x = params[2]
gamma_0_h = params[3]
beta_0_h = params[4]
gamma_1_x = params[5]
beta_1_x = params[6]
gamma_1_h = params[7]
beta_1_h = params[8]
gamma_2_x = params[9]
beta_2_x = params[10]
gamma_2_h = params[11]
beta_2_h = params[12]
ori_gamma_grad = torch.cat(
(gamma_0_x.grad, gamma_0_h.grad, gamma_1_x.grad, gamma_1_h.grad, gamma_2_x.grad, gamma_2_h.grad)
)
ori_beta_grad = torch.cat(
(beta_0_x.grad, beta_0_h.grad, beta_1_x.grad, beta_1_h.grad, beta_2_x.grad, beta_2_h.grad)
)
hpc_gamma_grad = hpc_lstm.ln_gamma.grad
hpc_beta_grad = hpc_lstm.ln_beta.grad
mre = mean_relative_error(torch.flatten(ori_gamma_grad).cpu().numpy(), torch.flatten(hpc_gamma_grad).cpu().numpy())
print("ln gamma grad mean_relative_error: " + str(mre))
mre = mean_relative_error(torch.flatten(ori_beta_grad).cpu().numpy(), torch.flatten(hpc_beta_grad).cpu().numpy())
print("ln beta grad mean_relative_error: " + str(mre))
def lstm_perf():
ori_lstm = get_lstm('normal', input_size, hidden_size, num_layers, norm_type, dropout)
hpc_lstm = LSTM(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)
lstms = {'normal': ori_lstm, 'hpc': hpc_lstm}
for lstm_type, lstm in lstms.items():
x = torch.rand(seq_len, batch_size, input_size)
h0 = torch.randn(num_layers, batch_size, hidden_size)
c0 = torch.randn(num_layers, batch_size, hidden_size)
if use_cuda:
x = x.cuda()
h0 = h0.cuda()
c0 = c0.cuda()
lstm = lstm.cuda()
prev_state = [h0, c0]
x.requires_grad_(True)
for i in range(times):
t = time.time()
output, _ = lstm(x, prev_state)
loss = output.mean()
loss.backward()
if use_cuda:
torch.cuda.synchronize()
print('epoch: {}, {} lstm cost time: {}'.format(i, lstm_type, time.time() - t))
if __name__ == '__main__':
print(
"target problem: seq_len = {}, batch_size = {}, input_size = {}, hidden_size = {}, num_layers = {}, norm_type = {}, dropout = {}" # noqa
.format(seq_len, batch_size, input_size, hidden_size, num_layers, norm_type, dropout)
)
print("==============lstm has no validation test================")
#print("===============run lstm validation test==================")
#lstm_val()
print("===============run lstm performance test=================")
lstm_perf()
| [
"torch.rand",
"torch.cat",
"torch.cuda.synchronize",
"torch.cuda.is_available",
"torch.flatten",
"torch.randn"
] | 1.1.0 | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 |
1.1 | from typing import Optional, Dict, Union
import copy
import torch
import torch.nn as nn
from ding.utils import SequenceType, MODEL_REGISTRY
from .vac import VAC
@MODEL_REGISTRY.register('ppg')
class PPG(nn.Module):
mode = ['compute_actor', 'compute_critic', 'compute_actor_critic']
def __init__(
self,
obs_shape: Union[int, SequenceType],
action_shape: Union[int, SequenceType],
share_encoder: bool = True,
continuous: bool = False,
encoder_hidden_size_list: SequenceType = [128, 128, 64],
actor_head_hidden_size: int = 64,
actor_head_layer_num: int = 2,
critic_head_hidden_size: int = 64,
critic_head_layer_num: int = 1,
activation: Optional[nn.Module] = nn.ReLU(),
norm_type: Optional[str] = None,
) -> None:
super(PPG, self).__init__()
self.actor_critic = VAC(
obs_shape, action_shape, share_encoder, continuous, encoder_hidden_size_list, actor_head_hidden_size,
actor_head_layer_num, critic_head_hidden_size, critic_head_layer_num, activation, norm_type
)
self.aux_critic = copy.deepcopy(self.actor_critic.critic)
def forward(self, inputs: Union[torch.Tensor, Dict], mode: str) -> Dict:
assert mode in self.mode, "not support forward mode: {}/{}".format(mode, self.mode)
return getattr(self, mode)(inputs)
def compute_actor(self, x: torch.Tensor) -> Dict:
"""
ReturnsKeys:
- necessary: ``logit``
"""
return self.actor_critic(x, mode='compute_actor')
def compute_critic(self, x: torch.Tensor) -> Dict:
"""
ReturnsKeys:
- necessary: ``value``
"""
x = self.aux_critic[0](x) # encoder
x = self.aux_critic[1](x) # head
return {'value': x['pred']}
def compute_actor_critic(self, x: torch.Tensor) -> Dict:
"""
.. note::
``compute_actor_critic`` interface aims to save computation when the encoder is shared
ReturnsKeys:
- necessary: ``value``, ``logit``
"""
return self.actor_critic(x, mode='compute_actor_critic')
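# Minimal usage sketch (illustrative assumption, not part of the original module):
# the obs/action shapes and batch size below are placeholders chosen only to show
# how the three forward modes documented above are dispatched.
if __name__ == '__main__':
    model = PPG(obs_shape=4, action_shape=2)
    obs = torch.randn(8, 4)
    actor_out = model(obs, mode='compute_actor')          # {'logit': ...}
    critic_out = model(obs, mode='compute_critic')        # {'value': ...} from the auxiliary critic
    joint_out = model(obs, mode='compute_actor_critic')   # single shared-encoder pass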
| [
"torch.nn.ReLU"
] | 1.1.0 | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 |
1.1 | import os
import torch
import numpy as np
import nn.vnn as vnn
import collections
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from model.seq2seq import Module as Base
from models.utils.metric import compute_f1, compute_exact
from gen.utils.image_util import decompress_mask
from torchvision import transforms
from PIL import Image
class Module(Base):
def __init__(self, args, vocab):
'''
Seq2Seq agent
'''
super().__init__(args, vocab)
# encoder and self-attention
self.enc = nn.LSTM(args.demb, args.dhid, bidirectional=True, batch_first=True)
self.enc_att = vnn.SelfAttn(args.dhid*2)
# subgoal monitoring
self.subgoal_monitoring = (self.args.pm_aux_loss_wt > 0 or self.args.subgoal_aux_loss_wt > 0)
# frame mask decoder
decoder = vnn.ConvFrameMaskDecoderProgressMonitorFinetune if self.subgoal_monitoring else vnn.ConvFrameMaskDecoder
self.dec = decoder(self.emb_action_low, args.dframe, 2*args.dhid,
pframe=args.pframe,
attn_dropout=args.attn_dropout,
hstate_dropout=args.hstate_dropout,
actor_dropout=args.actor_dropout,
input_dropout=args.input_dropout,
teacher_forcing=args.dec_teacher_forcing)
# dropouts
self.vis_dropout = nn.Dropout(args.vis_dropout)
self.lang_dropout = nn.Dropout(args.lang_dropout, inplace=True)
self.input_dropout = nn.Dropout(args.input_dropout)
# internal states
self.state_t = None
self.e_t = None
self.test_mode = False
# bce reconstruction loss
self.bce_with_logits = torch.nn.BCEWithLogitsLoss(reduction='none')
self.mse_loss = torch.nn.MSELoss(reduction='none')
# paths
self.root_path = os.getcwd()
self.feat_pt = 'feat_conv.pt'
# params
self.max_subgoals = 25
self.max_episode_len = args.max_episode_len
# reset model
self.reset()
def featurize(self, batch, load_mask=True, load_frames=True):
'''
tensorize and pad batch input
'''
device = torch.device('cuda') if self.args.gpu else torch.device('cpu')
feat = collections.defaultdict(list)
for ex in batch:
###########
# auxiliary
###########
if not self.test_mode:
# subgoal completion supervision
if self.args.subgoal_aux_loss_wt > 0:
feat['subgoals_completed'].append(np.array(ex['num']['low_to_high_idx']) / self.max_subgoals)
# progress monitor supervision
if self.args.pm_aux_loss_wt > 0:
num_actions = len([a for sg in ex['num']['action_low'] for a in sg])
subgoal_progress = [(i+1)/float(num_actions) for i in range(num_actions)]
feat['subgoal_progress'].append(subgoal_progress)
#########
# inputs
#########
# serialize segments
self.serialize_lang_action(ex)
# goal and instr language
lang_goal, lang_instr = ex['num']['lang_goal'], ex['num']['lang_instr']
# zero inputs if specified
lang_goal = self.zero_input(lang_goal) if self.args.zero_goal else lang_goal
lang_instr = self.zero_input(lang_instr) if self.args.zero_instr else lang_instr
# append goal + instr
lang_goal_instr = lang_goal + lang_instr
feat['lang_goal_instr'].append(lang_goal_instr)
episode_len = 0
# load Resnet features from disk
if load_frames and not self.test_mode:
root = self.get_task_root(ex)
#im = torch.load(os.path.join(root, self.feat_pt))
im = []
path = "{}/{}".format(root,'raw_images')
imgs = sorted(os.listdir(path))
tfms = transforms.Compose([transforms.Resize(224), transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),])
for img in imgs:
im.append(tfms(Image.open("{}/{}".format(path,img))))
im = torch.stack(im)
num_low_actions = len(ex['plan']['low_actions'])
num_feat_frames = im.shape[0]
if num_low_actions != num_feat_frames:
keep = [None] * len(ex['plan']['low_actions'])
for i, d in enumerate(ex['images']):
# only add frames linked with low-level actions (i.e. skip filler frames like smooth rotations and dish washing)
if keep[d['low_idx']] is None:
keep[d['low_idx']] = im[i]
keep.append(keep[-1]) # stop frame
episode_len = min(self.max_episode_len, len(keep))
keep = keep[:episode_len]
feat['frames'].append(torch.stack(keep, dim=0))
else:
episode_len = min(self.max_episode_len, len(im))
im = im[:episode_len]
feat['frames'].append(torch.cat([im, im[-1].unsqueeze(0)], dim=0)) # add stop frame
#########
# outputs
#########
if self.args.subgoal_aux_loss_wt > 0:
feat['subgoals_completed'][-1] = feat['subgoals_completed'][-1][:episode_len]
if self.args.pm_aux_loss_wt > 0:
feat['subgoal_progress'][-1] = feat['subgoal_progress'][-1][:episode_len]
if not self.test_mode:
# low-level action
feat['action_low'].append([a['action'] for a in ex['num']['action_low']][:episode_len])
# low-level action mask
if load_mask:
feat['action_low_mask'].append([self.decompress_mask(a['mask']) for i,a in enumerate(ex['num']['action_low']) if a['mask'] is not None and i<episode_len])
# low-level valid interact
feat['action_low_valid_interact'].append([a['valid_interact'] for a in ex['num']['action_low']][:episode_len])
# tensorization and padding
for k, v in feat.items():
if k in {'lang_goal_instr'}:
# language embedding and padding
seqs = [torch.tensor(vv, device=device) for vv in v]
pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
seq_lengths = np.array(list(map(len, v)))
embed_seq = self.emb_word(pad_seq)
packed_input = pack_padded_sequence(embed_seq, seq_lengths, batch_first=True, enforce_sorted=False)
feat[k] = packed_input
elif k in {'action_low_mask'}:
# mask padding
seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]
feat[k] = seqs
elif k in {'subgoal_progress', 'subgoals_completed'}:
# auxiliary padding
seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]
pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
feat[k] = pad_seq
else:
# default: tensorize and pad sequence
seqs = [torch.tensor(vv, device=device, dtype=torch.float if ('frames' in k) else torch.long) for vv in v]
pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
feat[k] = pad_seq
return feat
def serialize_lang_action(self, feat):
'''
append segmented instr language and low-level actions into single sequences
'''
is_serialized = not isinstance(feat['num']['lang_instr'][0], list)
if not is_serialized:
feat['num']['lang_instr'] = [word for desc in feat['num']['lang_instr'] for word in desc]
if not self.test_mode:
feat['num']['action_low'] = [a for a_group in feat['num']['action_low'] for a in a_group]
def decompress_mask(self, compressed_mask):
'''
decompress mask from json files
'''
mask = np.array(decompress_mask(compressed_mask))
mask = np.expand_dims(mask, axis=0)
return mask
def forward(self, feat, max_decode=300):
cont_lang, enc_lang = self.encode_lang(feat)
state_0 = cont_lang, torch.zeros_like(cont_lang)
frames = self.vis_dropout(feat['frames'])
res = self.dec(enc_lang, frames, max_decode=self.max_episode_len, gold=feat['action_low'], state_0=state_0)
feat.update(res)
return feat
def encode_lang(self, feat):
'''
encode goal+instr language
'''
emb_lang_goal_instr = feat['lang_goal_instr']
self.lang_dropout(emb_lang_goal_instr.data)
enc_lang_goal_instr, _ = self.enc(emb_lang_goal_instr)
enc_lang_goal_instr, _ = pad_packed_sequence(enc_lang_goal_instr, batch_first=True)
self.lang_dropout(enc_lang_goal_instr)
cont_lang_goal_instr = self.enc_att(enc_lang_goal_instr)
return cont_lang_goal_instr, enc_lang_goal_instr
def reset(self):
'''
reset internal states (used for real-time execution during eval)
'''
self.r_state = {
'state_t': None,
'e_t': None,
'cont_lang': None,
'enc_lang': None
}
def step(self, feat, prev_action=None):
'''
forward the model for a single time-step (used for real-time execution during eval)
'''
# encode language features
if self.r_state['cont_lang'] is None and self.r_state['enc_lang'] is None:
self.r_state['cont_lang'], self.r_state['enc_lang'] = self.encode_lang(feat)
# initialize embedding and hidden states
if self.r_state['e_t'] is None and self.r_state['state_t'] is None:
self.r_state['e_t'] = self.dec.go.repeat(self.r_state['enc_lang'].size(0), 1)
self.r_state['state_t'] = self.r_state['cont_lang'], torch.zeros_like(self.r_state['cont_lang'])
# previous action embedding
e_t = self.embed_action(prev_action) if prev_action is not None else self.r_state['e_t']
# decode and save embedding and hidden states
out_action_low, out_action_low_mask, state_t, *_ = self.dec.step(self.r_state['enc_lang'], feat['frames'][:, 0], e_t=e_t, state_tm1=self.r_state['state_t'])
# save states
self.r_state['state_t'] = state_t
self.r_state['e_t'] = self.dec.emb(out_action_low.max(1)[1])
# output formatting
feat['out_action_low'] = out_action_low.unsqueeze(0)
feat['out_action_low_mask'] = out_action_low_mask.unsqueeze(0)
return feat
def extract_preds(self, out, batch, feat, clean_special_tokens=True):
'''
output processing
'''
pred = {}
for ex, alow, alow_mask in zip(batch, feat['out_action_low'].max(2)[1].tolist(), feat['out_action_low_mask']):
# remove padding tokens
if self.pad in alow:
pad_start_idx = alow.index(self.pad)
alow = alow[:pad_start_idx]
alow_mask = alow_mask[:pad_start_idx]
if clean_special_tokens:
# remove <<stop>> tokens
if self.stop_token in alow:
stop_start_idx = alow.index(self.stop_token)
alow = alow[:stop_start_idx]
alow_mask = alow_mask[:stop_start_idx]
# index to API actions
words = self.vocab['action_low'].index2word(alow)
# sigmoid preds to binary mask
alow_mask = F.sigmoid(alow_mask)
p_mask = [(alow_mask[t] > 0.5).cpu().numpy() for t in range(alow_mask.shape[0])]
task_id_ann = self.get_task_and_ann_id(ex)
pred[task_id_ann] = {
'action_low': ' '.join(words),
'action_low_mask': p_mask,
}
return pred
def embed_action(self, action):
'''
embed low-level action
'''
device = torch.device('cuda') if self.args.gpu else torch.device('cpu')
action_num = torch.tensor(self.vocab['action_low'].word2index(action), device=device)
action_emb = self.dec.emb(action_num).unsqueeze(0)
return action_emb
def compute_loss(self, out, batch, feat):
'''
loss function for Seq2Seq agent
'''
losses = dict()
# GT and predictions
p_alow = out['out_action_low'].view(-1, len(self.vocab['action_low']))
l_alow = feat['action_low'].view(-1)
p_alow_mask = out['out_action_low_mask']
valid = feat['action_low_valid_interact']
# action loss
pad_valid = (l_alow != self.pad)
alow_loss = F.cross_entropy(p_alow, l_alow, reduction='none')
alow_loss *= pad_valid.float()
alow_loss = alow_loss.mean()
losses['action_low'] = alow_loss * self.args.action_loss_wt
# mask loss
valid_idxs = valid.view(-1).nonzero().view(-1)
flat_p_alow_mask = p_alow_mask.view(p_alow_mask.shape[0]*p_alow_mask.shape[1], *p_alow_mask.shape[2:])[valid_idxs]
if flat_p_alow_mask.shape[0]!=0:
flat_alow_mask = torch.cat(feat['action_low_mask'], dim=0)
alow_mask_loss = self.weighted_mask_loss(flat_p_alow_mask, flat_alow_mask)
losses['action_low_mask'] = alow_mask_loss * self.args.mask_loss_wt
# subgoal completion loss
if self.args.subgoal_aux_loss_wt > 0:
p_subgoal = feat['out_subgoal'].squeeze(2)
l_subgoal = feat['subgoals_completed']
sg_loss = self.mse_loss(p_subgoal, l_subgoal)
sg_loss = sg_loss.view(-1) * pad_valid.float()
subgoal_loss = sg_loss.mean()
losses['subgoal_aux'] = self.args.subgoal_aux_loss_wt * subgoal_loss
# progress monitoring loss
if self.args.pm_aux_loss_wt > 0:
p_progress = feat['out_progress'].squeeze(2)
l_progress = feat['subgoal_progress']
pg_loss = self.mse_loss(p_progress, l_progress)
pg_loss = pg_loss.view(-1) * pad_valid.float()
progress_loss = pg_loss.mean()
losses['progress_aux'] = self.args.pm_aux_loss_wt * progress_loss
return losses
def weighted_mask_loss(self, pred_masks, gt_masks):
'''
mask loss that accounts for weight-imbalance between 0 and 1 pixels
'''
bce = self.bce_with_logits(pred_masks, gt_masks)
flipped_mask = self.flip_tensor(gt_masks)
inside = (bce * gt_masks).sum() / (gt_masks).sum()
outside = (bce * flipped_mask).sum() / (flipped_mask).sum()
return inside + outside
def flip_tensor(self, tensor, on_zero=1, on_non_zero=0):
'''
flip 0 and 1 values in tensor
'''
res = tensor.clone()
res[tensor == 0] = on_zero
res[tensor != 0] = on_non_zero
return res
def compute_metric(self, preds, data):
'''
compute f1 and extract match scores for output
'''
m = collections.defaultdict(list)
for task in data:
ex = self.load_task_json(task)
i = self.get_task_and_ann_id(ex)
label = ' '.join([a['discrete_action']['action'] for a in ex['plan']['low_actions']])
m['action_low_f1'].append(compute_f1(label.lower(), preds[i]['action_low'].lower()))
m['action_low_em'].append(compute_exact(label.lower(), preds[i]['action_low'].lower()))
return {k: sum(v)/len(v) for k, v in m.items()}
| [
"torch.device",
"torch.nn.Dropout",
"torch.nn.functional.sigmoid",
"torch.nn.LSTM",
"torch.nn.MSELoss",
"torch.cat",
"torch.stack",
"torch.nn.utils.rnn.pad_sequence",
"torch.nn.functional.cross_entropy",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.zeros_like"
] | 1.1.0 | shivgarg/alfred_transformers | 3eab07d3a218eb9b809dec8b7120b92ebd00c890 |
1.0 | # -*- coding: utf-8 -*-
# file: text_classifier.py
# author: yangheng <[email protected]>
# Copyright (C) 2020. All Rights Reserved.
import json
import os
import pickle
import random
import numpy
import torch
from findfile import find_file
from termcolor import colored
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModel
from pyabsa.functional.dataset import detect_infer_dataset
from ..models import GloVeClassificationModelList, BERTClassificationModelList
from ..classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeClassificationDataset
from ..classic.__bert__.dataset_utils.data_utils_for_inferring import BERTClassificationDataset
from ..classic.__glove__.dataset_utils.data_utils_for_training import LABEL_PADDING, build_embedding_matrix, build_tokenizer
from pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError
class TextClassifier:
def __init__(self, model_arg=None, label_map=None, eval_batch_size=128):
'''
from_train_model: load inferring_tutorials model from trained model
'''
self.initializers = {
'xavier_uniform_': torch.nn.init.xavier_uniform_,
'xavier_normal_': torch.nn.init.xavier_normal_,
'orthogonal_': torch.nn.init.orthogonal_
}
# load from a training
if not isinstance(model_arg, str):
print('Load text classifier from training')
self.model = model_arg[0]
self.opt = model_arg[1]
self.tokenizer = model_arg[2]
else:
try:
if 'fine-tuned' in model_arg:
raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')
print('Load text classifier from', model_arg)
state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX'])
model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX'])
tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX'])
config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX'])
print('config: {}'.format(config_path))
print('state_dict: {}'.format(state_dict_path))
print('model: {}'.format(model_path))
print('tokenizer: {}'.format(tokenizer_path))
self.opt = pickle.load(open(config_path, mode='rb'))
if state_dict_path or model_path:
if not hasattr(GloVeClassificationModelList, self.opt.model.__name__.upper()):
if 'pretrained_bert_name' in self.opt.args or 'pretrained_bert' in self.opt.args:
if 'pretrained_bert_name' in self.opt.args:
self.opt.pretrained_bert = self.opt.pretrained_bert_name
if state_dict_path:
try:
self.bert = AutoModel.from_pretrained(self.opt.pretrained_bert)
self.model = self.opt.model(self.bert, self.opt)
except ValueError:
raise TransformerConnectionError()
elif model_path:
if model_path:
self.model = torch.load(model_path, map_location='cpu')
if tokenizer_path:
self.tokenizer = pickle.load(open(tokenizer_path, mode='rb'))
else:
raise ValueError('No .tokenizer found!')
else:
self.tokenizer = build_tokenizer(
dataset_list=self.opt.dataset_file,
max_seq_len=self.opt.max_seq_len,
dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),
opt=self.opt
)
if model_path:
self.model = torch.load(model_path, map_location='cpu')
else:
self.embedding_matrix = build_embedding_matrix(
word2idx=self.tokenizer.word2idx,
embed_dim=self.opt.embed_dim,
dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),
opt=self.opt
)
self.model = self.opt.model(self.embedding_matrix, self.opt).to(self.opt.device)
self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))
print('Config used in Training:')
print_args(self.opt, mode=1)
except Exception as e:
raise RuntimeError('Exception: {} Fail to load the model from {}! '.format(e, model_arg))
if not hasattr(GloVeClassificationModelList, self.model.__class__.__name__) \
and not hasattr(BERTClassificationModelList, self.model.__class__.__name__):
raise KeyError('The checkpoint you are loading is not from classifier model.')
if hasattr(BERTClassificationModelList, self.opt.model.__name__):
self.dataset = BERTClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)
elif hasattr(GloVeClassificationModelList, self.opt.model.__name__):
self.dataset = GloVeClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)
self.opt.inputs_cols = self.model.inputs
self.infer_dataloader = None
self.opt.eval_batch_size = eval_batch_size
if self.opt.seed is not None:
random.seed(self.opt.seed)
numpy.random.seed(self.opt.seed)
torch.manual_seed(self.opt.seed)
torch.cuda.manual_seed(self.opt.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
self.opt.initializer = self.opt.initializer
self.label_map = None
self.set_label_map(label_map)
def set_label_map(self, label_map):
if label_map:
print(colored('Warning: label map is deprecated, please directly set labels within dataset.', 'red'))
label_map[LABEL_PADDING] = ''
self.label_map = label_map
def to(self, device=None):
self.opt.device = device
self.model.to(device)
def cpu(self):
self.opt.device = 'cpu'
self.model.to('cpu')
def cuda(self, device='cuda:0'):
self.opt.device = device
self.model.to(device)
def _log_write_args(self):
n_trainable_params, n_nontrainable_params = 0, 0
for p in self.model.parameters():
n_params = torch.prod(torch.tensor(p.shape))
if p.requires_grad:
n_trainable_params += n_params
else:
n_nontrainable_params += n_params
print(
'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
for arg in vars(self.opt):
if getattr(self.opt, arg) is not None:
print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
def batch_infer(self,
target_file=None,
print_result=True,
save_result=False,
clear_input_samples=True,
ignore_error=True):
if clear_input_samples:
self.clear_input_samples()
save_path = os.path.join(os.getcwd(), 'text_classification.result.json')
target_file = detect_infer_dataset(target_file, task='text_classification')
if not target_file:
raise FileNotFoundError('Can not find inference datasets!')
self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)
self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)
return self._infer(save_path=save_path if save_result else None, print_result=print_result)
def infer(self, text: str = None,
print_result=True,
clear_input_samples=True):
if clear_input_samples:
self.clear_input_samples()
if text:
self.dataset.prepare_infer_sample(text)
else:
raise RuntimeError('Please specify your datasets path!')
self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)
return self._infer(print_result=print_result)
def merge_results(self, results):
""" merge APC results have the same input text
"""
final_res = []
for result in results:
if final_res and "".join(final_res[-1]['text'].split()) == "".join(result['text'].split()):
final_res[-1]['label'].append(result['label'])
final_res[-1]['ref_label'].append(result['ref_label'])
final_res[-1]['ref_check'].append(result['ref_check'])
else:
final_res.append(
{
'text': result['text'].replace('  ', ' '),
'label': [result['label']],
'ref_label': [result['ref_label']],
'ref_check': [result['ref_check']]
}
)
return final_res
def _infer(self, save_path=None, print_result=True):
_params = filter(lambda p: p.requires_grad, self.model.parameters())
correct = {True: 'Correct', False: 'Wrong'}
results = []
with torch.no_grad():
self.model.eval()
n_correct = 0
n_labeled = 0
n_total = 0
for _, sample in enumerate(self.infer_dataloader):
inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'label']
self.model.eval()
outputs = self.model(inputs)
sen_logits = outputs
t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()
for i, i_probs in enumerate(t_probs):
if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)):
sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]
if sample['label'] != -999:
real_sent = sample['label'][i] if isinstance(sample['label'][i], str) else self.opt.index_to_label.get(int(sample['label'][i]), 'N.A.')
else:
real_sent = 'N.A.'
if real_sent != -999 and real_sent != '-999':
n_labeled += 1
if sent == real_sent:
n_correct += 1
else: # for the former versions until 1.2.0
sent = int(i_probs.argmax(axis=-1))
real_sent = int(sample['label'][i])
text_raw = sample['text_raw'][i]
results.append({
'text': text_raw,
'label': sent,
'ref_label': real_sent,
'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',
})
n_total += 1
if len(self.infer_dataloader) > 1:
print('Total samples:{}'.format(n_total))
print('Labeled samples:{}'.format(n_labeled))
print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))
try:
if print_result:
for result in results:
text_printing = result['text']
if result['ref_label'] != -999:
if result['label'] == result['ref_label']:
text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'green')
else:
text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'red')
else:
text_info = ' -> {}'.format(result['label'])
text_printing += text_info
print(text_printing)
if save_path:
fout = open(save_path, 'w', encoding='utf8')
json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False)
# fout.write('Total samples:{}\n'.format(n_total))
# fout.write('Labeled samples:{}\n'.format(n_labeled))
# fout.write('Prediction Accuracy:{}%\n'.format(100 * n_correct / n_labeled)) if n_labeled else 'N.A.'
print('inference result saved in: {}'.format(save_path))
except Exception as e:
print('Can not save result: {}, Exception: {}'.format(text_raw, e))
return results
def clear_input_samples(self):
self.dataset.all_data = []
| [
"torch.cuda.manual_seed",
"torch.no_grad",
"torch.softmax",
"torch.manual_seed",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.0 | yangheng95/PyABSA | f5b46047a58fa8054a0469486be3f1cada933814 |
1.4 | from typing import Any, Dict, List, Optional, Type
import gym
import torch as th
from torch import nn
from stable_baselines3.common.policies import BasePolicy, register_policy
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
from stable_baselines3.common.type_aliases import Schedule
class QNetwork(BasePolicy):
"""
Action-Value (Q-Value) network for DQN
:param observation_space: Observation space
:param action_space: Action space
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor: nn.Module,
features_dim: int,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
):
super(QNetwork, self).__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
if net_arch is None:
net_arch = [64, 64]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.features_extractor = features_extractor
self.features_dim = features_dim
self.normalize_images = normalize_images
action_dim = self.action_space.n # number of actions
q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
self.q_net = nn.Sequential(*q_net)
def forward(self, obs: th.Tensor) -> th.Tensor:
"""
Predict the q-values.
:param obs: Observation
:return: The estimated Q-Value for each action.
"""
return self.q_net(self.extract_features(obs))
def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
q_values = self.forward(observation)
# Greedy action
action = q_values.argmax(dim=1).reshape(-1)
return action
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
data.update(
dict(
net_arch=self.net_arch,
features_dim=self.features_dim,
activation_fn=self.activation_fn,
features_extractor=self.features_extractor,
epsilon=self.epsilon,
)
)
return data
class DQNPolicy(BasePolicy):
"""
Policy class with Q-Value Net and target net for DQN
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(DQNPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
)
if net_arch is None:
if features_extractor_class == FlattenExtractor:
net_arch = [64, 64]
else:
net_arch = []
self.net_arch = net_arch
self.activation_fn = activation_fn
self.normalize_images = normalize_images
self.net_args = {
"observation_space": self.observation_space,
"action_space": self.action_space,
"net_arch": self.net_arch,
"activation_fn": self.activation_fn,
"normalize_images": normalize_images,
}
self.q_net, self.q_net_target = None, None
self._build(lr_schedule)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the network and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self.q_net = self.make_q_net()
self.q_net_target = self.make_q_net()
self.q_net_target.load_state_dict(self.q_net.state_dict())
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def make_q_net(self) -> QNetwork:
# Make sure we always have separate networks for features extractors etc
net_args = self._update_features_extractor(self.net_args, features_extractor=None)
return QNetwork(**net_args).to(self.device)
def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self._predict(obs, deterministic=deterministic)
def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self.q_net._predict(obs, deterministic=deterministic)
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
data.update(
dict(
net_arch=self.net_args["net_arch"],
activation_fn=self.net_args["activation_fn"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
MlpPolicy = DQNPolicy
class CnnPolicy(DQNPolicy):
"""
Policy class for DQN when using images as input.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(CnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
register_policy("MlpPolicy", MlpPolicy)
register_policy("CnnPolicy", CnnPolicy)
| [
"torch.nn.Sequential"
] | 1.4.0 | LucasAlegre/stable-baselines3 | 6b598323ae070bb0a998d25230f6e11eca4cbe61 |
1.4 | import io
import os
import pathlib
import warnings
from collections import OrderedDict
from copy import deepcopy
import gym
import numpy as np
import pytest
import torch as th
from stable_baselines3 import A2C, DDPG, DQN, PPO, SAC, TD3
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.identity_env import FakeImageEnv, IdentityEnv, IdentityEnvBox
from stable_baselines3.common.save_util import load_from_pkl, open_path, save_to_pkl
from stable_baselines3.common.utils import get_device
from stable_baselines3.common.vec_env import DummyVecEnv
MODEL_LIST = [PPO, A2C, TD3, SAC, DQN, DDPG]
def select_env(model_class: BaseAlgorithm) -> gym.Env:
"""
Selects an environment with the correct action space, as DQN only supports a discrete action space
"""
if model_class == DQN:
return IdentityEnv(10)
else:
return IdentityEnvBox(10)
@pytest.mark.parametrize("model_class", MODEL_LIST)
def test_save_load(tmp_path, model_class):
"""
Test if 'save' and 'load' save and load the model correctly
and if 'get_parameters' and 'set_parameters' work correctly.
Warning: does not test loading of optimizer parameters.
:param model_class: (BaseAlgorithm) A RL model
"""
env = DummyVecEnv([lambda: select_env(model_class)])
# create model
model = model_class("MlpPolicy", env, policy_kwargs=dict(net_arch=[16]), verbose=1)
model.learn(total_timesteps=500)
env.reset()
observations = np.concatenate([env.step([env.action_space.sample()])[0] for _ in range(10)], axis=0)
# Get parameters of different objects
# deepcopy to avoid referencing to tensors we are about to modify
original_params = deepcopy(model.get_parameters())
# Test different error cases of set_parameters.
# Test that invalid object names throw errors
invalid_object_params = deepcopy(original_params)
invalid_object_params["I_should_not_be_a_valid_object"] = "and_I_am_an_invalid_tensor"
with pytest.raises(ValueError):
model.set_parameters(invalid_object_params, exact_match=True)
with pytest.raises(ValueError):
model.set_parameters(invalid_object_params, exact_match=False)
# Test that exact_match catches when something was missed.
missing_object_params = dict((k, v) for k, v in list(original_params.items())[:-1])
with pytest.raises(ValueError):
model.set_parameters(missing_object_params, exact_match=True)
# Test that exact_match catches when something inside state-dict
# is missing but we have exact_match.
missing_state_dict_tensor_params = {}
for object_name in original_params:
object_params = {}
missing_state_dict_tensor_params[object_name] = object_params
# Skip last item in state-dict
for k, v in list(original_params[object_name].items())[:-1]:
object_params[k] = v
with pytest.raises(RuntimeError):
# PyTorch load_state_dict throws RuntimeError if strict but
# invalid state-dict.
model.set_parameters(missing_state_dict_tensor_params, exact_match=True)
# Test that parameters do indeed change.
random_params = {}
for object_name, params in original_params.items():
# Do not randomize optimizer parameters (custom layout)
if "optim" in object_name:
random_params[object_name] = params
else:
# Again, skip the last item in state-dict
random_params[object_name] = OrderedDict(
(param_name, th.rand_like(param)) for param_name, param in list(params.items())[:-1]
)
# Update model parameters with the new random values
model.set_parameters(random_params, exact_match=False)
new_params = model.get_parameters()
# Check that all params except the final item in each state-dict are different.
for object_name in original_params:
# Skip optimizers (no valid comparison with just th.allclose)
if "optim" in object_name:
continue
# state-dicts use ordered dictionaries, so key order
# is guaranteed.
last_key = list(original_params[object_name].keys())[-1]
for k in original_params[object_name]:
if k == last_key:
# Should be same as before
assert th.allclose(
original_params[object_name][k], new_params[object_name][k]
), "Parameter changed despite not included in the loaded parameters."
else:
# Should be different
assert not th.allclose(
original_params[object_name][k], new_params[object_name][k]
), "Parameters did not change as expected."
params = new_params
# get selected actions
selected_actions, _ = model.predict(observations, deterministic=True)
# Check
model.save(tmp_path / "test_save.zip")
del model
# Check if the model loads as expected for every possible choice of device:
for device in ["auto", "cpu", "cuda"]:
model = model_class.load(str(tmp_path / "test_save.zip"), env=env, device=device)
# check if the model was loaded to the correct device
assert model.device.type == get_device(device).type
assert model.policy.device.type == get_device(device).type
# check if params are still the same after load
new_params = model.get_parameters()
# Check that all params are the same as before save load procedure now
for object_name in new_params:
# Skip optimizers (no valid comparison with just th.allclose)
if "optim" in object_name:
continue
for key in params[object_name]:
assert new_params[object_name][key].device.type == get_device(device).type
assert th.allclose(
params[object_name][key].to("cpu"), new_params[object_name][key].to("cpu")
), "Model parameters not the same after save and load."
# check if model still selects the same actions
new_selected_actions, _ = model.predict(observations, deterministic=True)
assert np.allclose(selected_actions, new_selected_actions, 1e-4)
# check if learn still works
model.learn(total_timesteps=500)
del model
# clear file from os
os.remove(tmp_path / "test_save.zip")
@pytest.mark.parametrize("model_class", MODEL_LIST)
def test_set_env(model_class):
"""
Test if the set_env function works correctly
:param model_class: (BaseAlgorithm) A RL model
"""
# use discrete for DQN
env = DummyVecEnv([lambda: select_env(model_class)])
env2 = DummyVecEnv([lambda: select_env(model_class)])
env3 = select_env(model_class)
kwargs = {}
if model_class in {DQN, DDPG, SAC, TD3}:
kwargs = dict(learning_starts=100)
elif model_class in {A2C, PPO}:
kwargs = dict(n_steps=100)
# create model
model = model_class("MlpPolicy", env, policy_kwargs=dict(net_arch=[16]), **kwargs)
# learn
model.learn(total_timesteps=300)
# change env
model.set_env(env2)
# learn again
model.learn(total_timesteps=300)
# change env test wrapping
model.set_env(env3)
# learn again
model.learn(total_timesteps=300)
@pytest.mark.parametrize("model_class", MODEL_LIST)
def test_exclude_include_saved_params(tmp_path, model_class):
"""
Test if exclude and include parameters of save() work
:param model_class: (BaseAlgorithm) A RL model
"""
env = DummyVecEnv([lambda: select_env(model_class)])
# create model, set verbose as 2, which is not standard
model = model_class("MlpPolicy", env, policy_kwargs=dict(net_arch=[16]), verbose=2)
# Check if exclude works
model.save(tmp_path / "test_save", exclude=["verbose"])
del model
model = model_class.load(str(tmp_path / "test_save.zip"))
# check if verbose was not saved
assert model.verbose != 2
# set verbose to something different than the standard setting
model.verbose = 2
# Check if include works
model.save(tmp_path / "test_save", exclude=["verbose"], include=["verbose"])
del model
model = model_class.load(str(tmp_path / "test_save.zip"))
assert model.verbose == 2
# clear file from os
os.remove(tmp_path / "test_save.zip")
@pytest.mark.parametrize("model_class", [A2C, TD3])
def test_save_load_env_cnn(tmp_path, model_class):
"""
Test loading with an env that requires a ``CnnPolicy``.
This is to test wrapping and observation space check.
We test one on-policy and one off-policy
algorithm as the rest share the loading part.
"""
env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=False)
kwargs = dict(policy_kwargs=dict(net_arch=[32]))
if model_class == TD3:
kwargs.update(dict(buffer_size=100, learning_starts=50))
model = model_class("CnnPolicy", env, **kwargs).learn(100)
model.save(tmp_path / "test_save")
# Test loading with env and continuing training
model = model_class.load(str(tmp_path / "test_save.zip"), env=env).learn(100)
# clear file from os
os.remove(tmp_path / "test_save.zip")
@pytest.mark.parametrize("model_class", [SAC, TD3, DQN])
def test_save_load_replay_buffer(tmp_path, model_class):
path = pathlib.Path(tmp_path / "logs/replay_buffer.pkl")
path.parent.mkdir(exist_ok=True, parents=True) # to not raise a warning
model = model_class(
"MlpPolicy", select_env(model_class), buffer_size=1000, policy_kwargs=dict(net_arch=[64]), learning_starts=200
)
model.learn(300)
old_replay_buffer = deepcopy(model.replay_buffer)
model.save_replay_buffer(path)
model.replay_buffer = None
model.load_replay_buffer(path)
assert np.allclose(old_replay_buffer.observations, model.replay_buffer.observations)
assert np.allclose(old_replay_buffer.actions, model.replay_buffer.actions)
assert np.allclose(old_replay_buffer.rewards, model.replay_buffer.rewards)
assert np.allclose(old_replay_buffer.dones, model.replay_buffer.dones)
# test extending replay buffer
model.replay_buffer.extend(
old_replay_buffer.observations,
old_replay_buffer.observations,
old_replay_buffer.actions,
old_replay_buffer.rewards,
old_replay_buffer.dones,
)
@pytest.mark.parametrize("model_class", [DQN, SAC, TD3])
@pytest.mark.parametrize("optimize_memory_usage", [False, True])
def test_warn_buffer(recwarn, model_class, optimize_memory_usage):
"""
When using memory efficient replay buffer,
a warning must be emitted when calling `.learn()`
multiple times.
See https://github.com/DLR-RM/stable-baselines3/issues/46
"""
# remove gym warnings
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
warnings.filterwarnings(action="ignore", category=UserWarning, module="gym")
model = model_class(
"MlpPolicy",
select_env(model_class),
buffer_size=100,
optimize_memory_usage=optimize_memory_usage,
policy_kwargs=dict(net_arch=[64]),
learning_starts=10,
)
model.learn(150)
model.learn(150, reset_num_timesteps=False)
# Check that there is no warning
assert len(recwarn) == 0
model.learn(150)
if optimize_memory_usage:
assert len(recwarn) == 1
warning = recwarn.pop(UserWarning)
assert "The last trajectory in the replay buffer will be truncated" in str(warning.message)
else:
assert len(recwarn) == 0
@pytest.mark.parametrize("model_class", MODEL_LIST)
@pytest.mark.parametrize("policy_str", ["MlpPolicy", "CnnPolicy"])
def test_save_load_policy(tmp_path, model_class, policy_str):
"""
Test saving and loading policy only.
:param model_class: (BaseAlgorithm) A RL model
:param policy_str: (str) Name of the policy.
"""
kwargs = dict(policy_kwargs=dict(net_arch=[16]))
if policy_str == "MlpPolicy":
env = select_env(model_class)
else:
if model_class in [SAC, TD3, DQN, DDPG]:
# Avoid memory error when using replay buffer
# Reduce the size of the features
kwargs = dict(
buffer_size=250, learning_starts=100, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32))
)
env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=model_class == DQN)
env = DummyVecEnv([lambda: env])
# create model
model = model_class(policy_str, env, verbose=1, **kwargs)
model.learn(total_timesteps=300)
env.reset()
observations = np.concatenate([env.step([env.action_space.sample()])[0] for _ in range(10)], axis=0)
policy = model.policy
policy_class = policy.__class__
actor, actor_class = None, None
if model_class in [SAC, TD3]:
actor = policy.actor
actor_class = actor.__class__
# Get dictionary of current parameters
params = deepcopy(policy.state_dict())
# Modify all parameters to be random values
random_params = dict((param_name, th.rand_like(param)) for param_name, param in params.items())
# Update model parameters with the new random values
policy.load_state_dict(random_params)
new_params = policy.state_dict()
# Check that all params are different now
for k in params:
assert not th.allclose(params[k], new_params[k]), "Parameters did not change as expected."
params = new_params
# get selected actions
selected_actions, _ = policy.predict(observations, deterministic=True)
# Should also work with the actor only
if actor is not None:
selected_actions_actor, _ = actor.predict(observations, deterministic=True)
# Save and load policy
policy.save(tmp_path / "policy.pkl")
# Save and load actor
if actor is not None:
actor.save(tmp_path / "actor.pkl")
del policy, actor
policy = policy_class.load(tmp_path / "policy.pkl")
if actor_class is not None:
actor = actor_class.load(tmp_path / "actor.pkl")
# check if params are still the same after load
new_params = policy.state_dict()
# Check that all params are the same as before save load procedure now
for key in params:
assert th.allclose(params[key], new_params[key]), "Policy parameters not the same after save and load."
# check if model still selects the same actions
new_selected_actions, _ = policy.predict(observations, deterministic=True)
assert np.allclose(selected_actions, new_selected_actions, 1e-4)
if actor_class is not None:
new_selected_actions_actor, _ = actor.predict(observations, deterministic=True)
assert np.allclose(selected_actions_actor, new_selected_actions_actor, 1e-4)
assert np.allclose(selected_actions_actor, new_selected_actions, 1e-4)
# clear file from os
os.remove(tmp_path / "policy.pkl")
if actor_class is not None:
os.remove(tmp_path / "actor.pkl")
@pytest.mark.parametrize("pathtype", [str, pathlib.Path])
def test_open_file_str_pathlib(tmp_path, pathtype):
# check that suffix isn't added because we used open_path first
with open_path(pathtype(f"{tmp_path}/t1"), "w") as fp1:
save_to_pkl(fp1, "foo")
assert fp1.closed
with pytest.warns(None) as record:
assert load_from_pkl(pathtype(f"{tmp_path}/t1")) == "foo"
assert not record
# test custom suffix
with open_path(pathtype(f"{tmp_path}/t1.custom_ext"), "w") as fp1:
save_to_pkl(fp1, "foo")
assert fp1.closed
with pytest.warns(None) as record:
assert load_from_pkl(pathtype(f"{tmp_path}/t1.custom_ext")) == "foo"
assert not record
# test without suffix
with open_path(pathtype(f"{tmp_path}/t1"), "w", suffix="pkl") as fp1:
save_to_pkl(fp1, "foo")
assert fp1.closed
with pytest.warns(None) as record:
assert load_from_pkl(pathtype(f"{tmp_path}/t1.pkl")) == "foo"
assert not record
# test that a warning is raised when the path doesn't exist
with open_path(pathtype(f"{tmp_path}/t2.pkl"), "w") as fp1:
save_to_pkl(fp1, "foo")
assert fp1.closed
with pytest.warns(None) as record:
assert load_from_pkl(open_path(pathtype(f"{tmp_path}/t2"), "r", suffix="pkl")) == "foo"
assert len(record) == 0
with pytest.warns(None) as record:
assert load_from_pkl(open_path(pathtype(f"{tmp_path}/t2"), "r", suffix="pkl", verbose=2)) == "foo"
assert len(record) == 1
fp = pathlib.Path(f"{tmp_path}/t2").open("w")
fp.write("rubbish")
fp.close()
# test that a warning is only raised when verbose = 0
with pytest.warns(None) as record:
open_path(pathtype(f"{tmp_path}/t2"), "w", suffix="pkl", verbose=0).close()
open_path(pathtype(f"{tmp_path}/t2"), "w", suffix="pkl", verbose=1).close()
open_path(pathtype(f"{tmp_path}/t2"), "w", suffix="pkl", verbose=2).close()
assert len(record) == 1
def test_open_file(tmp_path):
# path must match the type
with pytest.raises(TypeError):
open_path(123, None, None, None)
p1 = tmp_path / "test1"
fp = p1.open("wb")
# provided path must match the mode
with pytest.raises(ValueError):
open_path(fp, "r")
with pytest.raises(ValueError):
open_path(fp, "randomstuff")
# test identity
_ = open_path(fp, "w")
assert _ is not None
assert fp is _
# Can't use a closed path
with pytest.raises(ValueError):
fp.close()
open_path(fp, "w")
buff = io.BytesIO()
assert buff.writable()
assert buff.readable() is ("w" == "w")
_ = open_path(buff, "w")
assert _ is buff
with pytest.raises(ValueError):
buff.close()
open_path(buff, "w")
| [
"torch.allclose",
"torch.rand_like"
] | 1.4.0 | LucasAlegre/stable-baselines3 | 6b598323ae070bb0a998d25230f6e11eca4cbe61 |
1.5 | import torch
import torch.distributions as td
class GMM2D(td.MixtureSameFamily):
def __init__(self, mixture_distribution, component_distribution):
super(GMM2D, self).__init__(mixture_distribution, component_distribution)
def mode_mode(self):
mode_k = torch.argmax(self.mixture_distribution.probs[0, 0]).item()
mode_gaussian = self.component_distribution.mean[:, 0, mode_k, :2]
return mode_gaussian
def position_log_prob(self, x):
# Computing the log probability over only the positions.
component_dist = td.MultivariateNormal(loc=self.component_distribution.mean[..., :2],
scale_tril=self.component_distribution.scale_tril[..., :2, :2])
position_dist = td.MixtureSameFamily(self.mixture_distribution, component_dist)
return position_dist.log_prob(x)
@property
def pis(self):
return self.mixture_distribution.probs[0, 0]
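# Minimal usage sketch (illustrative assumption, not part of the original module):
# the (batch, time, components, dims) layout below is inferred from the indexing in
# mode_mode() and is only meant to show how the wrapper is constructed and queried.
if __name__ == '__main__':
    bs, T, K, D = 1, 1, 3, 2
    mix = td.Categorical(probs=torch.softmax(torch.randn(bs, T, K), dim=-1))
    comp = td.MultivariateNormal(loc=torch.randn(bs, T, K, D),
                                 scale_tril=torch.eye(D).expand(bs, T, K, D, D))
    gmm = GMM2D(mix, comp)
    print(gmm.mode_mode())                                # mean of the most likely component
    print(gmm.position_log_prob(torch.zeros(bs, T, D)))   # log-prob over positions only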
| [
"torch.distributions.MultivariateNormal",
"torch.argmax",
"torch.distributions.MixtureSameFamily"
] | 1.5.0 | StanfordASL/MATS | b31a86eb56728fc6025c71c7202ab425b078e3e5 |
1.0 | """
Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-lamb
"""
import torch
from torch.optim import Optimizer
from colossalai.registry import OPTIMIZERS
@OPTIMIZERS.register_module
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
'Lamb does not support sparse gradients, consider SparseAdam instead.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast.
# * math.sqrt(bias_correction2) / bias_correction1
step_size = group['lr']
weight_norm = p.data.pow(2).sum().sqrt()
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(p.data, alpha=group['weight_decay'])
adam_norm = adam_step.pow(2).sum().sqrt()
if weight_norm == 0 or adam_norm == 0:
trust_ratio = 1
else:
trust_ratio = weight_norm / adam_norm
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(adam_step, alpha=-step_size * trust_ratio)
return loss
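# A minimal usage sketch for the Lamb optimizer defined above. `_example_lamb_usage`
# is a hypothetical helper; the linear model, tensor shapes, and hyperparameters are
# illustrative assumptions rather than values used anywhere in this project.
def _example_lamb_usage():
    model = torch.nn.Linear(10, 2)
    optimizer = Lamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    inputs = torch.randn(4, 10)
    loss = model(inputs).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    # step() applies the Adam-style update scaled by the per-parameter trust ratio
    optimizer.step()
    return loss.item()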
| [
"torch.zeros_like"
] | 1.0 | xdjiangkai/ColossalAI | 4a3d3446b04065fa1c89b78cba673e96115c6325 |
1.1 | import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
import distributed
from models.reporter_ext import ReportMgr, Statistics
from others.logging import logger
from others.utils import test_rouge, rouge_results_to_str
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
return n_params
def build_trainer(args, device_id, model, optim):
"""
    Simplify `Trainer` creation based on user `opt`s.
Args:
opt (:obj:`Namespace`): user options (usually from argument parsing)
model (:obj:`onmt.models.NMTModel`): the model to train
fields (dict): dict of fields
optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
data_type (str): string describing the type of data
e.g. "text", "img", "audio"
model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
used to save the model
"""
grad_accum_count = args.accum_count
n_gpu = args.world_size
if device_id >= 0:
gpu_rank = int(args.gpu_ranks[device_id])
else:
gpu_rank = 0
n_gpu = 0
print('gpu_rank %d' % gpu_rank)
tensorboard_log_dir = args.model_path
writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)
trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)
# print(tr)
if (model):
n_params = _tally_parameters(model)
logger.info('* number of parameters: %d' % n_params)
return trainer
class Trainer(object):
"""
Class that controls the training process.
Args:
model(:py:class:`onmt.models.model.NMTModel`): translation model
to train
train_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
optim(:obj:`onmt.utils.optimizers.Optimizer`):
the optimizer responsible for update
trunc_size(int): length of truncated back propagation through time
shard_size(int): compute loss in shards of this size for efficiency
data_type(string): type of the source input: [text|img|audio]
norm_method(string): normalization methods: [sents|tokens]
grad_accum_count(int): accumulate gradients this many times.
report_manager(:obj:`onmt.utils.ReportMgrBase`):
the object that creates reports, or None
model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is
used to save a checkpoint.
Thus nothing will be saved if this parameter is None
"""
def __init__(self, args, model, optim,
grad_accum_count=1, n_gpu=1, gpu_rank=1,
report_manager=None):
# Basic attributes.
self.args = args
self.save_checkpoint_steps = args.save_checkpoint_steps
self.model = model
self.optim = optim
self.grad_accum_count = grad_accum_count
self.n_gpu = n_gpu
self.gpu_rank = gpu_rank
self.report_manager = report_manager
self.loss = torch.nn.BCELoss(reduction='none')
assert grad_accum_count > 0
# Set model in training mode.
if (model):
self.model.train()
def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1):
"""
        The main training loop: iterates over the training data
        (i.e. `train_iter_fct`) and periodically runs validation
        (i.e. iterates over `valid_iter_fct`).
Args:
train_iter_fct(function): a function that returns the train
iterator. e.g. something like
train_iter_fct = lambda: generator(*args, **kwargs)
valid_iter_fct(function): same as train_iter_fct, for valid data
train_steps(int):
valid_steps(int):
save_checkpoint_steps(int):
Return:
None
"""
logger.info('Start training...')
# step = self.optim._step + 1
step = self.optim._step + 1
true_batchs = []
accum = 0
normalization = 0
train_iter = train_iter_fct()
total_stats = Statistics()
report_stats = Statistics()
self._start_report_manager(start_time=total_stats.start_time)
while step <= train_steps:
reduce_counter = 0
for i, batch in enumerate(train_iter):
if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):
true_batchs.append(batch)
normalization += batch.batch_size
accum += 1
if accum == self.grad_accum_count:
reduce_counter += 1
if self.n_gpu > 1:
normalization = sum(distributed
.all_gather_list
(normalization))
self._gradient_accumulation(
true_batchs, normalization, total_stats,
report_stats)
report_stats = self._maybe_report_training(
step, train_steps,
self.optim.learning_rate,
report_stats)
true_batchs = []
accum = 0
normalization = 0
if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0):
self._save(step)
step += 1
if step > train_steps:
break
train_iter = train_iter_fct()
return total_stats
def validate(self, valid_iter, step=0):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
self.model.eval()
stats = Statistics()
with torch.no_grad():
for batch in valid_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
self._report_step(0, step, valid_stats=stats)
return stats
def test(self, test_iter, step, cal_lead=False, cal_oracle=False):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
def _get_ngrams(n, text):
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _block_tri(c, p):
tri_c = _get_ngrams(3, c.split())
for s in p:
tri_s = _get_ngrams(3, s.split())
if len(tri_c.intersection(tri_s)) > 0:
return True
return False
if (not cal_lead and not cal_oracle):
self.model.eval()
stats = Statistics()
can_path = '%s_step%d.candidate' % (self.args.result_path, step)
gold_path = '%s_step%d.gold' % (self.args.result_path, step)
##
src_path = '%s_step%d.src' % (self.args.result_path, step)
f = open(src_path, 'w')
##
sent_no = 0
with open(can_path, 'w') as save_pred:
with open(gold_path, 'w') as save_gold:
with torch.no_grad():
for batch in test_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
gold = []
pred = []
src_fix = []
if (cal_lead):
selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size
elif (cal_oracle):
selected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in
range(batch.batch_size)]
else:
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
if labels.float().size()[1] != 0:
loss = self.loss(sent_scores, labels.float())
else:
continue
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
sent_scores = sent_scores + mask.float()
sent_scores = sent_scores.cpu().data.numpy()
selected_ids = np.argsort(-sent_scores, 1)
if len(selected_ids[0]) < 7:
continue
# selected_ids = np.sort(selected_ids,1)
for i, idx in enumerate(selected_ids):
_pred = []
if (len(batch.src_str[i]) == 0):
continue
for j in selected_ids[i][:len(batch.src_str[i])]:
if (j >= len(batch.src_str[i])):
continue
candidate = batch.src_str[i][j].strip()
if (self.args.block_trigram):
if (not _block_tri(candidate, _pred)):
_pred.append(candidate)
else:
_pred.append(candidate)
if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3):
break
_pred = '<q>'.join(_pred)
if (self.args.recall_eval):
_pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())])
pred.append(_pred)
gold.append(batch.tgt_str[i])
src_fix.append(batch.src_str[i])
sent_no += 1
# print(sent_no)
# print('gold', gold)
# print(gold_path)
for i in range(len(gold)):
save_gold.write(str(sent_no) + "_" + str(i) + ': ' + gold[i].strip() + '\n')
for i in range(len(pred)):
save_pred.write(str(sent_no) + "_" + str(i) + ': ' + pred[i].strip() + '\n')
for i in range(len(pred)):
f.write(str(sent_no) + "_" + str(i) + ': ' + '###'.join(src_fix[i]).strip()+'\n')
f.close()
if (step != -1 and self.args.report_rouge):
rouges = test_rouge(self.args.temp_dir, can_path, gold_path)
logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges)))
self._report_step(0, step, valid_stats=stats)
return stats
def _gradient_accumulation(self, true_batchs, normalization, total_stats,
report_stats):
if self.grad_accum_count > 1:
self.model.zero_grad()
for batch in true_batchs:
if self.grad_accum_count == 1:
self.model.zero_grad()
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
(loss / loss.numel()).backward()
# loss.div(float(normalization)).backward()
batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization)
total_stats.update(batch_stats)
report_stats.update(batch_stats)
# 4. Update the parameters and statistics.
if self.grad_accum_count == 1:
# Multi GPU gradient gather
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
# in case of multi step gradient accumulation,
# update only after accum batches
if self.grad_accum_count > 1:
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
def _save(self, step):
real_model = self.model
# real_generator = (self.generator.module
# if isinstance(self.generator, torch.nn.DataParallel)
# else self.generator)
model_state_dict = real_model.state_dict()
# generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
# 'generator': generator_state_dict,
'opt': self.args,
'optims': self.optim,
}
checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step)
logger.info("Saving checkpoint %s" % checkpoint_path)
# checkpoint_path = '%s_step_%d.pt' % (FLAGS.model_path, step)
if (not os.path.exists(checkpoint_path)):
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _start_report_manager(self, start_time=None):
"""
Simple function to start report manager (if any)
"""
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time
def _maybe_gather_stats(self, stat):
"""
Gather statistics in multi-processes cases
Args:
stat(:obj:onmt.utils.Statistics): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object
"""
if stat is not None and self.n_gpu > 1:
return Statistics.all_gather_stats(stat)
return stat
def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
"""
Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1)
def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
"""
Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats)
def _maybe_save(self, step):
"""
Save the model if a model saver is set
"""
if self.model_saver is not None:
self.model_saver.maybe_save(step)
| [
"torch.no_grad",
"torch.save",
"torch.nn.BCELoss"
] | 1.1.0 | Katarina11/PreSumm | 616e72f038d512e9e9112af375d66a0b2e3db6cd |
0.4 | """
Common routines for models in PyTorch.
"""
__all__ = ['HSwish', 'get_activation_layer', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock', 'conv1x1_block',
'conv3x3_block', 'conv7x7_block', 'dwconv3x3_block', 'dwconv5x5_block', 'PreConvBlock', 'pre_conv1x1_block',
'pre_conv3x3_block', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'IBN', 'Identity', 'DualPathSequential',
'Concurrent', 'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass',
'MultiOutputSequential', 'Flatten']
import math
from inspect import isfunction
import torch
import torch.nn as nn
import torch.nn.functional as F
class Swish(nn.Module):
"""
Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
"""
def forward(self, x):
return x * torch.sigmoid(x)
class HSigmoid(nn.Module):
"""
Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
https://arxiv.org/abs/1905.02244.
"""
def forward(self, x):
return F.relu6(x + 3.0, inplace=True) / 6.0
class HSwish(nn.Module):
"""
H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
Parameters:
----------
inplace : bool
Whether to use inplace version of the module.
"""
def __init__(self, inplace=False):
super(HSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_activation_layer(activation):
"""
Create activation layer from string/function.
Parameters:
----------
activation : function, or str, or nn.Module
Activation function or name of activation function.
Returns
-------
nn.Module
Activation layer.
"""
assert (activation is not None)
if isfunction(activation):
return activation()
elif isinstance(activation, str):
if activation == "relu":
return nn.ReLU(inplace=True)
elif activation == "relu6":
return nn.ReLU6(inplace=True)
elif activation == "swish":
return Swish()
elif activation == "hswish":
return HSwish(inplace=True)
else:
raise NotImplementedError()
else:
assert (isinstance(activation, nn.Module))
return activation
def conv1x1(in_channels,
out_channels,
stride=1,
groups=1,
bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
groups=groups,
bias=bias)
def conv3x3(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False):
"""
Convolution 3x3 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
def depthwise_conv3x3(channels,
stride):
"""
Depthwise convolution 3x3 layer.
Parameters:
----------
channels : int
Number of input/output channels.
    stride : int or tuple/list of 2 int
Strides of the convolution.
"""
return nn.Conv2d(
in_channels=channels,
out_channels=channels,
kernel_size=3,
stride=stride,
padding=1,
groups=channels,
bias=False)
class ConvBlock(nn.Module):
"""
Standard convolution block with Batch normalization and activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
super(ConvBlock, self).__init__()
self.activate = (activation is not None)
self.use_bn = use_bn
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
if self.use_bn:
self.bn = nn.BatchNorm2d(
num_features=out_channels,
eps=bn_eps)
if self.activate:
self.activ = get_activation_layer(activation)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def conv1x1_block(in_channels,
out_channels,
stride=1,
padding=0,
groups=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
1x1 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 0
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=padding,
groups=groups,
bias=bias,
bn_eps=bn_eps,
activation=activation)
def conv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
3x3 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
def conv5x5_block(in_channels,
out_channels,
stride=1,
padding=2,
dilation=1,
groups=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
5x5 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 2
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=5,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
bn_eps=bn_eps,
activation=activation)
def conv7x7_block(in_channels,
out_channels,
stride=1,
padding=3,
bias=False,
activation=(lambda: nn.ReLU(inplace=True))):
"""
7x7 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 3
Padding value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
stride=stride,
padding=padding,
bias=bias,
activation=activation)
def dwconv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
3x3 depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
padding=padding,
dilation=dilation,
groups=out_channels,
bias=bias,
bn_eps=bn_eps,
activation=activation)
def dwconv5x5_block(in_channels,
out_channels,
stride=1,
padding=2,
dilation=1,
bias=False,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
5x5 depthwise version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 2
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return conv5x5_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
padding=padding,
dilation=dilation,
groups=out_channels,
bias=bias,
bn_eps=bn_eps,
activation=activation)
class PreConvBlock(nn.Module):
"""
Convolution block with Batch normalization and ReLU pre-activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
return_preact : bool, default False
        Whether to return the pre-activation. It's used by PreResNet.
activate : bool, default True
        Whether to activate the convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
bias=False,
return_preact=False,
activate=True):
super(PreConvBlock, self).__init__()
self.return_preact = return_preact
self.activate = activate
self.bn = nn.BatchNorm2d(num_features=in_channels)
if self.activate:
self.activ = nn.ReLU(inplace=True)
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
def forward(self, x):
x = self.bn(x)
if self.activate:
x = self.activ(x)
if self.return_preact:
x_pre_activ = x
x = self.conv(x)
if self.return_preact:
return x, x_pre_activ
else:
return x
def pre_conv1x1_block(in_channels,
out_channels,
stride=1,
bias=False,
return_preact=False,
activate=True):
"""
1x1 version of the pre-activated convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
bias : bool, default False
Whether the layer uses a bias vector.
return_preact : bool, default False
        Whether to return the pre-activation.
activate : bool, default True
        Whether to activate the convolution block.
"""
return PreConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=0,
bias=bias,
return_preact=return_preact,
activate=activate)
def pre_conv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
return_preact=False,
activate=True):
"""
3x3 version of the pre-activated convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
return_preact : bool, default False
        Whether to return the pre-activation.
activate : bool, default True
        Whether to activate the convolution block.
"""
return PreConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
return_preact=return_preact,
activate=activate)
def channel_shuffle(x,
groups):
"""
Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
Parameters:
----------
x : Tensor
Input tensor.
groups : int
Number of groups.
Returns
-------
Tensor
Resulted tensor.
"""
batch, channels, height, width = x.size()
# assert (channels % groups == 0)
channels_per_group = channels // groups
x = x.view(batch, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batch, channels, height, width)
return x
class ChannelShuffle(nn.Module):
"""
    Channel shuffle layer. This is a wrapper over the same operation. It is designed to store the number of groups.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups):
super(ChannelShuffle, self).__init__()
# assert (channels % groups == 0)
if channels % groups != 0:
raise ValueError('channels must be divisible by groups')
self.groups = groups
def forward(self, x):
return channel_shuffle(x, self.groups)
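# A small illustrative sketch (hypothetical helper, assumed sizes): shuffling 4 channels
# in 2 groups reorders them as [0, 2, 1, 3] while keeping the tensor shape unchanged.
def _example_channel_shuffle():
    x = torch.arange(4, dtype=torch.float32).view(1, 4, 1, 1)
    shuffle = ChannelShuffle(channels=4, groups=2)
    y = shuffle(x)
    return y.view(-1).tolist()  # [0.0, 2.0, 1.0, 3.0]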
def channel_shuffle2(x,
groups):
"""
Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083. The alternative version.
Parameters:
----------
x : Tensor
Input tensor.
groups : int
Number of groups.
Returns
-------
Tensor
Resulted tensor.
"""
batch, channels, height, width = x.size()
# assert (channels % groups == 0)
channels_per_group = channels // groups
x = x.view(batch, channels_per_group, groups, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batch, channels, height, width)
return x
class ChannelShuffle2(nn.Module):
"""
    Channel shuffle layer. This is a wrapper over the same operation. It is designed to store the number of groups.
The alternative version.
Parameters:
----------
channels : int
Number of channels.
groups : int
Number of groups.
"""
def __init__(self,
channels,
groups):
super(ChannelShuffle2, self).__init__()
# assert (channels % groups == 0)
if channels % groups != 0:
raise ValueError('channels must be divisible by groups')
self.groups = groups
def forward(self, x):
return channel_shuffle2(x, self.groups)
class SEBlock(nn.Module):
"""
Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : int
Number of channels.
reduction : int, default 16
Squeeze reduction value.
approx_sigmoid : bool, default False
Whether to use approximated sigmoid function.
activation : function, or str, or nn.Module
Activation function or name of activation function.
"""
def __init__(self,
channels,
reduction=16,
approx_sigmoid=False,
activation=(lambda: nn.ReLU(inplace=True))):
super(SEBlock, self).__init__()
        mid_channels = channels // reduction
        self.pool = nn.AdaptiveAvgPool2d(output_size=1)
        self.conv1 = conv1x1(
            in_channels=channels,
            out_channels=mid_channels,
            bias=True)
        self.activ = get_activation_layer(activation)
        self.conv2 = conv1x1(
            in_channels=mid_channels,
out_channels=channels,
bias=True)
self.sigmoid = HSigmoid() if approx_sigmoid else nn.Sigmoid()
def forward(self, x):
w = self.pool(x)
w = self.conv1(w)
w = self.activ(w)
w = self.conv2(w)
w = self.sigmoid(w)
x = x * w
return x
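# An illustrative shape check (hypothetical helper, assumed sizes): SEBlock rescales
# channels with a learned gate, so the output shape matches the input shape.
def _example_se_block():
    block = SEBlock(channels=32, reduction=4)
    x = torch.randn(2, 32, 8, 8)
    return block(x).shape  # torch.Size([2, 32, 8, 8])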
class IBN(nn.Module):
"""
Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
Parameters:
----------
channels : int
Number of channels.
    first_fraction : float, default 0.5
        Fraction of channels in the first split.
    inst_first : bool, default True
        Whether instance normalization is applied to the first part of the channels.
"""
def __init__(self,
channels,
first_fraction=0.5,
inst_first=True):
super(IBN, self).__init__()
self.inst_first = inst_first
h1_channels = int(math.floor(channels * first_fraction))
h2_channels = channels - h1_channels
self.split_sections = [h1_channels, h2_channels]
if self.inst_first:
self.inst_norm = nn.InstanceNorm2d(
num_features=h1_channels,
affine=True)
self.batch_norm = nn.BatchNorm2d(num_features=h2_channels)
else:
self.batch_norm = nn.BatchNorm2d(num_features=h1_channels)
self.inst_norm = nn.InstanceNorm2d(
num_features=h2_channels,
affine=True)
def forward(self, x):
x1, x2 = torch.split(x, split_size_or_sections=self.split_sections, dim=1)
if self.inst_first:
x1 = self.inst_norm(x1.contiguous())
x2 = self.batch_norm(x2.contiguous())
else:
x1 = self.batch_norm(x1.contiguous())
x2 = self.inst_norm(x2.contiguous())
x = torch.cat((x1, x2), dim=1)
return x
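# A brief shape sketch (hypothetical helper, assumed sizes): IBN instance-normalizes one
# part of the channels and batch-normalizes the rest, leaving the shape unchanged.
def _example_ibn():
    ibn = IBN(channels=64)
    x = torch.randn(4, 64, 8, 8)
    return ibn(x).shape  # torch.Size([4, 64, 8, 8])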
class Identity(nn.Module):
"""
Identity block.
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class DualPathSequential(nn.Sequential):
"""
A sequential container for modules with dual inputs/outputs.
Modules will be executed in the order they are added.
Parameters:
----------
return_two : bool, default True
        Whether to return two outputs after execution.
first_ordinals : int, default 0
Number of the first modules with single input/output.
last_ordinals : int, default 0
Number of the final modules with single input/output.
dual_path_scheme : function
Scheme of dual path response for a module.
dual_path_scheme_ordinal : function
Scheme of dual path response for an ordinal module.
"""
def __init__(self,
return_two=True,
first_ordinals=0,
last_ordinals=0,
dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2))):
super(DualPathSequential, self).__init__()
self.return_two = return_two
self.first_ordinals = first_ordinals
self.last_ordinals = last_ordinals
self.dual_path_scheme = dual_path_scheme
self.dual_path_scheme_ordinal = dual_path_scheme_ordinal
def forward(self, x1, x2=None):
length = len(self._modules.values())
for i, module in enumerate(self._modules.values()):
if (i < self.first_ordinals) or (i >= length - self.last_ordinals):
x1, x2 = self.dual_path_scheme_ordinal(module, x1, x2)
else:
x1, x2 = self.dual_path_scheme(module, x1, x2)
if self.return_two:
return x1, x2
else:
return x1
class Concurrent(nn.Sequential):
"""
A container for concatenation of modules on the base of the sequential container.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
stack : bool, default False
Whether to concatenate tensors along a new dimension.
"""
def __init__(self,
axis=1,
stack=False):
super(Concurrent, self).__init__()
self.axis = axis
self.stack = stack
def forward(self, x):
out = []
for module in self._modules.values():
out.append(module(x))
if self.stack:
out = torch.stack(tuple(out), dim=self.axis)
else:
out = torch.cat(tuple(out), dim=self.axis)
return out
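# An illustrative sketch (hypothetical helper, assumed channel sizes): Concurrent feeds
# the same input to every child branch and concatenates the results along `axis`.
def _example_concurrent():
    branches = Concurrent()
    branches.add_module("branch1", conv1x1(in_channels=16, out_channels=8))
    branches.add_module("branch2", conv3x3(in_channels=16, out_channels=8))
    x = torch.randn(1, 16, 8, 8)
    return branches(x).shape  # 8 + 8 channels -> torch.Size([1, 16, 8, 8])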
class ParametricSequential(nn.Sequential):
"""
A sequential container for modules with parameters.
Modules will be executed in the order they are added.
"""
def __init__(self, *args):
super(ParametricSequential, self).__init__(*args)
def forward(self, x, **kwargs):
for module in self._modules.values():
x = module(x, **kwargs)
return x
class ParametricConcurrent(nn.Sequential):
"""
A container for concatenation of modules with parameters.
Parameters:
----------
axis : int, default 1
The axis on which to concatenate the outputs.
"""
def __init__(self, axis=1):
super(ParametricConcurrent, self).__init__()
self.axis = axis
def forward(self, x, **kwargs):
out = []
for module in self._modules.values():
out.append(module(x, **kwargs))
out = torch.cat(tuple(out), dim=self.axis)
return out
class Hourglass(nn.Module):
"""
    An hourglass block.
Parameters:
----------
down_seq : nn.Sequential
Down modules as sequential.
up_seq : nn.Sequential
Up modules as sequential.
skip_seq : nn.Sequential
Skip connection modules as sequential.
merge_type : str, default 'add'
        How to merge the up and skip outputs.
return_first_skip : bool, default False
        Whether to return the first skip connection output. Used in ResAttNet.
"""
def __init__(self,
down_seq,
up_seq,
skip_seq,
merge_type="add",
return_first_skip=False):
super(Hourglass, self).__init__()
assert (len(up_seq) == len(down_seq))
assert (len(skip_seq) == len(down_seq))
assert (merge_type in ["add"])
self.merge_type = merge_type
self.return_first_skip = return_first_skip
self.depth = len(down_seq)
self.down_seq = down_seq
self.up_seq = up_seq
self.skip_seq = skip_seq
def forward(self, x, **kwargs):
y = None
down_outs = [x]
for down_module in self.down_seq._modules.values():
x = down_module(x)
down_outs.append(x)
for i in range(len(down_outs)):
if i != 0:
y = down_outs[self.depth - i]
skip_module = self.skip_seq[self.depth - i]
y = skip_module(y)
if (y is not None) and (self.merge_type == "add"):
x = x + y
if i != len(down_outs) - 1:
up_module = self.up_seq[self.depth - 1 - i]
x = up_module(x)
if self.return_first_skip:
return x, y
else:
return x
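# A minimal working sketch (hypothetical helper, assumed modules): a 2-level Hourglass
# built from max-pooling on the way down, nearest-neighbour upsampling on the way up,
# and identity skip connections merged by addition, so the output keeps the input shape.
def _example_hourglass():
    down_seq = nn.Sequential(nn.MaxPool2d(2), nn.MaxPool2d(2))
    up_seq = nn.Sequential(nn.Upsample(scale_factor=2), nn.Upsample(scale_factor=2))
    skip_seq = nn.Sequential(Identity(), Identity())
    hg = Hourglass(down_seq, up_seq, skip_seq)
    x = torch.randn(1, 8, 16, 16)
    return hg(x).shape  # torch.Size([1, 8, 16, 16])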
class SesquialteralHourglass(nn.Module):
"""
A sesquialteral hourglass block.
Parameters:
----------
down1_seq : nn.Sequential
The first down modules as sequential.
skip1_seq : nn.Sequential
The first skip connection modules as sequential.
up_seq : nn.Sequential
Up modules as sequential.
skip2_seq : nn.Sequential
The second skip connection modules as sequential.
down2_seq : nn.Sequential
The second down modules as sequential.
    merge_type : str, default 'cat'
        How to merge the up and skip outputs ('cat' or 'add').
"""
def __init__(self,
down1_seq,
skip1_seq,
up_seq,
skip2_seq,
down2_seq,
merge_type="cat"):
super(SesquialteralHourglass, self).__init__()
assert (len(down1_seq) == len(up_seq))
assert (len(down1_seq) == len(down2_seq))
assert (len(skip1_seq) == len(skip2_seq))
assert (len(down1_seq) == len(skip1_seq) - 1)
assert (merge_type in ["cat", "add"])
self.merge_type = merge_type
self.depth = len(down1_seq)
self.down1_seq = down1_seq
self.skip1_seq = skip1_seq
self.up_seq = up_seq
self.skip2_seq = skip2_seq
self.down2_seq = down2_seq
def _merge(self, x, y):
if y is not None:
if self.merge_type == "cat":
x = torch.cat((x, y), dim=1)
elif self.merge_type == "add":
x = x + y
return x
def forward(self, x, **kwargs):
y = self.skip1_seq[0](x)
skip1_outs = [y]
for i in range(self.depth):
x = self.down1_seq[i](x)
y = self.skip1_seq[i + 1](x)
skip1_outs.append(y)
x = skip1_outs[self.depth]
y = self.skip2_seq[0](x)
skip2_outs = [y]
for i in range(self.depth):
x = self.up_seq[i](x)
y = skip1_outs[self.depth - 1 - i]
x = self._merge(x, y)
y = self.skip2_seq[i + 1](x)
skip2_outs.append(y)
x = self.skip2_seq[self.depth](x)
for i in range(self.depth):
x = self.down2_seq[i](x)
y = skip2_outs[self.depth - 1 - i]
x = self._merge(x, y)
return x
class MultiOutputSequential(nn.Sequential):
"""
A sequential container with multiple outputs.
Modules will be executed in the order they are added.
"""
def __init__(self):
super(MultiOutputSequential, self).__init__()
def forward(self, x):
outs = []
for module in self._modules.values():
x = module(x)
if hasattr(module, "do_output") and module.do_output:
outs.append(x)
return [x] + outs
class Flatten(nn.Module):
"""
Simple flatten module.
"""
def forward(self, x):
return x.view(x.size(0), -1)
| [
"torch.sigmoid",
"torch.cat",
"torch.nn.functional.relu6",
"torch.nn.Sigmoid",
"torch.nn.BatchNorm2d",
"torch.split",
"torch.transpose",
"torch.nn.ReLU",
"torch.nn.ReLU6",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.AdaptiveAvgPool2d"
] | 0.4.0 | HyperGAN/imgclsmob | 88b9776a5a927dc9a54e85e31978c4a9ec5ecbf3 |
1.6 | ######################################
#######ORIGINAL IMPLEMENTATION########
######################################
# FROM https://github.com/kunhe/FastAP-metric-learning/blob/master/pytorch/FastAP_loss.py
# This code is copied directly from the official implementation
# so that we can make sure our implementation returns the same result.
# It's copied under the MIT license.
import torch
from torch.autograd import Variable
def softBinning(D, mid, Delta):
y = 1 - torch.abs(D - mid) / Delta
return torch.max(torch.tensor([0], dtype=D.dtype).to(D.device), y)
def dSoftBinning(D, mid, Delta):
side1 = (D > (mid - Delta)).type(D.dtype)
side2 = (D <= mid).type(D.dtype)
ind1 = side1 * side2 # .type(torch.uint8)
side1 = (D > mid).type(D.dtype)
side2 = (D <= (mid + Delta)).type(D.dtype)
ind2 = side1 * side2 # .type(torch.uint8)
return (ind1 - ind2) / Delta
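# A tiny numeric sketch (hypothetical helper, assumed values): softBinning returns the
# triangular membership of each distance in the bin centred at `mid` with half-width
# `Delta`; dSoftBinning is its piecewise-constant derivative with respect to the distance.
def _example_soft_binning():
    D = torch.tensor([[0.0, 0.25, 0.5, 1.0]])
    mid = torch.tensor(0.5)
    Delta = torch.tensor(0.5)
    return softBinning(D, mid, Delta)  # tensor([[0.0, 0.5, 1.0, 0.0]])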
######################################
#######ORIGINAL IMPLEMENTATION########
######################################
# FROM https://github.com/kunhe/FastAP-metric-learning/blob/master/pytorch/FastAP_loss.py
# This code is copied directly from the official implementation
# so that we can make sure our implementation returns the same result.
# It's copied under the MIT license.
class OriginalImplementationFastAP(torch.autograd.Function):
"""
FastAP - autograd function definition
This class implements the FastAP loss from the following paper:
"Deep Metric Learning to Rank",
F. Cakir, K. He, X. Xia, B. Kulis, S. Sclaroff. CVPR 2019
NOTE:
    Given an input batch, FastAP does not sample triplets from it as it's not
a triplet-based method. Therefore, FastAP does not take a Sampler as input.
Rather, we specify how the input batch is selected.
"""
@staticmethod
def forward(ctx, input, target, num_bins):
"""
Args:
input: torch.Tensor(N x embed_dim), embedding matrix
target: torch.Tensor(N x 1), class labels
num_bins: int, number of bins in distance histogram
"""
N = target.size()[0]
        assert input.size()[0] == N, "Batch size doesn't match!"
# 1. get affinity matrix
Y = target.unsqueeze(1)
Aff = 2 * (Y == Y.t()).type(input.dtype) - 1
Aff.masked_fill_(
torch.eye(N, N).bool().to(input.device), 0
) # set diagonal to 0
I_pos = (Aff > 0).type(input.dtype).to(input.device)
I_neg = (Aff < 0).type(input.dtype).to(input.device)
N_pos = torch.sum(I_pos, 1)
# 2. compute distances from embeddings
# squared Euclidean distance with range [0,4]
dist2 = 2 - 2 * torch.mm(input, input.t())
# 3. estimate discrete histograms
Delta = torch.tensor(4.0 / num_bins).to(input.device)
Z = torch.linspace(0.0, 4.0, steps=num_bins + 1).to(input.device)
L = Z.size()[0]
h_pos = torch.zeros((N, L), dtype=input.dtype).to(input.device)
h_neg = torch.zeros((N, L), dtype=input.dtype).to(input.device)
for l in range(L):
pulse = softBinning(dist2, Z[l], Delta)
h_pos[:, l] = torch.sum(pulse * I_pos, 1)
h_neg[:, l] = torch.sum(pulse * I_neg, 1)
H_pos = torch.cumsum(h_pos, 1)
h = h_pos + h_neg
H = torch.cumsum(h, 1)
        # 4. compute FastAP
FastAP = h_pos * H_pos / H
FastAP[torch.isnan(FastAP) | torch.isinf(FastAP)] = 0
FastAP = torch.sum(FastAP, 1) / N_pos
FastAP = FastAP[~torch.isnan(FastAP)]
loss = 1 - torch.mean(FastAP)
# 6. save for backward
ctx.save_for_backward(input, target)
ctx.Z = Z
ctx.Delta = Delta
ctx.dist2 = dist2
ctx.I_pos = I_pos
ctx.I_neg = I_neg
ctx.h_pos = h_pos
ctx.h_neg = h_neg
ctx.H_pos = H_pos
ctx.N_pos = N_pos
ctx.h = h
ctx.H = H
ctx.L = torch.tensor(L)
return loss
@staticmethod
def backward(ctx, grad_output):
input, target = ctx.saved_tensors
Z = Variable(ctx.Z, requires_grad=False)
Delta = Variable(ctx.Delta, requires_grad=False)
dist2 = Variable(ctx.dist2, requires_grad=False)
I_pos = Variable(ctx.I_pos, requires_grad=False)
I_neg = Variable(ctx.I_neg, requires_grad=False)
h = Variable(ctx.h, requires_grad=False)
H = Variable(ctx.H, requires_grad=False)
h_pos = Variable(ctx.h_pos, requires_grad=False)
h_neg = Variable(ctx.h_neg, requires_grad=False)
H_pos = Variable(ctx.H_pos, requires_grad=False)
N_pos = Variable(ctx.N_pos, requires_grad=False)
L = Z.size()[0]
H2 = torch.pow(H, 2)
H_neg = H - H_pos
# 1. d(FastAP)/d(h+)
        LTM1 = torch.tril(torch.ones(L, L), -1)  # lower triangular matrix
tmp1 = h_pos * H_neg / H2
tmp1[torch.isnan(tmp1)] = 0
d_AP_h_pos = (H_pos * H + h_pos * H_neg) / H2
d_AP_h_pos = d_AP_h_pos + torch.mm(tmp1, LTM1.cuda())
d_AP_h_pos = d_AP_h_pos / N_pos.repeat(L, 1).t()
d_AP_h_pos[torch.isnan(d_AP_h_pos) | torch.isinf(d_AP_h_pos)] = 0
# 2. d(FastAP)/d(h-)
LTM0 = torch.tril(torch.ones(L, L), 0) # lower triangular matrix
tmp2 = -h_pos * H_pos / H2
tmp2[torch.isnan(tmp2)] = 0
d_AP_h_neg = torch.mm(tmp2, LTM0.cuda())
d_AP_h_neg = d_AP_h_neg / N_pos.repeat(L, 1).t()
d_AP_h_neg[torch.isnan(d_AP_h_neg) | torch.isinf(d_AP_h_neg)] = 0
# 3. d(FastAP)/d(embedding)
d_AP_x = 0
for l in range(L):
dpulse = dSoftBinning(dist2, Z[l], Delta)
dpulse[torch.isnan(dpulse) | torch.isinf(dpulse)] = 0
ddp = dpulse * I_pos
ddn = dpulse * I_neg
alpha_p = torch.diag(d_AP_h_pos[:, l]) # N*N
alpha_n = torch.diag(d_AP_h_neg[:, l])
Ap = torch.mm(ddp, alpha_p) + torch.mm(alpha_p, ddp)
An = torch.mm(ddn, alpha_n) + torch.mm(alpha_n, ddn)
# accumulate gradient
d_AP_x = d_AP_x - torch.mm(input.t(), (Ap + An))
grad_input = -d_AP_x
return grad_input.t(), None, None
######################################
#######ORIGINAL IMPLEMENTATION########
######################################
# FROM https://github.com/kunhe/FastAP-metric-learning/blob/master/pytorch/FastAP_loss.py
# This code is copied directly from the official implementation
# so that we can make sure our implementation returns the same result.
# It's copied under the MIT license.
class OriginalImplementationFastAPLoss(torch.nn.Module):
"""
FastAP - loss layer definition
This class implements the FastAP loss from the following paper:
"Deep Metric Learning to Rank",
F. Cakir, K. He, X. Xia, B. Kulis, S. Sclaroff. CVPR 2019
"""
def __init__(self, num_bins=10):
super(OriginalImplementationFastAPLoss, self).__init__()
self.num_bins = num_bins
def forward(self, batch, labels):
return OriginalImplementationFastAP.apply(batch, labels, self.num_bins)
### Testing this library's implementation ###
import unittest
from pytorch_metric_learning.losses import FastAPLoss
from .. import TEST_DEVICE, TEST_DTYPES
from ..zzz_testing_utils.testing_utils import angle_to_coord
class TestFastAPLoss(unittest.TestCase):
def test_fast_ap_loss(self):
num_bins = 5
loss_func = FastAPLoss(num_bins)
original_loss_func = OriginalImplementationFastAPLoss(num_bins)
ref_emb = torch.randn(32, 32)
ref_labels = torch.randint(0, 10, (32,))
for dtype in TEST_DTYPES:
embedding_angles = torch.arange(0, 180)
embeddings = torch.tensor(
[angle_to_coord(a) for a in embedding_angles],
requires_grad=True,
dtype=dtype,
).to(
TEST_DEVICE
) # 2D embeddings
labels = torch.randint(low=0, high=10, size=(180,)).to(TEST_DEVICE)
loss = loss_func(embeddings, labels)
loss.backward()
original_loss = original_loss_func(
torch.nn.functional.normalize(embeddings), labels
)
rtol = 1e-2 if dtype == torch.float16 else 1e-5
self.assertTrue(torch.isclose(loss, original_loss, rtol=rtol))
# fastap doesn't support ref_emb
self.assertRaises(
ValueError,
lambda: loss_func(
embeddings, labels, ref_emb=ref_emb, ref_labels=ref_labels
),
)
| [
"torch.isnan",
"torch.ones",
"torch.eye",
"torch.sum",
"torch.autograd.Variable",
"torch.abs",
"torch.randint",
"torch.tensor",
"torch.zeros",
"torch.linspace",
"torch.mm",
"torch.randn",
"torch.isclose",
"torch.isinf",
"torch.cumsum",
"torch.pow",
"torch.nn.functional.normalize",
"torch.arange",
"torch.diag",
"torch.mean"
] | 1.6.0 | cwkeam/pytorch-metric-learning | 63e4ecb781c5735ad714f61a3eecc55f72496905 |
1.6 | import unittest
import numpy as np
import torch
from pytorch_metric_learning.distances import CosineSimilarity, LpDistance
from pytorch_metric_learning.miners import BatchHardMiner
from .. import TEST_DEVICE, TEST_DTYPES, WITH_COLLECT_STATS
from ..zzz_testing_utils.testing_utils import angle_to_coord
class TestBatchHardMiner(unittest.TestCase):
@classmethod
def setUpClass(self):
self.dist_miner = BatchHardMiner(
distance=LpDistance(normalize_embeddings=False)
)
self.normalized_dist_miner = BatchHardMiner(
distance=LpDistance(normalize_embeddings=True)
)
self.normalized_dist_miner_squared = BatchHardMiner(
distance=LpDistance(normalize_embeddings=True, power=2)
)
self.sim_miner = BatchHardMiner(distance=CosineSimilarity())
self.labels = torch.LongTensor([0, 0, 1, 1, 0, 2, 1, 1, 1])
self.correct_a = torch.LongTensor([0, 1, 2, 3, 4, 6, 7, 8]).to(TEST_DEVICE)
self.correct_p = torch.LongTensor([4, 4, 8, 8, 0, 2, 2, 2]).to(TEST_DEVICE)
self.correct_n = [
torch.LongTensor([2, 2, 1, 4, 3, 5, 5, 5]).to(TEST_DEVICE),
torch.LongTensor([2, 2, 1, 4, 5, 5, 5, 5]).to(TEST_DEVICE),
]
@classmethod
def tearDown(self):
torch.cuda.empty_cache()
def test_dist_mining(self):
for dtype in TEST_DTYPES:
embeddings = torch.arange(9).type(dtype).unsqueeze(1).to(TEST_DEVICE)
a, p, n = self.dist_miner(embeddings, self.labels)
self.helper(a, p, n)
if WITH_COLLECT_STATS:
self.assertTrue(self.dist_miner.hardest_pos_pair == 6)
self.assertTrue(self.dist_miner.hardest_neg_pair == 1)
def test_normalized_dist_mining(self):
for dtype in TEST_DTYPES:
angles = [0, 20, 40, 60, 80, 100, 120, 140, 160]
embeddings = torch.tensor(
[angle_to_coord(a, normalized=True) for a in angles], dtype=dtype
).to(TEST_DEVICE)
a, p, n = self.normalized_dist_miner(embeddings, self.labels)
self.helper(a, p, n)
correct_hardest_pos_pair = torch.sqrt(
torch.sum((embeddings[2] - embeddings[8]) ** 2)
).item()
correct_hardest_neg_pair = torch.sqrt(
torch.sum((embeddings[1] - embeddings[2]) ** 2)
).item()
places = 2 if dtype == torch.float16 else 5
if WITH_COLLECT_STATS:
self.assertAlmostEqual(
self.normalized_dist_miner.hardest_pos_pair,
correct_hardest_pos_pair,
places=places,
)
self.assertAlmostEqual(
self.normalized_dist_miner.hardest_neg_pair,
correct_hardest_neg_pair,
places=places,
)
def test_normalized_dist_squared_mining(self):
for dtype in TEST_DTYPES:
angles = [0, 20, 40, 60, 80, 100, 120, 140, 160]
embeddings = torch.tensor(
[angle_to_coord(a, normalized=True) for a in angles], dtype=dtype
).to(TEST_DEVICE)
a, p, n = self.normalized_dist_miner_squared(embeddings, self.labels)
self.helper(a, p, n)
correct_hardest_pos_pair = torch.sum(
(embeddings[2] - embeddings[8]) ** 2
).item()
correct_hardest_neg_pair = torch.sum(
(embeddings[1] - embeddings[2]) ** 2
).item()
places = 2 if dtype == torch.float16 else 5
if WITH_COLLECT_STATS:
self.assertAlmostEqual(
self.normalized_dist_miner_squared.hardest_pos_pair,
correct_hardest_pos_pair,
places=places,
)
self.assertAlmostEqual(
self.normalized_dist_miner_squared.hardest_neg_pair,
correct_hardest_neg_pair,
places=places,
)
def test_sim_mining(self):
for dtype in TEST_DTYPES:
angles = [0, 20, 40, 60, 80, 100, 120, 140, 160]
embeddings = torch.tensor(
[angle_to_coord(a, normalized=True) for a in angles], dtype=dtype
).to(TEST_DEVICE)
a, p, n = self.sim_miner(embeddings, self.labels)
self.helper(a, p, n)
places = 2 if dtype == torch.float16 else 5
if WITH_COLLECT_STATS:
self.assertAlmostEqual(
self.sim_miner.hardest_pos_pair,
np.cos(np.radians(120)),
places=places,
)
self.assertAlmostEqual(
self.sim_miner.hardest_neg_pair,
np.cos(np.radians(20)),
places=places,
)
def helper(self, a, p, n):
self.assertTrue(torch.equal(a, self.correct_a))
self.assertTrue(torch.equal(p, self.correct_p))
self.assertTrue(any(torch.equal(n, cn) for cn in self.correct_n))
def test_empty_output(self):
batch_size = 32
for dtype in TEST_DTYPES:
embeddings = torch.randn(batch_size, 64).type(dtype).to(TEST_DEVICE)
labels = torch.arange(batch_size)
for miner in [
self.dist_miner,
self.normalized_dist_miner,
self.normalized_dist_miner_squared,
self.sim_miner,
]:
a, p, n = miner(embeddings, labels)
self.assertTrue(len(a) == 0)
self.assertTrue(len(p) == 0)
self.assertTrue(len(n) == 0)
if WITH_COLLECT_STATS:
self.assertTrue(miner.hardest_pos_pair == 0)
self.assertTrue(miner.hardest_neg_pair == 0)
self.assertTrue(miner.hardest_triplet == 0)
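# A hedged standalone sketch (hypothetical helper, assumed embeddings and labels): for
# each anchor the miner picks the hardest positive and hardest negative in the batch.
def _example_batch_hard_miner():
    miner = BatchHardMiner(distance=LpDistance(normalize_embeddings=False))
    embeddings = torch.arange(6, dtype=torch.float32).unsqueeze(1)
    labels = torch.LongTensor([0, 0, 1, 1, 0, 1])
    anchors, positives, negatives = miner(embeddings, labels)
    return anchors, positives, negatives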
| [
"torch.arange",
"torch.cuda.empty_cache",
"torch.LongTensor",
"torch.equal",
"torch.randn",
"torch.sum"
] | 1.6.0 | cwkeam/pytorch-metric-learning | 63e4ecb781c5735ad714f61a3eecc55f72496905 |
1.6 | import math
import numpy as np
import scipy.special
import torch
from ..distances import CosineSimilarity
from ..utils import common_functions as c_f
from ..utils import loss_and_miner_utils as lmu
from .base_metric_loss_function import BaseMetricLossFunction
from .mixins import WeightRegularizerMixin
class LargeMarginSoftmaxLoss(WeightRegularizerMixin, BaseMetricLossFunction):
"""
Implementation of https://arxiv.org/pdf/1612.02295.pdf
"""
def __init__(self, num_classes, embedding_size, margin=4, scale=1, **kwargs):
super().__init__(**kwargs)
c_f.assert_distance_type(self, CosineSimilarity)
self.margin = margin
self.num_classes = num_classes
self.scale = scale
self.add_to_recordable_attributes(
list_of_names=["num_classes", "margin", "scale"], is_stat=False
)
self.add_to_recordable_attributes(name="avg_angle", is_stat=True)
self.init_margin()
self.W = torch.nn.Parameter(torch.Tensor(embedding_size, num_classes))
self.weight_init_func(self.W)
self.cross_entropy = torch.nn.CrossEntropyLoss(reduction="none")
def init_margin(self):
self.margin = int(self.margin)
self.max_n = self.margin // 2
## For the trigonometric multiple-angle formula ##
self.n_range = torch.tensor([n for n in range(0, self.max_n + 1)])
self.margin_choose_n = torch.tensor(
[scipy.special.binom(self.margin, 2 * n) for n in self.n_range]
)
self.cos_powers = torch.tensor([self.margin - (2 * n) for n in self.n_range])
self.alternating = torch.tensor([(-1) ** n for n in self.n_range])
def get_cos_with_margin(self, cosine):
cosine = cosine.unsqueeze(1)
for attr in ["n_range", "margin_choose_n", "cos_powers", "alternating"]:
setattr(self, attr, c_f.to_device(getattr(self, attr), cosine))
cos_powered = cosine**self.cos_powers
sin_powered = (1 - cosine**2) ** self.n_range
terms = (
self.alternating * self.margin_choose_n * cos_powered * sin_powered
) # Equation 7 in the paper
return torch.sum(terms, dim=1)
def get_cosine(self, embeddings):
return self.distance(embeddings, self.W.t())
def get_angles(self, cosine_of_target_classes):
angles = torch.acos(torch.clamp(cosine_of_target_classes, -1, 1))
if self.collect_stats:
with torch.no_grad():
self.avg_angle = np.degrees(torch.mean(angles).item())
return angles
def get_target_mask(self, embeddings, labels):
batch_size = labels.size(0)
mask = torch.zeros(
batch_size,
self.num_classes,
dtype=embeddings.dtype,
device=embeddings.device,
)
mask[torch.arange(batch_size), labels] = 1
return mask
def modify_cosine_of_target_classes(self, cosine_of_target_classes):
cos_with_margin = self.get_cos_with_margin(cosine_of_target_classes)
angles = self.get_angles(cosine_of_target_classes)
with torch.no_grad():
k = (
angles / (math.pi / self.margin)
).floor() # Equation 6: angles needs to be between [k*pi/m and (k+1)*pi/m]
return ((-1) ** k) * cos_with_margin - (2 * k)
def scale_logits(self, logits, embeddings):
embedding_norms = self.distance.get_norm(embeddings)
weight_norms = self.distance.get_norm(self.W, dim=0)
product_of_magnitudes = weight_norms.unsqueeze(0) * embedding_norms.unsqueeze(1)
return logits * product_of_magnitudes * self.scale
def cast_types(self, dtype, device):
self.W.data = c_f.to_device(self.W.data, device=device, dtype=dtype)
self.n_range = c_f.to_device(self.n_range, device=device, dtype=dtype)
self.margin_choose_n = c_f.to_device(
self.margin_choose_n, device=device, dtype=dtype
)
self.cos_powers = c_f.to_device(self.cos_powers, device=device, dtype=dtype)
self.alternating = c_f.to_device(self.alternating, device=device, dtype=dtype)
def compute_loss(self, embeddings, labels, indices_tuple, ref_emb, ref_labels):
c_f.ref_not_supported(embeddings, labels, ref_emb, ref_labels)
dtype, device = embeddings.dtype, embeddings.device
self.cast_types(dtype, device)
miner_weights = lmu.convert_to_weights(indices_tuple, labels, dtype=dtype)
mask = self.get_target_mask(embeddings, labels)
cosine = self.get_cosine(embeddings)
cosine_of_target_classes = cosine[mask == 1]
modified_cosine_of_target_classes = self.modify_cosine_of_target_classes(
cosine_of_target_classes
)
diff = (modified_cosine_of_target_classes - cosine_of_target_classes).unsqueeze(
1
)
logits = cosine + (mask * diff)
logits = self.scale_logits(logits, embeddings)
unweighted_loss = self.cross_entropy(logits, labels)
miner_weighted_loss = unweighted_loss * miner_weights
loss_dict = {
"loss": {
"losses": miner_weighted_loss,
"indices": c_f.torch_arange_from_size(embeddings),
"reduction_type": "element",
}
}
self.add_weight_regularization_to_loss_dict(loss_dict, self.W.t())
return loss_dict
def get_default_distance(self):
return CosineSimilarity()
def get_logits(self, embeddings):
logits = self.get_cosine(embeddings)
return self.scale_logits(logits, embeddings)
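# A hedged usage sketch (hypothetical helper; class count, embedding size, and batch
# shapes are illustrative assumptions): the loss holds a learnable weight matrix W, so
# in real training its parameters would be passed to the optimizer alongside the model's.
def _example_large_margin_softmax_usage():
    loss_func = LargeMarginSoftmaxLoss(num_classes=10, embedding_size=64, margin=4, scale=1)
    embeddings = torch.randn(32, 64)
    labels = torch.randint(0, 10, (32,))
    return loss_func(embeddings, labels)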
| [
"torch.zeros",
"torch.arange",
"torch.no_grad",
"torch.clamp",
"torch.tensor",
"torch.mean",
"torch.Tensor",
"torch.nn.CrossEntropyLoss",
"torch.sum"
] | 1.6.0 | cwkeam/pytorch-metric-learning | 63e4ecb781c5735ad714f61a3eecc55f72496905 |
1.6 | import unittest
import torch
from pytorch_metric_learning.miners import EmbeddingsAlreadyPackagedAsTriplets
from pytorch_metric_learning.samplers import FixedSetOfTriplets
from pytorch_metric_learning.utils import common_functions as c_f
class TestFixedSetOfTriplet(unittest.TestCase):
def test_fixed_set_of_triplets_with_batch_size(self):
miner = EmbeddingsAlreadyPackagedAsTriplets()
for batch_size in [3, 33, 99]:
batch_of_fake_embeddings = torch.randn(batch_size, 2)
for num_labels in [2, 10, 55]:
for num_triplets in [100, 999, 10000]:
fake_embeddings = torch.randn(10000, 2)
labels = torch.randint(low=0, high=num_labels, size=(10000,))
dataset = c_f.EmbeddingDataset(fake_embeddings, labels)
sampler = FixedSetOfTriplets(labels, num_triplets)
iterator = iter(sampler)
for _ in range(1000):
x = []
for _ in range(batch_size):
iterator, curr_batch = c_f.try_next_on_generator(
iterator, sampler
)
x.append(curr_batch)
curr_labels = labels[x]
a, p, n = miner(batch_of_fake_embeddings, curr_labels)
self.assertTrue(len(a) == batch_size // 3)
self.assertTrue(torch.all(curr_labels[a] == curr_labels[p]))
self.assertTrue(torch.all(curr_labels[a] != curr_labels[n]))
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, sampler=sampler, drop_last=True
)
for _ in range(2):
for (embeddings, curr_labels) in dataloader:
a, p, n = miner(batch_of_fake_embeddings, curr_labels)
self.assertTrue(len(a) == batch_size // 3)
self.assertTrue(torch.all(curr_labels[a] == curr_labels[p]))
self.assertTrue(torch.all(curr_labels[a] != curr_labels[n]))
if __name__ == "__main__":
unittest.main()
| [
"torch.randint",
"torch.all",
"torch.randn",
"torch.utils.data.DataLoader"
] | 1.6.0 | cwkeam/pytorch-metric-learning | 63e4ecb781c5735ad714f61a3eecc55f72496905 |
1.8 | import torch
import torch.nn as nn
class ImprovedSNL(nn.Module):
def __init__(self, in_channels, transfer_channels, stage_num=2):
super(ImprovedSNL, self).__init__()
self.in_channels = in_channels
self.transfer_channels = transfer_channels
self.stage_num = stage_num
self.transform_t = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
self.transform_p = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
self.row_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
self.column_transform = nn.Conv2d(in_channels, transfer_channels, kernel_size=1, stride=1, bias=False)
self.w1 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)
self.w2 = nn.Conv2d(transfer_channels, in_channels, kernel_size=1, stride=1, bias=False)
self.bn = nn.BatchNorm2d(in_channels)
self._init_params()
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def getAtt(self, x):
t = self.transform_t(x)
p = self.transform_p(x)
b, c, h, w = t.size()
t = t.view(b, c, -1).permute(0, 2, 1)
p = p.view(b, c, -1)
        # pairwise affinities between all spatial positions: (b, h*w, h*w)
        m = torch.bmm(torch.relu(t), torch.relu(p))
        # symmetrize out of place; the original in-place `m += m.permute(0, 2, 1)` reads
        # from and writes to overlapping memory of the same tensor, which is not safe
        m = m + m.permute(0, 2, 1)
        m_hat = m / 2
        # symmetric normalization: affinity = D^(-1/2) * m_hat * D^(-1/2)
        degree = torch.sum(m_hat, dim=2)
        degree[degree != 0] = torch.sqrt(1.0 / degree[degree != 0])
affinity_matrix = m_hat * degree.unsqueeze(1)
affinity_matrix *= degree.unsqueeze(2)
return affinity_matrix
def stage(self, x):
affinity_matrix = self.getAtt(x)
column_features = self.column_transform(x)
b, c, h, w = column_features.size()
column_features = column_features.view(b, c, -1)
column_features = torch.bmm(column_features, affinity_matrix).contiguous().view(b,c,h,w)
column_features = self.w1(column_features)
row_features = self.row_transform(x)
b, c, h, w = row_features.size()
row_features = row_features.view(b, c, -1).permute(0, 2, 1)
row_features = torch.bmm(affinity_matrix, row_features).permute(0, 2, 1).contiguous().view(b,c,h,w)
row_features = self.w2(row_features)
output = column_features + row_features
output = self.bn(output)
output = output + x
return output
def forward(self, x):
for stage in range(self.stage_num):
x = self.stage(x)
return x
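# --- Usage sketch (editor's addition, not part of the original file) ---
# Minimal smoke test for ImprovedSNL, assuming a 4D feature-map input; the block
# preserves the input shape because every stage ends with a residual connection.
if __name__ == "__main__":
    dummy = torch.randn(2, 64, 16, 16)  # (batch, channels, height, width)
    block = ImprovedSNL(in_channels=64, transfer_channels=32, stage_num=2)
    out = block(dummy)
    assert out.shape == dummy.shape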
| [
"torch.sqrt",
"torch.relu",
"torch.nn.init.constant_",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.bmm",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.sum"
] | 1.8.1 | ustbjdl1021/improved_snl_unet | 7f7bf092153e1a535337b80bd1b673eff3ddec52 |
1.0 | from typing import Dict, List
import torch
import torch.nn.functional as F
def compute_loss(states: torch.Tensor,
actions: torch.Tensor,
next_states: torch.Tensor,
log_probs_old: torch.Tensor,
ext_returns: torch.Tensor,
ext_advantages: torch.Tensor,
std_ext_advantages: torch.Tensor,
int_returns: torch.Tensor,
int_advantages: torch.Tensor,
std_int_advantages: torch.Tensor,
target_random_features: torch.Tensor,
states_mean: torch.Tensor,
states_std: torch.Tensor,
model: torch.nn.Module,
pred_intr_model: torch.nn.Module,
intrinsic_reward_ratio: float,
ratio_clip: float,
entropy_weight: float,
value_weight: float,
rnd_weight: float,
rnd_obs_clip: float,
summary_writer: object = None,
iteration_count: int = 0,
rnn_states: Dict[str, Dict[str, List[torch.Tensor]]] = None) -> torch.Tensor:
'''
Computes the loss of an actor critic model using the
loss function from equation (9) in the paper:
Proximal Policy Optimization Algorithms: https://arxiv.org/abs/1707.06347
:param states: Dimension: batch_size x state_size: States visited by the agent.
:param actions: Dimension: batch_size x action_size. Actions which the agent
took at every state in :param states: with the same index.
:param log_probs_old: Dimension: batch_size x 1. Log probability of taking
the action with the same index in :param actions:.
Used to compute the policy probability ratio.
Refer to original paper equation (6)
:param ext_returns: Dimension: batch_size x 1. Empirical returns obtained via
calculating the discounted return from the environment's rewards
:param ext_advantages: Dimension: batch_size x 1. Estimated advantage function
for every state and action in :param states: and
:param actions: (respectively) with the same index.
:param std_ext_advantages: Dimension: batch_size x 1. Estimated standardized advantage function
for every state and action in :param states: and
:param actions: (respectively) with the same index.
:param int_returns: Dimension: batch_size x 1. Empirical intrinsic returns obtained via
calculating the discounted intrinsic return from the intrinsic rewards.
    :param int_advantages: Dimension: batch_size x 1. Estimated intrinsic advantage function
for every state and action in :param states: and
:param actions: (respectively) with the same index.
:param std_int_advantages: Dimension: batch_size x 1. Estimated standardized intrinsic advantage function
for every state and action in :param states: and
:param actions: (respectively) with the same index.
:param target_random_features: target random features used to compute the intrinsic rewards.
:param states_mean: mean over the previous training step's states.
:param states_std: standard deviation over the previous training step's states.
:param model: torch.nn.Module used to compute the policy probability ratio
as specified in equation (6) of original paper.
    :param pred_intr_model: intrinsic reward prediction model (the RND predictor network).
:param intrinsic_reward_ratio: ratio of intrinsic reward to extrinsic reward.
:param ratio_clip: Epsilon value used to clip the policy ratio's value.
This parameter acts as the radius of the Trust Region.
Refer to original paper equation (7).
:param entropy_weight: Coefficient to be used for the entropy bonus
for the loss function. Refer to original paper eq (9)
:param value_weight: Coefficient to be used for the value loss
for the loss function. Refer to original paper eq (9)
:param rnd_weight: Coefficient to be used for the rnd loss
for the loss function.
:param rnn_states: The :param model: can be made up of different submodules.
Some of these submodules will feature an LSTM architecture.
This parameter is a dictionary which maps recurrent submodule names
to a dictionary which contains 2 lists of tensors, each list
corresponding to the 'hidden' and 'cell' states of
the LSTM submodules. These tensors are used by the
:param model: when calculating the policy probability ratio.
'''
advantages = ext_advantages + intrinsic_reward_ratio*int_advantages
std_advantages = std_ext_advantages + intrinsic_reward_ratio*std_int_advantages
prediction = model(states, actions, rnn_states=rnn_states)
ratio = torch.exp((prediction['log_pi_a'] - log_probs_old))
obj = ratio * std_advantages
obj_clipped = torch.clamp(ratio,
1.0 - ratio_clip,
1.0 + ratio_clip) * std_advantages
policy_val = -torch.min(obj, obj_clipped).mean()
entropy_val = prediction['ent'].mean()
policy_loss = policy_val - entropy_weight * entropy_val # L^{clip} and L^{S} from original paper
#policy_loss = -torch.min(obj, obj_clipped).mean() - entropy_weight * prediction['ent'].mean() # L^{clip} and L^{S} from original paper
# Random Network Distillation loss:
norm_next_states = (next_states-states_mean) / (states_std+1e-8)
if rnd_obs_clip > 1e-1:
norm_next_states = torch.clamp( norm_next_states, -rnd_obs_clip, rnd_obs_clip)
pred_random_features = pred_intr_model(norm_next_states)
# Clamping:
#pred_random_features = torch.clamp(pred_random_features, -1e20, 1e20)
#target_random_features = torch.clamp(target_random_features, -1e20, 1e20)
# Softmax:
#pred_random_features = F.softmax(pred_random_features)
# Losses:
#int_reward_loss = torch.nn.functional.smooth_l1_loss(target_random_features.detach(), pred_random_features)
int_reward_loss = torch.nn.functional.mse_loss( pred_random_features, target_random_features.detach())
#ext_returns = torch.clamp(ext_returns, -1e10, 1e10)
#int_returns = torch.clamp(int_returns, -1e10, 1e10)
#prediction['v'] = torch.clamp(prediction['v'], -1e10, 1e10)
#prediction['int_v'] = torch.clamp(prediction['int_v'], -1e10, 1e10)
#ext_v_loss = torch.nn.functional.smooth_l1_loss(ext_returns, prediction['v'])
#int_v_loss = torch.nn.functional.smooth_l1_loss(int_returns, prediction['int_v'])
ext_v_loss = torch.nn.functional.mse_loss(input=prediction['v'], target=ext_returns)
int_v_loss = torch.nn.functional.mse_loss(input=prediction['int_v'], target=int_returns)
value_loss = (ext_v_loss + int_v_loss)
#value_loss = ext_v_loss
rnd_loss = int_reward_loss
total_loss = policy_loss + rnd_weight * rnd_loss + value_weight * value_loss
#total_loss = policy_loss + value_weight * value_loss
if summary_writer is not None:
summary_writer.add_scalar('Training/RatioMean', ratio.mean().cpu().item(), iteration_count)
#summary_writer.add_histogram('Training/Ratio', ratio.cpu(), iteration_count)
summary_writer.add_scalar('Training/ExtAdvantageMean', ext_advantages.mean().cpu().item(), iteration_count)
summary_writer.add_scalar('Training/IntAdvantageMean', int_advantages.mean().cpu().item(), iteration_count)
summary_writer.add_scalar('Training/AdvantageMean', advantages.mean().cpu().item(), iteration_count)
#summary_writer.add_histogram('Training/ExtAdvantage', ext_advantages.cpu(), iteration_count)
#summary_writer.add_histogram('Training/IntAdvantage', int_advantages.cpu(), iteration_count)
#summary_writer.add_histogram('Training/Advantage', advantages.cpu(), iteration_count)
summary_writer.add_scalar('Training/RNDLoss', int_reward_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/ExtVLoss', ext_v_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/IntVLoss', int_v_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/MeanVValues', prediction['v'].cpu().mean().item(), iteration_count)
summary_writer.add_scalar('Training/MeanReturns', ext_returns.cpu().mean().item(), iteration_count)
summary_writer.add_scalar('Training/StdVValues', prediction['v'].cpu().std().item(), iteration_count)
summary_writer.add_scalar('Training/StdReturns', ext_returns.cpu().std().item(), iteration_count)
summary_writer.add_scalar('Training/MeanIntVValues', prediction['int_v'].cpu().mean().item(), iteration_count)
summary_writer.add_scalar('Training/MeanIntReturns', int_returns.cpu().mean().item(), iteration_count)
summary_writer.add_scalar('Training/StdIntVValues', prediction['int_v'].cpu().std().item(), iteration_count)
summary_writer.add_scalar('Training/StdIntReturns', int_returns.cpu().std().item(), iteration_count)
summary_writer.add_scalar('Training/ValueLoss', value_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/PolicyVal', policy_val.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/EntropyVal', entropy_val.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/PolicyLoss', policy_loss.cpu().item(), iteration_count)
summary_writer.add_scalar('Training/TotalLoss', total_loss.cpu().item(), iteration_count)
return total_loss
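# --- Editor's illustration (not part of the original file) ---
# Self-contained sketch of the clipped-surrogate term used above (PPO paper,
# eqs. (6)-(7)); the tensors below are hypothetical stand-ins for model outputs.
def _clipped_surrogate_demo(ratio_clip: float = 0.2) -> torch.Tensor:
    log_pi_new = torch.randn(8, 1)
    log_pi_old = torch.randn(8, 1)
    advantages = torch.randn(8, 1)
    ratio = torch.exp(log_pi_new - log_pi_old)
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip) * advantages
    # maximizing the clipped surrogate == minimizing its negated mean
    return -torch.min(unclipped, clipped).mean()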
| [
"torch.nn.functional.mse_loss",
"torch.exp",
"torch.min",
"torch.clamp"
] | 1.0.1 | KnwSondess/Regym | 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a |
1.0 | import regym
from regym.rl_algorithms.agents import build_PPO_Agent
from regym.rl_loops.singleagent_loops import rl_loop
from regym.environments import parse_environment
from test_fixtures import ppo_rnd_config_dict_ma
from tqdm import tqdm
from tensorboardX import SummaryWriter
import os
import math
import copy
import random
import torch
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import time
offset_worker_id = 50
gif_interval = 100
def make_gif(trajectory, episode=0, actor_idx=0, path='./'):
fig = plt.figure()
imgs = []
for state in trajectory:
if state.shape[-1] == 12:
            # handle stacked frames: split the 12-channel stack into 3-channel images
per_image_first_channel_indices = range(0,state.shape[-1]+1,3)
ims = [ state[...,idx_begin:idx_end] for idx_begin, idx_end in zip(per_image_first_channel_indices,per_image_first_channel_indices[1:])]
for img in ims:
imgs.append( img)
else:
imgs.append(state)
for idx,img in enumerate(imgs):
imgs[idx] = [plt.imshow(img, animated=True)]
gif = anim.ArtistAnimation(fig, imgs, interval=200, blit=True, repeat_delay=None)
path = os.path.join(path, f'./traj-ep{episode}-actor{actor_idx}.gif')
gif.save(path, dpi=None, writer='imagemagick')
#plt.show()
plt.close(fig)
def make_gif_with_graph(trajectory, data, episode=0, actor_idx=0, path='./'):
fig = plt.figure()
imgs = []
gd = []
for idx, (state, d) in enumerate(zip(trajectory,data)):
if state.shape[-1] == 12:
            # handle stacked frames: split the 12-channel stack into 3-channel images
per_image_first_channel_indices = range(0,state.shape[-1]+1,3)
ims = [ state[...,idx_begin:idx_end] for idx_begin, idx_end in zip(per_image_first_channel_indices,per_image_first_channel_indices[1:])]
for img in ims:
imgs.append( img)
gd.append(d)
else:
imgs.append(state)
gd.append(d)
for idx,img in enumerate(imgs):
plt.subplot(211)
img = plt.imshow(img, animated=True)
ax = plt.subplot(212)
x = np.arange(0,idx,1)
y = np.asarray(gd[:idx])
ax.set_xlim(left=0,right=idx+10)
line = ax.plot(x, y, color='blue', marker='o', linestyle='dashed',linewidth=2, markersize=10)
imgs[idx] = [img]+line
gif = anim.ArtistAnimation(fig, imgs, interval=200, blit=True, repeat_delay=None)
path = os.path.join(path, f'./traj-ep{episode}-actor{actor_idx}.gif')
gif.save(path, dpi=None, writer='imagemagick')
#plt.show()
plt.close(fig)
def check_path_for_agent(filepath):
#filepath = os.path.join(path,filename)
agent = None
offset_episode_count = 0
if os.path.isfile(filepath):
print('==> loading checkpoint {}'.format(filepath))
agent = torch.load(filepath)
offset_episode_count = agent.episode_count
#setattr(agent, 'episode_count', offset_episode_count)
print('==> loaded checkpoint {}'.format(filepath))
return agent, offset_episode_count
def update_configs(env_param2range, nbr_actors):
env_configs = list()
tower_seed = random.choice(env_param2range['tower-seed'])
#allowed_floors = random.choice(env_param2range['allowed-floors'])
for a_i in range(nbr_actors):
env_config = copy.deepcopy(env_param2range)
env_config['worker_id'] = a_i+offset_worker_id
for k in env_config:
if k == 'tower-seed':
env_config[k] = tower_seed
continue
'''
elif k == 'allowed-floors':
env_config[k] = allowed_floors
continue
'''
if isinstance(env_config[k], list):
v = random.choice(env_config[k])
env_config[k] = v
env_configs.append(env_config)
return env_configs
def test_train_ppo_rnd(ppo_rnd_config_dict_ma):
global gif_interval
task = parse_environment('MontezumaRevenge-v0',
nbr_parallel_env=ppo_rnd_config_dict_ma['nbr_actor'],
nbr_frame_stacking=ppo_rnd_config_dict_ma['nbr_frame_stacking'])
#logdir = './test_10floors0_Theme1_LABC-light_gru_ppo_rnd512-InitSqrt2_ObsUP1e5_IntrUP1e5_NonEpisodicGAE_cnn80phi256gru128_a4_b256_h128_1e-4_OTC_frameskip4/'
#logdir = './test_10floors0_Theme1_LABC-light_gru_ppo_rnd512-InitSqrt2_ObsUP1e5_IntrUP1e5_NonEpisodicGAE_cnn80phi256gru128_a8_b128_h128_3e-4_OTC_frameskip4/'
#logdir = './test_10floors0_Theme1_LABC-light_gru_ppo_rnd512-InitSqrt2_ObsUP1e5_IntrUP1e5_NonEpisodicGAE_NormRetMeanStd_cnn80phi256gru128_a8_b128_h128_3e-4_MZ_frameskip4/'
logdir = './test_gru_ppo_rnd512-InitSqrt2_ObsUP1e5_IntrUP1e5_NonEpisodicTrueGAE_NormRetMeanStd_cnn80phi256gru256_ac128_a32_b1024_h128_3e-4_MZ_frameskip4/'
#logdir = './test_gif'
if not os.path.exists(logdir):
os.mkdir(logdir)
sum_writer = SummaryWriter(logdir)
save_path = os.path.join(logdir,'./ppo_rnd.agent')
agent, offset_episode_count = check_path_for_agent(save_path)
if agent is None: agent = build_PPO_Agent(config=ppo_rnd_config_dict_ma, task=task, agent_name='PPO_RND_MZ')
regym.rl_algorithms.PPO.ppo.summary_writer = sum_writer
agent.save_path = save_path
nbr_episodes = 1e7
max_episode_length = 1e5
nbr_actors = ppo_rnd_config_dict_ma['nbr_actor']
env_param2range = { 'tower-seed': list(range(-1,101)), #Sets the seed used to generate the tower. -1 corresponds to a random tower on every reset() call.
'starting-floor': 0, #list(range(100)), #Sets the starting floor for the agent on reset().
'total-floors': 10, #list(range(1, 100)) #Sets the maximum number of possible floors in the tower.
'dense-reward': 0, #(0, 1) #Whether to use the sparse (0) or dense (1) reward function.
'lighting-type': [0, 1, 2], #Whether to use no realtime light (0), a single realtime light with minimal color variations (1), or a realtime light with large color variations (2).
'visual-theme': 0, #[0, 1, 2], #Whether to use only the default-theme (0), the normal ordering or themes (1), or a random theme every floor (2).
'agent-perspective':1, #(0, 1), #Whether to use first-person (0) or third-person (1) perspective for the agent.
'allowed-rooms': 2, #(0, 1, 2), #Whether to use only normal rooms (0), normal and key rooms (1), or normal, key, and puzzle rooms (2).
'allowed-modules': 2, #(0, 1, 2), #Whether to fill rooms with no modules (0), only easy modules (1), or the full range of modules (2).
'allowed-floors': 0, #[0, 1, 2], #Whether to include only straightforward floor layouts (0), layouts that include branching (1), or layouts that include branching and circling (2).
'default-theme': 1 #[0, 1, 2, 3, 4] #Whether to set the default theme to Ancient (0), Moorish (1), Industrial (2), Modern (3), or Future (4).
}
# PARAMETERS with curriculum since they only include straightforward floors...
env_configs = update_configs(env_param2range, nbr_actors)
for i in tqdm(range(offset_episode_count, int(nbr_episodes))):
trajectory = rl_loop.run_episode_parallel(task.env, agent,
training=True,
max_episode_length=max_episode_length,
)#env_configs=env_configs)
total_return = [ sum([ exp[2] for exp in t]) for t in trajectory]
mean_total_return = sum( total_return) / len(trajectory)
std_ext_return = math.sqrt( sum( [math.pow( r-mean_total_return ,2) for r in total_return]) / len(total_return) )
total_int_return = [ sum([ exp[3] for exp in t]) for t in trajectory]
mean_total_int_return = sum( total_int_return) / len(trajectory)
std_int_return = math.sqrt( sum( [math.pow( r-mean_total_int_return ,2) for r in total_int_return]) / len(total_int_return) )
for idx, (ext_ret, int_ret) in enumerate(zip(total_return, total_int_return)):
sum_writer.add_scalar('Training/TotalReturn', ext_ret, i*len(trajectory)+idx)
sum_writer.add_scalar('Training/TotalIntReturn', int_ret, i*len(trajectory)+idx)
sum_writer.add_scalar('Training/StdIntReturn', std_int_return, i)
sum_writer.add_scalar('Training/StdExtReturn', std_ext_return, i)
episode_lengths = [ len(t) for t in trajectory]
mean_episode_length = sum( episode_lengths) / len(trajectory)
std_episode_length = math.sqrt( sum( [math.pow( l-mean_episode_length ,2) for l in episode_lengths]) / len(trajectory) )
sum_writer.add_scalar('Training/MeanTotalReturn', mean_total_return, i)
sum_writer.add_scalar('Training/MeanTotalIntReturn', mean_total_int_return, i)
sum_writer.add_scalar('Training/MeanEpisodeLength', mean_episode_length, i)
sum_writer.add_scalar('Training/StdEpisodeLength', std_episode_length, i)
# Update configs:
env_configs = update_configs(env_param2range, nbr_actors)
agent.episode_count += 1
if (i+nbr_actors)%gif_interval == 0:
for actor_idx in range(nbr_actors):
gif_traj = [ exp[0] for exp in trajectory[actor_idx]]
gif_data = [ exp[3] for exp in trajectory[actor_idx]]
begin = time.time()
#make_gif(gif_traj, episode=i, actor_idx=actor_idx, path=logdir)
make_gif_with_graph(gif_traj, gif_data, episode=i, actor_idx=actor_idx, path=logdir)
end = time.time()
eta = end-begin
print(f'Time: {eta} sec.')
task.env.close()
assert trajectory is not None
assert isinstance(trajectory, list)
if __name__ == '__main__':
# https://pytorch.org/docs/master/multiprocessing.html#multiprocessing-cuda-sharing-details
torch.multiprocessing.set_start_method('forkserver')
test_train_ppo_rnd(ppo_rnd_config_dict_ma()) | [
"torch.multiprocessing.set_start_method",
"torch.load"
] | 1.0.1 | KnwSondess/Regym | 825c7dacf955a3e2f6c658c0ecb879a0ca036c1a |
1.8 | import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from ..registry import NECKS
@NECKS.register_module
class FPN(nn.Module):
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
attention=False,
act_cfg=None,
upsample_cfg=dict(mode='nearest'),
init_cfg=dict(type='Xavier',
layer='Conv2d',
distribution='uniform'),
cfg=None):
super(FPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.attention = attention
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.upsample_cfg = upsample_cfg.copy()
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
assert isinstance(add_extra_convs, (str, bool))
if isinstance(add_extra_convs, str):
# Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
elif add_extra_convs: # True
if extra_convs_on_inputs:
# TODO: deprecate `extra_convs_on_inputs`
warnings.simplefilter('once')
warnings.warn(
'"extra_convs_on_inputs" will be deprecated in v2.9.0,'
'Please use "add_extra_convs"', DeprecationWarning)
self.add_extra_convs = 'on_input'
else:
self.add_extra_convs = 'on_output'
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
act_cfg=act_cfg,
inplace=False)
fpn_conv = ConvModule(out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if self.add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.add_extra_convs == 'on_input':
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) >= len(self.in_channels)
        if len(inputs) > len(self.in_channels):
            # drop the earliest (highest-resolution) surplus feature maps in place
            for _ in range(len(inputs) - len(self.in_channels)):
                del inputs[0]
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
# In some cases, fixing `scale factor` (e.g. 2) is preferred, but
# it cannot co-exist with `size` in `F.interpolate`.
if 'scale_factor' in self.upsample_cfg:
laterals[i - 1] += F.interpolate(laterals[i],
**self.upsample_cfg)
else:
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] += F.interpolate(laterals[i],
size=prev_shape,
**self.upsample_cfg)
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
extra_source = inputs[self.backbone_end_level - 1]
elif self.add_extra_convs == 'on_lateral':
extra_source = laterals[-1]
elif self.add_extra_convs == 'on_output':
extra_source = outs[-1]
else:
raise NotImplementedError
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
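# --- Usage sketch (editor's addition; assumes mmcv and the parent package are importable) ---
# Typical ResNet-style configuration: four backbone stages fused into five FPN levels.
#   fpn = FPN(in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5,
#             add_extra_convs='on_output')
#   feats = [torch.randn(1, c, 64 // 2 ** i, 64 // 2 ** i)
#            for i, c in enumerate([256, 512, 1024, 2048])]
#   outs = fpn(feats)  # tuple of 5 feature maps, each with 256 channels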
| [
"torch.nn.functional.relu",
"torch.nn.functional.interpolate",
"torch.nn.functional.max_pool2d",
"torch.nn.ModuleList"
] | 1.8.0 | Turoad/CLRNet | 51e082db12973943bddefd76fd0d431fcb3350ff |
1.6 | # Copyright (c) Facebook, Inc. and its affiliates.
import importlib
import logging
import os
import pickle
import re
from collections import OrderedDict
from copy import deepcopy
from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any
import torch
import torchvision
from mmf.common.registry import registry
from mmf.models.frcnn import GeneralizedRCNN
from mmf.modules.embeddings import ProjectionEmbedding, TextEmbedding
from mmf.modules.hf_layers import BertModelJit
from mmf.modules.layers import Identity
from mmf.utils.build import build_image_encoder, build_text_encoder
from mmf.utils.download import download_pretrained_model
from mmf.utils.file_io import PathManager
from mmf.utils.general import get_absolute_path
from mmf.utils.logger import log_class_usage
from omegaconf import MISSING, OmegaConf
from torch import nn, Tensor
from transformers.configuration_auto import AutoConfig
from transformers.modeling_auto import AutoModel
try:
from detectron2.modeling import build_resnet_backbone, ShapeSpec
except ImportError:
pass
logger = logging.getLogger()
class Encoder(nn.Module):
@dataclass
class Config:
name: str = MISSING
def __init__(self):
super().__init__()
log_class_usage("Encoder", self.__class__)
@classmethod
def from_params(cls, **kwargs):
config = OmegaConf.structured(cls.Config(**kwargs))
return cls(config)
class EncoderFactory(nn.Module):
@dataclass
class Config:
type: str = MISSING
params: Encoder.Config = MISSING
class ImageFeatureEncoderTypes(Enum):
default = "default"
identity = "identity"
projection = "projection"
frcnn_fc7 = "finetune_faster_rcnn_fpn_fc7"
class ImageFeatureEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
in_dim: int = MISSING
class ImageFeatureEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
type: ImageFeatureEncoderTypes = MISSING
params: ImageFeatureEncoder.Config = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
encoder_type = config.type
if isinstance(encoder_type, ImageFeatureEncoderTypes):
encoder_type = encoder_type.value
assert (
"in_dim" in config.params
), "ImageFeatureEncoder require 'in_dim' param in config"
params = config.params
if encoder_type == "default" or encoder_type == "identity":
self.module = Identity()
self.module.in_dim = params.in_dim
self.module.out_dim = params.in_dim
elif encoder_type == "projection":
if "module" not in params:
params = deepcopy(params)
params.module = "linear"
self.module = ProjectionEmbedding(**params)
elif encoder_type == "finetune_faster_rcnn_fpn_fc7":
self.module = FinetuneFasterRcnnFpnFc7(params)
else:
raise NotImplementedError("Unknown Image Encoder: %s" % encoder_type)
self.out_dim = self.module.out_dim
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
@registry.register_encoder("finetune_faster_rcnn_fpn_fc7")
class FinetuneFasterRcnnFpnFc7(ImageFeatureEncoder):
@dataclass
class Config(ImageFeatureEncoder.Config):
name: str = "finetune_faster_rcnn_fpn_fc7"
in_dim: int = MISSING
weights_file: str = "fc7_w.pkl"
bias_file: str = "fc7_b.pkl"
model_data_dir: str = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
model_data_dir = get_absolute_path(config.model_data_dir)
        # resolve relative paths against model_data_dir; keep absolute paths as-is
        # (the original code left these names unbound when an absolute path was configured)
        weights_file = config.weights_file
        bias_file = config.bias_file
        if not os.path.isabs(weights_file):
            weights_file = os.path.join(model_data_dir, weights_file)
        if not os.path.isabs(bias_file):
            bias_file = os.path.join(model_data_dir, bias_file)
if not PathManager.exists(bias_file) or not PathManager.exists(weights_file):
download_path = download_pretrained_model("detectron.vmb_weights")
weights_file = get_absolute_path(os.path.join(download_path, "fc7_w.pkl"))
bias_file = get_absolute_path(os.path.join(download_path, "fc7_b.pkl"))
with PathManager.open(weights_file, "rb") as w:
weights = pickle.load(w)
with PathManager.open(bias_file, "rb") as b:
bias = pickle.load(b)
out_dim = bias.shape[0]
self.lc = nn.Linear(config.in_dim, out_dim)
self.lc.weight.data.copy_(torch.from_numpy(weights))
self.lc.bias.data.copy_(torch.from_numpy(bias))
self.out_dim = out_dim
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
old_prefix = prefix + "module."
for k in list(state_dict.keys()):
if k.startswith(old_prefix):
new_k = k.replace(old_prefix, prefix)
state_dict[new_k] = state_dict.pop(k)
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
def forward(self, image):
i2 = self.lc(image)
i3 = nn.functional.relu(i2)
return i3
@registry.register_encoder("identity")
class IdentityEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "identity"
# Random in_dim if not specified
in_dim: int = 100
def __init__(self, config: Config):
super().__init__()
self.module = nn.Identity()
self.in_dim = config.get("in_dim", 100)
self.out_dim = self.in_dim
def forward(self, x):
return self.module(x)
class ImageEncoderTypes(Enum):
default = "default"
identity = "identity"
torchvision_resnet = "torchvision_resnet"
resnet152 = "resnet152"
detectron2_resnet = "detectron2_resnet"
class ImageEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
type: ImageEncoderTypes = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self._type = config.type
if isinstance(self._type, ImageEncoderTypes):
self._type = self._type.value
params = config.params
if self._type == "default" or self._type == "identity":
self.module = nn.Identity()
self.module.out_dim = params.in_dim
elif self._type == "resnet152":
self.module = ResNet152ImageEncoder(params)
elif self._type == "torchvision_resnet":
self.module = TorchvisionResNetImageEncoder(params)
elif self._type == "detectron2_resnet":
self.module = Detectron2ResnetImageEncoder(params)
elif self._type == "frcnn":
self.module = FRCNNImageEncoder(params)
else:
raise NotImplementedError("Unknown Image Encoder: %s" % self._type)
@property
def out_dim(self):
return self.module.out_dim
def forward(self, image):
return self.module(image)
# Taken from facebookresearch/mmbt with some modifications
@registry.register_encoder("resnet152")
class ResNet152ImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "resnet152"
pretrained: bool = True
# "avg" or "adaptive"
pool_type: str = "avg"
num_output_features: int = 1
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
model = torchvision.models.resnet152(pretrained=config.get("pretrained", True))
modules = list(model.children())[:-2]
self.model = nn.Sequential(*modules)
pool_func = (
nn.AdaptiveAvgPool2d if config.pool_type == "avg" else nn.AdaptiveMaxPool2d
)
# -1 will keep the original feature size
if config.num_output_features == -1:
self.pool = nn.Identity()
elif config.num_output_features in [1, 2, 3, 5, 7]:
self.pool = pool_func((config.num_output_features, 1))
elif config.num_output_features == 4:
self.pool = pool_func((2, 2))
elif config.num_output_features == 6:
self.pool = pool_func((3, 2))
elif config.num_output_features == 8:
self.pool = pool_func((4, 2))
elif config.num_output_features == 9:
self.pool = pool_func((3, 3))
self.out_dim = 2048
def forward(self, x):
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
out = self.pool(self.model(x))
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous()
return out # BxNx2048
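# --- Usage sketch (editor's addition; assumes the mmf package is importable) ---
#   enc = ResNet152ImageEncoder.from_params(pretrained=False, num_output_features=1)
#   feats = enc(torch.randn(2, 3, 224, 224))  # -> shape (2, 1, 2048)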
@registry.register_encoder("torchvision_resnet")
class TorchvisionResNetImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "resnet50"
pretrained: bool = False
zero_init_residual: bool = True
num_output_features: int = -1
pool_type: str = "avg"
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
model = getattr(torchvision.models, config.name)(
pretrained=config.pretrained, zero_init_residual=config.zero_init_residual
)
# checks if use_avgpool exists to maintain the old logic
self.use_avgpool = config.get("use_avgpool", None)
if self.use_avgpool: # use_avgpool is True
config.num_output_features = 1
config.pool_type = "avg"
elif self.use_avgpool is False: # use_avgpool is False
config.num_output_features = -1
if config.pretrained:
model = self._load_pretrained(model, config)
modules = list(model.children())[:-2]
self.model = nn.Sequential(*modules)
self.pool = self._pool_func(config)
self.out_dim = config.get("out_dim", 2048)
def _load_pretrained(self, model, config: Config):
pretrained_model = config.get("pretrained_model", "supervised")
if pretrained_model == "supervised":
pass # this is already loaded via torchvision using pretrained=True
elif os.path.exists(pretrained_model):
model.load_state_dict(torch.load(pretrained_model))
else:
try:
with PathManager.open(pretrained_model, "rb") as f:
model.load_state_dict(
torch.load(f, map_location=lambda storage, loc: storage),
strict=False,
)
except Exception:
raise Exception(f"unknown pretrained ResNet model: {pretrained_model}")
return model
def _pool_func(self, config: Config):
pool_func = (
nn.AdaptiveAvgPool2d if config.pool_type == "avg" else nn.AdaptiveMaxPool2d
)
# -1 will keep the original feature size
if config.num_output_features == -1:
pool = nn.Identity()
elif config.num_output_features in [1, 2, 3, 5, 7]:
pool = pool_func((config.num_output_features, 1))
elif config.num_output_features == 4:
pool = pool_func((2, 2))
elif config.num_output_features == 6:
pool = pool_func((3, 2))
elif config.num_output_features == 8:
pool = pool_func((4, 2))
elif config.num_output_features == 9:
pool = pool_func((3, 3))
return pool
def forward(self, x):
# B x 3 x 224 x 224 -> B x out_dim x 7 x 7
out = self.pool(self.model(x))
if self.use_avgpool is None:
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous() # BxNxout_dim
else:
out = torch.flatten(out, start_dim=1) # BxN*out_dim
return out
@registry.register_encoder("detectron2_resnet")
class Detectron2ResnetImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "detectron2_resnet"
pretrained: bool = True
pretrained_path: str = None
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
pretrained = config.get("pretrained", False)
pretrained_path = config.get("pretrained_path", None)
self.resnet = build_resnet_backbone(config, ShapeSpec(channels=3))
if pretrained:
state_dict = torch.hub.load_state_dict_from_url(
pretrained_path, progress=False
)
new_state_dict = OrderedDict()
replace_layer = {"backbone.": ""}
for key, value in state_dict["model"].items():
new_key = re.sub(
r"(backbone\.)", lambda x: replace_layer[x.groups()[0]], key
)
new_state_dict[new_key] = value
self.resnet.load_state_dict(new_state_dict, strict=False)
self.out_dim = 2048
def forward(self, x):
x = self.resnet(x)
return x["res5"]
@registry.register_encoder("frcnn")
class FRCNNImageEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "frcnn"
pretrained: bool = True
pretrained_path: str = None
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
pretrained = config.get("pretrained", False)
pretrained_path = config.get("pretrained_path", None)
self.frcnn = GeneralizedRCNN(config)
if pretrained:
state_dict = torch.load(pretrained_path)
self.frcnn.load_state_dict(state_dict)
self.frcnn.eval()
def forward(
self,
x: torch.Tensor,
sizes: torch.Tensor = None,
scales_yx: torch.Tensor = None,
padding: torch.Tensor = None,
max_detections: int = 0,
return_tensors: str = "pt",
):
x = self.frcnn(
x,
sizes,
scales_yx=scales_yx,
padding=padding,
max_detections=max_detections,
return_tensors=return_tensors,
)
return x
class TextEncoderTypes(Enum):
identity = "identity"
transformer = "transformer"
embedding = "embedding"
class TextEncoderFactory(EncoderFactory):
@dataclass
class Config(EncoderFactory.Config):
# identity, transformer or embedding as of now
type: TextEncoderTypes = MISSING
params: Encoder.Config = MISSING
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self._type = config.type
if isinstance(self._type, TextEncoderTypes):
self._type = self._type.value
if self._type == "identity":
self.module = nn.Identity()
elif self._type == "transformer":
self._module = TransformerEncoder(config.params)
self.module = self._module.module
elif self._type == "embedding":
self.module = TextEmbeddingEncoder(config.params)
else:
raise NotImplementedError(f"Unknown Text Encoder {self._type}")
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
@registry.register_encoder("text_embedding")
class TextEmbeddingEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "text_embedding"
operator: str = MISSING
# Keeping this Any for now as this
# needs a separate refactor PR.
embedding_params: Any = MISSING
def __init__(self, config: Config):
super().__init__()
self._operator = config.operator
self._embedding_params = config.embedding_params
self.module = TextEmbedding(
self._embedding_params.type, **self._embedding_params.params
)
def forward(self, x):
x = self.module(x)
if self._operator == "sum":
x = x.sum(dim=1)
elif self._operator == "concat":
x = torch.cat(x, dim=1)
elif self._operator == "mul":
x = torch.prod(x, dim=1)
return x.squeeze()
@registry.register_encoder("transformer")
class TransformerEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "transformer"
num_segments: int = 2
bert_model_name: str = "bert-base-uncased"
# Options below can be overridden to update the bert configuration used
# to initialize the bert encoder. If some option is missing or
# if you are using an encoder different then BERT, add extra parameters
# by inheriting and extending this config
# Those options will automatically override the options for your transformer
# encoder's configuration. For e.g. vocab_size is missing here, just add
# vocab_size: x to update the size of the vocabulary with which encoder is
# initialized. If you update the default values, the transformer you
# will get will be initialized from scratch.
hidden_size: int = 768
num_hidden_layers: int = 12
num_attention_heads: int = 12
output_attentions: bool = False
output_hidden_states: bool = False
random_init: bool = False
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
hf_params = {"config": self._build_encoder_config(config)}
should_random_init = self.config.get("random_init", False)
# For BERT models, initialize using Jit version
if self.config.bert_model_name.startswith("bert-"):
if should_random_init:
self.module = BertModelJit(**hf_params)
else:
self.module = BertModelJit.from_pretrained(
self.config.bert_model_name, **hf_params
)
else:
if should_random_init:
self.module = AutoModel.from_config(**hf_params)
else:
self.module = AutoModel.from_pretrained(
self.config.bert_model_name, **hf_params
)
self.embeddings = self.module.embeddings
self.original_config = self.config
self.config = self.module.config
self._init_segment_embeddings()
def _init_segment_embeddings(self):
if self.original_config.get("num_segments", None):
num_segments = self.original_config.num_segments
if hasattr(self.embeddings, "token_type_embeddings"):
new_embeds = nn.Embedding(num_segments, self.config.hidden_size)
new_embeds.weight.data[:2].copy_(
self.embeddings.token_type_embeddings.weight
)
for idx in range(2, num_segments - 1):
new_embeds.weight.data[idx].copy_(
self.embeddings.token_type_embeddings.weight.data.mean(dim=0)
)
self.embeddings.token_type_embeddings = new_embeds
def _build_encoder_config(self, config: Config):
return AutoConfig.from_pretrained(
config.bert_model_name, **OmegaConf.to_container(config)
)
def forward(self, *args, return_sequence=False, **kwargs) -> Tensor:
# Only return pooled output
output = self.module(*args, **kwargs)
return output[0] if return_sequence else output[1]
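# --- Usage sketch (editor's addition; assumes `transformers` can fetch the checkpoint) ---
#   enc = TransformerEncoder.from_params(bert_model_name="bert-base-uncased")
#   pooled = enc(input_ids, attention_mask=attention_mask)  # -> (batch, 768) pooled output
#   sequence = enc(input_ids, attention_mask=attention_mask, return_sequence=True)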
class MultiModalEncoderBase(Encoder):
__jit_unused_properties__ = ["encoder_config"]
@dataclass
class Config(Encoder.Config):
# This actually is Union[ImageEncoderConfig, ImageFeatureEncoderConfig]
modal_encoder: EncoderFactory.Config = ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152, params=ResNet152ImageEncoder.Config()
)
text_encoder: EncoderFactory.Config = TextEncoderFactory.Config(
type=TextEncoderTypes.transformer, params=TransformerEncoder.Config()
)
direct_features_input: bool = False
modal_hidden_size: int = 2048
text_hidden_size: int = 768
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
self._modal_encoder_config = self.config.get("modal_encoder", None)
self._is_direct_features_input = self.config.get("direct_features_input", False)
self.build()
self.modal_hidden_size = self.config.get("modal_hidden_size", None)
self.text_hidden_size = self.config.get("text_hidden_size", None)
def build(self):
encoders = self._build_encoders(self.config)
self.text_encoder, self.modal_encoder = encoders[0], encoders[1]
self._encoder_config = None
if self.text_encoder:
self._encoder_config = self.text_encoder.config
@property
def encoder_config(self):
return self._encoder_config
def _build_encoders(self, config):
text_encoder = None
if config.get("text_encoder", None):
text_encoder = build_text_encoder(config.text_encoder)
modal_encoder = None
if config.get("modal_encoder", None):
modal_encoder = self._build_modal_encoder(config.modal_encoder)
return (text_encoder, modal_encoder)
def _build_modal_encoder(self, config):
return build_image_encoder(
config, direct_features=self._is_direct_features_input
)
class PooledEncoder(Encoder):
"""
    Standard pooled encoder class which takes an input, encodes it with the encoder
    built by `self.build_encoder`, pools it according to the specified `pool_type`
    and `num_output_features`, flattens the result, and returns it as a tensor.
"""
@dataclass
class Config(Encoder.Config):
num_output_features: int = 1 # How many output features need to be returned.
pool_type: str = "avg" # type of pooling to apply "avg" | "adaptive"
out_dim: int = MISSING # size of out dim expected
three_d: bool = False # if input requires 3D pooling (for video)
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.encoder = self.build_encoder(config)
pool_func = (
nn.AdaptiveAvgPool2d if config.pool_type == "avg" else nn.AdaptiveMaxPool2d
)
params = (config.num_output_features, 1)
if config.three_d:
pool_func = (
nn.AdaptiveAvgPool3d
if config.pool_type == "avg"
else nn.AdaptiveMaxPool3d
)
params = (config.num_output_features, 1, 1)
# -1 will keep the original feature size
if config.num_output_features == -1:
self.pool = nn.Identity()
else:
self.pool = pool_func(params)
self.out_dim = config.out_dim
def build_encoder(self, config: Config, *args, **kwargs):
"""Build an encoder on whose output the pooling will be applied.
Args:
config (Config): Config parameter required to build the encoder.
Raises:
NotImplementedError: Not implemented by default.
"""
raise NotImplementedError()
def forward(self, x: Tensor) -> Tensor:
out = self.encoder(x)
out = self.pool(out)
out = torch.flatten(out, start_dim=2)
out = out.transpose(1, 2).contiguous()
return out
@registry.register_encoder("pytorchvideo")
class PytorchVideoEncoder(Encoder):
"""A thin wrapper around pytorchvideo models.
This class is responsible for integrating pytorchvideo models as encoders.
    This class first attempts to construct a pytorchvideo model from torch hub.
    If that fails for a randomly initialized model and the pytorchvideo package is
    available, the model is built with random weights from pytorchvideo.models.
Config:
name (str): Always 'pytorchvideo' Used for builder_encoder()
random_init (bool): Flag to load pretrained weights
model_name (str): Name of the pytorchvideo model to use
drop_last_n_layers (int):
<=0 value for the number of layers to drop off the end
pooler_name (str): Name of pooler used on model output
Raises:
ImportError:
The constructor raises an ImportError if pytorchvideo is not installed.
"""
@dataclass
class Config(Encoder.Config):
name: str = "pytorchvideo"
random_init: bool = False
model_name: str = "slowfast_r50"
drop_last_n_layers: int = -1
pooler_name: str = "identity"
PYTORCHVIDEO_REPO = "facebookresearch/pytorchvideo:main"
def __init__(self, config: Config):
super().__init__()
config = OmegaConf.create({**asdict(self.Config()), **config})
if config.random_init:
params = dict(**OmegaConf.to_container(config))
params = {
k: v
for k, v in params.items()
if k not in PytorchVideoEncoder.Config().__dict__
}
try:
model = torch.hub.load(
PytorchVideoEncoder.PYTORCHVIDEO_REPO,
model=config.model_name,
pretrained=False,
**params,
)
except BaseException as err:
pytorchvideo_spec = importlib.util.find_spec("pytorchvideo")
if pytorchvideo_spec is None:
raise err
import pytorchvideo.models.hub as hub
model_create_fn = getattr(hub, config.model_name)
model = model_create_fn(pretrained=False, **params)
else:
# load weights from TorchHub
model = torch.hub.load(
PytorchVideoEncoder.PYTORCHVIDEO_REPO,
model=config.model_name,
pretrained=True,
)
encoder_list = []
if config.drop_last_n_layers == 0:
encoder_list += [model]
else:
modules_list = list(model.children())
if len(modules_list) == 1:
modules_list = list(modules_list[0].children())
modules = modules_list[: config.drop_last_n_layers]
encoder_list += modules
pooler = registry.get_pool_class(config.pooler_name)()
encoder_list += [pooler]
self.encoder = nn.Sequential(*encoder_list)
def forward(self, *args, **kwargs):
# pass along input to model
# assumes caller obeys the dynamic model signature
return self.encoder(*args, **kwargs)
@registry.register_encoder("r2plus1d_18")
class R2Plus1D18VideoEncoder(PooledEncoder):
"""
    R2Plus1D-based video encoder. Returns a tensor of dim 512.
    By default, the pretrained version is used.
See https://arxiv.org/abs/1711.11248.
"""
@dataclass
class Config(PooledEncoder.Config):
name: str = "r2plus1d_18"
out_dim: int = 512 # out dim
pretrained: bool = True # if should use pretrained version or not
three_d: bool = True
def build_encoder(self, config: Config, *args, **kwargs):
model = torchvision.models.video.r2plus1d_18(
pretrained=config.get("pretrained", True)
)
modules = list(model.children())[:-2]
return nn.Sequential(*modules)
@registry.register_encoder("resnet18_audio")
class ResNet18AudioEncoder(PooledEncoder):
"""
    Audio encoder based on ResNet18, used as a baseline in various audio
    classification papers. By default, the non-pretrained version is used.
"""
@dataclass
class Config(PooledEncoder.Config):
name: str = "resnet18_audio"
out_dim: int = 512
pretrained: bool = False
def build_encoder(self, config: Config, *args, **kwargs):
model = torchvision.models.resnet18(pretrained=config.get("pretrained", False))
model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
modules = list(model.children())[:-2]
return nn.Sequential(*modules)
@registry.register_encoder("vit")
class ViTEncoder(Encoder):
@dataclass
class Config(Encoder.Config):
name: str = "vit"
# See https://huggingface.co/models?filter=vit for available options
pretrained_model_name: str = "google/vit-base-patch16-224"
random_init: bool = False
gradient_checkpointing: bool = False
def __init__(self, config: Config, *args, **kwargs):
super().__init__()
self.config = config
self.module, self.hf_config = self._model_class.from_config(config)
self.embeddings = self.module.embeddings
self.out_dim = self.hf_config.hidden_size
@property
def _model_class(self):
from mmf.modules.vit import ViTModel
return ViTModel
def forward(self, *args, **kwargs):
if "output_hidden_states" not in kwargs:
kwargs["output_hidden_states"] = False
output = self.module(*args, **kwargs)
return output["last_hidden_state"], output.get("hidden_states", None)
| [
"torch.nn.Linear",
"torch.nn.Identity",
"torch.cat",
"torch.prod",
"torch.flatten",
"torch.nn.Sequential",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.hub.load_state_dict_from_url",
"torch.load",
"torch.nn.functional.relu",
"torch.nn.Embedding",
"torch.hub.load"
] | 1.6.0 | facebookresearch/pythia | 079740bee4b357a7b1b866d35e2f1fad6edba8a4 |
0.4 | import logging
import os
import math
from tqdm import tqdm
import numpy as np
import torch
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def seq_collate(data):
(obs_seq_list, pred_seq_list, obs_seq_rel_list, pred_seq_rel_list,
obs_team_vec_list, obs_pos_vec_list, pred_team_vec_list, pred_pos_vec_list,
non_linear_ped_list, loss_mask_list) = zip(*data)
_len = [len(seq) for seq in obs_seq_list]
cum_start_idx = [0] + np.cumsum(_len).tolist()
seq_start_end = [[start, end]
for start, end in zip(cum_start_idx, cum_start_idx[1:])]
# Data format: batch, input_size, seq_len
# LSTM input format: seq_len, batch, input_size
obs_traj = torch.cat(obs_seq_list, dim=0).permute(2, 0, 1)
pred_traj = torch.cat(pred_seq_list, dim=0).permute(2, 0, 1)
obs_traj_rel = torch.cat(obs_seq_rel_list, dim=0).permute(2, 0, 1)
pred_traj_rel = torch.cat(pred_seq_rel_list, dim=0).permute(2, 0, 1)
obs_team_vec = torch.cat(obs_team_vec_list, dim=0).permute(2, 0, 1)
obs_pos_vec = torch.cat(obs_pos_vec_list, dim=0).permute(2, 0, 1)
pred_team_vec = torch.cat(pred_team_vec_list, dim=0).permute(2, 0, 1)
pred_pos_vec = torch.cat(pred_pos_vec_list, dim=0).permute(2, 0, 1)
non_linear_ped = torch.cat(non_linear_ped_list)
loss_mask = torch.cat(loss_mask_list, dim=0)
seq_start_end = torch.LongTensor(seq_start_end)
out = [
obs_traj, pred_traj, obs_traj_rel, pred_traj_rel,
obs_team_vec, obs_pos_vec, pred_team_vec, pred_pos_vec,
non_linear_ped, loss_mask, seq_start_end
]
return tuple(out)
def read_file(_path, delim='\t'):
lines = []
if delim == 'tab':
delim = '\t'
elif delim == 'space':
delim = ' '
with open(_path, 'r') as f:
next(f)
for line in f:
line = line.strip().split(delim)
line = [float(i) if isfloat(i) else i for i in line]
lines.append(line)
return lines
def parse_file(_path, delim='\t'):
data = []
if delim == 'tab':
delim = '\t'
elif delim == 'space':
delim = ' '
lines = read_file(_path, delim)
team_ids = np.unique([int(line[2]) for line in lines if isfloat(line[2])]).tolist()
posi_ids = ["C", "F", "G", "ball"]
for line in lines:
row = []
team_vector = [0.0] * 3 # 0 1 ball
pos_vector = [0.0] * 4 # 0 1 2 ball
for col, value in enumerate(line):
if col == 2: # team_id
if value == "ball":
team_vector[2] = 1.0
else:
team = team_ids.index(int(value))
team_vector[team] = 1.0
elif col == 3: # player_id
if value == "ball":
row.append(-1.0)
else:
row.append(value) # float
elif col == 6: # player_position
positions = value.strip('"').split(",")
for pos in positions:
pos_vector[posi_ids.index(pos)] = 1.0
else:
row.append(value) # float
row += team_vector # team_id
row += pos_vector # player_position
data.append(row)
return np.asarray(data)
def poly_fit(traj, traj_len, threshold):
"""
Input:
- traj: Numpy array of shape (2, traj_len)
- traj_len: Len of trajectory
- threshold: Minimum error to be considered for non linear traj
Output:
- int: 1 -> Non Linear 0-> Linear
"""
t = np.linspace(0, traj_len - 1, traj_len)
res_x = np.polyfit(t, traj[0, -traj_len:], 2, full=True)[1]
res_y = np.polyfit(t, traj[1, -traj_len:], 2, full=True)[1]
if res_x + res_y >= threshold:
return 1.0
else:
return 0.0
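# --- Editor's illustration (not part of the original file) ---
# poly_fit flags a trajectory as non-linear (1.0) when the summed residuals of a
# quadratic fit over its last `traj_len` points exceed `threshold`, e.g.:
#   t = np.linspace(0, 7, 8)
#   poly_fit(np.stack([t, 2 * t]), traj_len=8, threshold=0.002)          # -> 0.0
#   poly_fit(np.stack([t, 5 * np.sin(t)]), traj_len=8, threshold=0.002)  # -> 1.0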
class TrajectoryDataset(Dataset):
"""Dataloder for the Trajectory datasets"""
def __init__(
self, data_dir, obs_len=8, pred_len=12, skip=1, threshold=0.002,
min_ped=1, delim='\t', metric="meter"
):
"""
Args:
- data_dir: Directory containing dataset files in the format
<frame_id> <ped_id> <x> <y>
- obs_len: Number of time-steps in input trajectories
- pred_len: Number of time-steps in output trajectories
- skip: Number of frames to skip while making the dataset
- threshold: Minimum error to be considered for non linear traj
when using a linear predictor
        - min_ped: Minimum number of pedestrians that should be in a sequence
- delim: Delimiter in the dataset files
columns in csv file:
(idx), frame_id,team_id,player_id,pos_x, pos_y, player_position
->
data:
idx, frame_id,player_id,pos_x, pos_y, team_vector,position_vector
"""
super(TrajectoryDataset, self).__init__()
self.data_dir = data_dir
self.obs_len = obs_len
self.pred_len = pred_len
self.skip = skip
self.seq_len = self.obs_len + self.pred_len
self.delim = delim
if metric=="meter":
self.factor=0.3048 # foot to meter
else:
self.factor=1.0 # foot to foot
all_files = os.listdir(self.data_dir)
all_files = [os.path.join(self.data_dir, _path) for _path in all_files]
num_peds_in_seq = []
seq_list = []
seq_list_rel = []
loss_mask_list = []
non_linear_ped = []
team_vec_list = []
pos_vec_list = []
for path in tqdm(all_files):
data = parse_file(path, delim)
            # column 1 of each parsed row holds frame_id (column 0 is the csv row index)
            frames = np.unique(data[:, 1]).tolist()
            frame_data = []
            for frame in frames:
                frame_data.append(data[frame == data[:, 1], :])  # group rows by frame_id
num_sequences = int(
math.ceil((len(frames) - self.seq_len + 1) / skip))
for idx in range(0, num_sequences * self.skip + 1, skip):
curr_seq_data = np.concatenate(
frame_data[idx:idx + self.seq_len], axis=0)
peds_in_curr_seq = np.unique(curr_seq_data[:, 2]) # player_id
curr_seq_rel = np.zeros((len(peds_in_curr_seq), 2,
self.seq_len))
curr_seq = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
curr_loss_mask = np.zeros((len(peds_in_curr_seq),
self.seq_len))
# vectors
curr_team = np.zeros((len(peds_in_curr_seq), 3, self.seq_len)) # 0 1 ball
curr_position = np.zeros((len(peds_in_curr_seq), 4, self.seq_len)) # C F G ball
num_peds_considered = 0
_non_linear_ped = []
for _, ped_id in enumerate(peds_in_curr_seq):
curr_ped_seq_full = curr_seq_data[curr_seq_data[:, 2] == ped_id, :] # player_id
curr_ped_seq_full = np.around(curr_ped_seq_full, decimals=4)
pad_front = frames.index(curr_ped_seq_full[0, 1]) - idx # frame_id
pad_end = frames.index(curr_ped_seq_full[-1, 1]) - idx + 1 # frame_id
if pad_end - pad_front != self.seq_len or curr_ped_seq_full.shape[0] != self.seq_len:
continue
curr_ped_seq = np.transpose(curr_ped_seq_full[:, 3:5]) # x,y
curr_ped_seq = curr_ped_seq * self.factor # conversion
# Make coordinates relative
rel_curr_ped_seq = np.zeros(curr_ped_seq.shape)
rel_curr_ped_seq[:, 1:] = curr_ped_seq[:, 1:] - curr_ped_seq[:, :-1]
_idx = num_peds_considered
curr_seq[_idx, :, pad_front:pad_end] = curr_ped_seq
curr_seq_rel[_idx, :, pad_front:pad_end] = rel_curr_ped_seq
# Linear vs Non-Linear Trajectory
_non_linear_ped.append(
poly_fit(curr_ped_seq, pred_len, threshold))
curr_loss_mask[_idx, pad_front:pad_end] = 1
# Team vector
curr_ped_team = np.transpose(curr_ped_seq_full[:, 5:8]) # [ 0 1 ball]
curr_team[_idx, :, pad_front:pad_end] = curr_ped_team
# Position Vector
curr_ped_pos = np.transpose(curr_ped_seq_full[:, 8:]) # [ C F G ball]
curr_position[_idx, :, pad_front:pad_end] = curr_ped_pos
num_peds_considered += 1
if num_peds_considered > min_ped:
non_linear_ped += _non_linear_ped
num_peds_in_seq.append(num_peds_considered)
loss_mask_list.append(curr_loss_mask[:num_peds_considered])
seq_list.append(curr_seq[:num_peds_considered])
seq_list_rel.append(curr_seq_rel[:num_peds_considered])
team_vec_list.append(curr_team[:num_peds_considered]) # team vector
pos_vec_list.append(curr_position[:num_peds_considered]) # pos_vec_list
self.num_seq = len(seq_list)
seq_list = np.concatenate(seq_list, axis=0)
seq_list_rel = np.concatenate(seq_list_rel, axis=0)
team_vec_list = np.concatenate(team_vec_list, axis=0)
pos_vec_list = np.concatenate(pos_vec_list, axis=0)
loss_mask_list = np.concatenate(loss_mask_list, axis=0)
non_linear_ped = np.asarray(non_linear_ped)
# Convert numpy -> Torch Tensor
self.obs_traj = torch.from_numpy(
seq_list[:, :, :self.obs_len]).type(torch.float)
self.pred_traj = torch.from_numpy(
seq_list[:, :, self.obs_len:]).type(torch.float)
self.obs_traj_rel = torch.from_numpy(
seq_list_rel[:, :, :self.obs_len]).type(torch.float)
self.pred_traj_rel = torch.from_numpy(
seq_list_rel[:, :, self.obs_len:]).type(torch.float)
self.obs_team_vec = torch.from_numpy(
team_vec_list[:, :, :self.obs_len]).type(torch.float)
self.obs_pos_vec = torch.from_numpy(
pos_vec_list[:, :, :self.obs_len]).type(torch.float)
self.obs_team_vec_pred = torch.from_numpy(
team_vec_list[:, :, self.obs_len:]).type(torch.float)
self.obs_pos_vec_pred = torch.from_numpy(
pos_vec_list[:, :, self.obs_len:]).type(torch.float)
self.loss_mask = torch.from_numpy(loss_mask_list).type(torch.float)
self.non_linear_ped = torch.from_numpy(non_linear_ped).type(torch.float)
cum_start_idx = [0] + np.cumsum(num_peds_in_seq).tolist()
self.seq_start_end = [
(start, end)
for start, end in zip(cum_start_idx, cum_start_idx[1:])
]
def __len__(self):
return self.num_seq
def __getitem__(self, index):
start, end = self.seq_start_end[index]
out = [
self.obs_traj[start:end, :], self.pred_traj[start:end, :],
self.obs_traj_rel[start:end, :], self.pred_traj_rel[start:end, :],
self.obs_team_vec[start:end, :], self.obs_pos_vec[start:end, :],
self.obs_team_vec_pred[start: end, :], self.obs_pos_vec_pred[start: end, :],
self.non_linear_ped[start:end], self.loss_mask[start:end, :]
]
return out
| [
"torch.cat",
"torch.from_numpy",
"torch.LongTensor"
] | 0.4.1 | szhaofelicia/sgan | ead42d4bb3b1278c4c9ffcae8fa9c2dc036a52ff |
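A minimal, hypothetical usage sketch for the TrajectoryDataset above; the constructor arguments and data path are placeholders (the real signature is defined in the part of the file preceding this excerpt), and batch_size=1 with an identity collate is used because each item is one whole scene with a variable number of players.
from torch.utils.data import DataLoader

dataset = TrajectoryDataset(data_dir="datasets/train", obs_len=8, pred_len=12)  # hypothetical arguments
loader = DataLoader(dataset, batch_size=1, shuffle=True,
                    collate_fn=lambda batch: batch[0])  # keep the variable-size scene intact
for (obs_traj, pred_traj, obs_traj_rel, pred_traj_rel, team_vec, pos_vec,
     team_vec_pred, pos_vec_pred, non_linear_ped, loss_mask) in loader:
    print(obs_traj.shape)  # (num_players_in_scene, 2, obs_len)
    break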
1.0 | #!/usr/bin/env python3
import torch
import unittest
from gpytorch.lazy import NonLazyTensor, DiagLazyTensor, AddedDiagLazyTensor
from test.lazy._lazy_tensor_test_case import LazyTensorTestCase
class TestAddedDiagLazyTensor(LazyTensorTestCase, unittest.TestCase):
seed = 0
should_test_sample = True
def create_lazy_tensor(self):
tensor = torch.randn(5, 5)
tensor = tensor.transpose(-1, -2).matmul(tensor)
tensor.requires_grad_(True)
diag = torch.tensor([1.0, 2.0, 4.0, 2.0, 3.0], requires_grad=True)
return AddedDiagLazyTensor(NonLazyTensor(tensor), DiagLazyTensor(diag))
def evaluate_lazy_tensor(self, lazy_tensor):
diag = lazy_tensor._diag_tensor._diag
tensor = lazy_tensor._lazy_tensor.tensor
return tensor + diag.diag()
class TestAddedDiagLazyTensorBatch(LazyTensorTestCase, unittest.TestCase):
seed = 4
should_test_sample = True
def create_lazy_tensor(self):
tensor = torch.randn(3, 5, 5)
tensor = tensor.transpose(-1, -2).matmul(tensor)
tensor.requires_grad_(True)
diag = torch.tensor(
[[1.0, 2.0, 4.0, 2.0, 3.0], [2.0, 1.0, 2.0, 1.0, 4.0], [1.0, 2.0, 2.0, 3.0, 4.0]], requires_grad=True
)
return AddedDiagLazyTensor(NonLazyTensor(tensor), DiagLazyTensor(diag))
def evaluate_lazy_tensor(self, lazy_tensor):
diag = lazy_tensor._diag_tensor._diag
tensor = lazy_tensor._lazy_tensor.tensor
return tensor + torch.cat([diag[i].diag().unsqueeze(0) for i in range(3)])
if __name__ == "__main__":
unittest.main()
| [
"torch.tensor",
"torch.randn"
] | 1.0.0 | cdgreenidge/gpytorch | d4cc610963bd812052e43e3aed84fb8b2ec94aa6 |
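For context, a small sketch (not part of the test file) of what evaluate_lazy_tensor verifies: the lazy sum K + diag(d) behaves like the densely constructed matrix. It assumes LazyTensor exposes matmul, as it does in this gpytorch generation; everything else is plain torch.
import torch
from gpytorch.lazy import NonLazyTensor, DiagLazyTensor, AddedDiagLazyTensor

K = torch.randn(4, 4)
K = K.t().matmul(K)                       # symmetric PSD, as in the tests above
d = torch.tensor([1.0, 2.0, 3.0, 4.0])
lazy = AddedDiagLazyTensor(NonLazyTensor(K), DiagLazyTensor(d))
dense = K + d.diag()                      # the reference used by evaluate_lazy_tensor
v = torch.randn(4, 2)
print(torch.allclose(lazy.matmul(v), dense.matmul(v), atol=1e-5))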
1.1 | import os
import torch
import torch.nn as nn
from collections import deque
from onmt.utils.logging import logger
from copy import deepcopy
def build_model_saver(model_opt, opt, model, fields, optim):
model_saver = ModelSaver(opt.save_model,
model,
model_opt,
fields,
optim,
opt.keep_checkpoint)
return model_saver
class ModelSaverBase(object):
"""Base class for model saving operations
Inherited classes must implement private methods:
* `_save`
    * `_rm_checkpoint`
"""
def __init__(self, base_path, model, model_opt, fields, optim,
keep_checkpoint=-1):
self.base_path = base_path
self.model = model
self.model_opt = model_opt
self.fields = fields
self.optim = optim
self.last_saved_step = None
self.keep_checkpoint = keep_checkpoint
if keep_checkpoint > 0:
self.checkpoint_queue = deque([], maxlen=keep_checkpoint)
def save(self, step, moving_average=None):
"""Main entry point for model saver
        It wraps the `_save` method with checks and applies the
        `keep_checkpoint`-related logic
"""
if self.keep_checkpoint == 0 or step == self.last_saved_step:
return
if moving_average:
save_model = deepcopy(self.model)
for avg, param in zip(moving_average, save_model.parameters()):
param.data.copy_(avg.data)
else:
save_model = self.model
chkpt, chkpt_name = self._save(step, save_model)
self.last_saved_step = step
if moving_average:
del save_model
if self.keep_checkpoint > 0:
if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
todel = self.checkpoint_queue.popleft()
self._rm_checkpoint(todel)
self.checkpoint_queue.append(chkpt_name)
def _save(self, step):
"""Save a resumable checkpoint.
Args:
step (int): step number
Returns:
(object, str):
* checkpoint: the saved object
* checkpoint_name: name (or path) of the saved checkpoint
"""
raise NotImplementedError()
def _rm_checkpoint(self, name):
"""Remove a checkpoint
Args:
            name(str): name that identifies the checkpoint
(it may be a filepath)
"""
raise NotImplementedError()
class ModelSaver(ModelSaverBase):
"""Simple model saver to filesystem"""
def _save(self, step, model):
real_model = (model.module
if isinstance(model, nn.DataParallel)
else model)
real_generator = (real_model.generator.module
if isinstance(real_model.generator, nn.DataParallel)
else real_model.generator)
model_state_dict = real_model.state_dict()
model_state_dict = {k: v for k, v in model_state_dict.items()
if 'generator' not in k}
generator_state_dict = real_generator.state_dict()
# NOTE: We need to trim the vocab to remove any unk tokens that
# were not originally here.
vocab = deepcopy(self.fields)
if hasattr(model.encoder, 'is_graph_encoder'):
sides = ["src", "node1", "node2", "tgt"]
else:
sides = ["src", "tgt"]
for side in sides:
keys_to_pop = []
if hasattr(vocab[side], "fields"):
unk_token = vocab[side].fields[0][1].vocab.itos[0]
for key, value in vocab[side].fields[0][1].vocab.stoi.items():
if value == 0 and key != unk_token:
keys_to_pop.append(key)
for key in keys_to_pop:
vocab[side].fields[0][1].vocab.stoi.pop(key, None)
checkpoint = {
'model': model_state_dict,
'generator': generator_state_dict,
'vocab': vocab,
'opt': self.model_opt,
'optim': self.optim.state_dict(),
}
logger.info("Saving checkpoint %s_step_%d.pt" % (self.base_path, step))
checkpoint_path = '%s_step_%d.pt' % (self.base_path, step)
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _rm_checkpoint(self, name):
os.remove(name)
| [
"torch.save"
] | 1.1 | UKPLab/emnlp2019-dualgraph | 0c58fb7f3ad3b9da3b92b2d2841558807fc79fd0 |
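A hedged sketch of reading back a checkpoint written by ModelSaver._save above; the filename is hypothetical, and rebuilding the model and optimizer is left to the surrounding OpenNMT code, so only the dictionary layout produced by _save is assumed here.
import torch

checkpoint = torch.load("demo_step_1000.pt", map_location="cpu")  # hypothetical path
print(sorted(checkpoint.keys()))  # ['generator', 'model', 'opt', 'optim', 'vocab']
# model.load_state_dict(checkpoint["model"], strict=False)   # model must be built first
# generator.load_state_dict(checkpoint["generator"])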
3 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Dict
import torch
from detectron2.layers import ShapeSpec, cat
from detectron2.modeling import ROI_HEADS_REGISTRY
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs
from detectron2.modeling.roi_heads.roi_heads import StandardROIHeads, select_foreground_proposals
from pytorch3d.ops import cubify
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere
from meshrcnn.modeling.roi_heads.mask_head import mask_rcnn_loss
from meshrcnn.modeling.roi_heads.mesh_head import (
build_mesh_head,
mesh_rcnn_inference,
mesh_rcnn_loss,
)
from meshrcnn.modeling.roi_heads.voxel_head import (
build_voxel_head,
voxel_rcnn_inference,
voxel_rcnn_loss,
)
from meshrcnn.modeling.roi_heads.z_head import build_z_head, z_rcnn_inference, z_rcnn_loss
from meshrcnn.utils import vis as vis_utils
@ROI_HEADS_REGISTRY.register()
class MeshRCNNROIHeads(StandardROIHeads):
"""
The ROI specific heads for Mesh R-CNN
"""
def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
super().__init__(cfg, input_shape)
self._init_z_head(cfg, input_shape)
self._init_voxel_head(cfg, input_shape)
self._init_mesh_head(cfg, input_shape)
# If MODEL.VIS_MINIBATCH is True we store minibatch targets
# for visualization purposes
self._vis = cfg.MODEL.VIS_MINIBATCH
self._misc = {}
self._vis_dir = cfg.OUTPUT_DIR
def _init_z_head(self, cfg, input_shape):
# fmt: off
self.zpred_on = cfg.MODEL.ZPRED_ON
if not self.zpred_on:
return
z_pooler_resolution = cfg.MODEL.ROI_Z_HEAD.POOLER_RESOLUTION
z_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
z_sampling_ratio = cfg.MODEL.ROI_Z_HEAD.POOLER_SAMPLING_RATIO
z_pooler_type = cfg.MODEL.ROI_Z_HEAD.POOLER_TYPE
# fmt: on
self.z_loss_weight = cfg.MODEL.ROI_Z_HEAD.Z_REG_WEIGHT
self.z_smooth_l1_beta = cfg.MODEL.ROI_Z_HEAD.SMOOTH_L1_BETA
in_channels = [input_shape[f].channels for f in self.in_features][0]
self.z_pooler = ROIPooler(
output_size=z_pooler_resolution,
scales=z_pooler_scales,
sampling_ratio=z_sampling_ratio,
pooler_type=z_pooler_type,
)
shape = ShapeSpec(
channels=in_channels, width=z_pooler_resolution, height=z_pooler_resolution
)
self.z_head = build_z_head(cfg, shape)
def _init_voxel_head(self, cfg, input_shape):
# fmt: off
self.voxel_on = cfg.MODEL.VOXEL_ON
if not self.voxel_on:
return
voxel_pooler_resolution = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_RESOLUTION
voxel_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
voxel_sampling_ratio = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_SAMPLING_RATIO
voxel_pooler_type = cfg.MODEL.ROI_VOXEL_HEAD.POOLER_TYPE
# fmt: on
self.voxel_loss_weight = cfg.MODEL.ROI_VOXEL_HEAD.LOSS_WEIGHT
self.cls_agnostic_voxel = cfg.MODEL.ROI_VOXEL_HEAD.CLS_AGNOSTIC_VOXEL
self.cubify_thresh = cfg.MODEL.ROI_VOXEL_HEAD.CUBIFY_THRESH
in_channels = [input_shape[f].channels for f in self.in_features][0]
self.voxel_pooler = ROIPooler(
output_size=voxel_pooler_resolution,
scales=voxel_pooler_scales,
sampling_ratio=voxel_sampling_ratio,
pooler_type=voxel_pooler_type,
)
shape = ShapeSpec(
channels=in_channels, width=voxel_pooler_resolution, height=voxel_pooler_resolution
)
self.voxel_head = build_voxel_head(cfg, shape)
def _init_mesh_head(self, cfg, input_shape):
# fmt: off
self.mesh_on = cfg.MODEL.MESH_ON
if not self.mesh_on:
return
mesh_pooler_resolution = cfg.MODEL.ROI_MESH_HEAD.POOLER_RESOLUTION
mesh_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
mesh_sampling_ratio = cfg.MODEL.ROI_MESH_HEAD.POOLER_SAMPLING_RATIO
mesh_pooler_type = cfg.MODEL.ROI_MESH_HEAD.POOLER_TYPE
# fmt: on
self.chamfer_loss_weight = cfg.MODEL.ROI_MESH_HEAD.CHAMFER_LOSS_WEIGHT
self.normals_loss_weight = cfg.MODEL.ROI_MESH_HEAD.NORMALS_LOSS_WEIGHT
self.edge_loss_weight = cfg.MODEL.ROI_MESH_HEAD.EDGE_LOSS_WEIGHT
self.gt_num_samples = cfg.MODEL.ROI_MESH_HEAD.GT_NUM_SAMPLES
self.pred_num_samples = cfg.MODEL.ROI_MESH_HEAD.PRED_NUM_SAMPLES
self.gt_coord_thresh = cfg.MODEL.ROI_MESH_HEAD.GT_COORD_THRESH
self.ico_sphere_level = cfg.MODEL.ROI_MESH_HEAD.ICO_SPHERE_LEVEL
in_channels = [input_shape[f].channels for f in self.in_features][0]
self.mesh_pooler = ROIPooler(
output_size=mesh_pooler_resolution,
scales=mesh_pooler_scales,
sampling_ratio=mesh_sampling_ratio,
pooler_type=mesh_pooler_type,
)
self.mesh_head = build_mesh_head(
cfg,
ShapeSpec(
channels=in_channels, height=mesh_pooler_resolution, width=mesh_pooler_resolution
),
)
def forward(self, images, features, proposals, targets=None):
"""
See :class:`ROIHeads.forward`.
"""
if self._vis:
self._misc["images"] = images
del images
if self.training:
proposals = self.label_and_sample_proposals(proposals, targets)
del targets
if self._vis:
self._misc["proposals"] = proposals
if self.training:
losses = self._forward_box(features, proposals)
# During training the proposals used by the box head are
# used by the z, mask, voxel & mesh head.
losses.update(self._forward_z(features, proposals))
losses.update(self._forward_mask(features, proposals))
losses.update(self._forward_shape(features, proposals))
# print minibatch examples
if self._vis:
vis_utils.visualize_minibatch(self._misc["images"], self._misc, self._vis_dir, True)
return [], losses
else:
pred_instances = self._forward_box(features, proposals)
            # During inference cascaded prediction is used: the z, mask, voxel & mesh
            # heads are only applied to the top scoring box detections.
pred_instances = self.forward_with_given_boxes(features, pred_instances)
return pred_instances, {}
def forward_with_given_boxes(self, features, instances):
"""
Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
Args:
features: same as in `forward()`
instances (list[Instances]): instances to predict other outputs. Expect the keys
"pred_boxes" and "pred_classes" to exist.
Returns:
instances (Instances): the same `Instances` object, with extra
fields such as `pred_masks` or `pred_voxels`.
"""
assert not self.training
assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
instances = self._forward_z(features, instances)
instances = self._forward_mask(features, instances)
instances = self._forward_shape(features, instances)
return instances
def _forward_z(self, features, instances):
"""
Forward logic of the z prediction branch.
"""
if not self.zpred_on:
return {} if self.training else instances
features = [features[f] for f in self.in_features]
if self.training:
# The loss is only defined on positive proposals.
proposals, _ = select_foreground_proposals(instances, self.num_classes)
proposal_boxes = [x.proposal_boxes for x in proposals]
z_features = self.z_pooler(features, proposal_boxes)
z_pred = self.z_head(z_features)
src_boxes = cat([p.tensor for p in proposal_boxes])
loss_z_reg = z_rcnn_loss(
z_pred,
proposals,
src_boxes,
loss_weight=self.z_loss_weight,
smooth_l1_beta=self.z_smooth_l1_beta,
)
return {"loss_z_reg": loss_z_reg}
else:
pred_boxes = [x.pred_boxes for x in instances]
z_features = self.z_pooler(features, pred_boxes)
z_pred = self.z_head(z_features)
z_rcnn_inference(z_pred, instances)
return instances
def _forward_mask(self, features, instances):
"""
Forward logic of the mask prediction branch.
Args:
features (dict[str,Tensor]): mapping from names to backbone features
instances (list[Instances]): the per-image instances to train/predict masks.
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_masks" and return it.
"""
if not self.mask_on:
return {} if self.training else instances
features = [features[f] for f in self.in_features]
if self.training:
# The loss is only defined on positive proposals.
proposals, _ = select_foreground_proposals(instances, self.num_classes)
proposal_boxes = [x.proposal_boxes for x in proposals]
mask_features = self.mask_pooler(features, proposal_boxes)
mask_logits = self.mask_head.layers(mask_features)
loss_mask, target_masks = mask_rcnn_loss(mask_logits, proposals)
if self._vis:
self._misc["target_masks"] = target_masks
self._misc["fg_proposals"] = proposals
return {"loss_mask": loss_mask}
else:
pred_boxes = [x.pred_boxes for x in instances]
mask_features = self.mask_pooler(features, pred_boxes)
return self.mask_head(mask_features, instances)
def _forward_shape(self, features, instances):
"""
Forward logic for the voxel and mesh refinement branch.
Args:
features (list[Tensor]): #level input features for voxel prediction
instances (list[Instances]): the per-image instances to train/predict meshes.
In training, they can be the proposals.
In inference, they can be the predicted boxes.
Returns:
In training, a dict of losses.
In inference, update `instances` with new fields "pred_voxels" & "pred_meshes" and return it.
"""
if not self.voxel_on and not self.mesh_on:
return {} if self.training else instances
features = [features[f] for f in self.in_features]
if self.training:
# The loss is only defined on positive proposals.
proposals, _ = select_foreground_proposals(instances, self.num_classes)
proposal_boxes = [x.proposal_boxes for x in proposals]
losses = {}
if self.voxel_on:
voxel_features = self.voxel_pooler(features, proposal_boxes)
voxel_logits = self.voxel_head(voxel_features)
loss_voxel, target_voxels = voxel_rcnn_loss(
voxel_logits, proposals, loss_weight=self.voxel_loss_weight
)
losses.update({"loss_voxel": loss_voxel})
if self._vis:
self._misc["target_voxels"] = target_voxels
if self.cls_agnostic_voxel:
with torch.no_grad():
vox_in = voxel_logits.sigmoid().squeeze(1) # (N, V, V, V)
init_mesh = cubify(vox_in, self.cubify_thresh) # 1
else:
raise ValueError("No support for class specific predictions")
if self.mesh_on:
mesh_features = self.mesh_pooler(features, proposal_boxes)
if not self.voxel_on:
if mesh_features.shape[0] > 0:
init_mesh = ico_sphere(self.ico_sphere_level, mesh_features.device)
init_mesh = init_mesh.extend(mesh_features.shape[0])
else:
init_mesh = Meshes(verts=[], faces=[])
pred_meshes = self.mesh_head(mesh_features, init_mesh)
# loss weights
loss_weights = {
"chamfer": self.chamfer_loss_weight,
"normals": self.normals_loss_weight,
"edge": self.edge_loss_weight,
}
if not pred_meshes[0].isempty():
loss_chamfer, loss_normals, loss_edge, target_meshes = mesh_rcnn_loss(
pred_meshes,
proposals,
loss_weights=loss_weights,
gt_num_samples=self.gt_num_samples,
pred_num_samples=self.pred_num_samples,
gt_coord_thresh=self.gt_coord_thresh,
)
if self._vis:
self._misc["init_meshes"] = init_mesh
self._misc["target_meshes"] = target_meshes
else:
loss_chamfer = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
loss_normals = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
loss_edge = sum(k.sum() for k in self.mesh_head.parameters()) * 0.0
losses.update(
{
"loss_chamfer": loss_chamfer,
"loss_normals": loss_normals,
"loss_edge": loss_edge,
}
)
return losses
else:
pred_boxes = [x.pred_boxes for x in instances]
if self.voxel_on:
voxel_features = self.voxel_pooler(features, pred_boxes)
voxel_logits = self.voxel_head(voxel_features)
voxel_rcnn_inference(voxel_logits, instances)
if self.cls_agnostic_voxel:
with torch.no_grad():
vox_in = voxel_logits.sigmoid().squeeze(1) # (N, V, V, V)
init_mesh = cubify(vox_in, self.cubify_thresh) # 1
else:
raise ValueError("No support for class specific predictions")
if self.mesh_on:
mesh_features = self.mesh_pooler(features, pred_boxes)
if not self.voxel_on:
if mesh_features.shape[0] > 0:
init_mesh = ico_sphere(self.ico_sphere_level, mesh_features.device)
init_mesh = init_mesh.extend(mesh_features.shape[0])
else:
init_mesh = Meshes(verts=[], faces=[])
pred_meshes = self.mesh_head(mesh_features, init_mesh)
mesh_rcnn_inference(pred_meshes[-1], instances)
else:
assert self.voxel_on
mesh_rcnn_inference(init_mesh, instances)
return instances
| [
"torch.no_grad"
] | 3 | hsk9767/mesh_rcnn_copy | 6dd4d9ea8af33c03a084e34c7d16eeaddfe924ae |
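A self-contained sketch of the voxel-to-mesh step that _forward_shape performs twice above (cubify on sigmoided voxel scores); the occupancy tensor is random and 0.2 is an illustrative threshold, not a value taken from any config.
import torch
from pytorch3d.ops import cubify

vox_in = torch.rand(2, 24, 24, 24)     # (N, V, V, V) occupancy probabilities
init_mesh = cubify(vox_in, 0.2)        # one cuboid surface per voxel above the threshold
print(init_mesh.verts_padded().shape, init_mesh.faces_padded().shape)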
1.3 | import torch
from torch import nn
from torch.distributions import Categorical
class SoftmaxCategoricalHead(nn.Module):
def forward(self, logits):
return torch.distributions.Categorical(logits=logits)
# class MultiSoftmaxCategoricalHead(nn.Module):
# def forward(self, logits):
# return Independent(Categorical(logits=logits), reinterpreted_batch_ndims=1)
class MultiCategorical():
def __init__(self, dims=None, logits=None):
self.dims = dims
logits = torch.split(logits, tuple(dims), dim=1)
self.dists = [Categorical(logits=logits_dim) for logits_dim in logits]
def log_prob(self, actions):
actions = torch.unbind(actions, dim=1)
logprobs = torch.stack([
dist.log_prob(action) for dist, action in zip(self.dists, actions)
], dim=1)
return logprobs.sum(dim=1)
def entropy(self):
return torch.stack([dist.entropy() for dist in self.dists], dim=1).sum(dim=1)
def sample(self):
return torch.stack([dist.sample() for dist in self.dists], dim=1)
def mode(self):
return torch.stack([
torch.argmax(dist.probs, dim=1) for dist in self.dists
], dim=1)
class MultiSoftmaxCategoricalHead(nn.Module):
def __init__(self, dims=None):
self.dims = dims
super().__init__()
def forward(self, logits):
return MultiCategorical(dims=self.dims, logits=logits)
| [
"torch.unbind",
"torch.distributions.Categorical",
"torch.argmax"
] | 1.3.0 | tkelestemur/pfrl | 388855fb30313185d43ae0d0f4b694be647a5c43 |
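A quick sanity-check sketch for the head defined in this file, using only the classes shown above; the sub-action dimensions and batch size are arbitrary.
import torch

head = MultiSoftmaxCategoricalHead(dims=[3, 4])   # two discrete sub-action spaces
logits = torch.randn(5, 7)                        # batch of 5, 3 + 4 logits
dist = head(logits)
actions = dist.sample()                           # shape (5, 2): one action per space
print(actions.shape, dist.log_prob(actions).shape, dist.entropy().shape)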
1.6 | import argparse
import os
import pandas as pd
import numpy as np
import torch as t
from torch.optim import Adam
import pickle5 as pickle
import json
import random
from sample import sample_with_input, sample_with_beam
from utils.batch_loader import BatchLoader, clean_str
from model.paraphraser import Paraphraser
from model.generator import Generator
from synonym_paraphraser import SynonymParaphraser
def main():
parser = argparse.ArgumentParser(description='Paraphraser')
parser.add_argument('--use-cuda', type=bool, default=False, metavar='CUDA', help='use cuda (default: False)')
parser.add_argument('--seq-len', default=30, metavar='SL', help='max length of sequence (default: 30)')
parser.add_argument('--ml', type=bool, default=True, metavar='ML', help='sample by maximum likelihood')
args = parser.parse_args()
# Read data
if not os.path.exists('datasets/human_test.csv'):
source_file = 'datasets/test.csv'
source_data = pd.read_csv(source_file)[['question1', 'question2']]
sentence_categories = [[] for _ in range(5)]
for i in range(len(source_data)):
sent = clean_str(source_data['question1'][i])
sent_len = len(sent.split())
if sent_len < 6:
j = 0
elif sent_len < 11:
j = 1
elif sent_len < 16:
j = 2
elif sent_len < 21:
j = 3
else:
j = 4
sentence_categories[j].append([source_data['question1'][i], source_data['question2'][i]])
sample_data = []
for category in sentence_categories:
sample_data += random.sample(category, 20)
source_data = pd.DataFrame(sample_data, columns=['question1', 'question2'])
source_data.to_csv('datasets/human_test.csv')
else:
source_data = pd.read_csv('datasets/human_test_1.csv')[['question1', 'question2']]
# Sample from Guptas original model
batch_loader = BatchLoader()
from model.parameters import Parameters
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size)
paraphraser = Paraphraser(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_paraphraser_ori_32', map_location=t.device('cpu')))
samples_ori, target, source_ori = sample_with_input(batch_loader, paraphraser, args,
decoder_only=True,
file_name='datasets/human_test.csv')
ref_items = generate_items(source_ori, target, 'ref')
ori_items = generate_items(source_ori, samples_ori[0], 'ori')
# Sample from Guptas model with two-path-loss
batch_loader = BatchLoader()
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size, use_two_path_loss=True)
paraphraser = Paraphraser(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_paraphraser_tpl_16_32', map_location=t.device('cpu')))
samples_tpl, target, source_tpl = sample_with_input(batch_loader, paraphraser, args,
decoder_only=False,
file_name='datasets/human_test.csv')
tpl_items = generate_items(source_tpl, samples_tpl[0], 'tpl')
# Sample from GAN model
batch_loader = BatchLoader()
from model.parametersGAN import Parameters
parameters = Parameters(batch_loader.max_seq_len, batch_loader.vocab_size)
paraphraser = Generator(parameters)
paraphraser.load_state_dict(t.load('saved_models/trained_generator_gan_140k', map_location=t.device('cpu')))
samples_gan, target, source_gan = sample_with_input(batch_loader, paraphraser, args,
decoder_only=False,
file_name='datasets/human_test.csv')
gan_items = generate_items(source_gan, samples_gan[0], 'gan')
# Sample from synonym model
paraphraser = SynonymParaphraser()
samples_synonym = paraphraser.generate_paraphrases('datasets/human_test.csv')
base_items = generate_items(source_data['question1'], samples_synonym, 'base')
all_items = ref_items + ori_items + tpl_items + gan_items + base_items
eval_results = {'name' : 'Paraphrase Survey Full Ordered', 'items' : all_items}
res = json.dumps(eval_results, ensure_ascii=False)
with open('datasets/human_test_ordered.json', 'w') as f:
f.write(res)
random.shuffle(all_items)
eval_results = {'name' : 'Paraphrase Survey Full Shuffled', 'items' : all_items}
res = json.dumps(eval_results, ensure_ascii=False)
with open('datasets/human_test_shuffled.json', 'w') as f:
f.write(res)
for i in range(10):
eval_results = {'name' : f'Paraphrase Survey Part {i+1}/{10}', 'items' : all_items[i*50:((i+1)*50)-1]}
res = json.dumps(eval_results, ensure_ascii=False)
with open(f'datasets/human_test_p_{i}_{10}.json', 'w') as f:
f.write(res)
def generate_items(original, paraphrase, model):
items = []
for i in range(len(original)):
questions = 'Fråga 1: ' + original[i] + '?<br>Fråga 2: ' + paraphrase[i] + '?'
item = {
'question' : questions,
'required' : True,
'extra' : {'model' : model},
'order': -1,
'answer_sets' : [
{
"type": "radio",
"name": "Fråga 1 är grammatiskt korrekt: ",
"choices": [ "0", "1", "2", "3"]
},
{
"type": "radio",
"name": "Fråga 2 är grammatiskt korrekt: ",
"choices": [ "0", "1", "2", "3"]
},
{
"type": "radio",
"name": "Fråga 2 är betyder samma sak som Fråga 1: ",
"choices": [ "0", "1", "2", "3"]
}]
}
items.append(item)
return items
if __name__ == '__main__':
main()
| [
"torch.device"
] | 1.6.0 | nlindqv/pytorch_RVAE | d9e58134965f69aad557fb3bd2478500a51210f8 |
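A tiny illustration of generate_items above with two dummy sentence pairs; the answer-set labels come from the function itself, the inputs are made up.
originals = ["how do i cook pasta", "what is machine learning"]
paraphrases = ["how can i make pasta", "what does machine learning mean"]
items = generate_items(originals, paraphrases, model="demo")
print(len(items), items[0]["extra"])   # 2 {'model': 'demo'}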
1.4 | import sys
import os
import torch
import torch.onnx
import torch.distributed as dist
import torch.nn as nn
import onnxruntime
from datetime import datetime
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from pepper_variant.modules.python.models.dataloader_predict import SequenceDataset
from pepper_variant.modules.python.models.ModelHander import ModelHandler
from pepper_variant.modules.python.Options import ImageSizeOptions, TrainOptions
from pepper_variant.modules.python.DataStorePredict import DataStore
def predict(input_filepath, file_chunks, output_filepath, model_path, batch_size, num_workers, threads, thread_id):
# session options
sess_options = onnxruntime.SessionOptions()
sess_options.intra_op_num_threads = threads
sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
ort_session = onnxruntime.InferenceSession(model_path + ".onnx", sess_options=sess_options)
torch.set_num_threads(threads)
# create output file
output_filename = output_filepath + "pepper_prediction_" + str(thread_id) + ".hdf"
prediction_data_file = DataStore(output_filename, mode='w')
# data loader
input_data = SequenceDataset(input_filepath, file_chunks)
data_loader = DataLoader(input_data,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers)
batch_completed = 0
total_batches = len(data_loader)
with torch.no_grad():
for contig, contig_start, contig_end, chunk_id, images, position, index in data_loader:
images = images.type(torch.FloatTensor)
hidden = torch.zeros(images.size(0), 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)
prediction_base_tensor = torch.zeros((images.size(0), images.size(1), ImageSizeOptions.TOTAL_LABELS))
for i in range(0, ImageSizeOptions.SEQ_LENGTH, TrainOptions.WINDOW_JUMP):
if i + TrainOptions.TRAIN_WINDOW > ImageSizeOptions.SEQ_LENGTH:
break
chunk_start = i
chunk_end = i + TrainOptions.TRAIN_WINDOW
# chunk all the data
image_chunk = images[:, chunk_start:chunk_end]
# run inference on onnx mode, which takes numpy inputs
ort_inputs = {ort_session.get_inputs()[0].name: image_chunk.cpu().numpy(),
ort_session.get_inputs()[1].name: hidden.cpu().numpy()}
output_base, hidden = ort_session.run(None, ort_inputs)
output_base = torch.from_numpy(output_base)
hidden = torch.from_numpy(hidden)
# now calculate how much padding is on the top and bottom of this chunk so we can do a simple
# add operation
top_zeros = chunk_start
bottom_zeros = ImageSizeOptions.SEQ_LENGTH - chunk_end
# do softmax and get prediction
# we run a softmax a padding to make the output tensor compatible for adding
inference_layers = nn.Sequential(
nn.Softmax(dim=2),
nn.ZeroPad2d((0, 0, top_zeros, bottom_zeros))
)
# run the softmax and padding layers
base_prediction = (inference_layers(output_base) * 10).type(torch.IntTensor)
# now simply add the tensor to the global counter
prediction_base_tensor = torch.add(prediction_base_tensor, base_prediction)
# base_values, base_labels = torch.max(prediction_base_tensor, 2)
#
# predicted_base_labels = base_labels.cpu().numpy()
prediction_base_tensor = prediction_base_tensor.cpu().numpy().astype(int)
for i in range(images.size(0)):
prediction_data_file.write_prediction(contig[i],
contig_start[i],
contig_end[i],
chunk_id[i],
position[i],
index[i],
prediction_base_tensor[i])
batch_completed += 1
if thread_id == 0 and batch_completed % 5 == 0:
sys.stderr.write("[" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + "] " +
"INFO: BATCHES PROCESSED " + str(batch_completed) + "/" + str(total_batches) + ".\n")
sys.stderr.flush()
def cleanup():
dist.destroy_process_group()
def setup(rank, total_callers, args, all_input_files):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=total_callers)
filepath, output_filepath, model_path, batch_size, threads, num_workers = args
# Explicitly setting seed to make sure that models created in two processes
# start from same random weights and biases.
predict(filepath, all_input_files[rank], output_filepath, model_path, batch_size, num_workers, threads, rank)
cleanup()
def predict_distributed_cpu(filepath, file_chunks, output_filepath, model_path, batch_size, callers, threads, num_workers):
"""
Create a prediction table/dictionary of an images set using a trained model.
:param filepath: Path to image files to predict on
:param file_chunks: Path to chunked files
:param batch_size: Batch size used for prediction
:param model_path: Path to a trained model
:param output_filepath: Path to output directory
:param callers: Number of callers to start
:param threads: Number of threads per caller.
:param num_workers: Number of workers to be used by the dataloader
:return: Prediction dictionary
"""
transducer_model, hidden_size, gru_layers, prev_ite = \
ModelHandler.load_simple_model_for_training(model_path,
input_channels=ImageSizeOptions.IMAGE_CHANNELS,
image_features=ImageSizeOptions.IMAGE_HEIGHT,
seq_len=ImageSizeOptions.SEQ_LENGTH,
num_classes=ImageSizeOptions.TOTAL_LABELS)
transducer_model.eval()
sys.stderr.write("[" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + "] INFO: MODEL LOADING TO ONNX\n")
x = torch.zeros(1, TrainOptions.TRAIN_WINDOW, ImageSizeOptions.IMAGE_HEIGHT)
h = torch.zeros(1, 2 * TrainOptions.GRU_LAYERS, TrainOptions.HIDDEN_SIZE)
if not os.path.isfile(model_path + ".onnx"):
sys.stderr.write("[" + str(datetime.now().strftime('%m-%d-%Y %H:%M:%S')) + "] INFO: SAVING MODEL TO ONNX\n")
torch.onnx.export(transducer_model, (x, h),
model_path + ".onnx",
training=False,
opset_version=10,
do_constant_folding=True,
input_names=['input_image', 'input_hidden'],
output_names=['output_pred', 'output_hidden'],
dynamic_axes={'input_image': {0: 'batch_size'},
'input_hidden': {0: 'batch_size'},
'output_pred': {0: 'batch_size'},
'output_hidden': {0: 'batch_size'}})
transducer_model.eval()
args = (filepath, output_filepath, model_path, batch_size, threads, num_workers)
mp.spawn(setup,
args=(callers, args, file_chunks),
nprocs=callers,
join=True)
| [
"torch.zeros",
"torch.nn.Softmax",
"torch.distributed.destroy_process_group",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.multiprocessing.spawn",
"torch.add",
"torch.from_numpy",
"torch.utils.data.DataLoader",
"torch.onnx.export",
"torch.nn.ZeroPad2d",
"torch.set_num_threads"
] | 1.4.0 | Samteymoori/pepper | 734d226de47a855952e3b58145c1fcfbe221d3b4 |
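A stand-alone sketch of the export-then-infer pattern used by predict_distributed_cpu and predict above, with a toy GRU in place of the real transducer model; the tensor sizes, input/output names and file name are placeholders.
import torch
import torch.nn as nn
import onnxruntime

model = nn.GRU(input_size=10, hidden_size=16, batch_first=True)   # toy stand-in model
model.eval()
x = torch.zeros(1, 20, 10)   # (batch, seq, features)
h = torch.zeros(1, 1, 16)    # (layers, batch, hidden)
torch.onnx.export(model, (x, h), "toy_gru.onnx",
                  input_names=["input_image", "input_hidden"],
                  output_names=["output_pred", "output_hidden"],
                  dynamic_axes={"input_image": {0: "batch_size"},
                                "input_hidden": {1: "batch_size"},
                                "output_pred": {0: "batch_size"},
                                "output_hidden": {1: "batch_size"}})
sess = onnxruntime.InferenceSession("toy_gru.onnx")
out, hidden = sess.run(None, {"input_image": x.numpy(), "input_hidden": h.numpy()})
print(out.shape, hidden.shape)   # (1, 20, 16) (1, 1, 16)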
1.7 | """
Evaluate
"""
import re
import math
import datetime
import random
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from loss import iou_loss, HairMattingLoss, acc_loss, F1_loss
from utils import create_multi_figure
USE_CUDA = torch.cuda.is_available()
DEVICE = torch.device("cuda" if USE_CUDA else "cpu")
def evalTest(test_data, model, args):
testloader = DataLoader(test_data, batch_size=4, shuffle=False)
hairmat_loss = HairMattingLoss(args.grad_lambda)
total_loss, total_iou, total_acc, total_f1 = 0, 0, 0, 0
for batch in testloader:
image, mask = (i.to(DEVICE) for i in batch)
pred = model(image)
total_loss += hairmat_loss(pred, mask, image).item()
iloss = iou_loss(pred, mask).item()
total_iou += iloss
aloss = acc_loss(pred, mask).item()
total_acc += aloss
floss = F1_loss(pred, mask).item()
total_f1 += floss
print("Testing Loss: ", total_loss / len(testloader))
print("Testing IOU: ", total_iou / len(testloader))
print("Testing Acc: ", total_acc / len(testloader))
print("Testing F1: ", total_f1 / len(testloader))
def evaluateOne(img, model, absolute=True):
img = img.to(DEVICE).unsqueeze(0)
pred = model(img)
if absolute:
pred[pred > 0.5] = 1.0
pred[pred <= 0.5] = 0.0
else:
pred[pred < 0.4] = 0
# pred[pred < .90] = 0
rows = [[img[0], pred[0]]]
create_multi_figure(rows, dye=True)
plt.savefig("result.jpg")
def evaluate(test_data, model, num, absolute=True):
rows = [None] * num
for i in range(num):
idx = random.randint(0, len(test_data) - 1)
image, mask = (i.to(DEVICE).unsqueeze(0) for i in test_data[idx])
pred = model(image)
if absolute:
pred[pred > 0.5] = 1.0
pred[pred <= 0.5] = 0.0
else:
pred[pred < 0.4] = 0
rows[i] = [image[0], mask[0], pred[0]] # get batch
create_multi_figure(rows, dye=True)
plt.savefig("result.jpg")
| [
"torch.device",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.7.1 | eric91sanchez/hair_seg | 4f688daac0ec4ea906ff0462ae51634293e35447 |
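The hard thresholding used in evaluate and evaluateOne above, plus one common IoU formulation as a reference point; loss.py is not shown here, so its iou_loss may be defined differently.
import torch

def soft_iou(pred, mask, eps=1e-6):
    # a common intersection-over-union definition; the project's iou_loss may differ
    inter = (pred * mask).sum()
    union = pred.sum() + mask.sum() - inter
    return inter / (union + eps)

pred = torch.rand(1, 1, 8, 8)
pred = (pred > 0.5).float()          # equivalent to the two in-place assignments above
mask = torch.ones(1, 1, 8, 8)
print(soft_iou(pred, mask).item())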
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from argparse import ArgumentParser
from typing import Any, Dict
from unittest import mock
from unittest.mock import call, PropertyMock
import pytest
import torch
from pytorch_lightning import LightningDataModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.utilities import AttributeDict
from pytorch_lightning.utilities.model_helpers import is_overridden
from tests.helpers import BoringDataModule, BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
from tests.helpers.utils import reset_seed
@mock.patch("pytorch_lightning.trainer.trainer.Trainer.node_rank", new_callable=PropertyMock)
@mock.patch("pytorch_lightning.trainer.trainer.Trainer.local_rank", new_callable=PropertyMock)
def test_can_prepare_data(local_rank, node_rank):
model = BoringModel()
dm = BoringDataModule()
trainer = Trainer()
trainer.model = model
trainer.datamodule = dm
# 1 no DM
# prepare_data_per_node = True
# local rank = 0 (True)
trainer.prepare_data_per_node = True
dm.random_full = None
dm._has_prepared_data = False
local_rank.return_value = 0
assert trainer.local_rank == 0
assert trainer.data_connector.can_prepare_data()
trainer.data_connector.prepare_data()
assert dm.random_full is not None
# local rank = 1 (False)
dm.random_full = None
dm._has_prepared_data = False
local_rank.return_value = 1
assert trainer.local_rank == 1
assert not trainer.data_connector.can_prepare_data()
trainer.data_connector.prepare_data()
assert dm.random_full is None
# prepare_data_per_node = False (prepare across all nodes)
# global rank = 0 (True)
dm.random_full = None
dm._has_prepared_data = False
trainer.prepare_data_per_node = False
node_rank.return_value = 0
local_rank.return_value = 0
assert trainer.data_connector.can_prepare_data()
trainer.data_connector.prepare_data()
assert dm.random_full is not None
# global rank = 1 (False)
dm.random_full = None
dm._has_prepared_data = False
node_rank.return_value = 1
local_rank.return_value = 0
assert not trainer.data_connector.can_prepare_data()
trainer.data_connector.prepare_data()
assert dm.random_full is None
node_rank.return_value = 0
local_rank.return_value = 1
assert not trainer.data_connector.can_prepare_data()
trainer.data_connector.prepare_data()
assert dm.random_full is None
# 2 dm
    # prepare_data_per_node = True
# local rank = 0 (True)
trainer.prepare_data_per_node = True
local_rank.return_value = 0
# is_overridden prepare data = True
# has been called
# False
dm._has_prepared_data = True
assert not trainer.data_connector.can_prepare_data()
# has not been called
# True
dm._has_prepared_data = False
assert trainer.data_connector.can_prepare_data()
# is_overridden prepare data = False
# True
dm.prepare_data = None
assert trainer.data_connector.can_prepare_data()
def test_hooks_no_recursion_error():
    # hooks were appended in cascade every time a new data module was instantiated, leading to a recursion error.
# See https://github.com/PyTorchLightning/pytorch-lightning/issues/3652
class DummyDM(LightningDataModule):
def setup(self, *args, **kwargs):
pass
def prepare_data(self, *args, **kwargs):
pass
for i in range(1005):
dm = DummyDM()
dm.setup()
dm.prepare_data()
def test_helper_boringdatamodule():
dm = BoringDataModule()
dm.prepare_data()
dm.setup()
def test_helper_boringdatamodule_with_verbose_setup():
dm = BoringDataModule()
dm.prepare_data()
dm.setup("fit")
dm.setup("test")
def test_data_hooks_called():
dm = BoringDataModule()
assert not dm.has_prepared_data
assert not dm.has_setup_fit
assert not dm.has_setup_test
assert not dm.has_setup_validate
assert not dm.has_setup_predict
assert not dm.has_teardown_fit
assert not dm.has_teardown_test
assert not dm.has_teardown_validate
assert not dm.has_teardown_predict
dm.prepare_data()
assert dm.has_prepared_data
assert not dm.has_setup_fit
assert not dm.has_setup_test
assert not dm.has_setup_validate
assert not dm.has_setup_predict
assert not dm.has_teardown_fit
assert not dm.has_teardown_test
assert not dm.has_teardown_validate
assert not dm.has_teardown_predict
dm.setup()
assert dm.has_prepared_data
assert dm.has_setup_fit
assert dm.has_setup_test
assert dm.has_setup_validate
assert not dm.has_setup_predict
assert not dm.has_teardown_fit
assert not dm.has_teardown_test
assert not dm.has_teardown_validate
assert not dm.has_teardown_predict
dm.teardown()
assert dm.has_prepared_data
assert dm.has_setup_fit
assert dm.has_setup_test
assert dm.has_setup_validate
assert not dm.has_setup_predict
assert dm.has_teardown_fit
assert dm.has_teardown_test
assert dm.has_teardown_validate
assert not dm.has_teardown_predict
@pytest.mark.parametrize("use_kwarg", (False, True))
def test_data_hooks_called_verbose(use_kwarg):
dm = BoringDataModule()
dm.prepare_data()
assert not dm.has_setup_fit
assert not dm.has_setup_test
assert not dm.has_setup_validate
assert not dm.has_setup_predict
assert not dm.has_teardown_fit
assert not dm.has_teardown_test
assert not dm.has_teardown_validate
assert not dm.has_teardown_predict
dm.setup(stage="fit") if use_kwarg else dm.setup("fit")
assert dm.has_setup_fit
assert not dm.has_setup_validate
assert not dm.has_setup_test
assert not dm.has_setup_predict
dm.setup(stage="validate") if use_kwarg else dm.setup("validate")
assert dm.has_setup_fit
assert dm.has_setup_validate
assert not dm.has_setup_test
assert not dm.has_setup_predict
dm.setup(stage="test") if use_kwarg else dm.setup("test")
assert dm.has_setup_fit
assert dm.has_setup_validate
assert dm.has_setup_test
assert not dm.has_setup_predict
dm.setup(stage="predict") if use_kwarg else dm.setup("predict")
assert dm.has_setup_fit
assert dm.has_setup_validate
assert dm.has_setup_test
assert dm.has_setup_predict
dm.teardown(stage="fit") if use_kwarg else dm.teardown("fit")
assert dm.has_teardown_fit
assert not dm.has_teardown_validate
assert not dm.has_teardown_test
assert not dm.has_teardown_predict
dm.teardown(stage="validate") if use_kwarg else dm.teardown("validate")
assert dm.has_teardown_fit
assert dm.has_teardown_validate
assert not dm.has_teardown_test
assert not dm.has_teardown_predict
dm.teardown(stage="test") if use_kwarg else dm.teardown("test")
assert dm.has_teardown_fit
assert dm.has_teardown_validate
assert dm.has_teardown_test
assert not dm.has_teardown_predict
dm.teardown(stage="predict") if use_kwarg else dm.teardown("predict")
assert dm.has_teardown_fit
assert dm.has_teardown_validate
assert dm.has_teardown_test
assert dm.has_teardown_predict
def test_dm_add_argparse_args(tmpdir):
parser = ArgumentParser()
parser = BoringDataModule.add_argparse_args(parser)
args = parser.parse_args(["--data_dir", str(tmpdir)])
assert args.data_dir == str(tmpdir)
def test_dm_init_from_argparse_args(tmpdir):
parser = ArgumentParser()
parser = BoringDataModule.add_argparse_args(parser)
args = parser.parse_args(["--data_dir", str(tmpdir)])
dm = BoringDataModule.from_argparse_args(args)
dm.prepare_data()
dm.setup()
assert dm.data_dir == args.data_dir == str(tmpdir)
def test_dm_pickle_after_init():
dm = BoringDataModule()
pickle.dumps(dm)
def test_train_loop_only(tmpdir):
reset_seed()
dm = ClassifDataModule()
model = ClassificationModel()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
model.test_step = None
model.test_step_end = None
model.test_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)
# fit model
trainer.fit(model, datamodule=dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.callback_metrics["train_loss"] < 1.0
def test_train_val_loop_only(tmpdir):
reset_seed()
dm = ClassifDataModule()
model = ClassificationModel()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None)
# fit model
trainer.fit(model, datamodule=dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.callback_metrics["train_loss"] < 1.0
def test_dm_checkpoint_save(tmpdir):
class CustomBoringModel(BoringModel):
def validation_step(self, batch, batch_idx):
out = super().validation_step(batch, batch_idx)
self.log("early_stop_on", out["x"])
return out
class CustomBoringDataModule(BoringDataModule):
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
checkpoint[self.__class__.__name__] = self.__class__.__name__
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
self.checkpoint_state = checkpoint.get(self.__class__.__name__)
reset_seed()
dm = CustomBoringDataModule()
model = CustomBoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=2,
limit_val_batches=1,
weights_summary=None,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on")],
)
# fit model
trainer.fit(model, dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
checkpoint_path = list(trainer.checkpoint_callback.best_k_models.keys())[0]
checkpoint = torch.load(checkpoint_path)
assert dm.__class__.__name__ in checkpoint
assert checkpoint[dm.__class__.__name__] == dm.__class__.__name__
def test_full_loop(tmpdir):
reset_seed()
dm = ClassifDataModule()
model = ClassificationModel()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, weights_summary=None, deterministic=True)
# fit model
trainer.fit(model, dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert dm.trainer is not None
# validate
result = trainer.validate(model, dm)
assert dm.trainer is not None
assert result[0]["val_acc"] > 0.7
# test
result = trainer.test(model, dm)
assert dm.trainer is not None
assert result[0]["test_acc"] > 0.6
@RunIf(min_gpus=1)
@mock.patch("pytorch_lightning.accelerators.accelerator.Accelerator.lightning_module", new_callable=PropertyMock)
def test_dm_apply_batch_transfer_handler(get_module_mock):
expected_device = torch.device("cuda", 0)
class CustomBatch:
def __init__(self, data):
self.samples = data[0]
self.targets = data[1]
class CurrentTestDM(LightningDataModule):
rank = 0
transfer_batch_to_device_hook_rank = None
on_before_batch_transfer_hook_rank = None
on_after_batch_transfer_hook_rank = None
def on_before_batch_transfer(self, batch, dataloader_idx):
assert dataloader_idx == 0
self.on_before_batch_transfer_hook_rank = self.rank
self.rank += 1
batch.samples += 1
return batch
def on_after_batch_transfer(self, batch, dataloader_idx):
assert dataloader_idx == 0
assert batch.samples.device == batch.targets.device == expected_device
self.on_after_batch_transfer_hook_rank = self.rank
self.rank += 1
batch.targets *= 2
return batch
def transfer_batch_to_device(self, batch, device, dataloader_idx):
assert dataloader_idx == 0
self.transfer_batch_to_device_hook_rank = self.rank
self.rank += 1
batch.samples = batch.samples.to(device)
batch.targets = batch.targets.to(device)
return batch
dm = CurrentTestDM()
model = BoringModel()
batch = CustomBatch((torch.zeros(5, 32), torch.ones(5, 1, dtype=torch.long)))
trainer = Trainer(gpus=1)
# running .fit() would require us to implement custom data loaders, we mock the model reference instead
get_module_mock.return_value = model
if is_overridden("transfer_batch_to_device", dm):
model.transfer_batch_to_device = dm.transfer_batch_to_device
model.on_before_batch_transfer = dm.on_before_batch_transfer
model.transfer_batch_to_device = dm.transfer_batch_to_device
model.on_after_batch_transfer = dm.on_after_batch_transfer
batch_gpu = trainer.accelerator.batch_to_device(batch, expected_device)
assert dm.on_before_batch_transfer_hook_rank == 0
assert dm.transfer_batch_to_device_hook_rank == 1
assert dm.on_after_batch_transfer_hook_rank == 2
assert batch_gpu.samples.device == batch_gpu.targets.device == expected_device
assert torch.allclose(batch_gpu.samples.cpu(), torch.ones(5, 32))
assert torch.allclose(batch_gpu.targets.cpu(), torch.ones(5, 1, dtype=torch.long) * 2)
def test_dm_reload_dataloaders_every_n_epochs(tmpdir):
"""
    Test a datamodule where the trainer argument
    reload_dataloaders_every_n_epochs is set to a non-negative integer.
"""
class CustomBoringDataModule(BoringDataModule):
def __init__(self):
super().__init__()
self._epochs_called_for = []
def train_dataloader(self):
assert self.trainer.current_epoch not in self._epochs_called_for
self._epochs_called_for.append(self.trainer.current_epoch)
return super().train_dataloader()
dm = CustomBoringDataModule()
model = BoringModel()
model.validation_step = None
model.validation_step_end = None
model.validation_epoch_end = None
model.test_step = None
model.test_step_end = None
model.test_epoch_end = None
trainer = Trainer(default_root_dir=tmpdir, max_epochs=3, limit_train_batches=2, reload_dataloaders_every_n_epochs=2)
trainer.fit(model, dm)
class DummyDS(torch.utils.data.Dataset):
def __getitem__(self, index):
return 1
def __len__(self):
return 100
class DummyIDS(torch.utils.data.IterableDataset):
def __iter__(self):
yield 1
@pytest.mark.parametrize("iterable", (False, True))
def test_dm_init_from_datasets_dataloaders(iterable):
ds = DummyIDS if iterable else DummyDS
train_ds = ds()
dm = LightningDataModule.from_datasets(train_ds, batch_size=4, num_workers=0)
with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
dm.train_dataloader()
dl_mock.assert_called_once_with(train_ds, batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True)
assert dm.val_dataloader() is None
assert dm.test_dataloader() is None
train_ds_sequence = [ds(), ds()]
dm = LightningDataModule.from_datasets(train_ds_sequence, batch_size=4, num_workers=0)
with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
dm.train_dataloader()
dl_mock.assert_has_calls(
[
call(train_ds_sequence[0], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),
call(train_ds_sequence[1], batch_size=4, shuffle=not iterable, num_workers=0, pin_memory=True),
]
)
assert dm.val_dataloader() is None
assert dm.test_dataloader() is None
valid_ds = ds()
test_ds = ds()
dm = LightningDataModule.from_datasets(val_dataset=valid_ds, test_dataset=test_ds, batch_size=2, num_workers=0)
with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
dm.val_dataloader()
dl_mock.assert_called_with(valid_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)
dm.test_dataloader()
dl_mock.assert_called_with(test_ds, batch_size=2, shuffle=False, num_workers=0, pin_memory=True)
assert dm.train_dataloader() is None
valid_dss = [ds(), ds()]
test_dss = [ds(), ds()]
dm = LightningDataModule.from_datasets(train_ds, valid_dss, test_dss, batch_size=4, num_workers=0)
with mock.patch("pytorch_lightning.core.datamodule.DataLoader") as dl_mock:
dm.val_dataloader()
dm.test_dataloader()
dl_mock.assert_has_calls(
[
call(valid_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
call(valid_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
call(test_dss[0], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
call(test_dss[1], batch_size=4, shuffle=False, num_workers=0, pin_memory=True),
]
)
class DataModuleWithHparams(LightningDataModule):
def __init__(self, arg0, arg1, kwarg0=None):
super().__init__()
self.save_hyperparameters()
def test_simple_hyperparameters_saving():
data = DataModuleWithHparams(10, "foo", kwarg0="bar")
assert data.hparams == AttributeDict({"arg0": 10, "arg1": "foo", "kwarg0": "bar"})
| [
"torch.zeros",
"torch.device",
"torch.load",
"torch.ones"
] | 1.6 | lsqshr/pytorch-lightning | c6b68883879e38719688865aceac746477f0a9b9 |
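A minimal LightningDataModule of the kind these tests exercise — prepare_data, stage-aware setup, and a train_dataloader; the dataset is a stand-in and nothing here comes from the test file itself.
import torch
from torch.utils.data import DataLoader, TensorDataset
from pytorch_lightning import LightningDataModule

class ToyDataModule(LightningDataModule):
    def prepare_data(self):
        # one-time, single-process work (downloads, writing to disk) goes here
        pass

    def setup(self, stage=None):
        # per-process setup; stage is "fit", "validate", "test" or "predict"
        data = torch.randn(64, 32)
        self.train_set = TensorDataset(data, torch.zeros(64, dtype=torch.long))

    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=8, shuffle=True)

dm = ToyDataModule()
dm.prepare_data()
dm.setup("fit")
print(next(iter(dm.train_dataloader()))[0].shape)   # torch.Size([8, 32])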
1.2 | import torch
from torch.utils.data import DataLoader
from torch import nn
from pytorch_transformers import AdamW, WEIGHTS_NAME, WarmupLinearSchedule
import csv
import numpy as np
import os
import logging
from fp16 import FP16_Module, FP16_Optimizer
from parallel import DataParallelModel, DataParallelCriterion
from collections import OrderedDict
from utils import *
from settings import args, TASK_DICT, init_logging, MODEL_CONFIG, MODEL_CLASS, SPECIAL_TOKENS, CONFIG_CLASS
from settings import TOKENIZER, SPECIAL_TOKEN_IDS, FILL_VAL, SAVE_NAME, FINAL_SAVE_NAME, TOKENS_WEIGHT, CONFIG_NAME
from scheduler import AnnealingLR
from regularizers import REG_TYPES, REG_TYPE_KEYS, Weight_Regularized_AdamW, Weight_Regularized_SGD
from torch.nn import CrossEntropyLoss
logger = logging.getLogger(__name__)
def train(task_ids, model):
tasks = [args.tasks[task_id] for task_id in task_ids]
logger.info("start to train { task: %s, seq train type: %s }" % (tasks, args.seq_train_type))
model_dir = get_model_dir(tasks)
make_dir(model_dir)
train_dataset = [TASK_DICT[t]["train"] for t in tasks]
train_extra_data = []
if "lll" in args.seq_train_type and task_ids[0] > 0 and not args.skip_tasks:
prev_task = args.tasks[task_ids[0]-1]
with torch.no_grad():
create_extra_data(tasks[0], prev_task, model, train_extra_data)
elif "gem" in args.seq_train_type and task_ids[0] > 0:
get_real_data(tasks[0], train_extra_data, accum=False, encode=True)
args.memory_data.append(train_extra_data)
train_extra_data = []
logger.info('extra training data size: {}'.format(len(train_extra_data)))
if not model:
# which_model_to_load = model_dir if os.path.isfile(os.path.join(model_dir, FINAL_SAVE_NAME)) else args.model_name
model = MODEL_CLASS.from_pretrained(args.model_name).cuda()
model.resize_token_embeddings(len(TOKENIZER))
if not args.fp32:
model = FP16_Module(model)
gen_token = get_gen_token(tasks[0])
TOKENIZER.add_tokens([gen_token])
TOKENIZER.save_pretrained(model_dir)
SPECIAL_TOKENS[tasks[0]] = gen_token
SPECIAL_TOKEN_IDS[tasks[0]] = TOKENIZER.convert_tokens_to_ids(gen_token)
logger.info('gen token = {} , gen token id = {}'.format(gen_token, SPECIAL_TOKEN_IDS[tasks[0]]))
MODEL_CONFIG.vocab_size = len(TOKENIZER)
MODEL_CONFIG.to_json_file(os.path.join(model_dir,CONFIG_NAME))
global TOKENS_WEIGHT
if len(TOKENIZER) != TOKENS_WEIGHT.shape[0]:
TOKENS_WEIGHT = torch.cat((TOKENS_WEIGHT, torch.ones([1]).cuda()))
if args.skip_tasks and len(tasks) == 1:
logger.info("*********** skip task: {} ***********".format(tasks[0]))
if tasks[0] in args.skip_tasks:
if len(args.skip_tasks) == 1:
model_dir = get_model_dir(tasks)
model_path = os.path.join(model_dir, FINAL_SAVE_NAME)
config_path = os.path.join(model_dir,CONFIG_NAME)
model_config = CONFIG_CLASS.from_json_file(config_path)
model = MODEL_CLASS(model_config).cuda()
state_dict = torch.load(model_path)
model.load_state_dict(state_dict)
if not args.fp32:
model = FP16_Module(model)
if args.seq_train_type in REG_TYPE_KEYS:
logger.info("calulating reg_params ...")
train_qadata = QADataset(train_dataset, "train", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)
max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)
train_dataloader = create_dataloader(train_qadata, "train", max_train_batch_size)
parallel_model = DataParallelModel(WrapModel(model), args.device_ids)
regularizer = REG_TYPES[args.seq_train_type](model, parallel_model, [train_dataloader], tasks[0])
regularizer.task_start_do()
regularizer.task_end_do()
torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))
logger.info("done reg_params!")
args.skip_tasks.remove(tasks[0])
return model
model.resize_token_embeddings(len(TOKENIZER))
if not args.fp32: # again because resize_token_embeddings makes embedding layer fp32
model = FP16_Module(model)
parallel_model = DataParallelModel(WrapModel(model), args.device_ids)
train_qadata = QADataset(train_dataset, "train", SPECIAL_TOKEN_IDS[tasks[0]], train_extra_data)
max_train_batch_size = max(len(train_qadata) // args.min_n_steps, args.min_batch_size)
train_dataloader = create_dataloader(train_qadata, "train", max_train_batch_size)
if not args.unbound and args.seq_train_type != "multitask":
#n_train_epochs = TASK_DICT[tasks[0]]["n_train_epochs"]
n_train_epochs = args.n_train_epochs[tasks[0]]
else:
n_train_epochs = args.n_train_epochs['_'.join(tasks)]
n_train_optimization_steps = len(train_qadata) * n_train_epochs
logger.info('len of train dataset: {} , max train batch size {} , num of opt steps: {}'.format(
len(train_qadata), max_train_batch_size, n_train_optimization_steps))
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if "gem" in args.seq_train_type:
model.task_id = task_ids[0]
if not hasattr(model, "grad_dims"):
model.grad_dims = []
for param in model.parameters():
model.grad_dims.append(param.data.numel())
if not hasattr(model, "grads"):
model.grads = torch.zeros(sum(model.grad_dims),len(args.tasks))
model.grads = model.grads.cuda()
if args.seq_train_type in REG_TYPE_KEYS:
optimizer = Weight_Regularized_AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
if not args.fp32:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=None, dynamic_loss_scale=True,
dynamic_loss_args={'scale_window': 100, 'min_scale': 1, 'delayed_shift': 2})
scheduler = AnnealingLR(optimizer, start_lr=args.learning_rate, warmup_iter=int(args.n_warmup_ratio*len(train_qadata)),
num_iters=int(n_train_optimization_steps), decay_style=args.decay_style)
train_loss_fct = DataParallelCriterion(CrossEntropyLoss(ignore_index=FILL_VAL, weight=TOKENS_WEIGHT), args.device_ids)
if args.seq_train_type in REG_TYPE_KEYS:
copy_train_dataloader = create_dataloader(train_qadata, "train", max_train_batch_size)
prev_task = args.tasks[task_ids[0]-1]
regularizer = REG_TYPES[args.seq_train_type](model, parallel_model, [copy_train_dataloader], tasks[0], prev_task)
regularizer.task_start_do()
tot_n_steps = 0
train_once = TrainStep(model, optimizer, scheduler)
if "gem" in args.seq_train_type and task_ids[0] != 0:
gem_step = GEMStep(model, parallel_model, train_loss_fct, optimizer)
model.train()
for ep in range(n_train_epochs):
cum_loss, cum_qa_loss, cum_lm_loss, cur_n_inputs = 0, 0, 0, 0
for n_steps, (_, _, cqa, _, Y, gen_X, gen_Y) in enumerate(train_dataloader):
n_inputs = sum(_cqa.shape[0] for _cqa in cqa)
for i in range(len(cqa)):
cqa[i] = (cqa[i].to(args.device_ids[i]),)
Y[i] = Y[i].to(args.device_ids[i])
gen_X[i] = (gen_X[i].to(args.device_ids[i]),)
gen_Y[i] = gen_Y[i].to(args.device_ids[i])
losses = get_losses(parallel_model, cqa, Y, gen_X, gen_Y, train_loss_fct)
loss = sum(losses)
if "gem" in args.seq_train_type and task_ids[0] != 0:
gem_step(task_ids[0])
train_once(loss, n_inputs)
qa_loss = losses[0].item() * n_inputs
lm_loss = losses[1].item() * n_inputs
cum_loss += (qa_loss + lm_loss)
cum_qa_loss += qa_loss
cum_lm_loss += lm_loss
cur_n_inputs += n_inputs
if (n_steps + 1) % args.logging_steps == 0:
logger.info('progress {:.3f} , lr {:.1E} , loss {:.3f} , qa loss {:.3f} , lm loss {:.3f} , avg batch size {:.1f}'.format(
ep + cur_n_inputs/len(train_qadata), scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs,
cur_n_inputs/(n_steps + 1)
))
torch.save(model.state_dict(), os.path.join(model_dir, SAVE_NAME+str(ep+1)))
tot_n_steps += (n_steps + 1)
logger.info('epoch {}/{} done , tot steps {} , lr {:.1E} , loss {:.2f} , qa loss {:.2f} , lm loss {:.2f} , avg batch size {:.1f}'.format(
ep+1, n_train_epochs, tot_n_steps, scheduler.get_lr(), cum_loss/cur_n_inputs, cum_qa_loss/cur_n_inputs, cum_lm_loss/cur_n_inputs, cur_n_inputs/(n_steps+1)
))
# task end do for reg
if args.seq_train_type in REG_TYPE_KEYS:
regularizer.task_end_do()
torch.save(model.state_dict(), os.path.join(model_dir, FINAL_SAVE_NAME))
return model
if __name__ == '__main__':
if not args.debug:
logging.getLogger("pytorch_transformers").setLevel(logging.WARNING)
logging.getLogger("pytorch_transformers.tokenization_utils").setLevel(logging.CRITICAL)
make_dir(args.model_dir_root)
init_logging(os.path.join(args.model_dir_root, 'log_train.txt'))
logger.info('args = {}'.format(str(args)))
model = None
if args.seq_train_type == "multitask":
model = train(list(range(len(args.tasks))), model)
else:
if args.unbound:
TASK_DICT = lll_unbound_setting(split_size=args.unbound)
for task_id in range(len(args.tasks)):
model = train([task_id], model)
| [
"torch.no_grad",
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.ones"
] | 1.2.0 | jojotenya/LAMOL | 03c31d9f0c7bf71295bc2d362ddf40a7656956e1 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager, suppress
from copy import copy, deepcopy
import numpy as np
import torch
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.plugins import ParallelPlugin
from pytorch_lightning.trainer.states import RunningStage, TrainerState
from pytorch_lightning.trainer.supporters import Accumulator, TensorRunningAccum
from pytorch_lightning.utilities import _TPU_AVAILABLE, AMPType, DeviceType, parsing
from pytorch_lightning.utilities.distributed import rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.memory import recursive_detach
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.warnings import WarningCache
class TrainLoop:
def __init__(self, trainer, multiple_trainloader_mode):
self.trainer = trainer
self.early_stopping_accumulator = None
self.checkpoint_accumulator = None
self.accumulated_loss = None
self.warning_cache = WarningCache()
self._teardown_already_run = False
self.running_loss = TensorRunningAccum(window_length=20)
self.automatic_optimization = True
self._curr_step_result = None
self._cur_grad_norm_dict = None
self._multiple_trainloader_mode = multiple_trainloader_mode
self._skip_backward = False
self.trainer._multiple_trainloader_mode = multiple_trainloader_mode
def on_trainer_init(
self,
max_epochs,
min_epochs,
max_steps,
min_steps,
num_sanity_val_steps,
automatic_optimization,
weights_summary,
):
self.trainer.global_step = 0
self.trainer.current_epoch = 0
self.trainer.interrupted = False
self.trainer.should_stop = False
self.trainer._state = TrainerState.INITIALIZING
self.trainer.total_batch_idx = 0
self.trainer.batch_idx = 0
self.trainer.num_training_batches = 0
self.trainer.train_dataloader = None
self.automatic_optimization = automatic_optimization
# If neither max_epochs nor max_steps is set, then use the existing default of max_epochs = 1000
self.trainer.max_epochs = 1000 if (max_epochs is None and max_steps is None) else max_epochs
# If neither min_epochs nor min_steps is set, then use the existing default of min_epochs = 1
self.trainer.min_epochs = 1 if (min_epochs is None and min_steps is None) else min_epochs
self.trainer.max_steps = max_steps
self.trainer.min_steps = min_steps
if num_sanity_val_steps == -1:
self.trainer.num_sanity_val_steps = float("inf")
else:
self.trainer.num_sanity_val_steps = num_sanity_val_steps
self.trainer.weights_summary = weights_summary
if weights_summary is not None and weights_summary not in ModelSummary.MODES:
raise MisconfigurationException(
f"`weights_summary` can be None, {', '.join(ModelSummary.MODES)}, got {weights_summary}"
)
@property
def num_optimizers(self):
num_optimizers = len(self.get_optimizers_iterable())
return num_optimizers
def should_skip_training(self):
should_by_max_steps = self.trainer.max_steps is not None and self.trainer.global_step >= self.trainer.max_steps
should_by_epoch = self.trainer.max_epochs is not None and self.trainer.current_epoch >= self.trainer.max_epochs
return should_by_max_steps or should_by_epoch or self.trainer.num_training_batches == 0
def on_train_start(self):
# hook
self.trainer.call_hook("on_train_start")
# provide rank to profiler
self.trainer.profile_connector.on_train_start(self.trainer)
def setup_fit(self, model, train_dataloader, val_dataloaders, datamodule):
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# links data to the trainer
self.trainer.data_connector.attach_data(model, train_dataloader, val_dataloaders, datamodule)
# check that model is configured correctly
self.trainer.config_validator.verify_loop_configurations(model)
# attach model log function to callback
self.trainer.callback_connector.attach_model_logging_functions(model)
def on_train_end(self):
if self._teardown_already_run:
return
self._teardown_already_run = True
# trigger checkpoint check. need to temporarily decrease the global step to avoid saving duplicates
# when a checkpoint was saved at the last step
self.trainer.global_step -= 1
self.check_checkpoint_callback(should_update=True, is_last=True)
self.trainer.global_step += 1
# hook
self.trainer.call_hook("on_train_end")
# todo: TPU 8 cores hangs in flush with TensorBoard. Might do for all loggers.
# It might be related to xla tensors blocked when moving to the cpu
# kill loggers
if self.trainer.logger is not None and self.trainer.training_type_plugin.should_finalize:
self.trainer.logger.finalize("success")
# summarize profile results
if self.trainer.global_rank == 0:
self.trainer.profiler.describe()
# give accelerators a chance to finish
self.trainer.accelerator_backend.on_train_end()
# clear mem
if self.trainer._device_type == DeviceType.GPU:
model = self.trainer.get_model()
model.cpu()
torch.cuda.empty_cache()
def check_checkpoint_callback(self, should_update, is_last=False):
# TODO bake this logic into the ModelCheckpoint callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = self.trainer.checkpoint_callbacks
if is_last and any(cb.save_last for cb in callbacks):
rank_zero_info("Saving latest checkpoint...")
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def check_early_stopping_callback(self, should_update):
# TODO bake this logic into the EarlyStopping callback
if should_update and self.trainer.checkpoint_connector.has_trained:
callbacks = [c for c in self.trainer.callbacks if isinstance(c, EarlyStopping)]
model = self.trainer.get_model()
for cb in callbacks:
cb.on_validation_end(self.trainer, model)
def on_train_epoch_start(self, epoch):
# update training progress in trainer
self.trainer.current_epoch = epoch
model = self.trainer.get_model()
# reset train dataloader
if epoch != 0 and self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
# todo: specify the possible exception
with suppress(Exception):
# set seed for distributed sampler (enables shuffling for each epoch)
self.trainer.train_dataloader.sampler.set_epoch(epoch)
# change gradient accumulation according to the accumulation_scheduler
self.trainer.accumulation_scheduler.on_epoch_start(self.trainer, self.trainer.get_model())
# stores accumulated grad fractions per batch
self.accumulated_loss = TensorRunningAccum(window_length=self.trainer.accumulate_grad_batches)
# structured result accumulators for callbacks
self.early_stopping_accumulator = Accumulator()
self.checkpoint_accumulator = Accumulator()
# hook
self.trainer.call_hook("on_epoch_start")
self.trainer.call_hook("on_train_epoch_start")
def on_train_batch_end(self, epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx):
# hook
self.trainer.call_hook('on_train_batch_end', batch_end_outputs, batch, batch_idx, dataloader_idx)
self.trainer.call_hook('on_batch_end')
# figure out what to track for epoch end
self.track_epoch_end_reduce_metrics(epoch_output, batch_end_outputs)
# reset batch logger internals
self.trainer.logger_connector.on_train_batch_end()
def reset_train_val_dataloaders(self, model):
if self.trainer.train_dataloader is None or not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_train_dataloader(model)
if self.trainer.val_dataloaders is None and not self.trainer.reload_dataloaders_every_epoch:
self.trainer.reset_val_dataloader(model)
def track_epoch_end_reduce_metrics(self, epoch_output, batch_end_outputs):
# track the outputs to reduce at the end of the epoch
for opt_idx, opt_outputs in enumerate(batch_end_outputs):
sample_output = opt_outputs[-1]
# decide if we need to reduce at the end of the epoch automatically
auto_reduce_tng_result = isinstance(sample_output, Result) and sample_output.should_reduce_on_epoch_end
hook_overridden = (
is_overridden("training_epoch_end", model=self.trainer.get_model())
or is_overridden("on_train_epoch_end", model=self.trainer.get_model())
)
# only track when a) it needs to be autoreduced OR b) the user wants to manually reduce on epoch end
if not (hook_overridden or auto_reduce_tng_result):
continue
# with 1 step (no tbptt) don't use a sequence at epoch end
if isinstance(opt_outputs, list) and len(opt_outputs) == 1 and not isinstance(opt_outputs[0], Result):
opt_outputs = opt_outputs[0]
epoch_output[opt_idx].append(opt_outputs)
def get_optimizers_iterable(self):
"""
Generates an iterable with (idx, optimizer) for each optimizer.
"""
if not self.trainer.optimizer_frequencies:
# call training_step once per optimizer
return list(enumerate(self.trainer.optimizers))
optimizer_freq_cumsum = np.cumsum(self.trainer.optimizer_frequencies)
optimizers_loop_length = optimizer_freq_cumsum[-1]
current_place_in_loop = self.trainer.total_batch_idx % optimizers_loop_length
# find optimizer index by looking for the first {item > current_place} in the cumsum list
opt_idx = np.argmax(optimizer_freq_cumsum > current_place_in_loop)
return [[opt_idx, self.trainer.optimizers[opt_idx]]]
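# Editor's illustrative sketch (a hypothetical helper, not part of the original TrainLoop
# class): it shows how the cumulative-frequency lookup above maps a batch index to an
# optimizer index when `optimizer_frequencies` is set.
def _demo_pick_optimizer_index(total_batch_idx, optimizer_frequencies):
    freq_cumsum = np.cumsum(optimizer_frequencies)
    place_in_loop = total_batch_idx % freq_cumsum[-1]
    return int(np.argmax(freq_cumsum > place_in_loop))
# With frequencies [2, 1]: batches 0 and 1 use optimizer 0, batch 2 uses optimizer 1,
# and the pattern repeats every 3 batches:
# [_demo_pick_optimizer_index(i, [2, 1]) for i in range(6)] == [0, 0, 1, 0, 0, 1]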
def on_after_backward(self, training_step_output, batch_idx, untouched_loss):
is_result_obj = isinstance(training_step_output, Result)
if is_result_obj:
training_step_output.detach()
else:
training_step_output.batch_loss = training_step_output.batch_loss.detach()
# insert after step hook
self.trainer.call_hook("on_after_backward")
# when in dev debugging track the losses
self.trainer.dev_debugger.track_train_loss_history(batch_idx, untouched_loss.detach())
def _check_training_step_output(self, training_step_output):
if isinstance(training_step_output, torch.Tensor) and not self.automatic_optimization:
if training_step_output.grad_fn is None:
# TODO: Find why - RuntimeError: Expected to mark a variable ready only once ...
raise MisconfigurationException("In manual optimization, `training_step` should not return a Tensor")
def training_step(self, split_batch, batch_idx, opt_idx, hiddens):
# give the PL module a result for logging
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("model_forward"):
args = self.build_train_args(split_batch, batch_idx, opt_idx, hiddens)
# manually capture logged metrics
model_ref._current_fx_name = 'training_step'
model_ref._results = Result()
with self.trainer.profiler.profile("training_step"):
training_step_output = self.trainer.accelerator_backend.training_step(args)
self.trainer.accelerator_backend.post_training_step()
self.trainer.logger_connector.cache_logged_metrics()
self._check_training_step_output(training_step_output)
training_step_output = self.trainer.call_hook("training_step_end", training_step_output)
training_step_output_for_epoch_end, training_step_output = self._process_training_step_output(
training_step_output, split_batch
)
is_result_obj = isinstance(training_step_output, Result)
if training_step_output_for_epoch_end is None:
return None
# enable empty loss when using manual opt
closure_loss = None
untouched_loss = None
if self.trainer.train_loop.automatic_optimization:
# accumulate loss
# (if accumulate_grad_batches = 1 no effect)
if is_result_obj:
closure_loss = training_step_output.minimize
else:
closure_loss = training_step_output.batch_loss
closure_loss = closure_loss / self.trainer.accumulate_grad_batches
# the loss will get scaled for amp. avoid any modifications to it
untouched_loss = closure_loss.detach().clone()
# result
result = AttributeDict(
closure_loss=closure_loss,
loss=untouched_loss,
training_step_output=training_step_output,
training_step_output_for_epoch_end=training_step_output_for_epoch_end,
hiddens=training_step_output.hiddens,
)
return result
def _process_training_step_output(self, training_step_output, split_batch):
training_step_output_for_epoch_end = training_step_output
# enable validation_step return None
if training_step_output_for_epoch_end is None:
return None, None
# -----------------------------------------
# process result return (DEPRECATE in 1.0)
# -----------------------------------------
if isinstance(training_step_output, Result):
training_step_output_for_epoch_end = self._process_result(training_step_output, split_batch)
return training_step_output_for_epoch_end, training_step_output
# -----------------------------------------
# process hybrid (1.0)
# -----------------------------------------
# no need for these checks in 1.0.0
# TODO: remove checks in 1.0.0
is_tensor = isinstance(training_step_output_for_epoch_end, torch.Tensor)
is_1_0_output = is_tensor or ("log" not in training_step_output and "progress_bar" not in training_step_output)
if is_1_0_output:
return self._process_training_step_output_1_0(training_step_output, split_batch)
# -----------------------------------------
# process old dict (deprecate 1.0)
# -----------------------------------------
training_step_output = self.trainer.process_dict_result(training_step_output, train=True)
training_step_output = AttributeDict(
batch_loss=training_step_output[0],
pbar_on_batch_end=training_step_output[1],
log_metrics=training_step_output[2],
callback_metrics=training_step_output[3],
hiddens=training_step_output[4],
)
# if the user decides to finally reduce things in epoch_end, save raw output without graphs
if isinstance(training_step_output_for_epoch_end, torch.Tensor):
training_step_output_for_epoch_end = training_step_output_for_epoch_end.detach()
else:
training_step_output_for_epoch_end = recursive_detach(training_step_output_for_epoch_end)
return training_step_output_for_epoch_end, training_step_output
def _process_training_step_output_1_0(self, training_step_output, split_batch):
result = self.trainer.get_model()._results
loss = None
hiddens = None
# handle dict return
if isinstance(training_step_output, dict):
loss = training_step_output.pop("loss", None)
hiddens = training_step_output.pop("hiddens", None)
result["extra"] = training_step_output
# handle scalar return
elif isinstance(training_step_output, torch.Tensor):
loss = training_step_output
result["extra"] = {}
# map to results under the hood
result.minimize = loss
result.hiddens = hiddens
# track batch for manual reduction with result
result.track_batch_size(len(split_batch))
# track metrics without grads for epoch reduction
training_step_output_for_epoch_end = copy(result)
training_step_output_for_epoch_end.detach()
if self.trainer.move_metrics_to_cpu:
training_step_output_for_epoch_end.cpu()
# what flows back into the system
training_step_output = result
return training_step_output_for_epoch_end, training_step_output
def _process_result(self, training_step_output, split_batch):
training_step_output.track_batch_size(len(split_batch))
m = """
TrainResult and EvalResult were deprecated in 0.9.1 and support will drop in 1.0.0.
Use self.log and .write from the LightningModule to log metrics and write predictions.
training_step can now only return a scalar (for the loss) or a dictionary with anything you want.
Option 1:
return loss
Option 2:
return {'loss': loss, 'anything_else': ...}
Option 3:
return {'loss': loss, 'hiddens': hiddens, 'anything_else': ...}
"""
rank_zero_warn(m)
training_step_output_for_epoch_end = copy(training_step_output)
training_step_output_for_epoch_end.detach()
return training_step_output_for_epoch_end
def optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure):
model_ref = self.trainer.get_model()
is_lbfgs = isinstance(optimizer, torch.optim.LBFGS)
using_native_amp = self.trainer.amp_backend == AMPType.NATIVE
# native amp + lbfgs is a no go right now
if using_native_amp and is_lbfgs:
raise MisconfigurationException(
'native PyTorch amp and lbfgs are not compatible.'
' To request, please file a Github issue in PyTorch and tag @mcarilli'
)
# wraps into LightningOptimizer only for running step
optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, opt_idx)
# model hook
model_ref.optimizer_step(
self.trainer.current_epoch,
batch_idx,
optimizer,
opt_idx,
train_step_and_backward_closure,
on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
using_native_amp=using_native_amp,
using_lbfgs=is_lbfgs,
)
def on_before_zero_grad(self, optimizer):
self.trainer.call_hook('on_before_zero_grad', optimizer)
def optimizer_zero_grad(self, batch_idx, optimizer, opt_idx):
self.trainer.accelerator_backend.optimizer_zero_grad(self.trainer.current_epoch, batch_idx, optimizer, opt_idx)
def track_and_norm_grad(self, optimizer):
# track gradient norms
grad_norm_dic = self._track_gradient_norm()
# clip gradients
self.trainer.accelerator_backend.clip_gradients(optimizer, self.trainer.gradient_clip_val)
self._cur_grad_norm_dict = grad_norm_dic
def _track_gradient_norm(self):
grad_norm_dict = {}
if (self.trainer.global_step + 1) % self.trainer.log_every_n_steps == 0:
if float(self.trainer.track_grad_norm) > 0:
model = self.trainer.get_model()
grad_norm_dict = model.grad_norm(self.trainer.track_grad_norm)
return grad_norm_dict
def process_hiddens(self, opt_closure_result):
hiddens = opt_closure_result.hiddens
if isinstance(opt_closure_result.training_step_output, Result):
opt_closure_result.training_step_output_for_epoch_end.drop_hiddens()
return hiddens
def tbptt_split_batch(self, batch):
splits = [batch]
if self.trainer.truncated_bptt_steps is not None:
model_ref = self.trainer.get_model()
with self.trainer.profiler.profile("tbptt_split_batch"):
splits = model_ref.tbptt_split_batch(batch, self.trainer.truncated_bptt_steps)
return splits
def run_training_epoch(self):
# modify dataloader if needed (ddp, etc...)
train_dataloader = self.trainer.accelerator_backend.process_dataloader(self.trainer.train_dataloader)
# track epoch output
epoch_output = [[] for _ in range(self.num_optimizers)]
train_dataloader = self.trainer.data_connector.get_profiled_train_dataloader(train_dataloader)
dataloader_idx = 0
should_check_val = False
for batch_idx, (batch, is_last_batch) in train_dataloader:
self.trainer.batch_idx = batch_idx
# ------------------------------------
# TRAINING_STEP + TRAINING_STEP_END
# ------------------------------------
with self.trainer.profiler.profile("run_training_batch"):
batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
# when returning -1 from train_step, we end epoch early
if batch_output.signal == -1:
break
batch_end_outputs = self.process_train_step_outputs(
batch_output.training_step_output_for_epoch_end,
self.early_stopping_accumulator,
self.checkpoint_accumulator,
)
# hook
# TODO: add outputs to batches
self.on_train_batch_end(epoch_output, batch_end_outputs, batch, batch_idx, dataloader_idx)
# -----------------------------------------
# SAVE METRICS TO LOGGERS
# -----------------------------------------
self.trainer.logger_connector.log_train_step_metrics(batch_output)
# -----------------------------------------
# VALIDATE IF NEEDED + CHECKPOINT CALLBACK
# -----------------------------------------
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch)
if should_check_val:
self.trainer.run_evaluation()
# reset stage to train
self.trainer._set_wide_running_stage(RunningStage.TRAINING)
# -----------------------------------------
# SAVE LOGGERS (ie: Tensorboard, etc...)
# -----------------------------------------
self.save_loggers_on_train_batch_end()
# update LR schedulers
monitor_metrics = deepcopy(self.trainer.logger_connector.callback_metrics)
self.update_train_loop_lr_schedulers(monitor_metrics=monitor_metrics)
self.trainer.checkpoint_connector.has_trained = True
# max steps reached, end training
if (
self.trainer.max_steps is not None and self.trainer.max_steps == self.trainer.global_step + 1
and self._accumulated_batches_reached()
):
break
# end epoch early
# stop when the flag is changed or we've gone past the amount
# requested in the batches
if self.trainer.should_stop:
break
self.trainer.total_batch_idx += 1
# stop epoch if we limited the number of training batches
if self._num_training_batches_reached(is_last_batch):
break
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
# epoch end hook
self.run_on_epoch_end_hook(epoch_output)
# log epoch metrics
self.trainer.logger_connector.log_train_epoch_end_metrics(
epoch_output, self.checkpoint_accumulator, self.early_stopping_accumulator, self.num_optimizers
)
should_check_val = self.should_check_val_fx(batch_idx, is_last_batch, on_epoch=True)
if should_check_val:
self.trainer.run_evaluation(on_epoch=True)
# reset stage to train
self.trainer._set_wide_running_stage(RunningStage.TRAINING)
should_skip_eval = self.trainer.evaluation_loop.should_skip_evaluation(self.trainer.num_val_batches)
should_train_only = self.trainer.disable_validation or should_skip_eval
if should_train_only:
# update epoch level lr_schedulers
self.trainer.optimizer_connector.update_learning_rates(interval='epoch')
self.check_checkpoint_callback(True)
self.check_early_stopping_callback(True)
# increment the global step once
# progress global step according to grads progress
self.increment_accumulated_grad_global_step()
def run_training_batch(self, batch, batch_idx, dataloader_idx):
# track grad norms
grad_norm_dic = {}
# bookkeeping
self.trainer.hiddens = None
# track all outputs across time and num of optimizers
batch_outputs = [[] for _ in range(len(self.get_optimizers_iterable()))]
if batch is None:
return AttributeDict(signal=0, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_batch_start")
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# hook
response = self.trainer.call_hook("on_train_batch_start", batch, batch_idx, dataloader_idx)
if response == -1:
return AttributeDict(signal=-1, grad_norm_dic=grad_norm_dic)
# lightning module hook
splits = self.tbptt_split_batch(batch)
for split_idx, split_batch in enumerate(splits):
# create an iterable for optimizers and loop over them
for opt_idx, optimizer in self.prepare_optimizers():
# toggle model params + set info to logger_connector
self.run_train_split_start(split_idx, split_batch, opt_idx, optimizer)
if self.should_accumulate():
# For gradient accumulation
# -------------------
# calculate loss (train step + train step end)
# -------------------
# automatic_optimization=True: perform ddp sync only when performing optimizer_step
# automatic_optimization=False: don't block synchronization here
with self.block_ddp_sync_behaviour():
self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# ------------------------------
# BACKWARD PASS
# ------------------------------
# gradient update with accumulated gradients
else:
if self.automatic_optimization:
def train_step_and_backward_closure():
result = self.training_step_and_backward(
split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
)
return None if result is None else result.loss
# optimizer step
self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
else:
self._curr_step_result = self.training_step(
split_batch, batch_idx, opt_idx, self.trainer.hiddens
)
if self._curr_step_result is None:
# user decided to skip optimization
# make sure to zero grad.
continue
batch_outputs = self._process_closure_result(
batch_outputs=batch_outputs,
opt_idx=opt_idx,
)
# todo: Properly aggregate grad_norm across opt_idx and split_idx
grad_norm_dic = self._cur_grad_norm_dict
self._cur_grad_norm_dict = None
# update running loss + reset accumulated loss
self.update_running_loss()
result = AttributeDict(
signal=0,
grad_norm_dic=grad_norm_dic,
training_step_output_for_epoch_end=batch_outputs,
)
return result
@contextmanager
def block_ddp_sync_behaviour(self, should_block_sync: bool = False):
"""
automatic_optimization = True
Blocks ddp sync gradients behaviour on backwards pass.
This is useful for skipping sync when accumulating gradients, reducing communication overhead
automatic_optimization = False
do not block ddp gradient sync when using manual optimization
as gradients are needed within the training step
Returns:
context manager with sync behaviour off
"""
if (
isinstance(self.trainer.training_type_plugin, ParallelPlugin)
and (self.automatic_optimization or should_block_sync)
):
with self.trainer.training_type_plugin.block_backward_sync():
yield None
else:
yield None
def _process_closure_result(self, batch_outputs: list, opt_idx: int) -> list:
opt_closure_result = self._curr_step_result
if opt_closure_result is not None:
# cache metrics
self.trainer.logger_connector.cache_training_step_metrics(opt_closure_result)
# track hiddens
self.trainer.hiddens = self.process_hiddens(opt_closure_result)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(opt_closure_result.loss)
# track all the outputs across all steps
batch_opt_idx = opt_idx if len(batch_outputs) > 1 else 0
batch_outputs[batch_opt_idx].append(opt_closure_result.training_step_output_for_epoch_end)
if self.automatic_optimization:
# track total loss for logging (avoid mem leaks)
self.accumulated_loss.append(opt_closure_result.loss)
self._curr_step_result = None
return batch_outputs
def training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens):
"""
wrap the forward step in a closure so second order methods work
"""
with self.trainer.profiler.profile("training_step_and_backward"):
# lightning module hook
result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
self._curr_step_result = result
if result is None:
if self.automatic_optimization:
self.warning_cache.warn("training_step returned None. If it was on purpose, ignore this warning...")
return None
if not self._skip_backward and self.trainer.train_loop.automatic_optimization:
# backward pass
with self.trainer.profiler.profile("model_backward"):
self.backward(result, optimizer, opt_idx)
# hook - call this hook only
# when gradients have finished to accumulate
if not self.should_accumulate():
self.on_after_backward(result.training_step_output, batch_idx, result.loss)
# check if loss or model weights are nan
if self.trainer.terminate_on_nan:
self.trainer.detect_nan_tensors(result.loss)
if len(self.trainer.optimizers) > 1:
# revert back to previous state
self.trainer.get_model().untoggle_optimizer(opt_idx)
return result
def backward(self, result, optimizer, opt_idx, *args, **kwargs):
self.trainer.dev_debugger.track_event("backward_call")
should_accumulate = self.should_accumulate()
# backward can be called manually in the training loop
if isinstance(result, torch.Tensor):
self.trainer.accelerator_backend.backward(result, optimizer, opt_idx, should_accumulate, *args, **kwargs)
else:
result.closure_loss = self.trainer.accelerator_backend.backward(
result.closure_loss, optimizer, opt_idx, should_accumulate, *args, **kwargs
)
if not self.should_accumulate():
# track gradients
self.track_and_norm_grad(optimizer=optimizer)
def update_train_loop_lr_schedulers(self, monitor_metrics=None):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
if num_accumulated_batches_reached or num_training_batches_reached:
# update lr
self.trainer.optimizer_connector.update_learning_rates(interval="step", monitor_metrics=monitor_metrics)
def run_on_epoch_end_hook(self, epoch_output):
# inform logger the batch loop has finished
self.trainer.logger_connector.on_train_epoch_end()
self.trainer.call_hook('on_train_epoch_end', epoch_output)
self.trainer.call_hook('on_epoch_end')
def increment_accumulated_grad_global_step(self):
num_accumulated_batches_reached = self._accumulated_batches_reached()
num_training_batches_reached = self._num_training_batches_reached()
# progress global step according to grads progress
if num_accumulated_batches_reached or num_training_batches_reached:
self.trainer.global_step += 1
def _accumulated_batches_reached(self):
return (self.trainer.batch_idx + 1) % self.trainer.accumulate_grad_batches == 0
def _num_training_batches_reached(self, is_last_batch=False):
return (self.trainer.batch_idx + 1) == self.trainer.num_training_batches or is_last_batch
def should_accumulate(self):
# checks if backward or backward + optimizer step (via closure)
accumulation_done = self._accumulated_batches_reached()
is_final_batch = self._num_training_batches_reached()
return not (accumulation_done or is_final_batch)
def should_check_val_fx(self, batch_idx, is_last_batch, on_epoch=False):
# decide if we should run validation
is_val_check_batch = (batch_idx + 1) % self.trainer.val_check_batch == 0
is_val_check_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
can_check_val = self.trainer.enable_validation and is_val_check_epoch
is_last_batch_for_infinite_dataset = is_last_batch and self.trainer.val_check_batch == float("inf")
epoch_end_val_check = self.trainer.val_check_batch == self.trainer.num_training_batches
should_check_val = ((is_val_check_batch and epoch_end_val_check) or self.trainer.should_stop
or is_last_batch_for_infinite_dataset
) if on_epoch else (is_val_check_batch and not epoch_end_val_check)
return should_check_val and can_check_val
def build_train_args(self, batch, batch_idx, opt_idx, hiddens):
# enable not needing to add opt_idx to training_step
args = [batch, batch_idx]
if len(self.trainer.optimizers) > 1:
if self.trainer.has_arg("training_step", "optimizer_idx"):
args.append(opt_idx)
else:
num_opts = len(self.trainer.optimizers)
raise ValueError(
f"Your LightningModule defines {num_opts} optimizers but "
f'training_step is missing the "optimizer_idx" argument.'
)
# pass hiddens if using tbptt
if self.trainer.truncated_bptt_steps is not None:
args.append(hiddens)
return args
def save_loggers_on_train_batch_end(self):
# when loggers should save to disk
should_flush_logs = self.trainer.logger_connector.should_flush_logs
if should_flush_logs and self.trainer.is_global_zero and self.trainer.logger is not None:
self.trainer.logger.save()
def process_train_step_outputs(self, all_train_step_outputs, early_stopping_accumulator, checkpoint_accumulator):
"""
Figure out what needs to be tracked/logged at the end of the epoch
"""
# the training step outputs a list per optimizer. The list contains the outputs at each time step
# when no TBPTT is used, then the list has 1 item per batch
# when TBPTT IS used, then the list has n items (1 per time step)
batch_end_outputs = []
for optimizer_idx_outputs in all_train_step_outputs:
# extract one representative sample from each time step (1 if no tbptt) and 0th optimizer
if len(optimizer_idx_outputs) == 0:
continue
sample_output = optimizer_idx_outputs[-1]
# pull out callback info if available (ie: Results object)
if isinstance(sample_output, dict) and "early_stop_on" in sample_output:
early_stopping_accumulator.accumulate(sample_output["early_stop_on"])
if isinstance(sample_output, dict) and "checkpoint_on" in sample_output:
checkpoint_accumulator.accumulate(sample_output["checkpoint_on"])
batch_end_outputs.append(optimizer_idx_outputs)
return batch_end_outputs
def prepare_optimizers(self):
# in manual optimization we loop over all optimizers at once
optimizers = self.get_optimizers_iterable()
if not self.automatic_optimization:
optimizers = [optimizers[0]]
return optimizers
def run_train_split_start(self, split_idx, split_batch, opt_idx, optimizer):
# set split_idx to trainer for tracking
self.trainer.split_idx = split_idx
# make sure only the gradients of the current optimizer's parameters are calculated
# in the training step to prevent dangling gradients in multiple-optimizer setup.
if self.automatic_optimization and len(self.trainer.optimizers) > 1:
model = self.trainer.get_model()
model.toggle_optimizer(optimizer, opt_idx)
# use to track metrics internally
self.trainer.logger_connector.on_train_split_start(split_idx, opt_idx, split_batch)
def update_running_loss(self):
accumulated_loss = self.accumulated_loss.mean()
if accumulated_loss is not None:
# calculate running loss for display
self.running_loss.append(self.accumulated_loss.mean() * self.trainer.accumulate_grad_batches)
# reset for next set of accumulated grads
self.accumulated_loss.reset()
| [
"torch.cuda.empty_cache"
] | 1.4 | MasaYan24/pytorch-lightning | 046ac714f6955ed14b831657ea1b7b16bc28ac93 |
0.4 | """
Assorted utilities for working with neural networks in AllenNLP.
"""
# pylint: disable=too-many-lines
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar
import logging
import math
import warnings
import torch
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
T = TypeVar('T')
def has_tensor(obj) -> bool:
"""
Given a possibly complex data structure,
check if it has any torch.Tensors in it.
"""
if isinstance(obj, torch.Tensor):
return True
elif isinstance(obj, dict):
return any(has_tensor(value) for value in obj.values())
elif isinstance(obj, (list, tuple)):
return any(has_tensor(item) for item in obj)
else:
return False
def move_to_device(obj, cuda_device: int):
"""
Given a structure (possibly) containing Tensors on the CPU,
move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
"""
if cuda_device < 0 or not has_tensor(obj):
return obj
elif isinstance(obj, torch.Tensor):
return obj.cuda(cuda_device)
elif isinstance(obj, dict):
return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, cuda_device) for item in obj]
elif isinstance(obj, tuple):
return tuple([move_to_device(item, cuda_device) for item in obj])
else:
return obj
def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]],
remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]:
"""
Takes a list of tensor dictionaries, where each dictionary is assumed to have matching keys,
and returns a single dictionary with all tensors with the same key batched together.
Parameters
----------
tensor_dicts : ``List[Dict[str, torch.Tensor]]``
The list of tensor dictionaries to batch.
remove_trailing_dimension : ``bool``
If ``True``, we will check for a trailing dimension of size 1 on the tensors that are being
batched, and remove it if we find it.
"""
key_to_tensors: Dict[str, List[torch.Tensor]] = defaultdict(list)
for tensor_dict in tensor_dicts:
for key, tensor in tensor_dict.items():
key_to_tensors[key].append(tensor)
batched_tensors = {}
for key, tensor_list in key_to_tensors.items():
batched_tensor = torch.stack(tensor_list)
if remove_trailing_dimension and all(tensor.size(-1) == 1 for tensor in tensor_list):
batched_tensor = batched_tensor.squeeze(-1)
batched_tensors[key] = batched_tensor
return batched_tensors
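# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# batching two single-instance tensor dicts stacks matching keys along a new batch dimension.
def _demo_batch_tensor_dicts():
    instance_a = {"tokens": torch.tensor([1, 2, 3])}
    instance_b = {"tokens": torch.tensor([4, 5, 6])}
    batched = batch_tensor_dicts([instance_a, instance_b])
    assert batched["tokens"].shape == (2, 3)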
def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):
"""
Compute sequence lengths for each batch element in a tensor using a
binary mask.
Parameters
----------
mask : torch.Tensor, required.
A 2D binary mask of shape (batch_size, sequence_length) to
calculate the per-batch sequence lengths from.
Returns
-------
A torch.LongTensor of shape (batch_size,) representing the lengths
of the sequences in the batch.
"""
return mask.long().sum(-1)
def get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:
"""
Given a variable of shape ``(batch_size,)`` that represents the sequence lengths of each batch
element, this function returns a ``(batch_size, max_length)`` mask variable. For example, if
our input was ``[2, 2, 3]``, with a ``max_length`` of 4, we'd return
``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.
We require ``max_length`` here instead of just computing it from the input ``sequence_lengths``
because it lets us avoid finding the max, then copying that value from the GPU to the CPU so
that we can use it to construct a new tensor.
"""
# (batch_size, max_length)
ones = sequence_lengths.new_ones(sequence_lengths.size(0), max_length)
range_tensor = ones.cumsum(dim=1)
return (sequence_lengths.unsqueeze(1) >= range_tensor).long()
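# Editor's illustrative sketch (hypothetical helper, not part of the original module),
# reproducing the docstring example: lengths [2, 2, 3] with max_length 4 give a 3 x 4 mask.
def _demo_get_mask_from_sequence_lengths():
    lengths = torch.LongTensor([2, 2, 3])
    mask = get_mask_from_sequence_lengths(lengths, max_length=4)
    expected = torch.LongTensor([[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]])
    assert torch.equal(mask, expected)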
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
"""
Sort a batch first tensor by some specified lengths.
Parameters
----------
tensor : torch.FloatTensor, required.
A batch first Pytorch tensor.
sequence_lengths : torch.LongTensor, required.
A tensor representing the lengths of some dimension of the tensor which
we want to sort by.
Returns
-------
sorted_tensor : torch.FloatTensor
The original tensor sorted along the batch dimension with respect to sequence_lengths.
sorted_sequence_lengths : torch.LongTensor
The original sequence_lengths sorted by decreasing size.
restoration_indices : torch.LongTensor
Indices into the sorted_tensor such that
``sorted_tensor.index_select(0, restoration_indices) == original_tensor``
permutation_index : torch.LongTensor
The indices used to sort the tensor. This is useful if you want to sort many
tensors using the same ordering.
"""
if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
sorted_tensor = tensor.index_select(0, permutation_index)
index_range = sequence_lengths.new_tensor(torch.arange(0, len(sequence_lengths)))
# This is the equivalent of zipping with index, sorting by the original
# sequence lengths and returning the now sorted indices.
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
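# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# sorting a batch by decreasing length, then recovering the original order with
# restoration_indices as described in the docstring above.
def _demo_sort_batch_by_length():
    tensor = torch.randn(3, 5, 7)           # (batch, time, dim)
    lengths = torch.LongTensor([2, 5, 3])
    sorted_tensor, sorted_lengths, restoration, _ = sort_batch_by_length(tensor, lengths)
    assert sorted_lengths.tolist() == [5, 3, 2]
    assert torch.equal(sorted_tensor.index_select(0, restoration), tensor)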
def get_final_encoder_states(encoder_outputs: torch.Tensor,
mask: torch.Tensor,
bidirectional: bool = False) -> torch.Tensor:
"""
Given the output from a ``Seq2SeqEncoder``, with shape ``(batch_size, sequence_length,
encoding_dim)``, this method returns the final hidden state for each element of the batch,
giving a tensor of shape ``(batch_size, encoding_dim)``. This is not as simple as
``encoder_outputs[:, -1]``, because the sequences could have different lengths. We use the
mask (which has shape ``(batch_size, sequence_length)``) to find the final state for each batch
instance.
Additionally, if ``bidirectional`` is ``True``, we will split the final dimension of the
``encoder_outputs`` into two and assume that the first half is for the forward direction of the
encoder and the second half is for the backward direction. We will concatenate the last state
for each encoder dimension, giving ``encoder_outputs[:, -1, :encoding_dim/2]`` concated with
``encoder_outputs[:, 0, encoding_dim/2:]``.
"""
# These are the indices of the last words in the sequences (i.e. length sans padding - 1). We
# are assuming sequences are right padded.
# Shape: (batch_size,)
last_word_indices = mask.sum(1).long() - 1
batch_size, _, encoder_output_dim = encoder_outputs.size()
expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)
# Shape: (batch_size, 1, encoder_output_dim)
final_encoder_output = encoder_outputs.gather(1, expanded_indices)
final_encoder_output = final_encoder_output.squeeze(1) # (batch_size, encoder_output_dim)
if bidirectional:
final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]
final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]
final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)
return final_encoder_output
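# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# picking the last non-padded state per sequence from a (batch, time, dim) encoder output.
def _demo_get_final_encoder_states():
    encoder_outputs = torch.randn(2, 4, 6)
    mask = torch.LongTensor([[1, 1, 1, 0], [1, 1, 0, 0]])
    final_states = get_final_encoder_states(encoder_outputs, mask)
    # Row 0 ends at timestep 2, row 1 at timestep 1.
    assert torch.equal(final_states[0], encoder_outputs[0, 2])
    assert torch.equal(final_states[1], encoder_outputs[1, 1])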
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
"""
Computes and returns an element-wise dropout mask for a given tensor, where
each element in the mask is dropped out with probability dropout_probability.
Note that the mask is NOT applied to the tensor - the tensor is passed to retain
the correct CUDA tensor type for the mask.
Parameters
----------
dropout_probability : float, required.
Probability of dropping a dimension of the input.
tensor_for_masking : torch.Tensor, required.
Returns
-------
A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
This scaling ensures expected values and variances of the output of applying this mask
and the original tensor are the same.
"""
binary_mask = tensor_for_masking.new_tensor(torch.rand(tensor_for_masking.size()) > dropout_probability)
# Scale mask by 1/keep_prob to preserve output statistics.
dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
return dropout_mask
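# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# the returned mask is scaled by 1 / (1 - p), so applying it keeps the expected value
# of the input unchanged (surviving entries become 2.0 here, dropped entries 0.0).
def _demo_get_dropout_mask():
    tensor = torch.ones(1000)
    dropout_mask = get_dropout_mask(0.5, tensor)
    dropped = tensor * dropout_mask
    assert ((dropped == 0.0) | (dropped == 2.0)).all()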
def masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
"""
``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, this function returns an array
of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model
that uses categorical cross-entropy loss.
"""
if mask is None:
result = torch.nn.functional.softmax(vector, dim=dim)
else:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
# To limit numerical errors from large vector elements outside the mask, we zero these out.
result = torch.nn.functional.softmax(vector * mask, dim=dim)
result = result * mask
result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
return result
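# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# masked positions get zero probability and the remaining entries renormalize to sum to 1.
def _demo_masked_softmax():
    vector = torch.tensor([[1.0, 2.0, 3.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0]])
    probs = masked_softmax(vector, mask)
    assert probs[0, 2].item() < 1e-6
    assert abs(probs.sum().item() - 1.0) < 1e-4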
def masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
"""
``torch.nn.functional.log_softmax(vector)`` does not work if some elements of ``vector`` should be
masked. This performs a log_softmax on just the non-masked portions of ``vector``. Passing
``None`` in for the mask is also acceptable; you'll just get a regular log_softmax.
``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
do it yourself before passing the mask into this function.
In the case that the input vector is completely masked, the return value of this function is
arbitrary, but not ``nan``. You should be masking the result of whatever computation comes out
of this in that case, anyway, so the specific values returned shouldn't matter. Also, the way
that we deal with this case relies on having single-precision floats; mixing half-precision
floats with fully-masked vectors will likely give you ``nans``.
If your logits are all extremely negative (i.e., the max value in your logit vector is -50 or
lower), the way we handle masking here could mess you up. But if you've got logit values that
extreme, you've got bigger problems than this.
"""
if mask is not None:
mask = mask.float()
while mask.dim() < vector.dim():
mask = mask.unsqueeze(1)
# vector + mask.log() is an easy way to zero out masked elements in logspace, but it
# results in nans when the whole vector is masked. We need a very small value instead of a
# zero in the mask for these cases. log(1 + 1e-45) is still basically 0, so we can safely
# just add 1e-45 before calling mask.log(). We use 1e-45 because 1e-46 is so small it
# becomes 0 - this is just the smallest value we can actually use.
vector = vector + (mask + 1e-45).log()
return torch.nn.functional.log_softmax(vector, dim=dim)
def masked_max(vector: torch.Tensor,
mask: torch.Tensor,
dim: int,
keepdim: bool = False,
min_val: float = -1e7) -> torch.Tensor:
"""
To calculate max along certain dimensions on masked values
Parameters
----------
vector : ``torch.Tensor``
The vector to calculate max, assume unmasked parts are already zeros
mask : ``torch.Tensor``
The mask of the vector. It must be broadcastable with vector.
dim : ``int``
The dimension to calculate max
keepdim : ``bool``
Whether to keep dimension
min_val : ``float``
The minimal value for paddings
Returns
-------
A ``torch.Tensor`` containing the maximum values.
"""
one_minus_mask = (1.0 - mask).byte()
replaced_vector = vector.masked_fill(one_minus_mask, min_val)
max_value, _ = replaced_vector.max(dim=dim, keepdim=keepdim)
return max_value
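# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# the max is taken only over unmasked positions, so the padded 9.0 below is ignored.
def _demo_masked_max():
    vector = torch.tensor([[1.0, 5.0, 9.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0]])
    assert masked_max(vector, mask, dim=-1).item() == 5.0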
def masked_mean(vector: torch.Tensor,
mask: torch.Tensor,
dim: int,
keepdim: bool = False,
eps: float = 1e-8) -> torch.Tensor:
"""
To calculate mean along certain dimensions on masked values
Parameters
----------
vector : ``torch.Tensor``
The vector to calculate mean.
mask : ``torch.Tensor``
The mask of the vector. It must be broadcastable with vector.
dim : ``int``
The dimension to calculate mean
keepdim : ``bool``
Whether to keep dimension
eps : ``float``
A small value to avoid zero division problem.
Returns
-------
A ``torch.Tensor`` containing the mean values.
"""
one_minus_mask = (1.0 - mask).byte()
replaced_vector = vector.masked_fill(one_minus_mask, 0.0)
value_sum = torch.sum(replaced_vector, dim=dim, keepdim=keepdim)
value_count = torch.sum(mask.float(), dim=dim, keepdim=keepdim)
return value_sum / value_count.clamp(min=eps)
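# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# the mean is computed over unmasked positions only (here (1 + 5) / 2, not (1 + 5 + 9) / 3).
def _demo_masked_mean():
    vector = torch.tensor([[1.0, 5.0, 9.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0]])
    assert masked_mean(vector, mask, dim=-1).item() == 3.0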
def viterbi_decode(tag_sequence: torch.Tensor,
transition_matrix: torch.Tensor,
tag_observations: Optional[List[int]] = None):
"""
Perform Viterbi decoding in log space over a sequence given a transition matrix
specifying pairwise (transition) potentials between tags and a matrix of shape
(sequence_length, num_tags) specifying unary potentials for possible tags per
timestep.
Parameters
----------
tag_sequence : torch.Tensor, required.
A tensor of shape (sequence_length, num_tags) representing scores for
a set of tags over a given sequence.
transition_matrix : torch.Tensor, required.
A tensor of shape (num_tags, num_tags) representing the binary potentials
for transitioning between a given pair of tags.
tag_observations : Optional[List[int]], optional, (default = None)
A list of length ``sequence_length`` containing the class ids of observed
elements in the sequence, with unobserved elements being set to -1. Note that
it is possible to provide evidence which results in degenerate labellings if
the sequences of tags you provide as evidence cannot transition between each
other, or those transitions are extremely unlikely. In this situation we log a
warning, but the responsibility for providing self-consistent evidence ultimately
lies with the user.
Returns
-------
viterbi_path : List[int]
The tag indices of the maximum likelihood tag sequence.
viterbi_score : torch.Tensor
The score of the viterbi path.
"""
sequence_length, num_tags = list(tag_sequence.size())
if tag_observations:
if len(tag_observations) != sequence_length:
raise ConfigurationError("Observations were provided, but they were not the same length "
"as the sequence. Found sequence of length: {} and evidence: {}"
.format(sequence_length, tag_observations))
else:
tag_observations = [-1 for _ in range(sequence_length)]
path_scores = []
path_indices = []
if tag_observations[0] != -1:
one_hot = torch.zeros(num_tags)
one_hot[tag_observations[0]] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[0, :])
# Evaluate the scores for all possible paths.
for timestep in range(1, sequence_length):
# Add pairwise potentials to current scores.
summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix
scores, paths = torch.max(summed_potentials, 0)
# If we have an observation for this timestep, use it
# instead of the distribution over tags.
observation = tag_observations[timestep]
# Warn the user if they have passed
# invalid/extremely unlikely evidence.
if tag_observations[timestep - 1] != -1:
if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
logger.warning("The pairwise potential between tags you have passed as "
"observations is extremely unlikely. Double check your evidence "
"or transition potentials!")
if observation != -1:
one_hot = torch.zeros(num_tags)
one_hot[observation] = 100000.
path_scores.append(one_hot)
else:
path_scores.append(tag_sequence[timestep, :] + scores.squeeze())
path_indices.append(paths.squeeze())
# Construct the most likely sequence backwards.
viterbi_score, best_path = torch.max(path_scores[-1], 0)
viterbi_path = [int(best_path.numpy())]
for backward_timestep in reversed(path_indices):
viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))
# Reverse the backward path.
viterbi_path.reverse()
return viterbi_path, viterbi_score
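# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# with a transition matrix that heavily penalizes staying in the same tag, the best path
# alternates tags even though tag 0 has the higher unary score at every timestep.
def _demo_viterbi_decode():
    tag_sequence = torch.tensor([[2.0, 1.0],
                                 [2.0, 1.0],
                                 [2.0, 1.0]])
    transition_matrix = torch.tensor([[-10000.0, 0.0],
                                      [0.0, -10000.0]])
    path, _ = viterbi_decode(tag_sequence, transition_matrix)
    assert path == [0, 1, 0]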
def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor],
num_wrapping_dims: int = 0) -> torch.LongTensor:
"""
Takes the dictionary of tensors produced by a ``TextField`` and returns a mask
with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields``
wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields``
is given by ``num_wrapping_dims``.
If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``.
If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra
dimensions, so the shape will be ``(batch_size, ..., num_tokens)``.
There could be several entries in the tensor dictionary with different shapes (e.g., one for
word ids, one for character ids). In order to get a token mask, we use the tensor in
the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``,
if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``,
and use it for the mask. If instead it has three dimensions, we assume it has shape
``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce
the mask. Most frequently this will be a character id tensor, but it could also be a
featurized representation of each token, etc.
If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask.
TODO(joelgrus): can we change this?
NOTE: Our functions for generating masks create torch.LongTensors, because using
torch.ByteTensors makes it easy to run into overflow errors
when doing mask manipulation, such as summing to get the lengths of sequences - see below.
>>> mask = torch.ones([260]).byte()
>>> mask.sum() # equals 260.
>>> var_mask = torch.autograd.Variable(mask)
>>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
"""
if "mask" in text_field_tensors:
return text_field_tensors["mask"]
tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()]
tensor_dims.sort(key=lambda x: x[0])
smallest_dim = tensor_dims[0][0] - num_wrapping_dims
if smallest_dim == 2:
token_tensor = tensor_dims[0][1]
return (token_tensor != 0).long()
elif smallest_dim == 3:
character_tensor = tensor_dims[0][1]
return ((character_tensor > 0).long().sum(dim=-1) > 0).long()
else:
raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
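# Editor's illustrative sketch (hypothetical helper, not part of the original module):
# a 2D word-id tensor yields the token-level mask directly, while a 3D character-id
# tensor would be reduced over its last (character) dimension first.
def _demo_get_text_field_mask():
    word_ids = torch.LongTensor([[3, 5, 0, 0]])                   # 0 is padding
    char_ids = torch.LongTensor([[[2, 4], [7, 0], [0, 0], [0, 0]]])
    mask = get_text_field_mask({"tokens": word_ids, "chars": char_ids})
    assert mask.tolist() == [[1, 1, 0, 0]]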
def last_dim_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Takes a tensor with 3 or more dimensions and does a masked softmax over the last dimension. We
assume the tensor has shape ``(batch_size, ..., sequence_length)`` and that the mask (if given)
has shape ``(batch_size, sequence_length)``.
.. deprecated:: 0.6.1
``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` in version
0.6.1. It will be removed in version 0.8.
"""
warnings.warn("``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` "
"in version 0.6.1. It will be removed in version 0.8.", DeprecationWarning)
return masked_softmax(tensor, mask, dim=-1)
def last_dim_log_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""
Takes a tensor with 3 or more dimensions and does a masked log softmax over the last dimension.
We assume the tensor has shape ``(batch_size, ..., sequence_length)`` and that the mask (if given)
has shape ``(batch_size, sequence_length)``.
.. deprecated:: 0.6.1
``last_dim_log_softmax`` was deprecated in favor of just using ``masked_log_softmax`` in
version 0.6.1. It will be removed in version 0.8.
"""
warnings.warn("``last_dim_log_softmax`` was deprecated in favor of just using "
"``masked_log_softmax`` in version 0.6.1. It will be removed in version 0.8.",
DeprecationWarning)
return masked_log_softmax(tensor, mask, dim=-1)
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
"""
Takes a matrix of vectors and a set of weights over the rows in the matrix (which we call an
"attention" vector), and returns a weighted sum of the rows in the matrix. This is the typical
computation performed after an attention mechanism.
Note that while we call this a "matrix" of vectors and an attention "vector", we also handle
higher-order tensors. We always sum over the second-to-last dimension of the "matrix", and we
assume that all dimensions in the "matrix" prior to the last dimension are matched in the
"vector". Non-matched dimensions in the "vector" must be `directly after the batch dimension`.
For example, say I have a "matrix" with dimensions ``(batch_size, num_queries, num_words,
embedding_dim)``. The attention "vector" then must have at least those dimensions, and could
have more. Both:
- ``(batch_size, num_queries, num_words)`` (distribution over words for each query)
- ``(batch_size, num_documents, num_queries, num_words)`` (distribution over words in a
query for each document)
are valid input "vectors", producing tensors of shape:
``(batch_size, num_queries, embedding_dim)`` and
``(batch_size, num_documents, num_queries, embedding_dim)`` respectively.
"""
# We'll special-case a few settings here, where there are efficient (but poorly-named)
# operations in pytorch that already do the computation we need.
if attention.dim() == 2 and matrix.dim() == 3:
return attention.unsqueeze(1).bmm(matrix).squeeze(1)
if attention.dim() == 3 and matrix.dim() == 3:
return attention.bmm(matrix)
if matrix.dim() - 1 < attention.dim():
expanded_size = list(matrix.size())
for i in range(attention.dim() - matrix.dim() + 1):
matrix = matrix.unsqueeze(1)
expanded_size.insert(i + 1, attention.size(i + 1))
matrix = matrix.expand(*expanded_size)
intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
return intermediate.sum(dim=-2)
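# --- Illustrative usage sketch (added commentary, not from the original source;
# shapes are assumptions chosen only to show the typical attention use case) ---
#     attention = torch.softmax(torch.randn(2, 4, 7), dim=-1)   # (batch, queries, words)
#     matrix = torch.randn(2, 7, 16)                            # (batch, words, dim)
#     weighted_sum(matrix, attention).shape                     # torch.Size([2, 4, 16])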
def sequence_cross_entropy_with_logits(logits: torch.FloatTensor,
targets: torch.LongTensor,
weights: torch.FloatTensor,
batch_average: bool = None,
average: str = "batch",
label_smoothing: float = None) -> torch.FloatTensor:
"""
Computes the cross entropy loss of a sequence, weighted with respect to
some user provided weights. Note that the weighting here is not the same as
in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
classes; here we are weighting the loss contribution from particular elements
in the sequence. This allows loss computations for models which use padding.
Parameters
----------
logits : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
which contains the unnormalized probability for each class.
targets : ``torch.LongTensor``, required.
A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
index of the true class for each corresponding step.
weights : ``torch.FloatTensor``, required.
A ``torch.FloatTensor`` of size (batch, sequence_length)
batch_average : bool, optional, (default = None).
A bool indicating whether the loss should be averaged across the batch,
or returned as a vector of losses per batch element.
.. deprecated:: 0.6.2
``batch_average`` was deprecated and replaced with
the more general ``average`` in version 0.6.2. It will be removed
in version 0.8.
average: str, optional (default = "batch")
If "batch", average the loss across the batches. If "token", average
the loss across each item in the input. If ``None``, return a vector
of losses per batch element.
label_smoothing : ``float``, optional (default = None)
Whether or not to apply label smoothing to the cross-entropy loss.
For example, with a label smoothing value of 0.2, a 4 class classification
target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
the correct label.
Returns
-------
A torch.FloatTensor representing the cross entropy loss.
If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
If ``average is None``, the returned loss is a vector of shape (batch_size,).
"""
if batch_average is not None:
# Maintain old behavior
if batch_average:
warnings.warn("batch_average=True was deprecated and replaced "
"with average='batch' in version 0.6.2. It will be "
"removed in version 0.8.", DeprecationWarning)
average = "batch"
else:
warnings.warn("batch_average=False was deprecated and replaced "
"with average=None in version 0.6.2. It will be "
"removed in version 0.8.", DeprecationWarning)
average = None
if average not in {None, "token", "batch"}:
raise ValueError("Got average f{average}, expected one of "
"None, 'token', or 'batch'")
# shape : (batch * sequence_length, num_classes)
logits_flat = logits.view(-1, logits.size(-1))
# shape : (batch * sequence_length, num_classes)
log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
# shape : (batch * max_len, 1)
targets_flat = targets.view(-1, 1).long()
if label_smoothing is not None and label_smoothing > 0.0:
num_classes = logits.size(-1)
smoothing_value = label_smoothing / num_classes
# Fill all the correct indices with 1 - smoothing value.
one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
smoothed_targets = one_hot_targets + smoothing_value
negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
else:
# Contribution to the negative log likelihood only comes from the exact indices
# of the targets, as the target distributions are one-hot. Here we use torch.gather
# to extract the indices of the num_classes dimension which contribute to the loss.
# shape : (batch * sequence_length, 1)
negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
# shape : (batch, sequence_length)
negative_log_likelihood = negative_log_likelihood * weights.float()
if average == "batch":
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)
return per_batch_loss.sum() / num_non_empty_sequences
elif average == "token":
return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)
else:
# shape : (batch_size,)
per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
return per_batch_loss
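# --- Illustrative usage sketch (added commentary; shapes and the 0/1 weight
# mask are assumptions for illustration) ---
#     logits = torch.randn(2, 5, 4)                  # (batch, seq_len, num_classes)
#     targets = torch.randint(0, 4, (2, 5))          # (batch, seq_len)
#     weights = torch.tensor([[1, 1, 1, 0, 0],
#                             [1, 1, 1, 1, 1]]).float()
#     loss = sequence_cross_entropy_with_logits(logits, targets, weights)  # scalar (average="batch")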
def replace_masked_values(tensor: torch.Tensor, mask: torch.Tensor, replace_with: float) -> torch.Tensor:
"""
Replaces all masked values in ``tensor`` with ``replace_with``. ``mask`` must be broadcastable
to the same shape as ``tensor``. We require that ``tensor.dim() == mask.dim()``, as otherwise we
won't know which dimensions of the mask to unsqueeze.
This just does ``tensor.masked_fill()``, except the pytorch method fills in things with a mask
value of 1, where we want the opposite. You can do this in your own code with
``tensor.masked_fill((1 - mask).byte(), replace_with)``.
"""
if tensor.dim() != mask.dim():
raise ConfigurationError("tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim()))
return tensor.masked_fill((1 - mask).byte(), replace_with)
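# --- Illustrative usage sketch (added commentary; values are assumptions) ---
# Typical use: push padded positions to a very negative value before a max.
#     vector = torch.tensor([[1.0, 2.0, 3.0]])
#     mask = torch.tensor([[1, 1, 0]])
#     replace_masked_values(vector, mask, -1e7)      # tensor([[1., 2., -10000000.]])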
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
"""
A check for tensor equality (by value). We make sure that the tensors have the same shape,
then check all of the entries in the tensor for equality. We additionally allow the input
tensors to be lists or dictionaries, where we then do the above check on every position in the
list / item in the dictionary. If we find objects that aren't tensors as we're doing that, we
just defer to their equality check.
This is kind of a catch-all method that's designed to make implementing ``__eq__`` methods
easier, in a way that's really only intended to be useful for tests.
"""
# pylint: disable=too-many-return-statements
if isinstance(tensor1, (list, tuple)):
if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
return False
return all([tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2)])
elif isinstance(tensor1, dict):
if not isinstance(tensor2, dict):
return False
if tensor1.keys() != tensor2.keys():
return False
return all([tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1])
elif isinstance(tensor1, torch.Tensor):
if not isinstance(tensor2, torch.Tensor):
return False
if tensor1.size() != tensor2.size():
return False
return ((tensor1 - tensor2).abs().float() < tolerance).all()
else:
try:
return tensor1 == tensor2
except RuntimeError:
print(type(tensor1), type(tensor2))
raise
def device_mapping(cuda_device: int):
"""
In order to `torch.load()` a GPU-trained model onto a CPU (or specific GPU),
you have to supply a `map_location` function. Call this with
the desired `cuda_device` to get the function that `torch.load()` needs.
"""
def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage: # pylint: disable=unused-argument
if cuda_device >= 0:
return storage.cuda(cuda_device)
else:
return storage
return inner_device_mapping
def combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
"""
Combines a list of tensors using element-wise operations and concatenation, specified by a
``combination`` string. The string refers to (1-indexed) positions in the input tensor list,
and looks like ``"1,2,1+2,3-1"``.
We allow the following kinds of combinations: ``x``, ``x*y``, ``x+y``, ``x-y``, and ``x/y``,
where ``x`` and ``y`` are positive integers less than or equal to ``len(tensors)``. Each of
the binary operations is performed elementwise. You can give as many combinations as you want
in the ``combination`` string. For example, for the input string ``"1,2,1*2"``, the result
would be ``[1;2;1*2]``, as you would expect, where ``[;]`` is concatenation along the last
dimension.
If you have a fixed, known way to combine tensors that you use in a model, you should probably
just use something like ``torch.cat([x_tensor, y_tensor, x_tensor * y_tensor])``. This
function adds some complexity that is only necessary if you want the specific combination used
to be `configurable`.
If you want to do any element-wise operations, the tensors involved in each element-wise
operation must have the same shape.
This function also accepts ``x`` and ``y`` in place of ``1`` and ``2`` in the combination
string.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
to_concatenate = [_get_combination(piece, tensors) for piece in combination.split(',')]
return torch.cat(to_concatenate, dim=-1)
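# --- Illustrative usage sketch (added commentary; shapes are assumptions) ---
# For combination="1,2,1*2" the output concatenates x, y and their element-wise
# product along the last axis, so the final dimension is 4 + 4 + 4 = 12 here.
#     x = torch.randn(2, 3, 4)
#     y = torch.randn(2, 3, 4)
#     combine_tensors("1,2,1*2", [x, y]).shape       # torch.Size([2, 3, 12])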
def _rindex(sequence: Sequence[T], obj: T) -> int:
"""
Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a
ValueError if there is no such item.
Parameters
----------
sequence : ``Sequence[T]``
obj : ``T``
Returns
-------
zero-based index associated to the position of the last item equal to obj
"""
for i in range(len(sequence) - 1, -1, -1):
if sequence[i] == obj:
return i
raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
def _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return tensors[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
return first_tensor * second_tensor
elif operation == '/':
return first_tensor / second_tensor
elif operation == '+':
return first_tensor + second_tensor
elif operation == '-':
return first_tensor - second_tensor
else:
raise ConfigurationError("Invalid operation: " + operation)
def combine_tensors_and_multiply(combination: str,
tensors: List[torch.Tensor],
weights: torch.nn.Parameter) -> torch.Tensor:
"""
Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`.
"""
if len(tensors) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
pieces = combination.split(',')
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far:(dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result
def _get_combination_and_multiply(combination: str,
tensors: List[torch.Tensor],
weight: torch.nn.Parameter) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return torch.matmul(tensors[index], weight)
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
return torch.matmul(intermediate, second_tensor.transpose(-1, -2)).squeeze(-1)
elif operation == '/':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
return torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2)).squeeze(-1)
elif operation == '+':
return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
elif operation == '-':
return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
else:
raise ConfigurationError("Invalid operation: " + operation)
def get_combined_dim(combination: str, tensor_dims: List[int]) -> int:
"""
For use with :func:`combine_tensors`. This function computes the resultant dimension when
calling ``combine_tensors(combination, tensors)``, when the tensor dimension is known. This is
necessary for knowing the sizes of weight matrices when building models that use
``combine_tensors``.
Parameters
----------
combination : ``str``
A comma-separated list of combination pieces, like ``"1,2,1*2"``, specified identically to
``combination`` in :func:`combine_tensors`.
tensor_dims : ``List[int]``
A list of tensor dimensions, where each dimension is from the `last axis` of the tensors
that will be input to :func:`combine_tensors`.
"""
if len(tensor_dims) > 9:
raise ConfigurationError("Double-digit tensor lists not currently supported")
combination = combination.replace('x', '1').replace('y', '2')
return sum([_get_combination_dim(piece, tensor_dims) for piece in combination.split(',')])
def _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:
if combination.isdigit():
index = int(combination) - 1
return tensor_dims[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)
second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)
operation = combination[1]
if first_tensor_dim != second_tensor_dim:
raise ConfigurationError("Tensor dims must match for operation \"{}\"".format(operation))
return first_tensor_dim
def logsumexp(tensor: torch.Tensor,
dim: int = -1,
keepdim: bool = False) -> torch.Tensor:
"""
A numerically stable computation of logsumexp. This is mathematically equivalent to
`tensor.exp().sum(dim, keep=keepdim).log()`. This function is typically used for summing log
probabilities.
Parameters
----------
tensor : torch.FloatTensor, required.
A tensor of arbitrary size.
dim : int, optional (default = -1)
The dimension of the tensor to apply the logsumexp to.
keepdim: bool, optional (default = False)
Whether to retain a dimension of size one at the dimension we reduce over.
"""
max_score, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - max_score
else:
stable_vec = tensor - max_score.unsqueeze(dim)
return max_score + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
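# --- Illustrative numerical sketch (added commentary; values are assumptions) ---
# With very negative log probabilities the naive exp-sum-log underflows, while
# this implementation stays finite.
#     log_probs = torch.tensor([-1000.0, -1000.0])
#     log_probs.exp().sum(-1).log()                  # tensor(-inf)
#     logsumexp(log_probs)                           # tensor(-999.3069)  (= -1000 + log 2)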
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def flatten_and_batch_shift_indices(indices: torch.Tensor,
sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for :func:`~batched_index_select`. The given ``indices`` of size
``(batch_size, d_1, ..., d_n)`` indexes into dimension 2 of a target tensor, which has size
``(batch_size, sequence_length, embedding_size)``. This function returns a vector that
correctly indexes into the flattened target. The sequence length of the target must be
provided to compute the appropriate offsets.
.. code-block:: python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
Parameters
----------
indices : ``torch.LongTensor``, required.
sequence_length : ``int``, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
Returns
-------
offset_indices : ``torch.LongTensor``
"""
# Shape: (batch_size)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
def batched_index_select(target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None) -> torch.Tensor:
"""
The given ``indices`` of size ``(batch_size, d_1, ..., d_n)`` indexes into the sequence
dimension (dimension 2) of the target, which has size ``(batch_size, sequence_length,
embedding_size)``.
This function returns selected values in the target with respect to the provided indices, which
have size ``(batch_size, d_1, ..., d_n, embedding_size)``. This can use the optionally
precomputed :func:`~flattened_indices` with size ``(batch_size * d_1 * ... * d_n)`` if given.
An example use case of this function is looking up the start and end indices of spans in a
sequence tensor. This is used in the
:class:`~allennlp.models.coreference_resolution.CoreferenceResolver` model to select
contextual word representations corresponding to the start and end indices of mentions. The key
reason this can't be done with basic torch functions is that we want to be able to use look-up
tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know
a-priori how many spans we are looking up).
Parameters
----------
target : ``torch.Tensor``, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : ``torch.LongTensor``
A tensor of shape (batch_size, ...), where each element is an index into the
``sequence_length`` dimension of the ``target`` tensor.
flattened_indices : Optional[torch.Tensor], optional (default = None)
An optional tensor representing the result of calling :func:~`flatten_and_batch_shift_indices`
on ``indices``. This is helpful in the case that the indices can be flattened once and
cached for many batch lookups.
Returns
-------
selected_targets : ``torch.Tensor``
A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
extracted from the batch flattened target tensor.
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
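# --- Illustrative usage sketch (added commentary; shapes are assumptions) ---
# Selecting span endpoint embeddings from a sequence tensor.
#     target = torch.randn(2, 10, 8)                 # (batch, seq_len, dim)
#     indices = torch.tensor([[[0, 3], [2, 5]],
#                             [[1, 1], [4, 9]]])     # (batch, num_spans, 2)
#     batched_index_select(target, indices).shape    # torch.Size([2, 2, 2, 8])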
def flattened_index_select(target: torch.Tensor,
indices: torch.LongTensor) -> torch.Tensor:
"""
The given ``indices`` of size ``(set_size, subset_size)`` specifies subsets of the ``target``
that each of the set_size rows should select. The `target` has size
``(batch_size, sequence_length, embedding_size)``, and the resulting selected tensor has size
``(batch_size, set_size, subset_size, embedding_size)``.
Parameters
----------
target : ``torch.Tensor``, required.
A Tensor of shape (batch_size, sequence_length, embedding_size).
indices : ``torch.LongTensor``, required.
A LongTensor of shape (set_size, subset_size). All indices must be < sequence_length
as this tensor is an index into the sequence_length dimension of the target.
Returns
-------
selected : ``torch.Tensor``, required.
A Tensor of shape (batch_size, set_size, subset_size, embedding_size).
"""
if indices.dim() != 2:
raise ConfigurationError("Indices passed to flattened_index_select had shape {} but "
"only 2 dimensional inputs are supported.".format(indices.size()))
# Shape: (batch_size, set_size * subset_size, embedding_size)
flattened_selected = target.index_select(1, indices.view(-1))
# Shape: (batch_size, set_size, subset_size, embedding_size)
selected = flattened_selected.view(target.size(0), indices.size(0), indices.size(1), -1)
return selected
def get_range_vector(size: int, device: int) -> torch.Tensor:
"""
Returns a range vector with the desired size, starting at 0. The CUDA implementation
is meant to avoid copying data from CPU to GPU.
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
def bucket_values(distances: torch.Tensor,
num_identity_buckets: int = 4,
num_total_buckets: int = 10) -> torch.Tensor:
"""
Places the given values (designed for distances) into ``num_total_buckets`` semi-logscale
buckets, with ``num_identity_buckets`` of these capturing single values.
The default settings will bucket values into the following buckets:
[0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].
Parameters
----------
distances : ``torch.Tensor``, required.
A Tensor of any size, to be bucketed.
num_identity_buckets: int, optional (default = 4).
The number of identity buckets (those only holding a single value).
num_total_buckets : int, (default = 10)
The total number of buckets to bucket values into.
Returns
-------
A tensor of the same shape as the input, containing the indices of the buckets
the values were placed in.
"""
# Chunk the values into semi-logscale buckets using .floor().
# This is a semi-logscale bucketing because we divide by log(2) after taking the log.
# We do this to make the buckets more granular in the initial range, where we expect
# most values to fall. We then add (num_identity_buckets - 1) because we want these indices
# to start _after_ the fixed number of buckets which we specified would only hold single values.
logspace_index = (distances.float().log() / math.log(2)).floor().long() + (num_identity_buckets - 1)
# create a mask for values which will go into single number buckets (i.e not a range).
use_identity_mask = (distances <= num_identity_buckets).long()
use_buckets_mask = 1 + (-1 * use_identity_mask)
# Use the original values if they are less than num_identity_buckets, otherwise
# use the logspace indices.
combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
# Clamp to put anything > num_total_buckets into the final bucket.
return combined_index.clamp(0, num_total_buckets - 1)
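# --- Illustrative usage sketch (added commentary; values are assumptions) ---
# With the default buckets [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+]:
#     distances = torch.tensor([0, 1, 4, 5, 7, 8, 70])
#     bucket_values(distances)                       # tensor([0, 1, 4, 5, 5, 6, 9])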
def add_sentence_boundary_token_ids(tensor: torch.Tensor,
mask: torch.Tensor,
sentence_begin_token: Any,
sentence_end_token: Any) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Add begin/end of sentence tokens to the batch of sentences.
Given a batch of sentences with size ``(batch_size, timesteps)`` or
``(batch_size, timesteps, dim)`` this returns a tensor of shape
``(batch_size, timesteps + 2)`` or ``(batch_size, timesteps + 2, dim)`` respectively.
Returns both the new tensor and updated mask.
Parameters
----------
tensor : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)`` or ``(batch_size, timesteps, dim)``
mask : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)``
sentence_begin_token: Any (anything that can be broadcast in torch for assignment)
For 2D input, a scalar with the <S> id. For 3D input, a tensor with length dim.
sentence_end_token: Any (anything that can be broadcast in torch for assignment)
For 2D input, a scalar with the </S> id. For 3D input, a tensor with length dim.
Returns
-------
tensor_with_boundary_tokens : ``torch.Tensor``
The tensor with the appended and prepended boundary tokens. If the input was 2D,
it has shape (batch_size, timesteps + 2) and if the input was 3D, it has shape
(batch_size, timesteps + 2, dim).
new_mask : ``torch.Tensor``
The new mask for the tensor, taking into account the appended tokens
marking the beginning and end of the sentence.
"""
# TODO: matthewp, profile this transfer
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] + 2
tensor_with_boundary_tokens = tensor.new_zeros(*new_shape)
if len(tensor_shape) == 2:
tensor_with_boundary_tokens[:, 1:-1] = tensor
tensor_with_boundary_tokens[:, 0] = sentence_begin_token
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, j + 1] = sentence_end_token
new_mask = (tensor_with_boundary_tokens != 0).long()
elif len(tensor_shape) == 3:
tensor_with_boundary_tokens[:, 1:-1, :] = tensor
for i, j in enumerate(sequence_lengths):
tensor_with_boundary_tokens[i, 0, :] = sentence_begin_token
tensor_with_boundary_tokens[i, j + 1, :] = sentence_end_token
new_mask = ((tensor_with_boundary_tokens > 0).long().sum(dim=-1) > 0).long()
else:
raise ValueError("add_sentence_boundary_token_ids only accepts 2D and 3D input")
return tensor_with_boundary_tokens, new_mask
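# --- Illustrative usage sketch (added commentary; token ids 1 and 2 standing in
# for <S> and </S> are assumptions for illustration) ---
#     tensor = torch.tensor([[5, 6, 0], [7, 0, 0]])  # (batch, timesteps), 0 = padding
#     mask = (tensor != 0).long()
#     new_tensor, new_mask = add_sentence_boundary_token_ids(tensor, mask, 1, 2)
#     # new_tensor: [[1, 5, 6, 2, 0], [1, 7, 2, 0, 0]]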
def remove_sentence_boundaries(tensor: torch.Tensor,
mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Remove begin/end of sentence embeddings from the batch of sentences.
Given a batch of sentences with size ``(batch_size, timesteps, dim)``
this returns a tensor of shape ``(batch_size, timesteps - 2, dim)`` after removing
the beginning and end sentence markers. The sentences are assumed to be padded on the right,
with the beginning of each sentence assumed to occur at index 0 (i.e., ``mask[:, 0]`` is assumed
to be 1).
Returns both the new tensor and updated mask.
This function is the inverse of ``add_sentence_boundary_token_ids``.
Parameters
----------
tensor : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps, dim)``
mask : ``torch.Tensor``
A tensor of shape ``(batch_size, timesteps)``
Returns
-------
tensor_without_boundary_tokens : ``torch.Tensor``
The tensor after removing the boundary tokens of shape ``(batch_size, timesteps - 2, dim)``
new_mask : ``torch.Tensor``
The new mask for the tensor of shape ``(batch_size, timesteps - 2)``.
"""
# TODO: matthewp, profile this transfer
sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
tensor_shape = list(tensor.data.shape)
new_shape = list(tensor_shape)
new_shape[1] = tensor_shape[1] - 2
tensor_without_boundary_tokens = tensor.new_zeros(*new_shape)
new_mask = tensor.new_zeros((new_shape[0], new_shape[1]), dtype=torch.long)
for i, j in enumerate(sequence_lengths):
if j > 2:
tensor_without_boundary_tokens[i, :(j - 2), :] = tensor[i, 1:(j - 1), :]
new_mask[i, :(j - 2)] = 1
return tensor_without_boundary_tokens, new_mask
def add_positional_features(tensor: torch.Tensor,
min_timescale: float = 1.0,
max_timescale: float = 1.0e4):
# pylint: disable=line-too-long
"""
Implements the frequency-based positional encoding described
in `Attention is all you Need
<https://www.semanticscholar.org/paper/Attention-Is-All-You-Need-Vaswani-Shazeer/0737da0767d77606169cbf4187b83e1ab62f6077>`_ .
Adds sinusoids of different frequencies to a ``Tensor``. A sinusoid of a
different frequency and phase is added to each dimension of the input ``Tensor``.
This allows the attention heads to use absolute and relative positions.
The number of timescales is equal to hidden_dim / 2 within the range
(min_timescale, max_timescale). For each timescale, the two sinusoidal
signals sin(timestep / timescale) and cos(timestep / timescale) are
generated and concatenated along the hidden_dim dimension.
Parameters
----------
tensor : ``torch.Tensor``
a Tensor with shape (batch_size, timesteps, hidden_dim).
min_timescale : ``float``, optional (default = 1.0)
The smallest timescale to use.
max_timescale : ``float``, optional (default = 1.0e4)
The largest timescale to use.
Returns
-------
The input tensor augmented with the sinusoidal frequencies.
"""
_, timesteps, hidden_dim = tensor.size()
timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()
# We're generating both cos and sin frequencies,
# so half for each.
num_timescales = hidden_dim // 2
timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()
log_timescale_increments = math.log(float(max_timescale) / float(min_timescale)) / float(num_timescales - 1)
inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)
# Broadcasted multiplication - shape (timesteps, num_timescales)
scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)
# shape (timesteps, 2 * num_timescales)
sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)
if hidden_dim % 2 != 0:
# if the number of dimensions is odd, the cos and sin
# timescales had size (hidden_dim - 1) / 2, so we need
# to add a row of zeros to make up the difference.
sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)
return tensor + sinusoids.unsqueeze(0)
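# --- Illustrative usage sketch (added commentary; shapes are assumptions) ---
# Adds the sinusoidal position signal from "Attention Is All You Need" to a
# batch of embeddings without changing their shape.
#     embeddings = torch.randn(2, 50, 64)            # (batch, timesteps, hidden_dim)
#     add_positional_features(embeddings).shape      # torch.Size([2, 50, 64])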
| [
"torch.zeros",
"torch.cos",
"torch.cat",
"torch.stack",
"torch.arange",
"torch.max",
"torch.gather",
"torch.sin",
"torch.cuda.LongTensor",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.matmul",
"torch.exp",
"torch.sum"
] | 0.4.1 | threefoldo/allennlp | 9fcc79566cc148cce9f967a7962ac03bc300f011 |
1.5 | # -*- coding: utf-8 -*-
# @Author: Wenwen Yu
# @Created Time: 7/12/2020 9:50 PM
import os
import numpy as np
from numpy import inf
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from src.utils import inf_loop
from src.utils.metrics import MetricTracker, SpanBasedF1MetricTracker
from torch.utils.tensorboard import SummaryWriter
# from src.logger import TensorboardWriter
from src.utils.utils import to_union
class Trainer:
"""
Trainer class
"""
def __init__(self, model, optimizer, config, data_loader, iob_labels_vocab_cls,
valid_data_loader=None, lr_scheduler=None, max_len_step=None):
"""
:param model:
:param optimizer:
:param config:
:param data_loader:
:param iob_labels_vocab_cls:
:param valid_data_loader:
:param lr_scheduler:
:param max_len_step: controls number of batches(steps) in each epoch.
"""
self.config = config
self.iob_labels_vocab_cls = iob_labels_vocab_cls
self.distributed = config['distributed']
if self.distributed:
self.local_master = (config['local_rank'] == 0)
self.global_master = (dist.get_rank() == 0)
else:
self.local_master = True
self.global_master = True
self.logger = config.get_logger('trainer', config['trainer']['log_verbosity']) if self.local_master else None
# setup GPU device if available, move model into configured device
self.device, self.device_ids = self._prepare_device(config['local_rank'], config['local_world_size'])
self.model = model.to(self.device)
self.optimizer = optimizer
cfg_trainer = config['trainer']
self.epochs = cfg_trainer['epochs']
self.save_period = cfg_trainer['save_period']
monitor_open = cfg_trainer['monitor_open']
if monitor_open:
self.monitor = cfg_trainer.get('monitor', 'off')
else:
self.monitor = 'off'
# configuration to monitor model performance and save best
if self.monitor == 'off':
self.monitor_mode = 'off'
self.monitor_best = 0
else:
self.monitor_mode, self.monitor_metric = self.monitor.split()
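# Added note (the concrete metric key is an assumption, not from the original
# config): a monitor string such as 'max overall-mEF' splits here into
# monitor_mode='max' and monitor_metric='overall-mEF'; the metric part is later
# split on '-' into an entity name and a metric key in _is_best_monitor_metric.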
assert self.monitor_mode in ['min', 'max']
self.monitor_best = inf if self.monitor_mode == 'min' else -inf
self.early_stop = cfg_trainer.get('early_stop', inf)
self.early_stop = inf if self.early_stop == -1 else self.early_stop
self.start_epoch = 1
if self.local_master:
self.checkpoint_dir = config.save_dir
# setup visualization writer instance
# self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])
self.writer = SummaryWriter(config.tensorboard_dir)
# load checkpoint for resume training
if config.resume is not None:
self._resume_checkpoint(config.resume)
# load checkpoint following load to multi-gpu, avoid 'module.' prefix
if self.config['trainer']['sync_batch_norm'] and self.distributed:
self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
if self.distributed:
self.model = DDP(self.model, device_ids=self.device_ids, output_device=self.device_ids[0],
find_unused_parameters=True)
self.data_loader = data_loader
if max_len_step is None: # max length of iteration step of every epoch
# epoch-based training
self.len_step = len(self.data_loader)
else:
# iteration-based training
self.data_loader = inf_loop(data_loader)
self.len_step = max_len_step
self.valid_data_loader = valid_data_loader
self.do_validation = self.valid_data_loader is not None
self.lr_scheduler = lr_scheduler
log_step = self.config['trainer']['log_step_interval']
self.log_step = log_step if log_step != -1 and 0 < log_step < self.len_step else int(
np.sqrt(data_loader.batch_size))
self.val_epoch_interval = self.config['trainer']['val_epoch_interval']
self.gl_loss_lambda = self.config['trainer']['gl_loss_lambda']
self.train_loss_metrics = MetricTracker('loss', 'gl_loss', 'crf_loss',
writer=self.writer if self.local_master else None)
self.valid_f1_metrics = SpanBasedF1MetricTracker(iob_labels_vocab_cls)
def train(self):
"""
Full training logic, including train and validation.
"""
if self.distributed:
dist.barrier() # Syncing machines before training
not_improved_count = 0
val_result_dict = None
if self.config['evaluate_only']:
print("------Evaluation only------")
val_result_dict = self._valid_epoch(0)
val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)
self.logger_info('[Step Validation] Epoch:[{}/{}] \n{}'.
format(0, self.epochs, val_res))
return
for epoch in range(self.start_epoch, self.epochs + 1):
# ensure distribute worker sample different data,
# set different random seed by passing epoch to sampler
if self.distributed:
self.data_loader.sampler.set_epoch(epoch)
result_dict = self._train_epoch(epoch)
# print logged information to the screen
if self.do_validation:
val_result_dict = result_dict['val_result_dict']
val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)
else:
val_res = ''
# every epoch log information
self.logger_info('[Epoch Validation] Epoch:[{}/{}] Total Loss: {:.6f} '
'GL_Loss: {:.6f} CRF_Loss: {:.6f} \n{}'.
format(epoch, self.epochs, result_dict['loss'],
result_dict['gl_loss'] * self.gl_loss_lambda,
result_dict['crf_loss'], val_res))
# evaluate model performance according to configured metric, check early stop, and
# save best checkpoint as model_best
best = False
if self.monitor_mode != 'off' and self.do_validation:
best, not_improved_count = self._is_best_monitor_metric(best, not_improved_count, val_result_dict)
if not_improved_count > self.early_stop:
self.logger_info("Validation performance didn't improve for {} epochs. "
"Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0:
self._save_checkpoint(epoch, save_best=best)
def _is_best_monitor_metric(self, best, not_improved_count, val_result_dict):
"""
monitor metric
:param best:
:param not_improved_count:
:param val_result_dict:
:return:
"""
entity_name, metric = self.monitor_metric.split('-')
val_monitor_metric_res = val_result_dict[entity_name][metric]
try:
# check whether model performance improved or not, according to specified metric(monitor_metric)
improved = (self.monitor_mode == 'min' and val_monitor_metric_res <= self.monitor_best) or \
(self.monitor_mode == 'max' and val_monitor_metric_res >= self.monitor_best)
except KeyError:
self.logger_warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.monitor_metric))
self.monitor_mode = 'off'
improved = False
if improved:
self.monitor_best = val_monitor_metric_res
not_improved_count = 0
best = True
else:
not_improved_count += 1
return best, not_improved_count
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log dict that contains average loss and metric in this epoch.
"""
self.model.train()
self.train_loss_metrics.reset()
# step iteration start ##
for step_idx, input_data_item in enumerate(self.data_loader):
step_idx += 1
for key, input_value in input_data_item.items():
if input_value is not None and isinstance(input_value, torch.Tensor):
input_data_item[key] = input_value.to(self.device, non_blocking=True)
if self.config['trainer']['anomaly_detection']:
# This mode will increase the runtime and should only be enabled for debugging
with torch.autograd.detect_anomaly():
self.optimizer.zero_grad()
# model forward
output = self.model(**input_data_item)
# calculate loss
gl_loss = output['gl_loss']
crf_loss = output['crf_loss']
total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)
# backward
total_loss.backward()
# self.average_gradients(self.model)
self.optimizer.step()
else:
self.optimizer.zero_grad()
# model forward
output = self.model(**input_data_item)
# calculate loss
gl_loss = output['gl_loss']
crf_loss = output['crf_loss']
total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)
# backward
total_loss.backward()
# self.average_gradients(self.model)
self.optimizer.step()
# Use a barrier() to make sure that all process have finished forward and backward
if self.distributed:
dist.barrier()
# obtain the sum of all total_loss at all processes
dist.all_reduce(total_loss, op=dist.reduce_op.SUM)
size = dist.get_world_size()
else:
size = 1
gl_loss /= size # averages gl_loss across the whole world
crf_loss /= size # averages crf_loss across the whole world
# calculate average loss across the batch size
avg_gl_loss = torch.mean(gl_loss)
avg_crf_loss = torch.mean(crf_loss)
avg_loss = avg_crf_loss + self.gl_loss_lambda * avg_gl_loss
# update metrics
# self.writer.set_step((epoch - 1) * self.len_step + step_idx - 1) if self.local_master else None
self.train_loss_metrics.update('loss', avg_loss.item(), epoch)
self.train_loss_metrics.update('gl_loss', avg_gl_loss.item() * self.gl_loss_lambda, epoch)
self.train_loss_metrics.update('crf_loss', avg_crf_loss.item(), epoch)
# log messages
if step_idx % self.log_step == 0:
self.logger_info('Train Epoch:[{}/{}] Step:[{}/{}] Total Loss: {:.6f} GL_Loss: {:.6f} CRF_Loss: {:.6f}'.
format(epoch, self.epochs, step_idx, self.len_step,
avg_loss.item(), avg_gl_loss.item() * self.gl_loss_lambda, avg_crf_loss.item()))
# decide whether continue iter
if step_idx == self.len_step + 1:
break
# step iteration end ##
# do validation after val_step_interval iteration
if self.do_validation and epoch % self.val_epoch_interval == 0:
val_result_dict = self._valid_epoch(epoch)
self.logger_info('[Step Validation] Epoch:[{}/{}] \n{}'.
format(epoch, self.epochs,
SpanBasedF1MetricTracker.dict2str(val_result_dict)))
# check if best metric, if true, then save as model_best checkpoint.
best, not_improved_count = self._is_best_monitor_metric(False, 0, val_result_dict)
if best:
self._save_checkpoint(epoch, best)
# {'loss': avg_loss, 'gl_loss': avg_gl_loss, 'crf_loss': avg_crf_loss}
log = self.train_loss_metrics.result()
# do validation after training an epoch
if self.do_validation:
val_result_dict = self._valid_epoch(epoch)
log['val_result_dict'] = val_result_dict
if self.lr_scheduler is not None:
self.lr_scheduler.step()
self.model.train()
return log
def _valid_epoch(self, epoch):
"""
Validate after training an epoch or regular step, this is a time-consuming procedure if validation data is big.
:param epoch: Integer, current training epoch.
:return: A dict that contains information about validation
"""
self.model.eval()
self.valid_f1_metrics.reset()
with torch.no_grad():
for step_idx, input_data_item in enumerate(self.valid_data_loader):
for key, input_value in input_data_item.items():
if input_value is not None and isinstance(input_value, torch.Tensor):
input_data_item[key] = input_value.to(self.device, non_blocking=True)
output = self.model(**input_data_item)
logits = output['logits']
new_mask = output['new_mask']
if hasattr(self.model, 'module'):
# List[(List[int], torch.Tensor)] contain the tag indices of the maximum likelihood tag sequence.
# and the score of the viterbi path.
best_paths = self.model.module.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,
logits_batch_first=True)
else:
best_paths = self.model.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,
logits_batch_first=True)
predicted_tags = []
for path, score in best_paths:
predicted_tags.append(path)
# self.writer.set_step((epoch - 1) * len(self.valid_data_loader) + step_idx, 'valid') \
# if self.local_master else None
# calculate and update f1 metrics
# (B, N*T, out_dim)
predicted_tags_hard_prob = logits * 0
for i, instance_tags in enumerate(predicted_tags):
for j, tag_id in enumerate(instance_tags):
predicted_tags_hard_prob[i, j, tag_id] = 1
golden_tags = input_data_item['iob_tags_label']
mask = input_data_item['mask']
union_iob_tags = to_union(golden_tags, mask, self.iob_labels_vocab_cls)
if self.distributed:
dist.barrier()
self.valid_f1_metrics.update(predicted_tags_hard_prob.long(), union_iob_tags, new_mask)
# add histogram of model parameters to the tensorboard
# for name, p in self.model.named_parameters():
# self.writer.add_histogram(name, p, bins='auto')
f1_result_dict = self.valid_f1_metrics.result()
overall_dict = f1_result_dict['overall']
if self.local_master:
for key, value in overall_dict.items():
self.writer.add_scalar(key, value, epoch)
return f1_result_dict
@staticmethod
def average_gradients(model):
"""
Gradient averaging
:param model:
:return:
"""
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size
def logger_info(self, msg):
self.logger.info(msg) if self.local_master else None
def logger_warning(self, msg):
self.logger.warning(msg) if self.local_master else None
def _prepare_device(self, local_rank, local_world_size):
"""
setup GPU device if available, move model into configured device
:param local_rank:
:param local_world_size:
:return:
"""
if self.distributed:
n_gpu_per_process = torch.cuda.device_count() // local_world_size
device_ids = list(range(local_rank * n_gpu_per_process, (local_rank + 1) * n_gpu_per_process))
if torch.cuda.is_available() and local_rank != -1:
torch.cuda.set_device(device_ids[0])  # device_ids[0] == local_rank when local_world_size == n_gpu per node
device = 'cuda'
self.logger_info(
f"[Process {os.getpid()}] world_size = {dist.get_world_size()}, "
+ f"rank = {dist.get_rank()}, n_gpu/process = {n_gpu_per_process}, device_ids = {device_ids}"
)
else:
self.logger_warning('Training will be using CPU!')
device = 'cpu'
device = torch.device(device)
return device, device_ids
else:
n_gpu = torch.cuda.device_count()
n_gpu_use = local_world_size
if n_gpu_use > 0 and n_gpu == 0:
self.logger_warning("Warning: There\'s no GPU available on this machine,"
"training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
self.logger_warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
"on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
list_ids = list(range(n_gpu_use))
if n_gpu_use > 0:
torch.cuda.set_device(list_ids[0]) # only use first available gpu as devices
self.logger_warning(f'Training is using GPU {list_ids[0]}!')
device = 'cuda'
else:
self.logger_warning('Training is using CPU!')
device = 'cpu'
device = torch.device(device)
return device, list_ids
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
:return:
"""
# only local master process do save model
if not self.local_master:
return
if hasattr(self.model, 'module'):
arch = type(self.model.module).__name__
state_dict = self.model.module.state_dict()
else:
arch = type(self.model).__name__
state_dict = self.model.state_dict()
state = {
'arch': arch,
'epoch': epoch,
'state_dict': state_dict,
'optimizer': self.optimizer.state_dict(),
'monitor_best': self.monitor_best,
'config': self.config
}
if save_best:
best_path = str(self.checkpoint_dir / 'model_best.pth')
torch.save(state, best_path)
self.logger_info("Saving current best: model_best.pth ...")
else:
filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
torch.save(state, filename)
self.logger_info("Saving checkpoint: {} ...".format(filename))
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
:return:
"""
resume_path = str(resume_path)
self.logger_info("Loading checkpoint: {} ...".format(resume_path))
# map_location = {'cuda:%d' % 0: 'cuda:%d' % self.config['local_rank']}
checkpoint = torch.load(resume_path, map_location=self.device)
self.start_epoch = checkpoint['epoch'] + 1
self.monitor_best = checkpoint['monitor_best']
# load architecture params from checkpoint.
if checkpoint['config']['model_arch'] != self.config['model_arch']:
self.logger_warning("Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
self.model.load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger_warning("Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger_info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
| [
"torch.distributed.get_world_size",
"torch.device",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.no_grad",
"torch.save",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.distributed.all_reduce",
"torch.load",
"torch.autograd.detect_anomaly",
"torch.mean",
"torch.distributed.get_rank",
"torch.distributed.barrier",
"torch.utils.tensorboard.SummaryWriter",
"torch.sum"
] | 1.5.1 | minhhoangbui/PICK-pytorch | c74d2d1e5d1f8c7e837ea9776146bc84a7ecf30a |
1.1 | #!/usr/bin/env python3
import argparse
import logging
from pathlib import Path
import sys
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Union
import humanfriendly
import numpy as np
import torch
from tqdm import trange
from typeguard import check_argument_types
from espnet.utils.cli_utils import get_commandline_args
from espnet2.fileio.sound_scp import SoundScpWriter
from espnet2.tasks.enh import EnhancementTask
from espnet2.torch_utils.device_funcs import to_device
from espnet2.torch_utils.set_all_random_seed import set_all_random_seed
from espnet2.utils import config_argparse
from espnet2.utils.types import str2bool
from espnet2.utils.types import str2triple_str
from espnet2.utils.types import str_or_none
EPS = torch.finfo(torch.get_default_dtype()).eps
class SeparateSpeech:
"""SeparateSpeech class
Examples:
>>> import soundfile
>>> separate_speech = SeparateSpeech("enh_config.yml", "enh.pth")
>>> audio, rate = soundfile.read("speech.wav")
>>> separate_speech(audio)
[separated_audio1, separated_audio2, ...]
"""
def __init__(
self,
enh_train_config: Union[Path, str],
enh_model_file: Union[Path, str] = None,
segment_size: Optional[float] = None,
hop_size: Optional[float] = None,
normalize_segment_scale: bool = False,
show_progressbar: bool = False,
ref_channel: Optional[int] = None,
normalize_output_wav: bool = False,
device: str = "cpu",
dtype: str = "float32",
):
assert check_argument_types()
# 1. Build Enh model
enh_model, enh_train_args = EnhancementTask.build_model_from_file(
enh_train_config, enh_model_file, device
)
enh_model.to(dtype=getattr(torch, dtype)).eval()
self.device = device
self.dtype = dtype
self.enh_train_args = enh_train_args
self.enh_model = enh_model
# only used when processing long speech, i.e.
# segment_size is not None and hop_size is not None
self.segment_size = segment_size
self.hop_size = hop_size
self.normalize_segment_scale = normalize_segment_scale
self.normalize_output_wav = normalize_output_wav
self.show_progressbar = show_progressbar
self.num_spk = enh_model.num_spk
task = "enhancement" if self.num_spk == 1 else "separation"
# reference channel for processing multi-channel speech
if ref_channel is not None:
logging.info(
"Overwrite enh_model.separator.ref_channel with {}".format(ref_channel)
)
enh_model.separator.ref_channel = ref_channel
self.ref_channel = ref_channel
else:
self.ref_channel = enh_model.ref_channel
self.segmenting = segment_size is not None and hop_size is not None
if self.segmenting:
logging.info("Perform segment-wise speech %s" % task)
logging.info(
"Segment length = {} sec, hop length = {} sec".format(
segment_size, hop_size
)
)
else:
logging.info("Perform direct speech %s on the input" % task)
@torch.no_grad()
def __call__(
self, speech_mix: Union[torch.Tensor, np.ndarray], fs: int = 8000
) -> List[torch.Tensor]:
"""Inference
Args:
speech_mix: Input speech data (Batch, Nsamples [, Channels])
fs: sample rate
Returns:
[separated_audio1, separated_audio2, ...]
"""
assert check_argument_types()
# Input as audio signal
if isinstance(speech_mix, np.ndarray):
speech_mix = torch.as_tensor(speech_mix)
assert speech_mix.dim() > 1, speech_mix.size()
batch_size = speech_mix.size(0)
speech_mix = speech_mix.to(getattr(torch, self.dtype))
# lengths: (B,)
lengths = speech_mix.new_full(
[batch_size], dtype=torch.long, fill_value=speech_mix.size(1)
)
# a. To device
speech_mix = to_device(speech_mix, device=self.device)
lengths = to_device(lengths, device=self.device)
if self.segmenting and lengths[0] > self.segment_size * fs:
# Segment-wise speech enhancement/separation
overlap_length = int(np.round(fs * (self.segment_size - self.hop_size)))
num_segments = int(
np.ceil((speech_mix.size(1) - overlap_length) / (self.hop_size * fs))
)
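# Added worked example (values are assumptions for illustration): with fs=8000,
# segment_size=2.4 and hop_size=0.8, overlap_length = 8000 * 1.6 = 12800 samples,
# and a 10 s mixture (80000 samples) gives ceil((80000 - 12800) / 6400) = 11 segments.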
t = T = int(self.segment_size * fs)
pad_shape = speech_mix[:, :T].shape
enh_waves = []
range_ = trange if self.show_progressbar else range
for i in range_(num_segments):
st = int(i * self.hop_size * fs)
en = st + T
if en >= lengths[0]:
# en - st < T (last segment)
en = lengths[0]
speech_seg = speech_mix.new_zeros(pad_shape)
t = en - st
speech_seg[:, :t] = speech_mix[:, st:en]
else:
t = T
speech_seg = speech_mix[:, st:en] # B x T [x C]
lengths_seg = speech_mix.new_full(
[batch_size], dtype=torch.long, fill_value=T
)
# b. Enhancement/Separation Forward
feats, f_lens = self.enh_model.encoder(speech_seg, lengths_seg)
feats, _, _ = self.enh_model.separator(feats, f_lens)
processed_wav = [
self.enh_model.decoder(f, lengths_seg)[0] for f in feats
]
if speech_seg.dim() > 2:
# multi-channel speech
speech_seg_ = speech_seg[:, self.ref_channel]
else:
speech_seg_ = speech_seg
if self.normalize_segment_scale:
# normalize the energy of each separated stream
# to match the input energy
processed_wav = [
self.normalize_scale(w, speech_seg_) for w in processed_wav
]
# List[torch.Tensor(num_spk, B, T)]
enh_waves.append(torch.stack(processed_wav, dim=0))
# c. Stitch the enhanced segments together
waves = enh_waves[0]
for i in range(1, num_segments):
# permutation between separated streams in last and current segments
perm = self.cal_permumation(
waves[:, :, -overlap_length:],
enh_waves[i][:, :, :overlap_length],
criterion="si_snr",
)
# repermute separated streams in current segment
for batch in range(batch_size):
enh_waves[i][:, batch] = enh_waves[i][perm[batch], batch]
if i == num_segments - 1:
enh_waves[i][:, :, t:] = 0
enh_waves_res_i = enh_waves[i][:, :, overlap_length:t]
else:
enh_waves_res_i = enh_waves[i][:, :, overlap_length:]
# overlap-and-add (average over the overlapped part)
waves[:, :, -overlap_length:] = (
waves[:, :, -overlap_length:] + enh_waves[i][:, :, :overlap_length]
) / 2
# concatenate the residual parts of the later segment
waves = torch.cat([waves, enh_waves_res_i], dim=2)
# ensure the stitched length is same as input
assert waves.size(2) == speech_mix.size(1), (waves.shape, speech_mix.shape)
waves = torch.unbind(waves, dim=0)
else:
# b. Enhancement/Separation Forward
feats, f_lens = self.enh_model.encoder(speech_mix, lengths)
feats, _, _ = self.enh_model.separator(feats, f_lens)
waves = [self.enh_model.decoder(f, lengths)[0] for f in feats]
assert len(waves) == self.num_spk, (len(waves), self.num_spk)
assert len(waves[0]) == batch_size, (len(waves[0]), batch_size)
if self.normalize_output_wav:
waves = [
(w / abs(w).max(dim=1, keepdim=True)[0] * 0.9).cpu().numpy()
for w in waves
] # list[(batch, sample)]
else:
waves = [w.cpu().numpy() for w in waves]
return waves
@staticmethod
@torch.no_grad()
def normalize_scale(enh_wav, ref_ch_wav):
"""Normalize the energy of enh_wav to match that of ref_ch_wav.
Args:
enh_wav (torch.Tensor): (B, Nsamples)
ref_ch_wav (torch.Tensor): (B, Nsamples)
Returns:
enh_wav (torch.Tensor): (B, Nsamples)
"""
ref_energy = torch.sqrt(torch.mean(ref_ch_wav.pow(2), dim=1))
enh_energy = torch.sqrt(torch.mean(enh_wav.pow(2), dim=1))
return enh_wav * (ref_energy / enh_energy)[:, None]
@torch.no_grad()
def cal_permumation(self, ref_wavs, enh_wavs, criterion="si_snr"):
"""Calculate the permutation between seaprated streams in two adjacent segments.
Args:
ref_wavs (List[torch.Tensor]): [(Batch, Nsamples)]
enh_wavs (List[torch.Tensor]): [(Batch, Nsamples)]
criterion (str): one of ("si_snr", "mse", "corr")
Returns:
perm (torch.Tensor): permutation for enh_wavs (Batch, num_spk)
"""
loss_func = {
"si_snr": self.enh_model.si_snr_loss,
"mse": lambda enh, ref: torch.mean((enh - ref).pow(2), dim=1),
"corr": lambda enh, ref: (
(enh * ref).sum(dim=1)
/ (enh.pow(2).sum(dim=1) * ref.pow(2).sum(dim=1) + EPS)
).clamp(min=EPS, max=1 - EPS),
}[criterion]
_, perm = self.enh_model._permutation_loss(ref_wavs, enh_wavs, loss_func)
return perm
def humanfriendly_or_none(value: str):
if value in ("none", "None", "NONE"):
return None
return humanfriendly.parse_size(value)
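# Added behaviour sketch (illustrative only):
#     humanfriendly_or_none("16k")   # -> 16000, via humanfriendly.parse_size
#     humanfriendly_or_none("none")  # -> None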
def inference(
output_dir: str,
batch_size: int,
dtype: str,
fs: int,
ngpu: int,
seed: int,
num_workers: int,
log_level: Union[int, str],
data_path_and_name_and_type: Sequence[Tuple[str, str, str]],
key_file: Optional[str],
enh_train_config: str,
enh_model_file: str,
allow_variable_data_keys: bool,
segment_size: Optional[float],
hop_size: Optional[float],
normalize_segment_scale: bool,
show_progressbar: bool,
ref_channel: Optional[int],
normalize_output_wav: bool,
):
assert check_argument_types()
if batch_size > 1:
raise NotImplementedError("batch decoding is not implemented")
if ngpu > 1:
raise NotImplementedError("only single GPU decoding is supported")
logging.basicConfig(
level=log_level,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
if ngpu >= 1:
device = "cuda"
else:
device = "cpu"
# 1. Set random-seed
set_all_random_seed(seed)
# 2. Build separate_speech
separate_speech = SeparateSpeech(
enh_train_config=enh_train_config,
enh_model_file=enh_model_file,
segment_size=segment_size,
hop_size=hop_size,
normalize_segment_scale=normalize_segment_scale,
show_progressbar=show_progressbar,
ref_channel=ref_channel,
normalize_output_wav=normalize_output_wav,
device=device,
dtype=dtype,
)
# 3. Build data-iterator
loader = EnhancementTask.build_streaming_iterator(
data_path_and_name_and_type,
dtype=dtype,
batch_size=batch_size,
key_file=key_file,
num_workers=num_workers,
preprocess_fn=EnhancementTask.build_preprocess_fn(
separate_speech.enh_train_args, False
),
collate_fn=EnhancementTask.build_collate_fn(
separate_speech.enh_train_args, False
),
allow_variable_data_keys=allow_variable_data_keys,
inference=True,
)
# 4. Start for-loop
writers = []
for i in range(separate_speech.num_spk):
writers.append(
SoundScpWriter(f"{output_dir}/wavs/{i + 1}", f"{output_dir}/spk{i + 1}.scp")
)
for keys, batch in loader:
assert isinstance(batch, dict), type(batch)
assert all(isinstance(s, str) for s in keys), keys
_bs = len(next(iter(batch.values())))
assert len(keys) == _bs, f"{len(keys)} != {_bs}"
batch = {k: v for k, v in batch.items() if not k.endswith("_lengths")}
waves = separate_speech(**batch)
for (spk, w) in enumerate(waves):
for b in range(batch_size):
writers[spk][keys[b]] = fs, w[b]
print(w[b], file=sys.stderr)
for writer in writers:
writer.close()
def get_parser():
parser = config_argparse.ArgumentParser(
description="Frontend inference",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Note(kamo): Use '_' instead of '-' as separator.
# '-' is confusing if written in yaml.
parser.add_argument(
"--log_level",
type=lambda x: x.upper(),
default="INFO",
choices=("CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"),
help="The verbose level of logging",
)
parser.add_argument("--output_dir", type=str, required=True)
parser.add_argument(
"--ngpu",
type=int,
default=0,
help="The number of gpus. 0 indicates CPU mode",
)
parser.add_argument("--seed", type=int, default=0, help="Random seed")
parser.add_argument(
"--dtype",
default="float32",
choices=["float16", "float32", "float64"],
help="Data type",
)
parser.add_argument(
"--fs", type=humanfriendly_or_none, default=8000, help="Sampling rate"
)
parser.add_argument(
"--num_workers",
type=int,
default=1,
help="The number of workers used for DataLoader",
)
group = parser.add_argument_group("Input data related")
group.add_argument(
"--data_path_and_name_and_type",
type=str2triple_str,
required=True,
action="append",
)
group.add_argument("--key_file", type=str_or_none)
group.add_argument("--allow_variable_data_keys", type=str2bool, default=False)
group = parser.add_argument_group("Output data related")
group.add_argument(
"--normalize_output_wav",
type=str2bool,
default=False,
help="Whether to normalize the predicted wav to [-1~1]",
)
group = parser.add_argument_group("The model configuration related")
group.add_argument("--enh_train_config", type=str, required=True)
group.add_argument("--enh_model_file", type=str, required=True)
group = parser.add_argument_group("Data loading related")
group.add_argument(
"--batch_size",
type=int,
default=1,
help="The batch size for inference",
)
group = parser.add_argument_group("SeparateSpeech related")
group.add_argument(
"--segment_size",
type=float,
default=None,
help="Segment length in seconds for segment-wise speech enhancement/separation",
)
group.add_argument(
"--hop_size",
type=float,
default=None,
help="Hop length in seconds for segment-wise speech enhancement/separation",
)
group.add_argument(
"--normalize_segment_scale",
type=str2bool,
default=False,
help="Whether to normalize the energy of the separated streams in each segment",
)
group.add_argument(
"--show_progressbar",
type=str2bool,
default=False,
help="Whether to show a progress bar when performing segment-wise speech "
"enhancement/separation",
)
group.add_argument(
"--ref_channel",
type=int,
default=None,
help="If not None, this will overwrite the ref_channel defined in the "
"separator module (for multi-channel speech processing)",
)
return parser
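# Example invocation (hypothetical paths and data keys, shown for illustration only;
# the flags mirror the parser defined above):
#
#   python enh_inference.py \
#       --output_dir exp/enh_decode \
#       --enh_train_config exp/enh_train/config.yaml \
#       --enh_model_file exp/enh_train/valid.loss.best.pth \
#       --data_path_and_name_and_type "dump/test/wav.scp,speech_mix,sound" \
#       --ngpu 1 --fs 8k --normalize_output_wav true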
def main(cmd=None):
print(get_commandline_args(), file=sys.stderr)
parser = get_parser()
args = parser.parse_args(cmd)
kwargs = vars(args)
kwargs.pop("config", None)
inference(**kwargs)
if __name__ == "__main__":
main()
| [
"torch.get_default_dtype",
"torch.cat",
"torch.stack",
"torch.unbind",
"torch.no_grad",
"torch.as_tensor"
] | 1.1.0 | arceushui/Keyword-Spotting-Alibaba | 10e718491075dee8f875c7860385bc4eef22a790 |
1.4 | import time, os, json
import numpy as np
import torch
from torch._C import device
import torch.distributed as dist
from torch.autograd import Variable
def test_model(model, test_data, dev):
correct, total = 0, 0
model.eval()
with torch.no_grad():
for data, target in test_data:
data, target = Variable(data).cuda(dev), Variable(target).cuda(dev)
output = model(data)
# get the index of the max log-probability
_, predictions = output.max(1)
total += predictions.size(0)
correct += torch.sum(predictions == target.data).float()
acc = correct / total
return acc.item()
def update_model(model, global_mu, size, cpu, gpu, args):
# all_param = model.state_dict()
# receive the parameter variance from workers
for param in model.parameters():
tensor = torch.zeros_like(param.data, device=cpu)
gather_list = [torch.zeros_like(param.data, device=cpu) for _ in range(size)]
dist.gather(tensor=tensor, gather_list=gather_list, dst=0)
param.data = torch.zeros_like(param.data, device=gpu)
for w in range(size):
# Assume the values received from the clients are already well processed
param.data = param.data + gather_list[w].clone().detach().to(gpu)
# receive averaged K from workers
avg_k_list = [torch.tensor(0.0) for _ in range(size)]
dist.gather(tensor=torch.tensor(0.0), gather_list=avg_k_list, dst=0)
avg_k = sum(avg_k_list)
print('Averaged K:', avg_k)
# send averaged K to workers
avg_k_list = [avg_k if args.avg_k==-1 else torch.tensor(float(args.avg_k)) for _ in range(size)]
dist.scatter(tensor=avg_k, scatter_list=avg_k_list)
# receive the mu from clients
for idx, param in enumerate(global_mu):
tensor = torch.zeros_like(param.data, device=cpu)
gather_list = [torch.zeros_like(param.data, device=cpu) for _ in range(size)]
dist.gather(tensor=tensor, gather_list=gather_list, dst=0)
global_mu[idx] = torch.zeros_like(param.data, device=gpu)
for w in range(size):
# Assume the values received from the clients are already well processed
global_mu[idx] = global_mu[idx] + gather_list[w].clone().detach().to(gpu)
# send the parameters to workers
for param in model.parameters():
tmp_p = param.clone().detach().to(cpu)
scatter_p_list = [tmp_p for _ in range(size)]
dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)
if torch.sum(torch.isnan(tmp_p)) > 0:
print("NaN occurs. Terminate. ")
exit(-1)
# send global_mu to workers
for param in global_mu:
tmp_p = param.clone().detach().to(cpu)
scatter_p_list = [tmp_p for _ in range(size)]
dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)
# model.load_state_dict(all_param)
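# Note (added for clarity; describes an assumed worker-side counterpart that is not part of
# this file): every dist.gather/dist.scatter issued above must be matched, in the same order,
# by the workers (rank > 0), i.e. roughly:
#   1) gather per-parameter updates      2) gather the locally averaged K
#   3) scatter the (possibly overridden) averaged K
#   4) gather the local mu tensors       5) scatter the global parameters
#   6) scatter global_mu
# otherwise the collective calls deadlock or pair up with the wrong tensors.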
def run(size, model, args, test_data, f_result, cpu, gpu):
# Receive the weights from all clients
temp_w = torch.tensor([0.0 for _ in range(args.num_workers+1)])
weights = [torch.tensor([0.0 for _ in range(args.num_workers+1)]) for _ in range(size)]
dist.gather(tensor=temp_w, gather_list=weights, dst=0)
weights = sum(weights)
weights = weights / torch.sum(weights)
print('weights:', weights)
# send weights to clients
weights_list = [weights.clone().detach().to(cpu) for _ in range(size)]
dist.scatter(tensor=temp_w, scatter_list=weights_list)
start = time.time()
model = model.cuda(gpu)
for p in model.parameters():
tmp_p = p.clone().detach().to(cpu)
scatter_p_list = [tmp_p for _ in range(size)]
# dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list, group=group)
dist.scatter(tensor=tmp_p, scatter_list=scatter_p_list)
global_mu = [torch.zeros_like(param.data, device=gpu) for param in model.parameters()]
print('Model has been sent to all nodes!')
print('Begin!')
np.random.seed(42)
for t in range(args.T):
model.train()
# send participants to all clients
participants = np.random.choice(np.arange(len(weights)), size=args.num_part, replace=True, p=weights.numpy()) if args.partial else np.arange(len(weights))
print('Participants list:', list(participants))
participants = torch.tensor(participants).to(cpu)
part_list = [participants for _ in range(size)]
dist.scatter(tensor=participants, scatter_list=part_list)
# receive the list of train loss from workers
info_list = [torch.tensor(0.0) for _ in range(size)]
# dist.gather(tensor=torch.tensor([0.0]), gather_list=info_list, group=group)
dist.gather(tensor=torch.tensor(0.0), gather_list=info_list, dst=0)
# info_list = np.concatenate([list(a) for a in info_list])
# train_loss = sum(info_list).item() / args.num_part if args.partial else sum(info_list * weights).item()
train_loss = sum(info_list).item()
# if args.partial:
# update_model_partial(model, size, cpu, gpu, args.num_part)
# else:
# update_model_full(model, size, cpu, gpu, weights)
update_model(model, global_mu, size, cpu, gpu, args)
timestamp = time.time() - start
test_acc = test_model(model, test_data, gpu)
print("Epoch: {}\t\tLoss: {}\t\tAccuracy: {}".format(t, train_loss, test_acc))
f_result.write(str(t) + "\t" + str(timestamp) + "\t" + str(train_loss) + "\t" + str(test_acc) + "\n")
f_result.flush()
def init_processes(rank, size, model, args, test_data, cpu, gpu, backend='mpi'):
if backend == 'mpi':
dist.init_process_group(backend)
elif backend == 'gloo':
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
dist.init_process_group(backend, rank=rank, world_size=size)
if not os.path.exists(args.result):
os.makedirs(args.result)
result_file = os.path.join(args.result, '{}.txt'.format(len(os.listdir(args.result))))
f_result = open(result_file, 'w')
f_result.write(json.dumps(vars(args)) + '\n')
run(size, model, args, test_data, f_result, cpu, gpu) | [
"torch.isnan",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.autograd.Variable",
"torch.zeros_like",
"torch.distributed.gather",
"torch.tensor",
"torch.distributed.scatter",
"torch.sum"
] | 1.4.0 | HarliWu/From-Deterioration-to-Acceleration-A-Calibration-Approach-to-Rehabilitating-Step-Asynchronism-in-Fe | 3a2f7196a2ca0446ce7ff7c8d15a0fa56a1d91d4 |
1.0 | import torch
from torch.utils.data import Dataset, ConcatDataset, Sampler
import torch.distributed as dist
import math
import os
import sys
import shelve
from glob import glob
import numpy as np
import uuid
from termcolor import colored
from collections import Counter, OrderedDict
import random
from .. import util
from ..util import TraceMode, PriorInflation
from ..concurrency import ConcurrentShelf
class Batch():
def __init__(self, traces):
self.traces = traces
self.size = len(traces)
sub_batches = {}
total_length_controlled = 0
for trace in traces:
tl = trace.length_controlled
if tl == 0:
raise ValueError('Trace of length zero.')
total_length_controlled += tl
trace_hash = ''.join([variable.address for variable in trace.variables_controlled])
if trace_hash not in sub_batches:
sub_batches[trace_hash] = []
sub_batches[trace_hash].append(trace)
self.sub_batches = list(sub_batches.values())
self.mean_length_controlled = total_length_controlled / self.size
def __len__(self):
return len(self.traces)
def __getitem__(self, key):
return self.traces[key]
def to(self, device):
for trace in self.traces:
trace.to(device=device)
class OnlineDataset(Dataset):
def __init__(self, model, length=None, prior_inflation=PriorInflation.DISABLED):
self._model = model
if length is None:
length = int(1e6)
self._length = length
self._prior_inflation = prior_inflation
def __len__(self):
return self._length
def __getitem__(self, idx):
return next(self._model._trace_generator(trace_mode=TraceMode.PRIOR_FOR_INFERENCE_NETWORK, prior_inflation=self._prior_inflation))
@staticmethod
def _prune_trace(trace):
del(trace.variables)
# trace.variables_controlled = []
del(trace.variables_uncontrolled)
del(trace.variables_replaced)
del(trace.variables_observed)
del(trace.variables_observable)
del(trace.variables_tagged)
del(trace.variables_dict_address)
del(trace.variables_dict_address_base)
# trace.named_variables = {}
del(trace.result)
del(trace.log_prob)
del(trace.log_prob_observed)
# del(trace.log_importance_weight)
# trace.length = 0
# trace.length_controlled = 0
del(trace.execution_time_sec)
for variable in trace.variables_controlled:
# variable.distribution = distribution
# if value is None:
# variable.value = None
# else:
# variable.value = util.to_tensor(value)
del(variable.address_base)
# variable.address = address
del(variable.instance)
del(variable.log_prob)
del(variable.control)
del(variable.replace)
del(variable.name)
del(variable.observable)
del(variable.observed)
del(variable.reused)
del(variable.tagged)
for _, variable in trace.named_variables.items():
controlled = False
for v in trace.variables_controlled:
if variable is v: # Needs to be implemented this way to compare object references instead of object hashes (which change as a result of potentially deleted fields)
controlled = True
break
if not controlled:
del(variable.distribution)
# if value is None:
# variable.value = None
# else:
# variable.value = util.to_tensor(value)
del(variable.address_base)
del(variable.address)
del(variable.instance)
del(variable.log_prob)
del(variable.control)
del(variable.replace)
del(variable.name)
del(variable.observable)
del(variable.observed)
del(variable.reused)
del(variable.tagged)
def save_dataset(self, dataset_dir, num_traces, num_traces_per_file, *args, **kwargs):
num_files = math.ceil(num_traces / num_traces_per_file)
util.progress_bar_init('Saving offline dataset, traces:{}, traces per file:{}, files:{}'.format(num_traces, num_traces_per_file, num_files), num_traces, 'Traces')
i = 0
while i < num_traces:
i += num_traces_per_file
file_name = os.path.join(dataset_dir, 'pyprob_traces_{}_{}'.format(num_traces_per_file, str(uuid.uuid4())))
shelf = shelve.open(file_name, flag='c')
for j in range(num_traces_per_file):
trace = next(self._model._trace_generator(trace_mode=TraceMode.PRIOR, prior_inflation=self._prior_inflation, *args, **kwargs))
self._prune_trace(trace)
shelf[str(j)] = trace
shelf['__length'] = j + 1
shelf.close()
util.progress_bar_update(i)
util.progress_bar_end()
class OfflineDatasetFile(Dataset):
cache = OrderedDict()
cache_capacity = 8
def __init__(self, file_name):
self._file_name = file_name
self._closed = False
shelf = self._open()
self._length = shelf['__length']
def _open(self):
# idea from https://www.kunxi.org/2014/05/lru-cache-in-python
try:
shelf = OfflineDatasetFile.cache.pop(self._file_name)
# it was in the cache, put it back on the front
OfflineDatasetFile.cache[self._file_name] = shelf
return shelf
except KeyError:
# not in the cache
if len(OfflineDatasetFile.cache) >= OfflineDatasetFile.cache_capacity:
# cache is full, delete the last entry
n, s = OfflineDatasetFile.cache.popitem(last=False)
s.close()
shelf = shelve.open(self._file_name, flag='r')
OfflineDatasetFile.cache[self._file_name] = shelf
return shelf
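# Example of the LRU behaviour sketched above (illustrative): with cache_capacity == 8,
# opening a 9th distinct trace file evicts and closes the least-recently-used shelf
# (popitem(last=False)), while re-opening a cached file simply moves it back to the
# most-recently-used end of the OrderedDict.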
def __len__(self):
return self._length
def __getitem__(self, idx):
shelf = self._open()
return shelf[str(idx)]
class OfflineDataset(ConcatDataset):
def __init__(self, dataset_dir):
self._dataset_dir = dataset_dir
# files = [name for name in os.listdir(self._dataset_dir)]
files = sorted(glob(os.path.join(self._dataset_dir, 'pyprob_traces_sorted_*')))
if len(files) > 0:
self._sorted_on_disk = True
else:
self._sorted_on_disk = False
files = sorted(glob(os.path.join(self._dataset_dir, 'pyprob_traces_*')))
if len(files) == 0:
raise RuntimeError('Cannot find any data set files at {}'.format(dataset_dir))
datasets = []
for file in files:
try:
dataset = OfflineDatasetFile(file)
datasets.append(dataset)
except Exception as e:
print(e)
print(colored('Warning: dataset file potentially corrupt, omitting: {}'.format(file), 'red', attrs=['bold']))
super().__init__(datasets)
print('OfflineDataset at: {}'.format(self._dataset_dir))
print('Num. traces : {:,}'.format(len(self)))
print('Sorted on disk : {}'.format(self._sorted_on_disk))
if self._sorted_on_disk:
self._sorted_indices = list(range(len(self)))
else:
file_name = os.path.join(self._dataset_dir, 'pyprob_hashes')
try:
hashes_file = shelve.open(file_name, 'r')
hashes_exist = 'hashes' in hashes_file
hashes_file.close()
except:
hashes_exist = False
if hashes_exist:
print('Using pre-computed hashes in: {}'.format(file_name))
hashes_file = shelve.open(file_name, 'r')
self._hashes = hashes_file['hashes']
self._sorted_indices = hashes_file['sorted_indices']
hashes_file.close()
if torch.is_tensor(self._hashes):
self._hashes = self._hashes.cpu().numpy()
if len(self._sorted_indices) != len(self):
raise RuntimeError('Length of pre-computed hashes ({}) and length of offline dataset ({}) do not match. Dataset files have been altered. Delete and re-generate pre-computed hash file: {}'.format(len(self._sorted_indices), len(self), file_name))
else:
print('No pre-computed hashes found, generating: {}'.format(file_name))
hashes_file = shelve.open(file_name, 'c')
hashes, sorted_indices = self._compute_hashes()
hashes_file['hashes'] = hashes
hashes_file['sorted_indices'] = sorted_indices
hashes_file.close()
self._sorted_indices = sorted_indices
self._hashes = hashes
print('Num. trace types : {:,}'.format(len(set(self._hashes))))
hashes_and_counts = OrderedDict(sorted(Counter(self._hashes).items()))
print('Trace hash\tCount')
for hash, count in hashes_and_counts.items():
print('{:.8f}\t{}'.format(hash, count))
print()
@staticmethod
def _trace_hash(trace):
h = hash(''.join([variable.address for variable in trace.variables_controlled])) + sys.maxsize + 1
return float('{}.{}'.format(trace.length_controlled, h))
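# Illustrative example (values assumed): a trace with length_controlled == 7 whose
# concatenated controlled addresses hash (shifted by sys.maxsize + 1) to 123456 becomes
# the float 7.123456, so sorting on this key groups traces first by controlled length
# and then by address structure.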
def _compute_hashes(self):
hashes = torch.zeros(len(self))
util.progress_bar_init('Hashing offline dataset for sorting', len(self), 'Traces')
for i in range(len(self)):
hashes[i] = self._trace_hash(self[i])
util.progress_bar_update(i)
util.progress_bar_end()
print('Sorting offline dataset')
_, sorted_indices = torch.sort(hashes)
print('Sorting done')
return hashes.cpu().numpy(), sorted_indices.cpu().numpy()
def save_sorted(self, sorted_dataset_dir, num_traces_per_file=None, num_files=None, begin_file_index=None, end_file_index=None):
if num_traces_per_file is not None:
if num_files is not None:
raise ValueError('Expecting either num_traces_per_file or num_files')
else:
if num_files is None:
raise ValueError('Expecting either num_traces_per_file or num_files')
else:
num_traces_per_file = math.ceil(len(self) / num_files)
if os.path.exists(sorted_dataset_dir):
if len(glob(os.path.join(sorted_dataset_dir, '*'))) > 0:
print(colored('Warning: target directory is not empty: {}'.format(sorted_dataset_dir), 'red', attrs=['bold']))
util.create_path(sorted_dataset_dir, directory=True)
file_indices = list(util.chunks(list(self._sorted_indices), num_traces_per_file))
num_traces = len(self)
num_files = len(file_indices)
num_files_digits = len(str(num_files))
file_name_template = 'pyprob_traces_sorted_{{:d}}_{{:0{}d}}'.format(num_files_digits)
file_names = list(map(lambda x: os.path.join(sorted_dataset_dir, file_name_template.format(num_traces_per_file, x)), range(num_files)))
if begin_file_index is None:
begin_file_index = 0
if end_file_index is None:
end_file_index = num_files
if begin_file_index < 0 or begin_file_index > end_file_index or end_file_index > num_files or end_file_index < begin_file_index:
raise ValueError('Invalid indexes begin_file_index:{} and end_file_index: {}'.format(begin_file_index, end_file_index))
print('Sorted offline dataset, traces: {}, traces per file: {}, files: {} (overall)'.format(num_traces, num_traces_per_file, num_files))
util.progress_bar_init('Saving sorted files with indices in range [{}, {}) ({} of {} files overall)'.format(begin_file_index, end_file_index, end_file_index - begin_file_index, num_files), end_file_index - begin_file_index + 1, 'Files')
j = 0
for i in range(begin_file_index, end_file_index):
j += 1
file_name = file_names[i]
print(file_name)
shelf = ConcurrentShelf(file_name)
shelf.lock(write=True)
for new_i, old_i in enumerate(file_indices[i]):
shelf[str(new_i)] = self[old_i]
shelf['__length'] = len(file_indices[i])
shelf.unlock()
util.progress_bar_update(j)
util.progress_bar_end()
class TraceSampler(Sampler):
def __init__(self, offline_dataset):
if not isinstance(offline_dataset, OfflineDataset):
raise TypeError('Expecting an OfflineDataset instance.')
self._sorted_indices = offline_dataset._sorted_indices
def __iter__(self):
return iter(self._sorted_indices)
def __len__(self):
return len(self._sorted_indices)
class TraceBatchSampler(Sampler):
def __init__(self, offline_dataset, batch_size, shuffle_batches=True):
if not isinstance(offline_dataset, OfflineDataset):
raise TypeError('Expecting an OfflineDataset instance.')
self._batches = list(util.chunks(offline_dataset._sorted_indices, batch_size))
self._shuffle_batches = shuffle_batches
def __iter__(self):
if self._shuffle_batches:
np.random.shuffle(self._batches)
return iter(self._batches)
def __len__(self):
return len(self._batches)
class DistributedTraceBatchSampler(Sampler):
def __init__(self, offline_dataset, batch_size, shuffle_batches=True, num_buckets=None, shuffle_buckets=True):
if not isinstance(offline_dataset, OfflineDataset):
raise TypeError('Expecting an OfflineDataset instance.')
if not dist.is_available():
raise RuntimeError('Expecting distributed training.')
self._world_size = dist.get_world_size()
self._rank = dist.get_rank()
# Randomly drop a number of traces so that the number of all minibatches in the whole dataset is an integer multiple of world size
num_batches_to_drop = math.floor(len(offline_dataset._sorted_indices) / batch_size) % self._world_size
num_traces_to_drop = num_batches_to_drop * batch_size
# Ensure all ranks choose the same traces to drop
st = random.getstate()
random.seed(0)
self._batches = list(util.chunks(util.drop_items(list(offline_dataset._sorted_indices), num_traces_to_drop), batch_size)) # List of all minibatches, where each minibatch is a list of trace indices
random.setstate(st)
# Discard last minibatch if it's smaller than batch_size
if len(self._batches[-1]) < batch_size:
del(self._batches[-1])
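# Worked example (illustrative numbers): with 1,000 sorted traces, batch_size=16 and
# world_size=4: floor(1000/16)=62, 62 % 4 = 2, so 2*16=32 traces are dropped; chunking
# the remaining 968 indices gives 60 full minibatches (the trailing partial one is
# deleted above), and 60 splits evenly across the 4 ranks.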
if num_buckets is None:
num_buckets = len(self._batches) / self._world_size
self._num_buckets = num_buckets
self._bucket_size = math.ceil(len(self._batches) / num_buckets)
if self._bucket_size < self._world_size:
raise RuntimeError('offline_dataset:{}, batch_size:{} and num_buckets:{} imply a bucket_size:{} smaller than world_size:{}'.format(len(offline_dataset), batch_size, num_buckets, self._bucket_size, self._world_size))
# List of buckets, where each bucket is a list of minibatches
self._buckets = list(util.chunks(self._batches, self._bucket_size))
# Unify last two buckets if the last bucket is smaller than other buckets
if len(self._buckets[-1]) < self._bucket_size:
if len(self._buckets) < 2:
raise RuntimeError('offline_dataset:{} too small for given batch_size:{} and num_buckets:{}'.format(len(offline_dataset), batch_size, num_buckets))
self._buckets[-2].extend(self._buckets[-1])
del(self._buckets[-1])
self._shuffle_batches = shuffle_batches
self._shuffle_buckets = shuffle_buckets
self._epoch = 0
self._current_bucket_id = 0
print('DistributedTraceBatchSampler')
print('OfflineDataset size : {:,}'.format(len(offline_dataset)))
print('World size : {:,}'.format(self._world_size))
print('Batch size : {:,}'.format(batch_size))
print('Num. batches dropped: {:,}'.format(num_batches_to_drop))
print('Num. batches : {:,}'.format(len(self._batches)))
print('Bucket size : {:,}'.format(self._bucket_size))
print('Num. buckets : {:,}'.format(self._num_buckets))
def __iter__(self):
self._epoch += 1
bucket_ids = list(range(len(self._buckets)))
if self._shuffle_buckets:
# Shuffle the list of buckets (but not the order of minibatches inside each bucket) at the beginning of each epoch, deterministically based on the epoch number so that all nodes have the same bucket order
# Idea from: https://github.com/pytorch/pytorch/blob/a3fb004b1829880547dd7b3e2cd9d16af657b869/torch/utils/data/distributed.py#L44
st = np.random.get_state()
np.random.seed(self._epoch)
np.random.shuffle(bucket_ids)
np.random.set_state(st)
for bucket_id in bucket_ids:
bucket = self._buckets[bucket_id]
self._current_bucket_id = bucket_id
# num_batches is needed to ensure that all nodes have the same number of minibatches (iterations) in each bucket, in cases where the bucket size is not divisible by world_size.
num_batches = math.floor(len(bucket) / self._world_size)
# Select a num_batches-sized subset of the current bucket for the current node
# The part not selected by the current node will be selected by other nodes
batches = bucket[self._rank:len(bucket):self._world_size][:num_batches]
if self._shuffle_batches:
# Shuffle the list of minibatches (but not the order of trace indices inside each minibatch) selected for the current node
np.random.shuffle(batches)
for batch in batches:
yield batch
def __len__(self):
return len(self._batches)
| [
"torch.distributed.is_available",
"torch.distributed.get_world_size",
"torch.is_tensor",
"torch.distributed.get_rank",
"torch.sort"
] | 1.0.0 | bayesianbrad/pyprob | a426fc51c1d6da13052979c21af447f9c4023642 |
1.7 | import pandas as pd
from pymethylprocess.MethylationDataTypes import MethylationArray
from sklearn.metrics import mean_absolute_error, r2_score
import warnings
warnings.filterwarnings("ignore")
from pybedtools import BedTool
import numpy as np
from functools import reduce
from torch.utils.data import Dataset, DataLoader
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
import os
import pysnooper
import argparse
import pickle
from sklearn.metrics import classification_report
import click
import methylcapsnet
from methylcapsnet.build_capsules import *
from methylcapsnet.methylcaps_data_models import *
import sqlite3
import os
import glob
import dask
from dask.diagnostics import ProgressBar
from pathos.multiprocessing import Pool
import multiprocessing
import dask.bag as db
from distributed import Client, LocalCluster, get_task_stream
RANDOM_SEED=42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@pysnooper.snoop('train.log')
def model_capsnet_(train_methyl_array='train_val_test_sets/train_methyl_array.pkl',
val_methyl_array='train_val_test_sets/val_methyl_array.pkl',
interest_col='disease',
n_epochs=10,
n_bins=0,
bin_len=1000000,
min_capsule_len=300,
primary_caps_out_len=45,
caps_out_len=45,
hidden_topology='30,80,50',
gamma=1e-2,
decoder_topology='100,300',
learning_rate=1e-2,
routing_iterations=3,
overlap=0.,
custom_loss='none',
gamma2=1e-2,
job=0,
capsule_choice=['genomic_binned'],
custom_capsule_file='',
test_methyl_array='',
predict=False,
batch_size=16,
limited_capsule_names_file='',
gsea_superset='',
tissue='',
number_sets=25,
use_set=False,
gene_context=False,
select_subtypes=[],
fit_spw=False,
l1_l2='',
custom_capsule_file2='',
min_capsules=5):
capsule_choice=list(capsule_choice)
#custom_capsule_file=list(custom_capsule_file)
hlt_list=list(filter(None,hidden_topology.split(',')))
if hlt_list:
hidden_topology=list(map(int,hlt_list))
else:
hidden_topology=[]
hlt_list=list(filter(None,decoder_topology.split(',')))
if hlt_list:
decoder_topology=list(map(int,hlt_list))
else:
decoder_topology=[]
hidden_caps_layers=[]
include_last=False
ma=MethylationArray.from_pickle(train_methyl_array)
ma_v=MethylationArray.from_pickle(val_methyl_array)
if test_methyl_array and predict:
ma_t=MethylationArray.from_pickle(test_methyl_array)
try:
ma.remove_na_samples(interest_col)
ma_v.remove_na_samples(interest_col)
if test_methyl_array and predict:
ma_t.remove_na_samples(interest_col)
except:
pass
if select_subtypes:
print(ma.pheno[interest_col].unique())
ma.pheno=ma.pheno.loc[ma.pheno[interest_col].isin(select_subtypes)]
ma.beta=ma.beta.loc[ma.pheno.index]
ma_v.pheno=ma_v.pheno.loc[ma_v.pheno[interest_col].isin(select_subtypes)]
ma_v.beta=ma_v.beta.loc[ma_v.pheno.index]
print(ma.pheno[interest_col].unique())
if test_methyl_array and predict:
ma_t.pheno=ma_t.pheno.loc[ma_t.pheno[interest_col].isin(select_subtypes)]
ma_t.beta=ma_t.beta.loc[ma_t.pheno.index]
if custom_capsule_file2 and os.path.exists(custom_capsule_file2):
capsules_dict=torch.load(custom_capsule_file2)
final_modules, modulecpgs, module_names=capsules_dict['final_modules'], capsules_dict['modulecpgs'], capsules_dict['module_names']
if min_capsule_len>1:
include_capsules=[len(x)>min_capsule_len for x in final_modules]
final_modules=[final_modules[i] for i in range(len(final_modules)) if include_capsules[i]]
module_names=[module_names[i] for i in range(len(module_names)) if include_capsules[i]]
modulecpgs=(reduce(np.union1d,final_modules)).tolist()
else:
final_modules, modulecpgs, module_names=build_capsules(capsule_choice,
overlap,
bin_len,
ma,
include_last,
min_capsule_len,
custom_capsule_file,
gsea_superset,
tissue,
gene_context,
use_set,
number_sets,
limited_capsule_names_file)
if custom_capsule_file2:
torch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names),custom_capsule_file2)
assert len(final_modules) >= min_capsules , "Below the number of allowed capsules."
if fit_spw:
modulecpgs=list(reduce(lambda x,y:np.hstack((x,y)),final_modules))
if not include_last: # ERROR HAPPENS HERE!
ma.beta=ma.beta.loc[:,modulecpgs]
ma_v.beta=ma_v.beta.loc[:,modulecpgs]
if test_methyl_array and predict:
ma_t.beta=ma_t.beta.loc[:,modulecpgs]
# https://github.com/higgsfield/Capsule-Network-Tutorial/blob/master/Capsule%20Network.ipynb
original_interest_col=interest_col
if n_bins:
new_interest_col=interest_col+'_binned'
ma.pheno.loc[:,new_interest_col],bins=pd.cut(ma.pheno[interest_col],bins=n_bins,retbins=True)
ma_v.pheno.loc[:,new_interest_col],_=pd.cut(ma_v.pheno[interest_col],bins=bins,retbins=True)
if test_methyl_array and predict:
ma_t.pheno.loc[:,new_interest_col],_=pd.cut(ma_t.pheno[interest_col],bins=bins,retbins=True)
interest_col=new_interest_col
datasets=dict()
datasets['train']=MethylationDataset(ma,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
print(datasets['train'].X.isnull().sum().sum())
datasets['val']=MethylationDataset(ma_v,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
if test_methyl_array and predict:
datasets['test']=MethylationDataset(ma_t,interest_col,modules=final_modules, module_names=module_names, original_interest_col=original_interest_col, run_spw=fit_spw)
dataloaders=dict()
dataloaders['train']=DataLoader(datasets['train'],batch_size=batch_size,shuffle=True,num_workers=8, pin_memory=True, drop_last=True)
dataloaders['val']=DataLoader(datasets['val'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)
n_primary=len(final_modules)
if test_methyl_array and predict:
dataloaders['test']=DataLoader(datasets['test'],batch_size=batch_size,shuffle=False,num_workers=8, pin_memory=True, drop_last=False)
n_inputs=list(map(len,final_modules))
n_out_caps=len(datasets['train'].y_unique)
if not fit_spw:
print("Not fitting MethylSPWNet")
primary_caps = PrimaryCaps(modules=final_modules,hidden_topology=hidden_topology,n_output=primary_caps_out_len)
hidden_caps = []
output_caps = CapsLayer(n_out_caps,n_primary,primary_caps_out_len,caps_out_len,routing_iterations=routing_iterations)
decoder = Decoder(n_out_caps*caps_out_len,len(list(ma.beta)),decoder_topology)
model = CapsNet(primary_caps, hidden_caps, output_caps, decoder, gamma=gamma)
if test_methyl_array and predict:
model.load_state_dict(torch.load('capsnet_model.pkl'))
else:
print("Fitting MethylSPWNet")
module_lens=[len(x) for x in final_modules]
model=MethylSPWNet(module_lens, hidden_topology, dropout_p=0.2, n_output=n_out_caps)
if test_methyl_array and predict:
model.load_state_dict(torch.load('spwnet_model.pkl'))
if torch.cuda.is_available():
model=model.cuda()
# extract all c_ij for all layers across all batches, or just last batch
if l1_l2 and fit_spw:
l1,l2=list(map(float,l1_l2.split(',')))
elif fit_spw:
l1,l2=0.,0.
trainer=Trainer(model=model,
validation_dataloader=dataloaders['val'],
n_epochs=n_epochs,
lr=learning_rate,
n_primary=n_primary,
custom_loss=custom_loss,
gamma2=gamma2,
spw_mode=fit_spw,
l1=l1 if fit_spw else 0.,
l2=l2 if fit_spw else 0.)
if not predict:
try:
#assert 1==2
trainer.fit(dataloader=dataloaders['train'])
val_loss=min(trainer.val_losses)
torch.save(trainer.model.state_dict(),'capsnet_model.pkl' if not fit_spw else 'spwnet_model.pkl')
if fit_spw:
torch.save(dict(final_modules=final_modules, modulecpgs=modulecpgs, module_names=module_names), 'spwnet_capsules.pkl')
torch.save(dict(module_names=module_names,module_lens=module_lens,dropout_p=0.2,hidden_topology=hidden_topology,n_output=n_out_caps),'spwnet_config.pkl')
except Exception as e:
print(e)
val_loss=-2
with sqlite3.connect('jobs.db', check_same_thread=False) as conn:
pd.DataFrame([job,val_loss],index=['job','val_loss'],columns=[0]).T.to_sql('val_loss',conn,if_exists='append')
else:
if test_methyl_array:
trainer.weights=1.
Y=trainer.predict(dataloaders['test'])
pickle.dump(Y,open('predictions.pkl','wb'))
val_loss=-1
#print(val_loss)
# print([min(trainer.val_losses),n_epochs,
# n_bins,
# bin_len,
# min_capsule_len,
# primary_caps_out_len,
# caps_out_len,
# hidden_topology,
# gamma,
# decoder_topology,
# learning_rate,
# routing_iterations])
return val_loss
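# Example call (illustrative, using the defaults defined in the signature above):
#
#   val_loss = model_capsnet_(
#       train_methyl_array='train_val_test_sets/train_methyl_array.pkl',
#       val_methyl_array='train_val_test_sets/val_methyl_array.pkl',
#       interest_col='disease', capsule_choice=['genomic_binned'],
#       min_capsule_len=300, n_epochs=10)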
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.7.0 | Christensen-Lab-Dartmouth/MethylCapsNet | 17b6b19809c5e1984de804eb34cc7494210f91a6 |
1.3 | import os
from unittest.mock import MagicMock, call
import pytest
import torch
from ignite.contrib.handlers.polyaxon_logger import *
from ignite.engine import Engine, Events, State
os.environ["POLYAXON_NO_OP"] = "1"
def test_output_handler_with_wrong_logger_type():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler 'OutputHandler' works only with PolyaxonLogger"):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_output_handler_output_transform():
wrapper = OutputHandler("tag", output_transform=lambda x: x)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.output = 12345
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=123, **{"tag/output": 12345})
wrapper = OutputHandler("another_tag", output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=123, **{"another_tag/loss": 12345})
def test_output_handler_metric_names():
wrapper = OutputHandler("tag", metric_names=["a", "b", "c"])
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
wrapper = OutputHandler("tag", metric_names=["a",])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": torch.Tensor([0.0, 1.0, 2.0, 3.0])})
mock_engine.state.iteration = 5
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call(step=5, **{"tag/a/0": 0.0, "tag/a/1": 1.0, "tag/a/2": 2.0, "tag/a/3": 3.0}),], any_order=True
)
wrapper = OutputHandler("tag", metric_names=["a", "c"])
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 55.56, "c": "Some text"})
mock_engine.state.iteration = 7
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
with pytest.warns(UserWarning):
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls([call(step=7, **{"tag/a": 55.56})], any_order=True)
# all metrics
wrapper = OutputHandler("tag", metric_names="all")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45, "c": torch.tensor(10.0)})
mock_engine.state.iteration = 5
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/c": 10.0})
def test_output_handler_both():
wrapper = OutputHandler("tag", metric_names=["a", "b"], output_transform=lambda x: {"loss": x})
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State(metrics={"a": 12.23, "b": 23.45})
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_called_once_with(step=5, **{"tag/a": 12.23, "tag/b": 23.45, "tag/loss": 12345})
def test_output_handler_with_wrong_global_step_transform_output():
def global_step_transform(*args, **kwargs):
return "a"
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
with pytest.raises(TypeError, match="global_step must be int"):
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
def test_output_handler_with_global_step_transform():
def global_step_transform(*args, **kwargs):
return 10
wrapper = OutputHandler("tag", output_transform=lambda x: {"loss": x}, global_step_transform=global_step_transform)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 5
mock_engine.state.output = 12345
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
mock_logger.log_metrics.assert_called_once_with(step=10, **{"tag/loss": 12345})
def test_output_handler_with_global_step_from_engine():
mock_another_engine = MagicMock()
mock_another_engine.state = State()
mock_another_engine.state.epoch = 10
mock_another_engine.state.output = 12.345
wrapper = OutputHandler(
"tag",
output_transform=lambda x: {"loss": x},
global_step_transform=global_step_from_engine(mock_another_engine),
)
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.epoch = 1
mock_engine.state.output = 0.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 1
mock_logger.log_metrics.assert_has_calls(
[call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
)
mock_another_engine.state.epoch = 11
mock_engine.state.output = 1.123
wrapper(mock_engine, mock_logger, Events.EPOCH_STARTED)
assert mock_logger.log_metrics.call_count == 2
mock_logger.log_metrics.assert_has_calls(
[call(step=mock_another_engine.state.epoch, **{"tag/loss": mock_engine.state.output})]
)
def test_optimizer_params_handler_wrong_setup():
with pytest.raises(TypeError):
OptimizerParamsHandler(optimizer=None)
optimizer = MagicMock(spec=torch.optim.Optimizer)
handler = OptimizerParamsHandler(optimizer=optimizer)
mock_logger = MagicMock()
mock_engine = MagicMock()
with pytest.raises(RuntimeError, match="Handler OptimizerParamsHandler works only with PolyaxonLogger"):
handler(mock_engine, mock_logger, Events.ITERATION_STARTED)
def test_optimizer_params():
optimizer = torch.optim.SGD([torch.Tensor(0)], lr=0.01)
wrapper = OptimizerParamsHandler(optimizer=optimizer, param_name="lr")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
mock_engine = MagicMock()
mock_engine.state = State()
mock_engine.state.iteration = 123
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(**{"lr/group_0": 0.01, "step": 123})
wrapper = OptimizerParamsHandler(optimizer, param_name="lr", tag="generator")
mock_logger = MagicMock(spec=PolyaxonLogger)
mock_logger.log_metrics = MagicMock()
wrapper(mock_engine, mock_logger, Events.ITERATION_STARTED)
mock_logger.log_metrics.assert_called_once_with(**{"generator/lr/group_0": 0.01, "step": 123})
def test_integration():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
trainer = Engine(update_fn)
plx_logger = PolyaxonLogger()
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.log_metrics(step=global_step, **{"{}".format("test_value"): global_step})
plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
def test_integration_as_context_manager():
n_epochs = 5
data = list(range(50))
losses = torch.rand(n_epochs * len(data))
losses_iter = iter(losses)
def update_fn(engine, batch):
return next(losses_iter)
with PolyaxonLogger() as plx_logger:
trainer = Engine(update_fn)
def dummy_handler(engine, logger, event_name):
global_step = engine.state.get_event_attrib_value(event_name)
logger.log_metrics(step=global_step, **{"{}".format("test_value"): global_step})
plx_logger.attach(trainer, log_handler=dummy_handler, event_name=Events.EPOCH_COMPLETED)
trainer.run(data, max_epochs=n_epochs)
@pytest.fixture
def no_site_packages():
import sys
polyaxon_client_modules = {}
for k in sys.modules:
if "polyaxon" in k:
polyaxon_client_modules[k] = sys.modules[k]
for k in polyaxon_client_modules:
del sys.modules[k]
prev_path = list(sys.path)
sys.path = [p for p in sys.path if "site-packages" not in p]
yield "no_site_packages"
sys.path = prev_path
for k in polyaxon_client_modules:
sys.modules[k] = polyaxon_client_modules[k]
def test_no_polyaxon_client(no_site_packages):
with pytest.raises(RuntimeError, match=r"This contrib module requires polyaxon-client to be installed"):
PolyaxonLogger()
| [
"torch.Tensor",
"torch.tensor"
] | 1.3 | nzare/ignite | b53c6aeef87754b3cd3638c91172b386dc73af12 |
1.3 | from pathlib import Path
from datetime import datetime
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import ignite
import ignite.distributed as idist
from ignite.engine import Events, Engine, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss
from ignite.handlers import Checkpoint, DiskSaver
from ignite.utils import manual_seed, setup_logger
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
import utils
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="CIFAR10-Training", distributed_rank=local_rank)
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = "stop-on-{}".format(config["stop_iteration"])
folder_name = "{}_backend-{}-{}_{}".format(config["model"], idist.backend(), idist.get_world_size(), now)
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info("Output path: {}".format(config["output_path"]))
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_trains"]:
from trains import Task
task = Task.init("CIFAR10-Training", task_name=output_path.stem)
task.connect_configuration(config)
# Log hyper parameters
hyper_params = [
"model",
"batch_size",
"momentum",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
# Setup dataflow, model, optimizer, criterion
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Create trainer for current task
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
}
# We define two evaluators as they won't have exactly the same role:
# - `train_evaluator` computes metrics on the training set (for logging only)
# - `evaluator` computes metrics on the test set and is used to save the best models by validation score
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
if rank == 0:
# Setup TensorBoard logging on trainer and evaluators. Logged values are:
# - Training metrics, e.g. running average loss values
# - Learning rate
# - Evaluation train/test metrics
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
# Store 3 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models={"model": model},
metric_name="accuracy",
n_saved=3,
trainer=trainer,
tag="test",
)
# In order to check training resuming we can stop training on a given iteration
if config["stop_iteration"] is not None:
@trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
def _():
logger.info("Stop training on {} iteration".format(trainer.state.iteration))
trainer.terminate()
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
import traceback
print(traceback.format_exc())
if rank == 0:
tb_logger.close()
def run(
seed=543,
data_path="/tmp/cifar10",
output_path="/tmp/output-cifar10/",
model="resnet18",
batch_size=512,
momentum=0.9,
weight_decay=1e-4,
num_workers=12,
num_epochs=24,
learning_rate=0.4,
num_warmup_epochs=4,
validate_every=3,
checkpoint_every=200,
backend=None,
resume_from=None,
log_every_iters=15,
nproc_per_node=None,
stop_iteration=None,
with_trains=False,
**spawn_kwargs
):
"""Main entry to train an model on CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10".
model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful
when the main python process spawns training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
with_trains (bool): if True, experiment Trains logger is setup. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
def get_dataflow(config):
# - Get train/test datasets
if idist.get_rank() > 0:
# Ensure that only rank 0 download the dataset
idist.barrier()
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
if idist.get_rank() == 0:
# Ensure that only rank 0 download the dataset
idist.barrier()
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
)
return train_loader, test_loader
def initialize(config):
model = utils.get_model(config["model"])
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
optimizer = optim.SGD(
model.parameters(),
lr=config["learning_rate"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
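# Worked example (illustrative numbers): with num_iters_per_epoch=98, num_warmup_epochs=4,
# num_epochs=24 and learning_rate=0.4, the PiecewiseLinear schedule ramps the lr linearly
# from 0.0 to 0.4 over the first 98*4=392 iterations and then decays it linearly back to
# 0.0 by iteration 98*24=2352.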
def log_metrics(logger, epoch, elapsed, tag, metrics):
logger.info(
"\nEpoch {} - elapsed: {} - {} metrics:\n {}".format(
epoch, elapsed, tag, "\n".join(["\t{}: {}".format(k, v) for k, v in metrics.items()])
)
)
def log_basic_info(logger, config):
logger.info("Train {} on CIFAR10".format(config["model"]))
logger.info("- PyTorch version: {}".format(torch.__version__))
logger.info("- Ignite version: {}".format(ignite.__version__))
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info("\t{}: {}".format(key, value))
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info("\tbackend: {}".format(idist.backend()))
logger.info("\tworld size: {}".format(idist.get_world_size()))
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# Setup Ignite trainer:
# - let's define training step
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
def train_step(engine, batch):
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
model.train()
# Supervised part
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# This can be helpful for XLA to avoid a performance slow-down from fetching loss.item() on every iteration
if config["log_every_iters"] > 0 and (engine.state.iteration - 1) % config["log_every_iters"] == 0:
batch_loss = loss.item()
engine.state.saved_batch_loss = batch_loss
else:
batch_loss = engine.state.saved_batch_loss
return {
"batch loss": batch_loss,
}
trainer = Engine(train_step)
trainer.state.saved_batch_loss = -1.0
trainer.state_dict_user_keys.append("saved_batch_loss")
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names if config["log_every_iters"] > 0 else None,
with_pbars=False,
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), "Checkpoint '{}' is not found".format(checkpoint_fp.as_posix())
logger.info("Resume from a checkpoint: {}".format(checkpoint_fp.as_posix()))
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def get_save_handler(config):
if config["with_trains"]:
from ignite.contrib.handlers.trains_logger import TrainsSaver
return TrainsSaver(dirname=config["output_path"])
return DiskSaver(config["output_path"], require_empty=False)
if __name__ == "__main__":
fire.Fire({"run": run})
| [
"torch.cuda.get_device_name",
"torch.nn.CrossEntropyLoss"
] | 1.3 | nzare/ignite | 002b595daa8a8345286c5e096c33e278948686a7 |
1.3 | import os
import pytest
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping
def do_nothing_update_fn(engine, batch):
pass
def test_args_validation():
trainer = Engine(do_nothing_update_fn)
with pytest.raises(ValueError, match=r"Argument patience should be positive integer."):
EarlyStopping(patience=-1, score_function=lambda engine: 0, trainer=trainer)
with pytest.raises(ValueError, match=r"Argument min_delta should not be a negative number."):
EarlyStopping(patience=2, min_delta=-0.1, score_function=lambda engine: 0, trainer=trainer)
with pytest.raises(TypeError, match=r"Argument score_function should be a function."):
EarlyStopping(patience=2, score_function=12345, trainer=trainer)
with pytest.raises(TypeError, match=r"Argument trainer should be an instance of Engine."):
EarlyStopping(patience=2, score_function=lambda engine: 0, trainer=None)
def test_simple_early_stopping():
scores = iter([1.0, 0.8, 0.88])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if stopped
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert trainer.should_terminate
def test_state_dict():
scores = iter([1.0, 0.8, 0.88])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if stopped
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
# Swap to new object, but maintain state
h2 = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
h2.load_state_dict(h.state_dict())
h2(None)
assert not trainer.should_terminate
h2(None)
assert trainer.should_terminate
def test_early_stopping_on_delta():
scores = iter([1.0, 2.0, 2.01, 3.0, 3.01, 3.02])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, min_delta=0.1, score_function=lambda _: next(scores), trainer=trainer)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 1.0; counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.99; counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.01; counter == 2
assert trainer.should_terminate
def test_early_stopping_on_last_event_delta():
scores = iter([0.0, 0.3, 0.6])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(
patience=2, min_delta=0.4, cumulative_delta=False, score_function=lambda _: next(scores), trainer=trainer
)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 2
assert trainer.should_terminate
def test_early_stopping_on_cumulative_delta():
scores = iter([0.0, 0.3, 0.6])
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(
patience=2, min_delta=0.4, cumulative_delta=True, score_function=lambda _: next(scores), trainer=trainer
)
assert not trainer.should_terminate
h(None) # counter == 0
assert not trainer.should_terminate
h(None) # delta == 0.3; counter == 1
assert not trainer.should_terminate
h(None) # delta == 0.6; counter == 0
assert not trainer.should_terminate
def test_simple_early_stopping_on_plateau():
def score_function(engine):
return 42
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=1, score_function=score_function, trainer=trainer)
# Call 2 times and check if stopped
assert not trainer.should_terminate
h(None)
assert not trainer.should_terminate
h(None)
assert trainer.should_terminate
def test_simple_no_early_stopping():
scores = iter([1.0, 0.8, 1.2])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
h = EarlyStopping(patience=2, score_function=score_function, trainer=trainer)
# Call 3 times and check if not stopped
assert not trainer.should_terminate
h(None)
h(None)
h(None)
assert not trainer.should_terminate
def test_with_engine_early_stopping():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = iter([1.0, 0.8, 1.2, 1.5, 0.9, 1.0, 0.99, 1.1, 0.9])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 7
assert trainer.state.epoch == 7
def test_with_engine_early_stopping_on_plateau():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
def score_function(engine):
return 0.047
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=4, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 5
assert trainer.state.epoch == 5
def test_with_engine_no_early_stopping():
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = iter([1.0, 0.8, 1.2, 1.23, 0.9, 1.0, 1.1, 1.253, 1.26, 1.2])
def score_function(engine):
return next(scores)
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=5, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert n_epochs_counter.count == 10
assert trainer.state.epoch == 10
def _test_distrib_with_engine_early_stopping(device):
import torch.distributed as dist
torch.manual_seed(12)
class Counter(object):
def __init__(self, count=0):
self.count = count
n_epochs_counter = Counter()
scores = torch.tensor([1.0, 0.8, 1.2, 1.5, 0.9, 1.0, 0.99, 1.1, 0.9], requires_grad=False).to(device)
def score_function(engine):
i = trainer.state.epoch - 1
v = scores[i]
dist.all_reduce(v)
v /= dist.get_world_size()
return v.item()
trainer = Engine(do_nothing_update_fn)
evaluator = Engine(do_nothing_update_fn)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
evaluator.run([0])
n_epochs_counter.count += 1
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert trainer.state.epoch == 7
assert n_epochs_counter.count == 7
def _test_distrib_integration_engine_early_stopping(device):
import torch.distributed as dist
from ignite.metrics import Accuracy
rank = dist.get_rank()
ws = dist.get_world_size()
torch.manual_seed(12)
n_epochs = 10
n_iters = 20
y_preds = (
[torch.randint(0, 2, size=(n_iters, ws)).to(device)]
+ [torch.ones(n_iters, ws).to(device)]
+ [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)]
)
y_true = (
[torch.randint(0, 2, size=(n_iters, ws)).to(device)]
+ [torch.ones(n_iters, ws).to(device)]
+ [torch.randint(0, 2, size=(n_iters, ws)).to(device) for _ in range(n_epochs - 2)]
)
def update(engine, _):
e = trainer.state.epoch - 1
i = engine.state.iteration - 1
return y_preds[e][i, rank], y_true[e][i, rank]
evaluator = Engine(update)
acc = Accuracy(device=device)
acc.attach(evaluator, "acc")
def score_function(engine):
return engine.state.metrics["acc"]
trainer = Engine(lambda e, b: None)
early_stopping = EarlyStopping(patience=3, score_function=score_function, trainer=trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def evaluation(engine):
data = list(range(n_iters))
evaluator.run(data=data)
evaluator.add_event_handler(Events.COMPLETED, early_stopping)
trainer.run([0], max_epochs=10)
assert trainer.state.epoch == 5
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_gpu(local_rank, distributed_context_single_node_nccl):
device = "cuda:{}".format(local_rank)
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_cpu(local_rank, distributed_context_single_node_gloo):
device = "cpu"
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_cpu(distributed_context_multi_node_gloo):
device = "cpu"
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gpu(distributed_context_multi_node_nccl):
device = "cuda:{}".format(distributed_context_multi_node_nccl["local_rank"])
_test_distrib_with_engine_early_stopping(device)
_test_distrib_integration_engine_early_stopping(device)
| [
"torch.distributed.get_world_size",
"torch.cuda.device_count",
"torch.manual_seed",
"torch.ones",
"torch.randint",
"torch.tensor",
"torch.distributed.all_reduce",
"torch.distributed.get_rank"
] | 1.3 | nzare/ignite | b53c6aeef87754b3cd3638c91172b386dc73af12 |
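A minimal sketch of how the EarlyStopping handler exercised above is typically wired into a trainer/evaluator pair; the "acc" metric name is only an assumption here and stands for whatever metric the evaluator attaches.

from ignite.engine import Events
from ignite.handlers import EarlyStopping

def attach_early_stopping(trainer, evaluator, patience=3):
    # Higher score is better; the trainer is terminated after `patience`
    # evaluations without an improvement larger than min_delta (default 0).
    def score_function(engine):
        return engine.state.metrics["acc"]  # assumes a metric named "acc" is attached

    handler = EarlyStopping(patience=patience, score_function=score_function, trainer=trainer)
    evaluator.add_event_handler(Events.COMPLETED, handler)
    return handler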
1.6 |
from __future__ import absolute_import
import sys
import numpy as np
import torch
from torch import nn
import os
from collections import OrderedDict
from torch.autograd import Variable
import itertools
from .base_model import BaseModel
from scipy.ndimage import zoom
import fractions
import functools
import skimage.transform
from tqdm import tqdm
from . import networks_basic as networks
from . import perceptual_loss
# NOTE: this module also references `util` (tensor2im, voc_ap) further below; that helper
# module is assumed to be provided by the surrounding package and is not imported here.
class DistModel(BaseModel):
def name(self):
return self.model_name
def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None,
use_gpu=True, printNet=False, spatial=False,
is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]):
'''
INPUTS
model - ['net-lin'] for linearly calibrated network
['net'] for off-the-shelf network
['L2'] for L2 distance in Lab colorspace
['SSIM'] for ssim in RGB colorspace
net - ['squeeze','alex','vgg']
model_path - if None, will look in weights/[NET_NAME].pth
colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM
use_gpu - bool - whether or not to use a GPU
printNet - bool - whether or not to print network architecture out
spatial - bool - whether to output an array containing varying distances across spatial dimensions
is_train - bool - [True] for training mode
lr - float - initial learning rate
beta1 - float - initial momentum term for adam
version - 0.1 for latest, 0.0 was original (with a bug)
gpu_ids - int array - [0] by default, gpus to use
'''
BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)
self.model = model
self.net = net
self.is_train = is_train
self.spatial = spatial
self.gpu_ids = gpu_ids
self.model_name = '%s [%s]'%(model,net)
if(self.model == 'net-lin'): # pretrained net + linear layer
self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net,
use_dropout=True, spatial=spatial, version=version, lpips=True)
kw = {}
if not use_gpu:
kw['map_location'] = 'cpu'
if(model_path is None):
import inspect
model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth'%(version,net)))
if(not is_train):
print('Loading model from: %s'%model_path)
self.net.load_state_dict(torch.load(model_path, **kw), strict=False)
elif(self.model=='net'): # pretrained network
self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
elif(self.model in ['L2','l2']):
self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing
self.model_name = 'L2'
elif(self.model in ['DSSIM','dssim','SSIM','ssim']):
self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace)
self.model_name = 'SSIM'
else:
raise ValueError("Model [%s] not recognized." % self.model)
self.parameters = list(self.net.parameters())
if self.is_train: # training mode
# extra network on top to go from distances (d0,d1) => predicted human judgment (h*)
self.rankLoss = networks.BCERankingLoss()
self.parameters += list(self.rankLoss.net.parameters())
self.lr = lr
self.old_lr = lr
self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
else: # test mode
self.net.eval()
if(use_gpu):
self.net.to(gpu_ids[0])
self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
if(self.is_train):
self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0
if(printNet):
print('---------- Networks initialized -------------')
networks.print_network(self.net)
print('-----------------------------------------------')
def forward(self, in0, in1, retPerLayer=False):
''' Function computes the distance between image patches in0 and in1
INPUTS
in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1]
OUTPUT
computed distances between in0 and in1
'''
return self.net.forward(in0, in1, retPerLayer=retPerLayer)
# ***** TRAINING FUNCTIONS *****
def optimize_parameters(self):
self.forward_train()
self.optimizer_net.zero_grad()
self.backward_train()
self.optimizer_net.step()
self.clamp_weights()
def clamp_weights(self):
for module in self.net.modules():
if(hasattr(module, 'weight') and module.kernel_size==(1,1)):
module.weight.data = torch.clamp(module.weight.data,min=0)
def set_input(self, data):
self.input_ref = data['ref']
self.input_p0 = data['p0']
self.input_p1 = data['p1']
self.input_judge = data['judge']
if(self.use_gpu):
self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
self.var_ref = Variable(self.input_ref,requires_grad=True)
self.var_p0 = Variable(self.input_p0,requires_grad=True)
self.var_p1 = Variable(self.input_p1,requires_grad=True)
def forward_train(self): # run forward pass
# print(self.net.module.scaling_layer.shift)
# print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item())
self.d0 = self.forward(self.var_ref, self.var_p0)
self.d1 = self.forward(self.var_ref, self.var_p1)
self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge)
self.var_judge = Variable(1.*self.input_judge).view(self.d0.size())
self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.)
return self.loss_total
def backward_train(self):
torch.mean(self.loss_total).backward()
def compute_accuracy(self,d0,d1,judge):
''' d0, d1 are Variables, judge is a Tensor '''
d1_lt_d0 = (d1<d0).cpu().data.numpy().flatten()
judge_per = judge.cpu().numpy().flatten()
return d1_lt_d0*judge_per + (1-d1_lt_d0)*(1-judge_per)
def get_current_errors(self):
retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
('acc_r', self.acc_r)])
for key in retDict.keys():
retDict[key] = np.mean(retDict[key])
return retDict
def get_current_visuals(self):
zoom_factor = 256/self.var_ref.data.size()[2]
ref_img = util.tensor2im(self.var_ref.data)
p0_img = util.tensor2im(self.var_p0.data)
p1_img = util.tensor2im(self.var_p1.data)
ref_img_vis = zoom(ref_img,[zoom_factor, zoom_factor, 1],order=0)
p0_img_vis = zoom(p0_img,[zoom_factor, zoom_factor, 1],order=0)
p1_img_vis = zoom(p1_img,[zoom_factor, zoom_factor, 1],order=0)
return OrderedDict([('ref', ref_img_vis),
('p0', p0_img_vis),
('p1', p1_img_vis)])
def save(self, path, label):
if(self.use_gpu):
self.save_network(self.net.module, path, '', label)
else:
self.save_network(self.net, path, '', label)
self.save_network(self.rankLoss.net, path, 'rank', label)
def update_learning_rate(self,nepoch_decay):
lrd = self.lr / nepoch_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_net.param_groups:
param_group['lr'] = lr
print('update lr [%s] decay: %f -> %f' % (self.model_name, self.old_lr, lr))
self.old_lr = lr
def score_2afc_dataset(data_loader, func, name=''):
''' Function computes Two Alternative Forced Choice (2AFC) score using
distance function 'func' in dataset 'data_loader'
INPUTS
data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside
func - callable distance function - calling d=func(in0,in1) should take 2
pytorch tensors with shape Nx3xXxY, and return numpy array of length N
OUTPUTS
[0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators
[1] - dictionary with following elements
d0s,d1s - N arrays containing distances between reference patch to perturbed patches
gts - N array in [0,1], preferred patch selected by human evaluators
(closer to "0" for left patch p0, "1" for right patch p1,
"0.6" means 60pct people preferred right patch, 40pct preferred left)
scores - N array in [0,1], corresponding to what percentage function agreed with humans
CONSTS
N - number of test triplets in data_loader
'''
d0s = []
d1s = []
gts = []
for data in tqdm(data_loader.load_data(), desc=name):
d0s+=func(data['ref'],data['p0']).data.cpu().numpy().flatten().tolist()
d1s+=func(data['ref'],data['p1']).data.cpu().numpy().flatten().tolist()
gts+=data['judge'].cpu().numpy().flatten().tolist()
d0s = np.array(d0s)
d1s = np.array(d1s)
gts = np.array(gts)
scores = (d0s<d1s)*(1.-gts) + (d1s<d0s)*gts + (d1s==d0s)*.5
return(np.mean(scores), dict(d0s=d0s,d1s=d1s,gts=gts,scores=scores))
def score_jnd_dataset(data_loader, func, name=''):
''' Function computes JND score using distance function 'func' in dataset 'data_loader'
INPUTS
data_loader - CustomDatasetDataLoader object - contains a JNDDataset inside
func - callable distance function - calling d=func(in0,in1) should take 2
pytorch tensors with shape Nx3xXxY, and return pytorch array of length N
OUTPUTS
[0] - JND score in [0,1], mAP score (area under precision-recall curve)
[1] - dictionary with following elements
ds - N array containing distances between two patches shown to human evaluator
sames - N array containing fraction of people who thought the two patches were identical
CONSTS
N - number of test triplets in data_loader
'''
ds = []
gts = []
for data in tqdm(data_loader.load_data(), desc=name):
ds+=func(data['p0'],data['p1']).data.cpu().numpy().tolist()
gts+=data['same'].cpu().numpy().flatten().tolist()
sames = np.array(gts)
ds = np.array(ds)
sorted_inds = np.argsort(ds)
ds_sorted = ds[sorted_inds]
sames_sorted = sames[sorted_inds]
TPs = np.cumsum(sames_sorted)
FPs = np.cumsum(1-sames_sorted)
FNs = np.sum(sames_sorted)-TPs
precs = TPs/(TPs+FPs)
recs = TPs/(TPs+FNs)
score = util.voc_ap(recs,precs)
return(score, dict(ds=ds,sames=sames))
| [
"torch.autograd.Variable",
"torch.optim.Adam",
"torch.clamp",
"torch.load",
"torch.mean",
"torch.nn.DataParallel"
] | 1.6.0 | markveillette/high-fidelity-generative-compression | d88b4d7f1212efa8611e91737ff6bf00bbf36670 |
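A small worked example of the 2AFC scoring rule used in score_2afc_dataset above, with made-up distances: the metric "agrees" with the fraction of people who preferred the patch it rates as closer, and exact ties count as 0.5.

import numpy as np

# Distances from the reference to p0 and p1 for three triplets (made-up numbers).
d0s = np.array([0.2, 0.5, 0.3])
d1s = np.array([0.4, 0.1, 0.3])
# Fraction of evaluators who preferred p1 (1.0 = everyone chose the right patch).
gts = np.array([0.0, 1.0, 0.6])

scores = (d0s < d1s) * (1.0 - gts) + (d1s < d0s) * gts + (d1s == d0s) * 0.5
print(scores)         # [1.  1.  0.5]
print(scores.mean())  # 2AFC score ~0.83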
1.6 | import abc
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# Custom
from src.helpers import maths
MIN_SCALE = 0.11
MIN_LIKELIHOOD = 1e-9
MAX_LIKELIHOOD = 1e4
TAIL_MASS = 2**(-9)
PRECISION_P = 16 # Precision of rANS coder
# TODO: Unit tests
lower_bound_toward = maths.LowerBoundToward.apply
class ContinuousEntropyModel(nn.Module, metaclass=abc.ABCMeta):
"""
Base class for pre-computation of integer probability tables for use in entropy coding.
"""
def __init__(self, distribution, likelihood_bound=MIN_LIKELIHOOD, tail_mass=TAIL_MASS,
precision=PRECISION_P):
"""
The layer assumes that the input tensor is at least 2D, with a batch dimension
at the beginning and a channel dimension, specified by subclassing this layer.
The layer trains an independent probability density model for each 'channel',
but assumes that across all other dimensions, the inputs are i.i.d. (independent
and identically distributed).
Parameters:
distribution: Distribution with CDF / quantile / likelihood methods
Note:
The batch dimensions are indexes into independent, non-identical parameterizations
of this distribution - [B, n_channels], where B usually = 1.
(Dimensions which are not assumed i.i.d.)
"""
super(ContinuousEntropyModel, self).__init__()
self.distribution = distribution
self.likelihood_bound = float(likelihood_bound)
self.tail_mass = float(tail_mass)
self.precision = int(precision)
def quantize_st(self, inputs, offsets=None):
# Ignore rounding in backward pass
values = inputs
if offsets is not None:
offsets = offsets.to(values)
values = values - offsets
delta = (torch.floor(values + 0.5) - values).detach()
values = values + delta
if offsets is not None:
values = values + offsets
return values
def dequantize(self, x, offsets=None):
if offsets is not None:
values = x.type_as(offsets)
values = values + offsets
else:
values = x.to(torch.float32)
return values
@abc.abstractmethod
def build_tables(self, **kwargs):
pass
if __name__ == '__main__':
print('Hi!')
| [
"torch.floor"
] | 1.6.0 | markveillette/high-fidelity-generative-compression | d88b4d7f1212efa8611e91737ff6bf00bbf36670 |
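A short sketch of the straight-through rounding used by quantize_st above: the forward value is rounded to the nearest integer, while the gradient passes through as if no rounding had happened.

import torch

x = torch.tensor([0.2, 1.7, -0.6], requires_grad=True)
delta = (torch.floor(x + 0.5) - x).detach()  # rounding residual, detached from the graph
y = x + delta                                # forward: rounded values
y.sum().backward()
print(y)       # values 0., 2., -1.
print(x.grad)  # tensor([1., 1., 1.]) -- identity gradient despite the rounding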
1.0 | import torch
import torch.nn as nn
class LayerNorm(nn.Module):
"""
Layer Normalization.
https://arxiv.org/abs/1607.06450
"""
def __init__(self, hidden_size, eps=1e-6):
super(LayerNorm, self).__init__()
self.eps = eps
self.gamma = nn.Parameter(torch.ones(hidden_size))
self.beta = nn.Parameter(torch.zeros(hidden_size))
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
hidden_states = self.gamma * (x-mean) / (std + self.eps)
return hidden_states + self.beta
class T5LayerNorm(nn.Module):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
def __init__(self, hidden_size, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.type_as(self.weight)
| [
"torch.zeros",
"torch.rsqrt",
"torch.ones"
] | 1.0 | krevas/ET-BERT | 464ce3e7942d4450f55021e267ceb9dd48a36b1f |
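A quick comparison sketch of the LayerNorm above against torch.nn.LayerNorm; the two normalize slightly differently (unbiased std plus eps in the denominator here, versus sqrt of biased variance plus eps in PyTorch), so outputs are similar but not identical.

import torch
import torch.nn as nn

hidden = 8
x = torch.randn(2, 5, hidden)

custom = LayerNorm(hidden)                  # the module defined above
reference = nn.LayerNorm(hidden, eps=1e-6)  # PyTorch's built-in layer norm

out_custom = custom(x)
out_ref = reference(x)
# Difference comes from unbiased std + eps vs sqrt(biased var + eps).
print((out_custom - out_ref).abs().max())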
1.5 | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Multi-objective optimization benchmark problems.
References
.. [Deb2005dtlz]
K. Deb, L. Thiele, M. Laumanns, E. Zitzler, A. Abraham, L. Jain, R. Goldberg.
"Scalable test problems for evolutionary multi-objective optimization"
in Evolutionary Multiobjective Optimization, London, U.K.: Springer-Verlag,
pp. 105-145, 2005.
.. [GarridoMerchan2020]
E. C. Garrido-Merchán and D. Hernández-Lobato. Parallel Predictive Entropy
Search for Multi-objective Bayesian Optimization with Constraints.
arXiv e-prints, arXiv:2004.00601, Apr. 2020.
.. [Gelbart2014]
Michael A. Gelbart, Jasper Snoek, and Ryan P. Adams. 2014. Bayesian
optimization with unknown constraints. In Proceedings of the Thirtieth
Conference on Uncertainty in Artificial Intelligence (UAI’14).
AUAI Press, Arlington, Virginia, USA, 250–259.
.. [Oszycka1995]
A. Osyczka, S. Kundu. 1995. A new method to solve generalized multicriteria
optimization problems using the simple genetic algorithm. In Structural
Optimization 10. 94–99.
.. [Tanabe2020]
Ryoji Tanabe, Hisao Ishibuchi, An easy-to-use real-world multi-objective
optimization problem suite, Applied Soft Computing,Volume 89, 2020.
.. [Yang2019a]
K. Yang, M. Emmerich, A. Deutz, and T. Bäck. 2019.
"Multi-Objective Bayesian Global Optimization using expected hypervolume
improvement gradient" in Swarm and evolutionary computation 44, pp. 945--956,
2019.
.. [Zitzler2000]
E. Zitzler, K. Deb, and L. Thiele, “Comparison of multiobjective
evolutionary algorithms: Empirical results,” Evol. Comput., vol. 8, no. 2,
pp. 173–195, 2000.
"""
from __future__ import annotations
import math
from typing import Optional
import torch
from botorch.test_functions.base import (
ConstrainedBaseTestProblem,
MultiObjectiveTestProblem,
)
from botorch.test_functions.synthetic import Branin
from botorch.utils.sampling import sample_hypersphere, sample_simplex
from botorch.utils.transforms import unnormalize
from scipy.special import gamma
from torch import Tensor
class BraninCurrin(MultiObjectiveTestProblem):
r"""Two objective problem composed of the Branin and Currin functions.
Branin (rescaled):
f(x) = (
15*x_1 - 5.1 * (15 * x_0 - 5) ** 2 / (4 * pi ** 2) + 5 * (15 * x_0 - 5)
/ pi - 5
) ** 2 + (10 - 10 / (8 * pi)) * cos(15 * x_0 - 5)
Currin:
f(x) = (1 - exp(-1 / (2 * x_1))) * (
2300 * x_0 ** 3 + 1900 * x_0 ** 2 + 2092 * x_0 + 60
) / (100 * x_0 ** 3 + 500 * x_0 ** 2 + 4 * x_0 + 20)
"""
dim = 2
num_objectives = 2
_bounds = [(0.0, 1.0), (0.0, 1.0)]
_ref_point = [18.0, 6.0]
_max_hv = 59.36011874867746 # this is approximated using NSGA-II
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
r"""Constructor for Branin-Currin.
Args:
noise_std: Standard deviation of the observation noise.
negate: If True, negate the objectives.
"""
super().__init__(noise_std=noise_std, negate=negate)
self._branin = Branin()
def _rescaled_branin(self, X: Tensor) -> Tensor:
# return to Branin bounds
x_0 = 15 * X[..., 0] - 5
x_1 = 15 * X[..., 1]
return self._branin(torch.stack([x_0, x_1], dim=-1))
@staticmethod
def _currin(X: Tensor) -> Tensor:
x_0 = X[..., 0]
x_1 = X[..., 1]
factor1 = 1 - torch.exp(-1 / (2 * x_1))
numer = 2300 * x_0.pow(3) + 1900 * x_0.pow(2) + 2092 * x_0 + 60
denom = 100 * x_0.pow(3) + 500 * x_0.pow(2) + 4 * x_0 + 20
return factor1 * numer / denom
def evaluate_true(self, X: Tensor) -> Tensor:
# branin rescaled with inputs to [0,1]^2
branin = self._rescaled_branin(X=X)
currin = self._currin(X=X)
return torch.stack([branin, currin], dim=-1)
class DTLZ(MultiObjectiveTestProblem):
r"""Base class for DTLZ problems.
See [Deb2005dtlz]_ for more details on DTLZ.
"""
def __init__(
self,
dim: int,
num_objectives: int = 2,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
if dim <= num_objectives:
raise ValueError(
f"dim must be > num_objectives, but got {dim} and {num_objectives}"
)
self.num_objectives = num_objectives
self.dim = dim
self.k = self.dim - self.num_objectives + 1
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
self._ref_point = [self._ref_val for _ in range(num_objectives)]
super().__init__(noise_std=noise_std, negate=negate)
class DTLZ1(DTLZ):
r"""DLTZ1 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = 0.5 * x_0 * (1 + g(x))
f_1(x) = 0.5 * (1 - x_0) * (1 + g(x))
g(x) = 100 * (k + \sum_{i=m}^{n-1} (
(x_i - 0.5)^2 - cos(20 * pi * (x_i - 0.5))
))
where k = n - m + 1.
The pareto front is given by the line (or hyperplane) \sum_i f_i(x) = 0.5.
The goal is to minimize both objectives. The reference point comes from [Yang2019a]_.
"""
_ref_val = 400.0
@property
def _max_hv(self) -> float:
return self._ref_val ** self.num_objectives - 1 / 2 ** self.num_objectives
def evaluate_true(self, X: Tensor) -> Tensor:
X_m = X[..., -self.k :]
X_m_minus_half = X_m - 0.5
sum_term = (
X_m_minus_half.pow(2) - torch.cos(20 * math.pi * X_m_minus_half)
).sum(dim=-1)
g_X_m = 100 * (self.k + sum_term)
g_X_m_term = 0.5 * (1 + g_X_m)
fs = []
for i in range(self.num_objectives):
idx = self.num_objectives - 1 - i
f_i = g_X_m_term * X[..., :idx].prod(dim=-1)
if i > 0:
f_i *= 1 - X[..., idx]
fs.append(f_i)
return torch.stack(fs, dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
r"""Generate `n` pareto optimal points.
The pareto points are randomly sampled from the hyperplane \sum_i f_i(x) = 0.5.
"""
f_X = 0.5 * sample_simplex(
n=n,
d=self.num_objectives,
qmc=True,
dtype=self.ref_point.dtype,
device=self.ref_point.device,
)
if self.negate:
f_X *= -1
return f_X
class DTLZ2(DTLZ):
r"""DLTZ2 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = (1 + g(x)) * cos(x_0 * pi / 2)
f_1(x) = (1 + g(x)) * sin(x_0 * pi / 2)
g(x) = \sum_{i=m}^{n-1} (x_i - 0.5)^2
The pareto front is given by the unit hypersphere \sum{i} f_i^2 = 1.
Note: the pareto front is completely concave. The goal is to minimize
both objectives.
"""
_ref_val = 1.1
@property
def _max_hv(self) -> float:
# hypercube - volume of hypersphere in R^n such that all coordinates are
# positive
hypercube_vol = self._ref_val ** self.num_objectives
pos_hypersphere_vol = (
math.pi ** (self.num_objectives / 2)
/ gamma(self.num_objectives / 2 + 1)
/ 2 ** self.num_objectives
)
return hypercube_vol - pos_hypersphere_vol
def evaluate_true(self, X: Tensor) -> Tensor:
X_m = X[..., -self.k :]
g_X = (X_m - 0.5).pow(2).sum(dim=-1)
g_X_plus1 = 1 + g_X
fs = []
pi_over_2 = math.pi / 2
for i in range(self.num_objectives):
idx = self.num_objectives - 1 - i
f_i = g_X_plus1.clone()
f_i *= torch.cos(X[..., :idx] * pi_over_2).prod(dim=-1)
if i > 0:
f_i *= torch.sin(X[..., idx] * pi_over_2)
fs.append(f_i)
return torch.stack(fs, dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
r"""Generate `n` pareto optimal points.
The pareto points are randomly sampled from the hypersphere's
positive section.
"""
f_X = sample_hypersphere(
n=n,
d=self.num_objectives,
dtype=self.ref_point.dtype,
device=self.ref_point.device,
qmc=True,
).abs()
if self.negate:
f_X *= -1
return f_X
class VehicleSafety(MultiObjectiveTestProblem):
r"""Optimize Vehicle crash-worthiness.
See [Tanabe2020]_ for details.
The reference point is 1.1 * the nadir point from
approximate front provided by [Tanabe2020]_.
The maximum hypervolume is computed using the approximate
pareto front from [Tanabe2020]_.
"""
_ref_point = [1864.72022, 11.81993945, 0.2903999384]
_max_hv = 246.81607081187002
_bounds = [(1.0, 3.0)] * 5
dim = 5
num_objectives = 3
def evaluate_true(self, X: Tensor) -> Tensor:
X1, X2, X3, X4, X5 = torch.split(X, 1, -1)
f1 = (
1640.2823
+ 2.3573285 * X1
+ 2.3220035 * X2
+ 4.5688768 * X3
+ 7.7213633 * X4
+ 4.4559504 * X5
)
f2 = (
6.5856
+ 1.15 * X1
- 1.0427 * X2
+ 0.9738 * X3
+ 0.8364 * X4
- 0.3695 * X1 * X4
+ 0.0861 * X1 * X5
+ 0.3628 * X2 * X4
- 0.1106 * X1.pow(2)
- 0.3437 * X3.pow(2)
+ 0.1764 * X4.pow(2)
)
f3 = (
-0.0551
+ 0.0181 * X1
+ 0.1024 * X2
+ 0.0421 * X3
- 0.0073 * X1 * X2
+ 0.024 * X2 * X3
- 0.0118 * X2 * X4
- 0.0204 * X3 * X4
- 0.008 * X3 * X5
- 0.0241 * X2.pow(2)
+ 0.0109 * X4.pow(2)
)
f_X = torch.cat([f1, f2, f3], dim=-1)
return f_X
class ZDT(MultiObjectiveTestProblem):
r"""Base class for ZDT problems.
See [Zitzler2000]_ for more details on ZDT.
"""
_ref_point = [11.0, 11.0]
def __init__(
self,
dim: int,
num_objectives: int = 2,
noise_std: Optional[float] = None,
negate: bool = False,
) -> None:
if num_objectives != 2:
raise NotImplementedError(
f"{type(self).__name__} currently only supports 2 objectives."
)
if dim < num_objectives:
raise ValueError(
f"dim must be >= num_objectives, but got {dim} and {num_objectives}"
)
self.num_objectives = num_objectives
self.dim = dim
self._bounds = [(0.0, 1.0) for _ in range(self.dim)]
super().__init__(noise_std=noise_std, negate=negate)
@staticmethod
def _g(X: Tensor) -> Tensor:
return 1 + 9 * X[..., 1:].mean(dim=-1)
class ZDT1(ZDT):
r"""ZDT1 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = g(x) * (1 - sqrt(x_0 / g(x)))
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front is convex.
"""
_max_hv = 120 + 2 / 3
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = g * (1 - (f_0 / g).sqrt())
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
f_0 = torch.linspace(
0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
)
f_1 = 1 - f_0.sqrt()
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
class ZDT2(ZDT):
r"""ZDT2 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = g(x) * (1 - (x_0 / g(x))^2)
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front is concave.
"""
_max_hv = 120 + 1 / 3
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = g * (1 - (f_0 / g).pow(2))
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
f_0 = torch.linspace(
0, 1, n, dtype=self.bounds.dtype, device=self.bounds.device
)
f_1 = 1 - f_0.pow(2)
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
class ZDT3(ZDT):
r"""ZDT3 test problem.
d-dimensional problem evaluated on `[0, 1]^d`:
f_0(x) = x_0
f_1(x) = 1 - sqrt(x_0 / g(x)) - x_0 / g * sin(10 * pi * x_0)
g(x) = 1 + 9 / (d - 1) * \sum_{i=1}^{d-1} x_i
The reference point comes from [Yang2019a]_.
The pareto front consists of several discontinuous convex parts.
"""
_max_hv = 128.77811613069076060
_parts = [
# this interval includes both end points
[0, 0.0830015349],
# this interval includes only the right end points
[0.1822287280, 0.2577623634],
[0.4093136748, 0.4538821041],
[0.6183967944, 0.6525117038],
[0.8233317983, 0.8518328654],
]
# nugget to make sure linspace returns elements within the specified range
_eps = 1e-6
def evaluate_true(self, X: Tensor) -> Tensor:
f_0 = X[..., 0]
g = self._g(X=X)
f_1 = 1 - (f_0 / g).sqrt() - f_0 / g * torch.sin(10 * math.pi * f_0)
return torch.stack([f_0, f_1], dim=-1)
def gen_pareto_front(self, n: int) -> Tensor:
n_parts = len(self._parts)
n_per_part = torch.full(
torch.Size([n_parts]),
n // n_parts,
dtype=torch.long,
device=self.bounds.device,
)
left_over = n % n_parts
n_per_part[:left_over] += 1
f_0s = []
for i, p in enumerate(self._parts):
left, right = p
f_0s.append(
torch.linspace(
left + self._eps,
right - self._eps,
n_per_part[i],
dtype=self.bounds.dtype,
device=self.bounds.device,
)
)
f_0 = torch.cat(f_0s, dim=0)
f_1 = 1 - f_0.sqrt() - f_0 * torch.sin(10 * math.pi * f_0)
f_X = torch.stack([f_0, f_1], dim=-1)
if self.negate:
f_X *= -1
return f_X
# ------ Constrained Multi-Objective Test Problems ----- #
class BNH(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained BNH problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(0.0, 5.0), (0.0, 3.0)]
_ref_point = [0.0, 0.0] # TODO: Determine proper reference point
def evaluate_true(self, X: Tensor) -> Tensor:
return torch.stack(
[4.0 * (X ** 2).sum(dim=-1), ((X - 5.0) ** 2).sum(dim=-1)], dim=-1
)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 25.0 - (X[..., 0] - 5.0) ** 2 - X[..., 1] ** 2
c2 = (X[..., 0] - 8.0) ** 2 + (X[..., 1] + 3.0) ** 2 - 7.7
return torch.stack([c1, c2], dim=-1)
class SRN(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained SRN problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(-20.0, 20.0), (-20.0, 20.0)]
_ref_point = [0.0, 0.0] # TODO: Determine proper reference point
def evaluate_true(self, X: Tensor) -> Tensor:
obj1 = 2.0 + ((X - 2.0) ** 2).sum(dim=-1)
obj2 = 9.0 * X[..., 0] - (X[..., 1] - 1.0) ** 2
return torch.stack([obj1, obj2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 225.0 - ((X ** 2) ** 2).sum(dim=-1)
c2 = -10.0 - X[..., 0] + 3 * X[..., 1]
return torch.stack([c1, c2], dim=-1)
class CONSTR(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""The constrained CONSTR problem.
See [GarridoMerchan2020]_ for more details on this problem. Note that this is a
minimization problem.
"""
dim = 2
num_objectives = 2
num_constraints = 2
_bounds = [(0.1, 10.0), (0.0, 5.0)]
_ref_point = [10.0, 10.0]
def evaluate_true(self, X: Tensor) -> Tensor:
obj1 = X[..., 0]
obj2 = (1.0 + X[..., 1]) / X[..., 0]
return torch.stack([obj1, obj2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
c1 = 9.0 * X[..., 0] + X[..., 1] - 6.0
c2 = 9.0 * X[..., 0] - X[..., 1] - 1.0
return torch.stack([c1, c2], dim=-1)
class ConstrainedBraninCurrin(BraninCurrin, ConstrainedBaseTestProblem):
r"""Constrained Branin Currin Function.
This uses the disk constraint from [Gelbart2014]_.
"""
dim = 2
num_objectives = 2
num_constraints = 1
_bounds = [(0.0, 1.0), (0.0, 1.0)]
_con_bounds = [(-5.0, 10.0), (0.0, 15.0)]
_ref_point = [80.0, 12.0]
_max_hv = 608.4004237022673 # from NSGA-II with 90k evaluations
def __init__(self, noise_std: Optional[float] = None, negate: bool = False) -> None:
super().__init__(noise_std=noise_std, negate=negate)
con_bounds = torch.tensor(self._con_bounds, dtype=torch.float).transpose(-1, -2)
self.register_buffer("con_bounds", con_bounds)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
X_tf = unnormalize(X, self.con_bounds)
return 50 - (X_tf[..., 0:1] - 2.5).pow(2) - (X_tf[..., 1:2] - 7.5).pow(2)
class C2DTLZ2(DTLZ2, ConstrainedBaseTestProblem):
num_constraints = 1
_r = 0.2
# approximate from nsga-ii, TODO: replace with analytic
_max_hv = 0.3996406303723544
def evaluate_slack_true(self, X: Tensor) -> Tensor:
if X.ndim > 2:
raise NotImplementedError("Batch X is not supported.")
f_X = self.evaluate_true(X)
term1 = (f_X - 1).pow(2)
mask = ~(torch.eye(f_X.shape[-1], device=f_X.device).bool())
indices = torch.arange(f_X.shape[1], device=f_X.device).repeat(f_X.shape[1], 1)
indexer = indices[mask].view(f_X.shape[1], f_X.shape[-1] - 1)
term2_inner = (
f_X.unsqueeze(1)
.expand(f_X.shape[0], f_X.shape[-1], f_X.shape[-1])
.gather(dim=-1, index=indexer.repeat(f_X.shape[0], 1, 1))
)
term2 = (term2_inner.pow(2) - self._r ** 2).sum(dim=-1)
min1 = (term1 + term2).min(dim=-1).values
min2 = ((f_X - 1 / math.sqrt(f_X.shape[-1])).pow(2) - self._r ** 2).sum(dim=-1)
return -torch.min(min1, min2).unsqueeze(-1)
class OSY(MultiObjectiveTestProblem, ConstrainedBaseTestProblem):
r"""
The OSY test problem from [Oszycka1995]_.
Implementation from
https://github.com/msu-coinlab/pymoo/blob/master/pymoo/problems/multi/osy.py
Note that this implementation assumes minimization, so please choose negate=True.
"""
dim = 6
num_constraints = 6
num_objectives = 2
_bounds = [
(0.0, 10.0),
(0.0, 10.0),
(1.0, 5.0),
(0.0, 6.0),
(1.0, 5.0),
(0.0, 10.0),
]
_ref_point = [-75.0, 75.0]
def evaluate_true(self, X: Tensor) -> Tensor:
f1 = -(
25 * (X[..., 0] - 2) ** 2
+ (X[..., 1] - 2) ** 2
+ (X[..., 2] - 1) ** 2
+ (X[..., 3] - 4) ** 2
+ (X[..., 4] - 1) ** 2
)
f2 = (X ** 2).sum(-1)
return torch.stack([f1, f2], dim=-1)
def evaluate_slack_true(self, X: Tensor) -> Tensor:
g1 = X[..., 0] + X[..., 1] - 2.0
g2 = 6.0 - X[..., 0] - X[..., 1]
g3 = 2.0 - X[..., 1] + X[..., 0]
g4 = 2.0 - X[..., 0] + 3.0 * X[..., 1]
g5 = 4.0 - (X[..., 2] - 3.0) ** 2 - X[..., 3]
g6 = (X[..., 4] - 3.0) ** 2 + X[..., 5] - 4.0
return torch.stack([g1, g2, g3, g4, g5, g6], dim=-1)
| [
"torch.Size",
"torch.cos",
"torch.cat",
"torch.stack",
"torch.min",
"torch.sin",
"torch.arange",
"torch.split",
"torch.linspace",
"torch.tensor",
"torch.eye",
"torch.exp"
] | 1.5 | NTR0314/botorch | f0310c9a415947f3264dac7f3438744784843323 |
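A brief sketch of evaluating the problems defined above; they are minimization problems, so negate=True is typically passed when they are used with maximization-based acquisition functions.

import torch

problem = BraninCurrin(negate=True)   # 2D inputs, 2 objectives (defined above)
X = torch.rand(8, problem.dim)        # random points in the unit cube
Y = problem(X)                        # noiseless objectives, shape (8, 2)
print(Y.shape, problem.ref_point)

# Pareto-optimal points for DTLZ2 lie on the positive section of the unit sphere.
front = DTLZ2(dim=4, num_objectives=2).gen_pareto_front(n=16)
print(front.shape)                    # (16, 2)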
1.2 | import torch
import torch.nn as nn
from torchvision.transforms import ToTensor, ToPILImage
class Generator(nn.Module):
def __init__(self):
super().__init__()
self.conv_block = nn.Sequential(
nn.ConvTranspose2d(100, 512, 4, 1, 0),
nn.BatchNorm2d(512),
nn.ReLU(True),
nn.ConvTranspose2d(512, 256, 4, 2, 1),
nn.BatchNorm2d(256),
nn.ReLU(True),
nn.ConvTranspose2d(256, 128, 4, 2, 1),
nn.BatchNorm2d(128),
nn.ReLU(True),
nn.ConvTranspose2d(128, 64, 4, 2, 1),
nn.BatchNorm2d(64),
nn.ReLU(True),
nn.ConvTranspose2d(64, 3, 4, 2, 1),
nn.BatchNorm2d(3),
nn.ReLU(True),
nn.ConvTranspose2d(3, 3, 4, 2, 1),
nn.Tanh(),
)
def forward(self, x):
x = self.conv_block(x)
return x
if __name__ == '__main__':
img = torch.randn(1, 100, 1, 1)
gen = Generator()
print(gen(img).shape)
| [
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.ReLU",
"torch.randn"
] | 1.2.0 | y3sar/painter_gan | 374fb91927ca584b4ef3fd8ba10922c7b5201780 |
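A shape sanity check for the generator above: ConvTranspose2d(kernel=4, stride=1, padding=0) maps the 1x1 latent to 4x4, and each subsequent (kernel=4, stride=2, padding=1) layer doubles the spatial size, ending at 128x128.

import torch

gen = Generator()  # the module defined above
z = torch.randn(4, 100, 1, 1)
out = gen(z)
# Spatial sizes: 1 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128
assert out.shape == (4, 3, 128, 128)
print(out.shape)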
1.1 | #!/usr/bin/env python3
import torch
import unittest
from gpytorch.kernels import RBFKernelGrad
from gpytorch.test.base_kernel_test_case import BaseKernelTestCase
class TestRBFKernelGrad(unittest.TestCase, BaseKernelTestCase):
def create_kernel_no_ard(self, **kwargs):
return RBFKernelGrad(**kwargs)
def create_kernel_ard(self, num_dims, **kwargs):
return RBFKernelGrad(ard_num_dims=num_dims, **kwargs)
def test_kernel(self, cuda=False):
a = torch.tensor([[[1, 2], [2, 4]]], dtype=torch.float)
b = torch.tensor([[[1, 3], [0, 4]]], dtype=torch.float)
actual = torch.tensor(
[
[0.35321, 0, -0.73517, 0.0054977, 0.011443, -0.022886],
[0, 0.73517, 0, -0.011443, -0.012374, 0.047633],
[0.73517, 0, -0.79499, 0.022886, 0.047633, -0.083824],
[0.12476, 0.25967, 0.25967, 0.015565, 0.064793, 0],
[-0.25967, -0.2808, -0.54047, -0.064793, -0.23732, 0],
[-0.25967, -0.54047, -0.2808, 0, 0, 0.032396],
]
)
kernel = RBFKernelGrad()
if cuda:
a = a.cuda()
b = b.cuda()
actual = actual.cuda()
kernel = kernel.cuda()
res = kernel(a, b).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
def test_kernel_cuda(self):
if torch.cuda.is_available():
self.test_kernel(cuda=True)
def test_kernel_batch(self):
a = torch.tensor([[[1, 2, 3], [2, 4, 0]], [[-1, 1, 2], [2, 1, 4]]], dtype=torch.float)
b = torch.tensor([[[1, 3, 1]], [[2, -1, 0]]], dtype=torch.float).repeat(1, 2, 1)
kernel = RBFKernelGrad()
res = kernel(a, b).evaluate()
# Compute each batch separately
actual = torch.zeros(2, 8, 8)
actual[0, :, :] = kernel(a[0, :, :].squeeze(), b[0, :, :].squeeze()).evaluate()
actual[1, :, :] = kernel(a[1, :, :].squeeze(), b[1, :, :].squeeze()).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
def test_initialize_lengthscale(self):
kernel = RBFKernelGrad()
kernel.initialize(lengthscale=3.14)
actual_value = torch.tensor(3.14).view_as(kernel.lengthscale)
self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)
def test_initialize_lengthscale_batch(self):
kernel = RBFKernelGrad(batch_shape=torch.Size([2]))
ls_init = torch.tensor([3.14, 4.13])
kernel.initialize(lengthscale=ls_init)
actual_value = ls_init.view_as(kernel.lengthscale)
self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)
if __name__ == "__main__":
unittest.main()
| [
"torch.zeros",
"torch.Size",
"torch.norm",
"torch.cuda.is_available",
"torch.tensor"
] | 1.1 | techshot25/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 |
1.1 | #!/usr/bin/env python3
import warnings
import torch
def psd_safe_cholesky(A, upper=False, out=None, jitter=None):
"""Compute the Cholesky decomposition of A. If A is only p.s.d, add a small jitter to the diagonal.
Args:
:attr:`A` (Tensor):
The tensor to compute the Cholesky decomposition of
:attr:`upper` (bool, optional):
See torch.cholesky
:attr:`out` (Tensor, optional):
See torch.cholesky
:attr:`jitter` (float, optional):
The jitter to add to the diagonal of A in case A is only p.s.d. If omitted, chosen
as 1e-6 (float) or 1e-8 (double)
"""
try:
L = torch.cholesky(A, upper=upper, out=out)
# TODO: Remove once fixed in pytorch (#16780)
if A.dim() > 2 and A.is_cuda:
if torch.isnan(L if out is None else out).any():
raise RuntimeError("cholesky_cuda: singular U.")
return L
except RuntimeError as e:
if jitter is None:
jitter = 1e-6 if A.dtype == torch.float32 else 1e-8
Aprime = A.clone()
jitter_prev = 0
for i in range(3):
jitter_new = jitter * (10 ** i)
Aprime.diagonal(dim1=-2, dim2=-1).add_(jitter_new - jitter_prev)
jitter_prev = jitter_new
try:
L = torch.cholesky(Aprime, upper=upper, out=out)
# TODO: Remove once fixed in pytorch (#16780)
if A.dim() > 2 and A.is_cuda:
if torch.isnan(L if out is None else out).any():
raise RuntimeError("cholesky_cuda: singular U.")
warnings.warn(f"A not p.d., added jitter of {jitter_new} to the diagonal", RuntimeWarning)
return L
except RuntimeError:
continue
raise e
| [
"torch.cholesky",
"torch.isnan"
] | 1.1 | techshot25/gpytorch | b4aee6f81a3428172d4914e7e0fef0e71cd1f519 |
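A small sketch of why psd_safe_cholesky exists: a rank-deficient Gram matrix usually makes torch.cholesky fail outright, while the jittered retry returns a usable factor.

import torch

# Rank-deficient PSD matrix: plain torch.cholesky would typically raise here.
x = torch.randn(5, 2)
A = x @ x.t()  # 5x5, rank 2

L = psd_safe_cholesky(A)  # falls back to adding jitter to the diagonal
print(torch.dist(L @ L.t(), A))  # small, on the order of the added jitter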
1.1 | #!/usr/bin/env python3
import torch
import unittest
from gpytorch.kernels import PolynomialKernel
from gpytorch.test.base_kernel_test_case import BaseKernelTestCase
class TestPolynomialKernel(unittest.TestCase, BaseKernelTestCase):
def create_kernel_no_ard(self, **kwargs):
return PolynomialKernel(power=2, **kwargs)
def test_computes_quadratic_kernel(self):
a = torch.tensor([[4, 1], [2, 2], [8, 0]], dtype=torch.float)
b = torch.tensor([[0, 0], [2, 1], [1, 0]], dtype=torch.float)
kernel = PolynomialKernel(power=2)
kernel.eval()
actual = torch.zeros(3, 3)
for i in range(3):
for j in range(3):
actual[i, j] = (a[i].matmul(b[j]) + kernel.offset).pow(kernel.power)
res = kernel(a, b).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
# diag
res = kernel(a, b).diag()
actual = actual.diag()
self.assertLess(torch.norm(res - actual), 1e-5)
# batch_dims
actual = torch.zeros(2, 3, 3)
for l in range(2):
actual[l] = kernel(a[:, l].unsqueeze(-1), b[:, l].unsqueeze(-1)).evaluate()
res = kernel(a, b, last_dim_is_batch=True).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
# batch_dims + diag
res = kernel(a, b, last_dim_is_batch=True).diag()
actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
self.assertLess(torch.norm(res - actual), 1e-5)
def test_computes_cubic_kernel(self):
a = torch.tensor([[4, 1], [2, 2], [8, 0]], dtype=torch.float)
b = torch.tensor([[0, 0], [2, 1], [1, 0]], dtype=torch.float)
kernel = PolynomialKernel(power=3)
kernel.eval()
actual = torch.zeros(3, 3)
for i in range(3):
for j in range(3):
actual[i, j] = (a[i].matmul(b[j]) + kernel.offset).pow(kernel.power)
res = kernel(a, b).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
# diag
res = kernel(a, b).diag()
actual = actual.diag()
self.assertLess(torch.norm(res - actual), 1e-5)
# batch_dims
actual = torch.zeros(2, 3, 3)
for l in range(2):
actual[l] = kernel(a[:, l].unsqueeze(-1), b[:, l].unsqueeze(-1)).evaluate()
res = kernel(a, b, last_dim_is_batch=True).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
# batch_dims + diag
res = kernel(a, b, last_dim_is_batch=True).diag()
actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
self.assertLess(torch.norm(res - actual), 1e-5)
def test_quadratic_kernel_batch(self):
a = torch.tensor([[4, 2, 8], [1, 2, 3]], dtype=torch.float).view(2, 3, 1)
b = torch.tensor([[0, 2, 1], [-1, 2, 0]], dtype=torch.float).view(2, 3, 1)
kernel = PolynomialKernel(power=2, batch_shape=torch.Size([2])).initialize(offset=torch.rand(2, 1))
kernel.eval()
actual = torch.zeros(2, 3, 3)
for k in range(2):
for i in range(3):
for j in range(3):
actual[k, i, j] = (a[k, i].matmul(b[k, j]) + kernel.offset[k]).pow(kernel.power)
res = kernel(a, b).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
def test_cubic_kernel_batch(self):
a = torch.tensor([[4, 2, 8], [1, 2, 3]], dtype=torch.float).view(2, 3, 1)
b = torch.tensor([[0, 2, 1], [-1, 2, 0]], dtype=torch.float).view(2, 3, 1)
kernel = PolynomialKernel(power=3, batch_shape=torch.Size([2])).initialize(offset=torch.rand(2, 1))
kernel.eval()
actual = torch.zeros(2, 3, 3)
for k in range(2):
for i in range(3):
for j in range(3):
actual[k, i, j] = (a[k, i].matmul(b[k, j]) + kernel.offset[k]).pow(kernel.power)
res = kernel(a, b).evaluate()
self.assertLess(torch.norm(res - actual), 1e-5)
if __name__ == "__main__":
unittest.main()
| [
"torch.zeros",
"torch.rand",
"torch.Size",
"torch.norm",
"torch.tensor"
] | 1.1 | techshot25/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 |
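A compact check of the formula exercised by the tests above, k(x, z) = (x^T z + offset)^power, against a direct computation.

import torch
from gpytorch.kernels import PolynomialKernel

kernel = PolynomialKernel(power=2)
kernel.eval()

a = torch.randn(4, 3)
b = torch.randn(5, 3)
K = kernel(a, b).evaluate()
K_direct = (a @ b.t() + kernel.offset).pow(2)
print(torch.norm(K - K_direct))  # ~0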
1.1 | #!/usr/bin/env python3
import torch
import warnings
from .kernel import Kernel
from ..lazy import MatmulLazyTensor, RootLazyTensor
from ..constraints import Positive
class LinearKernel(Kernel):
r"""
Computes a covariance matrix based on the Linear kernel
between inputs :math:`\mathbf{x_1}` and :math:`\mathbf{x_2}`:
.. math::
\begin{equation*}
k_\text{Linear}(\mathbf{x_1}, \mathbf{x_2}) = v\mathbf{x_1}^\top
\mathbf{x_2}.
\end{equation*}
where
* :math:`v` is a :attr:`variance` parameter.
.. note::
To implement this efficiently, we use a :obj:`gpytorch.lazy.RootLazyTensor` during training and a
:class:`gpytorch.lazy.MatmulLazyTensor` during test. These lazy tensors represent matrices of the form
:math:`K = XX^{\top}` and :math:`K = XZ^{\top}`. This makes inference
efficient because a matrix-vector product :math:`Kv` can be computed as
:math:`Kv=X(X^{\top}v)`, where the base multiply :math:`Xv` takes only
:math:`O(nd)` time and space.
Args:
:attr:`variance_prior` (:class:`gpytorch.priors.Prior`):
Prior over the variance parameter (default `None`).
:attr:`variance_constraint` (Constraint, optional):
Constraint to place on variance parameter. Default: `Positive`.
:attr:`active_dims` (list):
List of data dimensions to operate on.
`len(active_dims)` should equal `num_dimensions`.
"""
def __init__(
self,
num_dimensions=None,
offset_prior=None,
variance_prior=None,
variance_constraint=None,
**kwargs
):
super(LinearKernel, self).__init__(**kwargs)
if variance_constraint is None:
variance_constraint = Positive()
if num_dimensions is not None:
warnings.warn(
"The `num_dimensions` argument is deprecated and no longer used.",
DeprecationWarning
)
self.register_parameter(
name="offset",
parameter=torch.nn.Parameter(torch.zeros(1, 1, num_dimensions))
)
if offset_prior is not None:
warnings.warn(
"The `offset_prior` argument is deprecated and no longer used.",
DeprecationWarning
)
self.register_parameter(
name="raw_variance", parameter=torch.nn.Parameter(torch.zeros(*self.batch_shape, 1, 1))
)
if variance_prior is not None:
self.register_prior(
"variance_prior",
variance_prior,
lambda: self.variance,
lambda v: self._set_variance(v)
)
self.register_constraint("raw_variance", variance_constraint)
@property
def variance(self):
return self.raw_variance_constraint.transform(self.raw_variance)
@variance.setter
def variance(self, value):
self._set_variance(value)
def _set_variance(self, value):
if not torch.is_tensor(value):
value = torch.as_tensor(value).to(self.raw_variance)
self.initialize(raw_variance=self.raw_variance_constraint.inverse_transform(value))
def forward(self, x1, x2, diag=False, last_dim_is_batch=False, **params):
x1_ = x1 * self.variance.sqrt()
if last_dim_is_batch:
x1_ = x1_.transpose(-1, -2).unsqueeze(-1)
if x1.size() == x2.size() and torch.equal(x1, x2):
# Use RootLazyTensor when x1 == x2 for efficiency when composing
# with other kernels
prod = RootLazyTensor(x1_)
else:
x2_ = x2 * self.variance.sqrt()
if last_dim_is_batch:
x2_ = x2_.transpose(-1, -2).unsqueeze(-1)
prod = MatmulLazyTensor(x1_, x2_.transpose(-2, -1))
if diag:
return prod.diag()
else:
return prod
| [
"torch.is_tensor",
"torch.as_tensor",
"torch.equal",
"torch.zeros"
] | 1.1 | techshot25/gpytorch | 092d523027a844939ba85d7ea8c8c7b7511843d5 |
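A short illustration of the structure the LinearKernel docstring relies on: with K = v * X X^T, a matrix-vector product can be computed as v * X (X^T u) in O(nd) time and memory, without ever forming the n x n matrix.

import torch

n, d = 1000, 5
X = torch.randn(n, d)
u = torch.randn(n)
v = 0.7  # the variance parameter

Kv_structured = v * (X @ (X.t() @ u))   # O(nd): never materializes the Gram matrix
Kv_dense = (v * X @ X.t()) @ u          # forms the full n x n matrix
print(torch.norm(Kv_structured - Kv_dense))  # ~0 up to floating point error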
0.4 | # coding: utf-8
import torch
from torch import nn
import math
import numpy as np
from torch.nn import functional as F
def position_encoding_init(n_position, d_pos_vec, position_rate=1.0,
sinusoidal=True):
''' Init the sinusoid position encoding table '''
# keep dim 0 for padding token position encoding zero vector
position_enc = np.array([
[position_rate * pos / np.power(10000, 2 * (i // 2) / d_pos_vec) for i in range(d_pos_vec)]
if pos != 0 else np.zeros(d_pos_vec) for pos in range(n_position)])
position_enc = torch.from_numpy(position_enc).float()
if sinusoidal:
position_enc[1:, 0::2] = torch.sin(position_enc[1:, 0::2]) # dim 2i
position_enc[1:, 1::2] = torch.cos(position_enc[1:, 1::2]) # dim 2i+1
return position_enc
def sinusoidal_encode(x, w):
y = w * x
y[1:, 0::2] = torch.sin(y[1:, 0::2].clone())
y[1:, 1::2] = torch.cos(y[1:, 1::2].clone())
return y
class SinusoidalEncoding(nn.Embedding):
def __init__(self, num_embeddings, embedding_dim,
*args, **kwargs):
super(SinusoidalEncoding, self).__init__(num_embeddings, embedding_dim,
padding_idx=0,
*args, **kwargs)
self.weight.data = position_encoding_init(num_embeddings, embedding_dim,
position_rate=1.0,
sinusoidal=False)
def forward(self, x, w=1.0):
isscaler = np.isscalar(w)
assert self.padding_idx is not None
if isscaler or w.size(0) == 1:
weight = sinusoidal_encode(self.weight, w)
return F.embedding(
x, weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse)
else:
# TODO: cannot simply apply for batch
# better to implement efficient function
pe = []
for batch_idx, we in enumerate(w):
weight = sinusoidal_encode(self.weight, we)
pe.append(F.embedding(
x[batch_idx], weight, self.padding_idx, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.sparse))
pe = torch.stack(pe)
return pe
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
ctx.mark_shared_storage((x, res))
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
def Linear(in_features, out_features, dropout=0):
"""Weight-normalized Linear layer (input: N x T x C)"""
m = nn.Linear(in_features, out_features)
m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
m.bias.data.zero_()
return nn.utils.weight_norm(m)
def Embedding(num_embeddings, embedding_dim, padding_idx, std=0.01):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
m.weight.data.normal_(0, std)
return m
def Conv1d(in_channels, out_channels, kernel_size, dropout=0, std_mul=4.0, **kwargs):
from .conv import Conv1d
m = Conv1d(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((std_mul * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return nn.utils.weight_norm(m)
def ConvTranspose1d(in_channels, out_channels, kernel_size, dropout=0,
std_mul=1.0, **kwargs):
m = nn.ConvTranspose1d(in_channels, out_channels, kernel_size, **kwargs)
std = math.sqrt((std_mul * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
m.weight.data.normal_(mean=0, std=std)
m.bias.data.zero_()
return nn.utils.weight_norm(m)
class Conv1dGLU(nn.Module):
"""(Dilated) Conv1d + Gated linear unit + (optionally) speaker embedding
"""
def __init__(self, n_speakers, speaker_embed_dim,
in_channels, out_channels, kernel_size,
dropout, padding=None, dilation=1, causal=False, residual=False,
*args, **kwargs):
super(Conv1dGLU, self).__init__()
self.dropout = dropout
self.residual = residual
if padding is None:
# no future time stamps available
if causal:
padding = (kernel_size - 1) * dilation
else:
padding = (kernel_size - 1) // 2 * dilation
self.causal = causal
self.conv = Conv1d(in_channels, 2 * out_channels, kernel_size,
dropout=dropout, padding=padding, dilation=dilation,
*args, **kwargs)
if n_speakers > 1:
self.speaker_proj = Linear(speaker_embed_dim, out_channels)
else:
self.speaker_proj = None
def forward(self, x, speaker_embed=None):
return self._forward(x, speaker_embed, False)
def incremental_forward(self, x, speaker_embed=None):
return self._forward(x, speaker_embed, True)
def _forward(self, x, speaker_embed, is_incremental):
residual = x
x = F.dropout(x, p=self.dropout, training=self.training)
if is_incremental:
splitdim = -1
x = self.conv.incremental_forward(x)
else:
splitdim = 1
x = self.conv(x)
# remove future time steps
x = x[:, :, :residual.size(-1)] if self.causal else x
a, b = x.split(x.size(splitdim) // 2, dim=splitdim)
if self.speaker_proj is not None:
softsign = F.softsign(self.speaker_proj(speaker_embed))
# Since conv layer assumes BCT, we need to transpose
softsign = softsign if is_incremental else softsign.transpose(1, 2)
a = a + softsign
x = a * torch.sigmoid(b)
return (x + residual) * math.sqrt(0.5) if self.residual else x
def clear_buffer(self):
self.conv.clear_buffer()
class HighwayConv1d(nn.Module):
"""Weight normzlized Conv1d + Highway network (support incremental forward)
"""
def __init__(self, in_channels, out_channels, kernel_size=1, padding=None,
dilation=1, causal=False, dropout=0, std_mul=None, glu=False):
super(HighwayConv1d, self).__init__()
if std_mul is None:
std_mul = 4.0 if glu else 1.0
if padding is None:
# no future time stamps available
if causal:
padding = (kernel_size - 1) * dilation
else:
padding = (kernel_size - 1) // 2 * dilation
self.causal = causal
self.dropout = dropout
self.glu = glu
self.conv = Conv1d(in_channels, 2 * out_channels,
kernel_size=kernel_size, padding=padding,
dilation=dilation, dropout=dropout,
std_mul=std_mul)
def forward(self, x):
return self._forward(x, False)
def incremental_forward(self, x):
return self._forward(x, True)
def _forward(self, x, is_incremental):
"""Forward
Args:
x: (B, in_channels, T)
returns:
(B, out_channels, T)
"""
residual = x
x = F.dropout(x, p=self.dropout, training=self.training)
if is_incremental:
splitdim = -1
x = self.conv.incremental_forward(x)
else:
splitdim = 1
x = self.conv(x)
# remove future time steps
x = x[:, :, :residual.size(-1)] if self.causal else x
if self.glu:
x = F.glu(x, dim=splitdim)
return (x + residual) * math.sqrt(0.5)
else:
a, b = x.split(x.size(splitdim) // 2, dim=splitdim)
T = torch.sigmoid(b)
return (T * a + (1 - T) * residual)
def clear_buffer(self):
self.conv.clear_buffer()
def get_mask_from_lengths(memory, memory_lengths):
"""Get mask tensor from list of length
Args:
memory: (batch, max_time, dim)
memory_lengths: array like
"""
mask = memory.data.new(memory.size(0), memory.size(1)).byte().zero_()
for idx, l in enumerate(memory_lengths):
mask[idx][:l] = 1
return ~mask
| [
"torch.nn.Linear",
"torch.cos",
"torch.sigmoid",
"torch.nn.functional.glu",
"torch.nn.ConvTranspose1d",
"torch.stack",
"torch.sin",
"torch.nn.functional.dropout",
"torch.from_numpy",
"torch.nn.functional.embedding",
"torch.nn.utils.weight_norm",
"torch.nn.Embedding"
] | 0.4.0 | tripzero/deepvoice3_pytorch | 90027d27dab2889d856f9db9ffaf39d4f70b3067 |
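A brief usage sketch of the SinusoidalEncoding module above: index 0 is the padding position and maps to a zero vector, and the scalar w rescales the encoded positions (analogous to the position_rate argument above).

import torch

pe = SinusoidalEncoding(num_embeddings=32, embedding_dim=16)  # module defined above
positions = torch.arange(32).long().unsqueeze(0)  # (1, T); index 0 is padding
out = pe(positions, w=1.0)
print(out.shape)              # (1, 32, 16)
print(out[0, 0].abs().sum())  # zero vector at the padding position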
0.4 | import os
import json
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from collections import OrderedDict
from sg2im.utils import timeit, bool_flag, LossManager
from sg2im.utils import int_tuple, float_tuple, str_tuple
from sg2im.data.vg import SequenceTransformerVgSceneGraphDataset
import pytorch_lightning as pl
from transformers import (
BertTokenizerFast,
BertTokenizer,
EncoderDecoderModel,
EncoderDecoderConfig,
AutoModel,
BertForSequenceClassification,
)
from pytorch_lightning.plugins import DDPPlugin
VG_DIR = os.path.expanduser('datasets/vg')
COCO_DIR = os.path.expanduser('datasets/coco')
parser = argparse.ArgumentParser()
parser.add_argument('--test', action='store_true', default=False)
parser.add_argument('--dataset', default='coco', choices=['vg', 'coco'])
parser.add_argument('--scene_graphs_json', default='scene_graphs/figure_6_sheep.json')
parser.add_argument('--load_checkpoint', default="")
# Optimization hyperparameters
parser.add_argument('--batch_size', default=32, type=int)
parser.add_argument('--num_iterations', default=1000000, type=int)
parser.add_argument('--learning_rate', default=1e-5, type=float)
parser.add_argument('--gpus', default=1, type=int)
# Switch the generator to eval mode after this many iterations
parser.add_argument('--eval_mode_after', default=100000, type=int)
# Dataset options common to both VG and COCO
parser.add_argument('--image_size', default='64,64', type=int_tuple)
parser.add_argument('--num_train_samples', default=None, type=int)
parser.add_argument('--num_val_samples', default=1024, type=int)
parser.add_argument('--shuffle_val', default=True, type=bool_flag)
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--include_relationships', default=True, type=bool_flag)
# VG-specific options
parser.add_argument('--vg_image_dir', default=os.path.join(VG_DIR, 'images'))
parser.add_argument('--train_h5', default=os.path.join(VG_DIR, 'train.h5'))
parser.add_argument('--val_h5', default=os.path.join(VG_DIR, 'val.h5'))
parser.add_argument('--vocab_json', default=os.path.join(VG_DIR, 'vocab.json'))
parser.add_argument('--max_objects_per_image', default=10, type=int)
parser.add_argument('--vg_use_orphaned_objects', default=True, type=bool_flag)
# COCO-specific options
parser.add_argument('--coco_train_image_dir',
default=os.path.join(COCO_DIR, 'images/train2017'))
parser.add_argument('--coco_val_image_dir',
default=os.path.join(COCO_DIR, 'images/val2017'))
parser.add_argument('--coco_train_instances_json',
default=os.path.join(COCO_DIR, 'annotations/instances_train2017.json'))
parser.add_argument('--coco_train_stuff_json',
default=os.path.join(COCO_DIR, 'annotations/stuff_train2017.json'))
parser.add_argument('--coco_val_instances_json',
default=os.path.join(COCO_DIR, 'annotations/instances_val2017.json'))
parser.add_argument('--coco_val_stuff_json',
default=os.path.join(COCO_DIR, 'annotations/stuff_val2017.json'))
parser.add_argument('--instance_whitelist', default=None, type=str_tuple)
parser.add_argument('--stuff_whitelist', default=None, type=str_tuple)
parser.add_argument('--coco_include_other', default=False, type=bool_flag)
parser.add_argument('--min_object_size', default=0.02, type=float)
parser.add_argument('--min_objects_per_image', default=3, type=int)
parser.add_argument('--coco_stuff_only', default=True, type=bool_flag)
parser.add_argument('--max_lengths_for_image', default=1024, type=int)
# Generator options
parser.add_argument('--mask_size', default=16, type=int) # Set this to 0 to use no masks
parser.add_argument('--embedding_dim', default=128, type=int)
parser.add_argument('--gconv_dim', default=128, type=int)
parser.add_argument('--gconv_hidden_dim', default=512, type=int)
parser.add_argument('--gconv_num_layers', default=5, type=int)
parser.add_argument('--mlp_normalization', default='none', type=str)
parser.add_argument('--refinement_network_dims', default='1024,512,256,128,64', type=int_tuple)
parser.add_argument('--normalization', default='batch')
parser.add_argument('--activation', default='leakyrelu-0.2')
parser.add_argument('--layout_noise_dim', default=32, type=int)
parser.add_argument('--use_boxes_pred_after', default=-1, type=int)
# Generator losses
parser.add_argument('--mask_loss_weight', default=0, type=float)
parser.add_argument('--l1_pixel_loss_weight', default=1.0, type=float)
parser.add_argument('--bbox_pred_loss_weight', default=10, type=float)
parser.add_argument('--predicate_pred_loss_weight', default=0, type=float) # DEPRECATED
# Generic discriminator options
parser.add_argument('--discriminator_loss_weight', default=0.01, type=float)
parser.add_argument('--gan_loss_type', default='gan')
parser.add_argument('--d_clip', default=None, type=float)
parser.add_argument('--d_normalization', default='batch')
parser.add_argument('--d_padding', default='valid')
parser.add_argument('--d_activation', default='leakyrelu-0.2')
# Object discriminator
parser.add_argument('--d_obj_arch',
default='C4-64-2,C4-128-2,C4-256-2')
parser.add_argument('--crop_size', default=32, type=int)
parser.add_argument('--d_obj_weight', default=1.0, type=float) # multiplied by d_loss_weight
parser.add_argument('--ac_loss_weight', default=0.1, type=float)
# Image discriminator
parser.add_argument('--d_img_arch',
default='C4-64-2,C4-128-2,C4-256-2')
parser.add_argument('--d_img_weight', default=1.0, type=float) # multiplied by d_loss_weight
# Output options
parser.add_argument('--print_every', default=10, type=int)
parser.add_argument('--timing', default=False, type=bool_flag)
parser.add_argument('--checkpoint_every', default=10000, type=int)
parser.add_argument('--output_dir', default=os.getcwd())
parser.add_argument('--checkpoint_name', default='checkpoint')
parser.add_argument('--checkpoint_start_from', default=None)
parser.add_argument('--restore_from_checkpoint', default=False, type=bool_flag)
class VGDataModule(pl.LightningDataModule):
def __init__(self, args, tokenizer, num_workers=8):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.num_workers = num_workers
self.batch_size = args.batch_size
def setup(self, stage=None):
args = self.args
with open(args.vocab_json, 'r') as f:
vocab = json.load(f)
dset_kwargs = {
'vocab': vocab,
'h5_path': args.train_h5,
'image_dir': args.vg_image_dir,
'image_size': args.image_size,
'max_samples': args.num_train_samples,
'max_objects': args.max_objects_per_image,
'use_orphaned_objects': args.vg_use_orphaned_objects,
'include_relationships': args.include_relationships,
'max_lengths_for_image': args.max_lengths_for_image
}
train_dset = SequenceTransformerVgSceneGraphDataset(
**dset_kwargs, tokenizer=self.tokenizer
)
# iter_per_epoch = len(train_dset) // args.batch_size
# print('There are %d iterations per epoch' % iter_per_epoch)
dset_kwargs['h5_path'] = args.val_h5
del dset_kwargs['max_samples']
val_dset = SequenceTransformerVgSceneGraphDataset(
**dset_kwargs, tokenizer=self.tokenizer
)
self.train_dset = train_dset
self.val_dset = val_dset
def train_dataloader(self):
return DataLoader(
self.train_dset, batch_size=self.batch_size, num_workers=self.num_workers, shuffle=True
)
def val_dataloader(self):
return DataLoader(self.val_dset, batch_size=self.batch_size, num_workers=self.num_workers)
def test_dataloader(self):
return DataLoader(self.val_dset, batch_size=self.batch_size, num_workers=self.num_workers)
class Discriminator(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = BertForSequenceClassification.from_pretrained(backbone)
def forward(self, *args, **kwargs):
outputs = self.backbone(*args, **kwargs)
return outputs["loss"]
def apply_word_embeddings(self, inputs):
"""
        Because Gumbel softmax outputs cannot be fed directly to a huggingface model,
        we have to compute the `inputs_embeds` manually.
"""
word_embeddings = self.backbone.bert.embeddings.word_embeddings
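        # inputs: (batch, seq_len, vocab_size) one-hot-like Gumbel-softmax outputs;
        # matmul with the (vocab_size, hidden_size) embedding matrix yields (batch, seq_len, hidden_size)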
return torch.matmul(inputs, word_embeddings.weight)
class Generator(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = EncoderDecoderModel.from_encoder_decoder_pretrained(
backbone, backbone, tie_encoder_decoder=True
)
def forward(self, *args, **kwargs):
return self.backbone(*args, **kwargs)
def forward_logits(self, *args, **kwargs):
return self.backbone(*args, **kwargs)["logits"]
def forward_loss(self, *args, **kwargs):
return self.backbone(*args, **kwargs)["loss"]
def apply_word_embeddings(self, inputs):
"""
        Because Gumbel softmax outputs cannot be fed directly to a huggingface model,
        we have to compute the `inputs_embeds` manually.
"""
word_embeddings = self.backbone.encoder.embeddings.word_embeddings
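        # same trick as in the discriminator: project (batch, seq_len, vocab_size) soft tokens into embedding space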
return torch.matmul(inputs, word_embeddings.weight)
class GAN(pl.LightningModule):
def __init__(
self,
args,
tokenizer,
backbone=None,
):
super().__init__()
self.args = args
self.validation_z = torch.randn(8, 100)
self.tokenizer = tokenizer
self.discriminator = Discriminator(backbone)
self.generator = Generator(backbone)
self.graph_special_token = "[graph]"
self.image_special_token = "[image]"
self.tau = 1
self.image_token_id_list, self.text_token_id_list = self.retrieve_bad_image_text_tokens_ids()
def retrieve_bad_image_text_tokens_ids(self):
special_tokens_list = ["[CLS]", "[SEP]"]
image_tokens_list = [f"[itoken{i}]" for i in range(512)]
extra_image_tokens_list = [f"[itoken{i}]" for i in range(512, 32 * 32)]
vocab = self.tokenizer.get_vocab()
special_tokens_id_list = [vocab[token] for token in special_tokens_list]
image_token_id_list = [vocab[token] for token in image_tokens_list]
extra_image_tokens_id_list = [vocab[token] for token in extra_image_tokens_list]
text_token_id_list = [v for k, v in vocab.items()]
text_token_id_list = \
list(set(text_token_id_list) - set(image_token_id_list) - set(extra_image_tokens_id_list))
return image_token_id_list + extra_image_tokens_id_list, text_token_id_list + extra_image_tokens_id_list
def adversarial_loss(self, y_hat, y):
return F.binary_cross_entropy_with_logits(y_hat, y)
def training_step(self, batch, batch_idx, optimizer_idx):
# sample noise
# z = torch.randn(imgs.shape[0], self.hparams.latent_dim)
# z = z.type_as(imgs)
generator_batch = {
"input_ids": batch["sent_input/input_ids"],
"attention_mask": batch["sent_input/attention_mask"],
"decoder_input_ids": batch["code_output/input_ids"],
"decoder_attention_mask": batch["code_output/attention_mask"],
"labels": batch["code_output/input_ids"].clone()
}
        # exclude padding tokens from the loss
generator_batch["labels"][generator_batch["labels"] == self.tokenizer.pad_token_id] = -100
# train generator
if optimizer_idx == 0:
logits = self.generator.forward_logits(**generator_batch)
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
# log sampled images
# sample_imgs = self.generated_imgs[:6]
# grid = torchvision.utils.make_grid(sample_imgs)
# self.logger.experiment.add_image('generated_images', grid, 0)
# ground truth result (ie: all fake)
# put on GPU because we created this tensor inside training_loop
predictions_embedding = self.generator.apply_word_embeddings(predictions)
fake_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
fake_batch["labels"][fake_batch["labels"] == self.tokenizer.pad_token_id] = -100
ac_loss = self.generator.forward_loss(**fake_batch)
predictions_embedding = self.discriminator.apply_word_embeddings(predictions)
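            # label the generated sequences as real (1): the generator is rewarded for fooling the discriminator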
fake_dis_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.ones(predictions_embedding.shape[0]).type_as(predictions_embedding).long()
}
g_d_loss = self.discriminator(**fake_dis_batch)
g_loss = g_d_loss + ac_loss
# g_loss = ac_loss
self.log('g_ac_loss', ac_loss, prog_bar=True)
self.log('g_d_loss', g_d_loss, prog_bar=True)
# return {"loss": g_loss}
# train discriminator (inverse generator)
# if optimizer_idx == 1:
# Measure discriminator's ability to classify real from generated samples
logits = self.generator.forward_logits(**generator_batch)
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
# don't compute the gradients of the generator
predictions = predictions.detach()
predictions_embedding = self.generator.apply_word_embeddings(predictions)
fake_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
fake_batch["labels"][fake_batch["labels"] == self.tokenizer.pad_token_id] = -100
fake_ac_loss = self.generator.forward_loss(**fake_batch)
# For real data
real_batch = {
"input_ids": batch["code_output/input_ids"],
"attention_mask": batch["code_output/attention_mask"],
"decoder_input_ids": batch["sent_output/input_ids"],
"decoder_attention_mask": batch["sent_output/attention_mask"],
"labels": batch["sent_output/input_ids"].clone()
}
real_batch["labels"][real_batch["labels"] == self.tokenizer.pad_token_id] = -100
real_ac_loss = self.generator.forward_loss(**real_batch)
ac_loss = (real_ac_loss + fake_ac_loss) / 2
self.log('ac_loss', ac_loss, prog_bar=True)
# return {"loss": ac_loss}
return g_loss + ac_loss
# train discriminator
if optimizer_idx == 1:
# Measure discriminator's ability to classify real from generated samples
logits = self.generator.forward_logits(**generator_batch)
# don't compute the gradients of the generator
predictions = F.gumbel_softmax(logits, tau=self.tau, hard=True, dim=-1)
predictions_embedding = self.discriminator.apply_word_embeddings(predictions)
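            # the discriminator is trained to label generated sequences as fake (0) and real data as real (1)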
fake_dis_batch = {
"inputs_embeds": predictions_embedding,
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.zeros(predictions.shape[0]).type_as(predictions).long()
}
fake_loss = self.discriminator(**fake_dis_batch)
# fake = torch.zeros(fake_preds.shape)
# fake = fake.type_as(fake_preds)
# fake_loss = self.adversarial_loss(fake_preds, fake)
real_dis_batch = {
"input_ids": batch["code_output/input_ids"],
"attention_mask": batch["code_output/attention_mask"],
"labels": torch.ones(predictions.shape[0]).type_as(predictions).long()
}
real_loss = self.discriminator(**real_dis_batch)
# real = torch.ones(real_preds.shape)
# real = real.type_as(real_preds)
# real_loss = self.adversarial_loss(real_preds, real)
# discriminator loss is the average of these
d_loss = (real_loss + fake_loss) / 2
self.log('d_loss', d_loss, prog_bar=True)
return d_loss
def configure_optimizers(self):
lr = self.args.learning_rate
opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(0.5, 0.999))
opt_d = torch.optim.Adam(
self.discriminator.parameters(),
lr=lr,
betas=(0.5, 0.999)
)
return [opt_g, opt_d], []
# def on_epoch_end(self):
# z = self.validation_z.type_as(self.generator.model[0].weight)
# # log sampled images
# sample_imgs = self(z)
# grid = torchvision.utils.make_grid(sample_imgs)
# self.logger.experiment.add_image('generated_images', grid, self.current_epoch)
def test_step(self, batch, batch_idx):
pass
def inference(self, scene_graphs_json):
scene_graphs = self.read_scene_graphs(scene_graphs_json)
image_tokens_generation = self.generator.backbone.generate(
scene_graphs["input_ids"],
max_length=66,
# num_beams=5,
# no_repeat_ngram_size=2,
# early_stopping=True,
do_sample=True,
top_p=0.92,
top_k=0,
decoder_start_token_id=self.generator.backbone.config.decoder.pad_token_id,
bad_words_ids=[[ids] for ids in self.text_token_id_list],
)
print(image_tokens_generation)
output = []
for data in image_tokens_generation:
output.append(self.tokenizer.decode(data, skip_special_tokens=True))
print(output[-1])
reconstructed_graph = self.generator.backbone.generate(
image_tokens_generation,
max_length=64,
# num_beams=5,
# no_repeat_ngram_size=2,
# early_stopping=True,
do_sample=True,
top_p=0.92,
top_k=0,
decoder_start_token_id=self.generator.backbone.config.decoder.pad_token_id,
bad_words_ids=[[ids]for ids in self.image_token_id_list],
)
for data in reconstructed_graph:
print(self.tokenizer.decode(data, skip_special_tokens=True))
if not os.path.exists(self.args.output_dir):
os.makedirs(self.args.output_dir)
itokens_output_file = os.path.join(self.args.output_dir, "itokens_output.json")
with open(itokens_output_file, "w") as f:
json.dump(output, f, indent=2)
def read_scene_graphs(self, scene_graphs_json):
with open(scene_graphs_json, 'r') as f:
scene_graphs = json.load(f)
if isinstance(scene_graphs, dict):
# We just got a single scene graph, so promote it to a list
scene_graphs = [scene_graphs]
objs, triples, obj_to_img = [], [], []
obj_offset = 0
sents_list = []
for i, sg in enumerate(scene_graphs):
            # build one sentence per (subject, predicate, object) relationship in this scene graph
sents = []
for s, p, o in sg['relationships']:
sent = f"{sg['objects'][s]} {p} {sg['objects'][o]}."
sents.append(sent)
sent = " ".join(sents)
sent = f"{self.graph_special_token} {sent} {self.image_special_token}"
sents_list.append(sent)
print(sent)
sent_tensor = self.tokenizer(
sents_list,
return_tensors="pt",
padding="max_length",
max_length=64,
truncation=True,
add_special_tokens=False
)
device = next(self.parameters()).device
sent_tensor = {k: v.to(device) for k, v in sent_tensor.items()}
return sent_tensor
def main(args):
backbone = "bert-base-uncased-itokens"
tokenizer = BertTokenizerFast.from_pretrained(backbone)
# encoder_decoder_config = EncoderDecoderConfig.from_pretrained("bert-base-uncased-itokens")
# model = EncoderDecoderModel.from_pretrained(
# "bert-base-uncased-itokens", config=encoder_decoder_config
# )
# model = EncoderDecoderModel.from_encoder_decoder_pretrained(
# "bert-base-uncased-itokens", "bert-base-uncased-itokens", tie_encoder_decoder=True
# )
# generator = Generator(model)
# discriminator = Discriminator(
# AutoModel.from_pretrained("bert-base-uncased-itokens")
# )
if args.test:
model = GAN.load_from_checkpoint(
args.load_checkpoint,
args=args,
tokenizer=tokenizer,
backbone=backbone
)
model.cuda()
model.eval()
model.inference(args.scene_graphs_json)
return
# train
if args.gpus > 1:
dm = VGDataModule(args, tokenizer, 2)
else:
dm = VGDataModule(args, tokenizer)
if args.load_checkpoint != "":
model = GAN.load_from_checkpoint(
args.load_checkpoint,
args=args,
tokenizer=tokenizer,
backbone=backbone
)
else:
model = GAN(args, tokenizer, backbone)
training_args = {
"gpus": args.gpus,
"fast_dev_run": False,
"max_steps": args.num_iterations,
"precision": 32,
"gradient_clip_val": 1,
}
if args.gpus > 1:
additional_args = {
"accelerator": "ddp",
"plugins": [DDPPlugin(find_unused_parameters=True)]
# "plugins": [my_ddp]
}
training_args.update(additional_args)
trainer = pl.Trainer(**training_args)
trainer.fit(model, dm)
if __name__ == "__main__":
args = parser.parse_args()
main(args) | [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.functional.gumbel_softmax",
"torch.zeros",
"torch.ones",
"torch.utils.data.DataLoader",
"torch.matmul",
"torch.randn"
] | 0.4.0 | louis2889184/sg2im | 6df2095bf58703c7d6d74bf47535a7cf45690bc0 |
1.5 | import copy
from dataclasses import dataclass
from typing import List, Optional
import torch
from torch.nn import CrossEntropyLoss, Module
from torch.utils.data import DataLoader
def federated_averaging(models: List[Module]) -> Module:
global_model = copy.deepcopy(models[0])
global_weights = global_model.state_dict()
local_weights = [m.state_dict() for m in models]
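    # FedAvg: sum every client's parameters element-wise, then divide by the number of clients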
for k in global_weights.keys():
for i in range(1, len(local_weights)):
global_weights[k] += local_weights[i][k]
global_weights[k] = torch.div(global_weights[k], len(local_weights))
global_model.load_state_dict(global_weights)
return global_model
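# Minimal usage sketch (the client training loop and loader names below are illustrative, not from this file):
# local_models = [train_on_client(copy.deepcopy(global_model), loader) for loader in client_loaders]
# global_model = federated_averaging(local_models)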
class ModelAccumulator:
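    """Streaming counterpart to federated_averaging: feed models one at a time via update(), then call get() for the average."""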
def __init__(self):
self.model_counter: int = 0
self.global_model = None
self.global_weights = None
def update(self, model):
local_weights = model.state_dict()
if self.global_model is None:
self.global_model = model
self.global_weights = local_weights
self.model_counter += 1
else:
for k in self.global_weights.keys():
self.global_weights[k] += local_weights[k]
self.model_counter += 1
def get(self):
for k in self.global_weights.keys():
self.global_weights[k] = torch.div(
self.global_weights[k], self.model_counter
)
self.global_model.load_state_dict(self.global_weights)
return self.global_model
def reset(self):
self.global_model = None
self.global_weights = None
self.model_counter = 0
@dataclass
class EdgeDeviceSettings:
batch_size: int
epochs: int
learning_rate: float
learning_rate_decay: float
device: str
@dataclass
class TrainingResult:
loss: float
steps: int
learning_rate: float
class EdgeDevice:
def __init__(
self, device_id: int, settings: EdgeDeviceSettings, data_loader: DataLoader
):
self.device_id = device_id
self._data_loader = data_loader
self.setting = copy.deepcopy(settings)
self._loss_func = CrossEntropyLoss()
self._model: Optional[Module] = None
def download(self, model: Module):
self._model = copy.deepcopy(model)
def upload(self) -> Module:
if self._model is not None:
return copy.deepcopy(self._model)
else:
raise ValueError("Model not found on this device!")
def train(self) -> TrainingResult:
if self._data_loader is None:
raise ValueError("Dataset not found on this device!")
self._model.train()
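        # the learning rate is decayed once per call to train(), i.e. once per round on this device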
self.setting.learning_rate = (
self.setting.learning_rate * self.setting.learning_rate_decay
)
optimizer = torch.optim.SGD(
params=self._model.parameters(), lr=self.setting.learning_rate
)
epoch_loss = []
local_steps: int = 0
for _ in range(self.setting.epochs):
batch_loss = []
for i_batch, (images, labels) in enumerate(self._data_loader):
self._model.zero_grad()
images = images.to(self.setting.device)
labels = labels.to(self.setting.device)
logits = self._model(images)
loss = self._loss_func(logits, labels)
loss.backward()
optimizer.step()
local_steps += 1
batch_loss.append(loss.item())
epoch_loss.append(sum(batch_loss) / len(batch_loss))
mean_loss = sum(epoch_loss) / len(epoch_loss)
return TrainingResult(
loss=mean_loss, steps=local_steps, learning_rate=self.setting.learning_rate
)
| [
"torch.div",
"torch.nn.CrossEntropyLoss"
] | 1.5.0 | dawidkski/federated-faceid | 95b1f4b7da0e8baf1cac35edf3b49528c650c491 |
0.4 | """
This file handles model creation: it consults the options
and creates each encoder and decoder accordingly.
"""
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.decoders.decoder import InputFeedRNNDecoder, StdRNNDecoder
from onmt.decoders.transformer import TransformerDecoder
from onmt.decoders.cnn_decoder import CNNDecoder
from onmt.modules import Embeddings, CopyGenerator
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
"""
Build an Embeddings instance.
Args:
        opt: the options in the current environment.
word_dict(Vocab): words dictionary.
feature_dicts([Vocab], optional): a list of feature dictionary.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
if for_encoder:
embedding_dim = opt.src_word_vec_size
else:
embedding_dim = opt.tgt_word_vec_size
word_padding_idx = word_dict.stoi[inputters.PAD_WORD]
num_word_embeddings = len(word_dict)
feats_padding_idx = [feat_dict.stoi[inputters.PAD_WORD]
for feat_dict in feature_dicts]
num_feat_embeddings = [len(feat_dict) for feat_dict in
feature_dicts]
return Embeddings(word_vec_size=embedding_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feats_padding_idx,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam")
def build_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
        opt: the options in the current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.encoder_type == "transformer":
return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size,
opt.heads, opt.transformer_ff,
opt.dropout, embeddings)
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.enc_rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
# "rnn" or "brnn"
return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
opt.enc_rnn_size, opt.dropout, embeddings,
opt.bridge)
def build_decoder(opt, embeddings):
"""
Various decoder dispatcher function.
Args:
        opt: the options in the current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
if opt.decoder_type == "transformer":
return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size,
opt.heads, opt.transformer_ff,
opt.global_attention, opt.copy_attn,
opt.self_attn_type,
opt.dropout, embeddings)
elif opt.decoder_type == "cnn":
return CNNDecoder(opt.dec_layers, opt.dec_rnn_size,
opt.global_attention, opt.copy_attn,
opt.cnn_kernel_width, opt.dropout,
embeddings)
elif opt.input_feed:
return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn)
else:
return StdRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn)
def load_test_model(opt, dummy_opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
fields = inputters.load_fields_from_vocab(
checkpoint['vocab'], data_type=opt.data_type)
model_opt = checkpoint['opt']
for arg in dummy_opt:
if arg not in model_opt:
model_opt.__dict__[arg] = dummy_opt[arg]
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
model.eval()
model.generator.eval()
return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
        model_opt: the options loaded from the checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
        checkpoint: the model generated by the train phase, or a resumed snapshot
                    of a model from a stopped training run.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img", "audio"], \
("Unsupported model type %s" % (model_opt.model_type))
# for backward compatibility
if model_opt.rnn_size != -1:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
if model_opt.model_type == 'text' and \
model_opt.enc_rnn_size != model_opt.dec_rnn_size:
raise AssertionError("""We do not support different encoder and
decoder rnn sizes for translation now.""")
# Build encoder.
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
encoder = build_encoder(model_opt, src_embeddings)
elif model_opt.model_type == "img":
if ("image_channel_size" not in model_opt.__dict__):
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(model_opt.enc_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dropout,
image_channel_size)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(model_opt.rnn_type,
model_opt.enc_layers,
model_opt.dec_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dec_rnn_size,
model_opt.audio_enc_pooling,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size)
# Build decoder.
tgt_dict = fields["tgt"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = build_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
if src_dict != tgt_dict:
raise AssertionError('The `-share_vocab` should be set during '
'preprocess if you use share_embeddings!')
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = build_decoder(model_opt, tgt_embeddings)
# Build NMTModel(= encoder + decoder).
device = torch.device("cuda" if gpu else "cpu")
model = onmt.models.NMTModel(encoder, decoder)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt.dec_rnn_size,
fields["tgt"].vocab)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
        # This preserves backward compatibility for models using a custom layernorm
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = \
{fix_key(k): v for (k, v) in checkpoint['model'].items()}
# end of patch for backward compatibility
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
# Add generator to model (this registers it as parameter of model).
model.generator = generator
model.to(device)
return model
def build_model(model_opt, opt, fields, checkpoint):
""" Build the Model """
logger.info('Building model...')
model = build_base_model(model_opt, fields,
use_gpu(opt), checkpoint)
logger.info(model)
return model
| [
"torch.nn.LogSoftmax",
"torch.device",
"torch.nn.init.xavier_uniform_",
"torch.load"
] | 0.4.1 | Nazukixv/OpenNMT-py | 6265ddbbe9053b018714ac1fb4be9ec8adbaa128 |
1.4 | import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as T
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from data.segmentation import SegmentDataset
from model.segmentation.fcn import FCN32
from model.segmentation.unet import UNet, UNetVGG16
__all__ = [ "SegmentAgent" ]
class SegmentAgent:
"""Train Image Segmentation model
Requirements:
Simple baseline
- (15%) validation mIoU > 0.635
- (15%) testing mIoU > 0.625
"""
def __init__(self, config):
self.config = config
# Check environment
if torch.cuda.is_available():
self.device = torch.device(config['train']['device'])
else:
raise RuntimeError("Please train your model with GPU")
# Create dataset
tr_transform = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]), ])
te_transform = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]), ])
train_dataset = SegmentDataset(root=config['dataset']['train']['root'],
transform=tr_transform)
valid_dataset = SegmentDataset(root=config['dataset']['valid']['root'],
transform=te_transform)
# Create dataloader
self.train_loader = DataLoader(train_dataset,
batch_size=config['loader']['batch_size'],
num_workers=config['loader']['num_workers'],
shuffle=True)
self.valid_loader = DataLoader(valid_dataset,
batch_size=config['loader']['batch_size'],
num_workers=config['loader']['num_workers'],
shuffle=False)
# Create model
if config['train']['model'] == 'fcn':
self.model = FCN32(n_classes=7)
elif config['train']['model'] == 'unet':
self.model = UNetVGG16(n_classes=7)
self.model.to(self.device)
# Create optimizer
self.optimizer = optim.Adam(self.model.parameters(), lr=config['optim']['lr'])
# Create loss function
self.criterion = nn.CrossEntropyLoss()
# Create tensorboard
tensorboard_dir = osp.join(config['train']['log_dir'], config['train']['exp_name'])
self.writer = SummaryWriter(tensorboard_dir)
# Logging
self.start_epoch = 0
self.current_epoch = -1
self.current_loss = 10000
# Resume training or not
if config['train']['resume']:
checkpoint_file = osp.join(config['train']['log_dir'],
config['train']['checkpoint_dir'],
'best.pth')
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
for param_group in self.optimizer.param_groups:
param_group['lr'] = config['optim']['lr']
self.current_epoch = checkpoint['current_epoch'] + 1
self.start_epoch = self.current_epoch + 1
print("Resume training at epoch {}".format(self.start_epoch))
def train(self):
for epoch in range(self.start_epoch, self.config['train']['n_epochs']):
self.current_epoch = epoch
self.train_one_epoch()
self.validate()
def train_one_epoch(self):
running_loss = 0
self.model.train()
for i, (imgs, targets) in enumerate(self.train_loader):
imgs = imgs.to(self.device)
targets = targets.to(self.device)
# Forward & Backward
self.optimizer.zero_grad()
outputs = self.model(imgs) # (n, c, h, w)
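            # flatten (n, c, h, w) logits to (n*h*w, c) so CrossEntropyLoss scores each pixel independently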
preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
labels = targets.flatten()
loss = self.criterion(preds, labels)
loss.backward()
self.optimizer.step()
# Cumulate result
running_loss += loss.item() * len(imgs)
# Show training information
if (i % self.config['train']['interval']) == 0:
print("Epoch {}:{}({}%), Loss: {:.2f}".format(
self.current_epoch, self.config['train']['n_epochs'],
int(i*100/len(self.train_loader)), loss.item()))
train_loss = running_loss / len(self.train_loader.dataset)
print("Epoch {}:{}, Train Loss: {:.2f}".format(
self.current_epoch, self.config['train']['n_epochs'], train_loss))
# Export result to tensorboard
self.writer.add_scalar("Train Loss", train_loss, self.current_epoch)
def validate(self):
running_loss = 0
pred_masks = []
true_masks = []
self.model.eval()
with torch.no_grad():
for imgs, targets in self.valid_loader:
imgs = imgs.to(self.device)
targets = targets.to(self.device)
outputs = self.model(imgs) # (n, c, h, w)
                # Save segmentation mask
pred_mask = np.argmax(outputs.detach().cpu().numpy(), axis=1)
pred_masks.append(pred_mask)
true_masks.append(targets.detach().cpu().numpy())
# Compute loss
preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
labels = targets.flatten()
loss = self.criterion(preds, labels)
# Validation Loss
running_loss += loss.item() * len(imgs)
# Show validation result
pred_masks = np.vstack(pred_masks)
true_masks = np.vstack(true_masks)
miou = self._mean_iou_score(pred_masks, true_masks)
valid_loss = running_loss / len(self.valid_loader.dataset)
print("Epoch {}:{}, Valid Loss: {:.2f}, mIoU: {:.3f}".format(
self.current_epoch, self.config['train']['n_epochs'],
valid_loss, miou))
# Save training checkpoints
if valid_loss < self.current_loss:
self.current_loss = valid_loss
self._save_checkpoint()
# Export result to tensorboard
self.writer.add_scalar("Valid Loss", valid_loss, self.current_epoch)
def finalize(self):
pass
def _save_checkpoint(self):
checkpoints = { 'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'current_epoch': self.current_epoch,
'current_loss': self.current_loss }
checkpoint_file = osp.join(self.config['train']['log_dir'],
self.config['train']['checkpoint_dir'],
'best.pth')
if not osp.exists(osp.dirname(checkpoint_file)):
os.makedirs(osp.dirname(checkpoint_file))
torch.save(checkpoints, checkpoint_file)
print("Save checkpoint to '{}'".format(checkpoint_file))
def _mean_iou_score(self, pred_masks, true_masks):
"""Compute mean IoU score over 6 classes"""
mean_iou = 0
for i in range(6):
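            # IoU for class i = TP / (TP + FP + FN); tp_fp counts all predicted pixels of class i, tp_fn all ground-truth pixels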
tp_fp = np.sum(pred_masks == i)
tp_fn = np.sum(true_masks == i)
tp = np.sum((pred_masks == i) * (true_masks == i))
iou = tp / (tp_fp + tp_fn - tp)
mean_iou += iou / 6
return mean_iou
| [
"torch.device",
"torch.save",
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.4.0 | johnnylord/trytry-segmentation | a88d75571ddba92bd10ac2d7303bee9426188b62 |
1.6 | import argparse
from random import choice
from pathlib import Path
# torch
import torch
from torch.optim import Adam
from torch.nn.utils import clip_grad_norm_
# vision imports
from PIL import Image
from torchvision import transforms as T
from torch.utils.data import DataLoader, Dataset
from torchvision.datasets import ImageFolder
from torchvision.utils import make_grid, save_image
# dalle related classes and utils
from dalle_pytorch import OpenAIDiscreteVAE, DiscreteVAE, DALLE
from dalle_pytorch.simple_tokenizer import tokenize, tokenizer, VOCAB_SIZE
# argument parsing
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required = False)
group.add_argument('--vae_path', type = str,
help='path to your trained discrete VAE')
group.add_argument('--dalle_path', type = str,
help='path to your partially trained DALL-E')
parser.add_argument('--image_text_folder', type = str, required = True,
help='path to your folder of images and text for learning the DALL-E')
args = parser.parse_args()
# helpers
def exists(val):
return val is not None
# constants
VAE_PATH = args.vae_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = 20
BATCH_SIZE = 4
LEARNING_RATE = 3e-4
GRAD_CLIP_NORM = 0.5
MODEL_DIM = 512
TEXT_SEQ_LEN = 256
DEPTH = 2
HEADS = 4
DIM_HEAD = 64
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path))
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
dalle_params = dict(
vae = vae,
**dalle_params
)
IMAGE_SIZE = vae_params['image_size']
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
        print("using OpenAI's pretrained VAE for encoding images to tokens")
vae_params = None
vae = OpenAIDiscreteVAE()
IMAGE_SIZE = vae.image_size
dalle_params = dict(
vae = vae,
num_text_tokens = VOCAB_SIZE,
text_seq_len = TEXT_SEQ_LEN,
dim = MODEL_DIM,
depth = DEPTH,
heads = HEADS,
dim_head = DIM_HEAD
)
# helpers
def save_model(path):
save_obj = {
'hparams': dalle_params,
'vae_params': vae_params,
'weights': dalle.state_dict()
}
torch.save(save_obj, path)
# dataset loading
class TextImageDataset(Dataset):
def __init__(self, folder, text_len = 256, image_size = 128):
super().__init__()
path = Path(folder)
text_files = [*path.glob('**/*.txt')]
image_files = [
*path.glob('**/*.png'),
*path.glob('**/*.jpg'),
*path.glob('**/*.jpeg')
]
text_files = {t.stem: t for t in text_files}
image_files = {i.stem: i for i in image_files}
keys = (image_files.keys() & text_files.keys())
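        # keep only examples that have both an image file and a text file (matched by filename stem)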
self.keys = list(keys)
self.text_files = {k: v for k, v in text_files.items() if k in keys}
self.image_files = {k: v for k, v in image_files.items() if k in keys}
        self.image_transform = T.Compose([
T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
T.CenterCrop(image_size),
T.Resize(image_size),
T.ToTensor(),
T.Lambda(lambda t: t.expand(3, -1, -1)),
T.Normalize((0.5,) * 3, (0.5,) * 3)
])
def __len__(self):
return len(self.keys)
def __getitem__(self, ind):
key = self.keys[ind]
text_file = self.text_files[key]
image_file = self.image_files[key]
image = Image.open(image_file)
descriptions = text_file.read_text().split('\n')
descriptions = list(filter(lambda t: len(t) > 0, descriptions))
description = choice(descriptions)
tokenized_text = tokenize(description).squeeze(0)
mask = tokenized_text != 0
        image_tensor = self.image_transform(image)
return tokenized_text, image_tensor, mask
# create dataset and dataloader
ds = TextImageDataset(
args.image_text_folder,
text_len = TEXT_SEQ_LEN,
image_size = IMAGE_SIZE
)
assert len(ds) > 0, 'dataset is empty'
print(f'{len(ds)} image-text pairs found for training')
dl = DataLoader(ds, batch_size = BATCH_SIZE, shuffle = True, drop_last = True)
# initialize DALL-E
dalle = DALLE(**dalle_params).cuda()
if RESUME:
dalle.load_state_dict(weights)
# optimizer
opt = Adam(dalle.parameters(), lr = LEARNING_RATE)
# experiment tracker
import wandb
wandb.config.depth = DEPTH
wandb.config.heads = HEADS
wandb.config.dim_head = DIM_HEAD
wandb.init(project = 'dalle_train_transformer', resume = RESUME)
# training
for epoch in range(EPOCHS):
for i, (text, images, mask) in enumerate(dl):
text, images, mask = map(lambda t: t.cuda(), (text, images, mask))
loss = dalle(text, images, mask = mask, return_loss = True)
loss.backward()
clip_grad_norm_(dalle.parameters(), GRAD_CLIP_NORM)
opt.step()
opt.zero_grad()
log = {}
if i % 10 == 0:
print(epoch, i, f'loss - {loss.item()}')
log = {
**log,
'epoch': epoch,
'iter': i,
'loss': loss.item()
}
if i % 100 == 0:
sample_text = text[:1]
token_list = sample_text.masked_select(sample_text != 0).tolist()
decoded_text = tokenizer.decode(token_list)
image = dalle.generate_images(
text[:1],
mask = mask[:1],
filter_thres = 0.9 # topk sampling at 0.9
)
save_model(f'./dalle.pt')
wandb.save(f'./dalle.pt')
log = {
**log,
'image': wandb.Image(image, caption = decoded_text)
}
wandb.log(log)
save_model(f'./dalle-final.pt')
wandb.save('./dalle-final.pt')
wandb.finish()
| [
"torch.save",
"torch.utils.data.DataLoader"
] | 1.6 | Atica57/DALLE-pytorch | 4fa108271aeb1972fcb118390ec15b656f2c328a |
1.3 | import time
import random
import numpy as np
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import torch
from torch.utils.data import Dataset
from src import config
def draw_grapheme(grapheme, font_path, size=(137, 236)):
height, width = size
image = Image.new('RGB', (width, height))
draw = ImageDraw.Draw(image)
font_size = np.random.randint(70, 110)
font = ImageFont.truetype(str(font_path), font_size)
w, h = draw.textsize(grapheme, font=font)
width_ratio = np.random.uniform(1.5, 2.5)
height_ratio = np.random.uniform(2.5, 3.5)
fill = np.random.randint(200, 255)
draw.text(((width - w) / width_ratio, (height - h) / height_ratio),
grapheme, font=font, fill=fill)
image = image.filter(ImageFilter.BLUR)
return np.array(image)[:, :, 0]
def get_draw_data():
graphemes = []
for grapheme_root_idx, grapheme_root in config.class_map['grapheme_root'].items():
for vowel_diacritic_idx, vowel_diacritic in config.class_map['vowel_diacritic'].items():
for consonant_diacritic_idx, consonant_diacritic in config.class_map['consonant_diacritic'].items():
consonant_diacritic, grapheme_root, vowel_diacritic = [c if c != '0' else '' for c in
[consonant_diacritic, grapheme_root,
vowel_diacritic]]
grapheme = consonant_diacritic + grapheme_root + vowel_diacritic
graphemes.append({
'grapheme': grapheme,
'grapheme_root': grapheme_root_idx,
'vowel_diacritic': vowel_diacritic_idx,
'consonant_diacritic': consonant_diacritic_idx
})
return graphemes
class BengaliDrawDataset(Dataset):
def __init__(self,
fonts_dir,
transform=None,
mixer=None):
self.fonts_dir = fonts_dir
self.transform = transform
self.mixer = mixer
self.data = get_draw_data()
self.font_paths = sorted(Path(fonts_dir).glob('*.ttf'))
def __len__(self):
return len(self.data)
def get_sample(self, idx):
sample = self.data[idx]
font_path = np.random.choice(self.font_paths)
image = draw_grapheme(sample['grapheme'], font_path,
size=config.raw_image_shape)
grapheme = torch.tensor(sample['grapheme_root'], dtype=torch.int64)
vowel = torch.tensor(sample['vowel_diacritic'], dtype=torch.int64)
consonant = torch.tensor(sample['consonant_diacritic'], dtype=torch.int64)
target = grapheme, vowel, consonant
return image, target
def _set_random_seed(self, idx):
seed = int(time.time() * 1000.0) + idx
random.seed(seed)
np.random.seed(seed % (2**32 - 1))
@torch.no_grad()
def __getitem__(self, idx):
self._set_random_seed(idx)
image, target = self.get_sample(idx)
if self.mixer is not None:
image, target = self.mixer(self, image, target)
if self.transform is not None:
image = self.transform(image)
return image, target
| [
"torch.no_grad",
"torch.tensor"
] | 1.3.1 | lRomul/argus-bengali-ai | e64374230f5390a17305769126ff4bfc9a2a8644 |
1.4 | import main
from common import Task, STOP, GNN_TYPE
from attrdict import AttrDict
from experiment import Experiment
import torch
override_params = {
2: {'batch_size': 64, 'eval_every': 1000},
3: {'batch_size': 64},
4: {'batch_size': 1024},
5: {'batch_size': 1024},
6: {'batch_size': 1024},
7: {'batch_size': 2048},
8: {'batch_size': 1024, 'accum_grad': 2}, # effective batch size of 2048, with less GPU memory
}
class Results:
def __init__(self, train_acc, test_acc, epoch):
self.train_acc = train_acc
self.test_acc = test_acc
self.epoch = epoch
if __name__ == '__main__':
task = Task.DICTIONARY
gnn_type = GNN_TYPE.GAT
stopping_criterion = STOP.TRAIN
min_depth = 2
max_depth = 8
results_all_depths = {}
for depth in range(min_depth, max_depth + 1):
num_layers = depth + 1
args = main.get_fake_args(task=task, depth=depth, num_layers=num_layers, loader_workers=7,
type=gnn_type, stop=stopping_criterion,
no_activation=True, no_residual=False)
if depth in override_params:
for key, value in AttrDict(override_params[depth]).items():
args[key] = value
train_acc, test_acc, epoch = Experiment(args).run()
torch.cuda.empty_cache()
results_all_depths[depth] = Results(train_acc=train_acc, test_acc=test_acc, epoch=epoch)
print()
print(f'Task: {task}')
    print('depth, train_acc, test_acc, epoch')
for depth in range(min_depth, max_depth + 1):
res = results_all_depths[depth]
print(f'{depth}, {res.train_acc}, {res.test_acc}, {res.epoch}')
| [
"torch.cuda.empty_cache"
] | 1.4.0 | urialon/bottleneck | 481fbb95edc6ae711da40b6305b40c12ce6a6d29 |
1.0 | # coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch UniSpeech model."""
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...deepspeed import is_deepspeed_zero3_enabled
from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, Wav2Vec2BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import torch_int_div
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_unispeech import UniSpeechConfig
logger = logging.get_logger(__name__)
_HIDDEN_STATES_START_POSITION = 2
# General docstring
_CONFIG_FOR_DOC = "UniSpeechConfig"
_PROCESSOR_FOR_DOC = "Wav2Vec2Processor"
# Base docstring
_CHECKPOINT_FOR_DOC = "patrickvonplaten/unispeech-large-1500h-cv-timit"
_EXPECTED_OUTPUT_SHAPE = [1, 292, 1024]
# CTC docstring
_CTC_EXPECTED_OUTPUT = "'mister quilter is the apposl of the midle classes and weare glad to welcom his gosepl'"
_CTC_EXPECTED_LOSS = 17.17
# Audio class docstring
_FEAT_EXTRACTOR_FOR_DOC = "Wav2Vec2FeatureExtractor"
_SEQ_CLASS_CHECKPOINT = "hf-internal-testing/tiny-random-unispeech"
_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'" # TODO(anton) - could you quickly fine-tune a KS WavLM Model
_SEQ_CLASS_EXPECTED_LOSS = 0.66 # TODO(anton) - could you quickly fine-tune a KS WavLM Model
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/unispeech-large-1500h-cv",
"microsoft/unispeech-large-multi-lingual-1500h-cv",
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
]
@dataclass
class UniSpeechForPreTrainingOutput(ModelOutput):
"""
Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.
Args:
loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
            Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
            paper](https://arxiv.org/pdf/2006.11477.pdf).
projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
projected quantized states.
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
target vectors for contrastive loss.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
projected_states: torch.FloatTensor = None
projected_quantized_states: torch.FloatTensor = None
codevector_perplexity: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.sum(-1).detach().tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=np.bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller than
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
    # add offset to the starting indexes so that the indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
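# Minimal usage sketch (shapes and hyperparameters here are illustrative, not taken from this file):
# mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.05, mask_length=10)
# hidden_states[torch.from_numpy(mask_time_indices)] = masked_spec_embed.to(hidden_states.dtype)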
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechNoLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechLayerNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states.transpose(-2, -1)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->UniSpeech
class UniSpeechGroupNormConvLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = nn.Conv1d(
self.in_conv_dim,
self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
stride=config.conv_stride[layer_id],
bias=config.conv_bias,
)
self.activation = ACT2FN[config.feat_extract_activation]
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->UniSpeech
class UniSpeechPositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
)
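        # the grouped convolution acts as a relative positional encoding over `num_conv_pos_embeddings` frames;
        # padding of kernel_size // 2 keeps the output length close to the input length (the extra frame produced
        # for even kernel sizes is trimmed by UniSpeechSamePadLayer below)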
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
deepspeed.zero.register_external_parameter(self, self.conv.weight_v)
deepspeed.zero.register_external_parameter(self, self.conv.weight_g)
else:
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
self.padding = UniSpeechSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
hidden_states = hidden_states.transpose(1, 2)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->UniSpeech
class UniSpeechSamePadLayer(nn.Module):
def __init__(self, num_conv_pos_embeddings):
super().__init__()
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
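        # Conv1d with padding = kernel_size // 2 produces one extra output frame when the kernel size is even;
        # that trailing frame is removed in forward() so the positional embedding matches the sequence length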
def forward(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, :, : -self.num_pad_remove]
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->UniSpeech
class UniSpeechFeatureEncoder(nn.Module):
"""Construct the features from raw audio waveform"""
def __init__(self, config):
super().__init__()
if config.feat_extract_norm == "group":
conv_layers = [UniSpeechGroupNormConvLayer(config, layer_id=0)] + [
UniSpeechNoLayerNormConvLayer(config, layer_id=i + 1)
for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
UniSpeechLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = nn.ModuleList(conv_layers)
self.gradient_checkpointing = False
self._requires_grad = True
def _freeze_parameters(self):
for param in self.parameters():
param.requires_grad = False
self._requires_grad = False
def forward(self, input_values):
hidden_states = input_values[:, None]
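        # add a channel dimension: (batch_size, time) -> (batch_size, 1, time), as expected by the first Conv1d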
# make sure hidden_states require grad for gradient_checkpointing
if self._requires_grad and self.training:
hidden_states.requires_grad = True
for conv_layer in self.conv_layers:
if self._requires_grad and self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs)
return custom_forward
hidden_states = torch.utils.checkpoint.checkpoint(
create_custom_forward(conv_layer),
hidden_states,
)
else:
hidden_states = conv_layer(hidden_states)
return hidden_states
class UniSpeechFeatureExtractor(UniSpeechFeatureEncoder):
def __init__(self, config):
super().__init__(config)
warnings.warn(
f"The class `{self.__class__.__name__}` has been depreciated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->UniSpeech
class UniSpeechFeatureProjection(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
def forward(self, hidden_states):
# non-projected hidden states are needed for quantization
norm_hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(norm_hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states, norm_hidden_states
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->UniSpeech
class UniSpeechAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
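    # _shape reorders (bsz, seq_len, embed_dim) into per-head tensors of shape (bsz, num_heads, seq_len, head_dim)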
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
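        # fold the head dimension into the batch dimension so a single bmm computes attention scores for all heads at once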
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->UniSpeech
class UniSpeechFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->UniSpeech
class UniSpeechEncoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = UniSpeechAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = UniSpeechFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderLayerStableLayerNorm(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = UniSpeechAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.feed_forward = UniSpeechFeedForward(config)
self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = self.dropout(hidden_states)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->UniSpeech
class UniSpeechEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList([UniSpeechEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens output 0
hidden_states[~attention_mask] = 0.0
# extend attention_mask
attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
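            # the 0/1 padding mask becomes an additive bias: attended positions contribute 0 and padded positions
            # contribute -10000 to the attention scores before the softmax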
attention_mask = attention_mask.expand(
attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
if not skip_the_layer or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer),
hidden_states,
attention_mask,
)
else:
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->UniSpeech
class UniSpeechEncoderStableLayerNorm(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = UniSpeechPositionalConvEmbedding(config)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList(
[UniSpeechEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
# make sure padded tokens are not attended to
hidden_states[~attention_mask] = 0
# extend attention_mask
attention_mask = (1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)) * -10000.0
attention_mask = attention_mask.expand(
attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
)
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states)
deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled()
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
            skip_the_layer = self.training and (dropout_probability < self.config.layerdrop)
if not skip_the_layer or deepspeed_zero3_is_enabled:
# under deepspeed zero3 all gpus must run in sync
# XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
if self.gradient_checkpointing and self.training:
# create gradient checkpointing function
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer),
hidden_states,
attention_mask,
)
else:
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
class UniSpeechGumbelVectorQuantizer(nn.Module):
"""
Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH
GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information.
"""
def __init__(self, config):
super().__init__()
self.num_groups = config.num_codevector_groups
self.num_vars = config.num_codevectors_per_group
if config.codevector_dim % self.num_groups != 0:
raise ValueError(
f"`config.codevector_dim {config.codevector_dim} must be divisible by `config.num_codevector_groups`"
f" {self.num_groups} for concatenation"
)
# storage for codebook variables (codewords)
self.codevectors = nn.Parameter(
torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups)
)
self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars)
# can be decayed for training
self.temperature = 2
@staticmethod
def _compute_perplexity(probs):
marginal_probs = probs.mean(dim=0)
perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
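        # exp(entropy) of the batch-averaged codeword distribution, summed over the codevector groups;
        # higher values indicate more uniform codebook usage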
return perplexity
def forward(self, hidden_states):
batch_size, sequence_length, hidden_size = hidden_states.shape
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
if self.training:
            # sample codevector probs via gumbel softmax in a differentiable way
codevector_probs = nn.functional.gumbel_softmax(
hidden_states.float(), tau=self.temperature, hard=True
).type_as(hidden_states)
# compute perplexity
codevector_soft_dist = torch.softmax(
hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
)
perplexity = self._compute_perplexity(codevector_soft_dist)
else:
# take argmax in non-differentiable way
            # compute hard codevector distribution (one hot)
codevector_idx = hidden_states.argmax(dim=-1)
codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
-1, codevector_idx.view(-1, 1), 1.0
)
codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
perplexity = self._compute_perplexity(codevector_probs)
codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
# use probs to retrieve codevectors
codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
return codevectors, perplexity
class UniSpeechPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = UniSpeechConfig
base_model_prefix = "unispeech"
main_input_name = "input_values"
_keys_to_ignore_on_load_missing = [r"position_ids"]
supports_gradient_checkpointing = True
def _init_weights(self, module):
"""Initialize the weights"""
# gumbel softmax requires special init
if isinstance(module, UniSpeechGumbelVectorQuantizer):
module.weight_proj.weight.data.normal_(mean=0.0, std=1)
module.weight_proj.bias.data.zero_()
nn.init.uniform_(module.codevectors)
elif isinstance(module, UniSpeechPositionalConvEmbedding):
nn.init.normal_(
module.conv.weight,
mean=0,
std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
)
nn.init.constant_(module.conv.bias, 0)
elif isinstance(module, UniSpeechFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
nn.init.uniform_(module.projection.weight, a=-k, b=k)
nn.init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch_int_div(input_length - kernel_size, stride) + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
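    # For reference (illustrative, assuming the default wav2vec2-style strides (5, 2, 2, 2, 2, 2, 2) and
    # kernels (10, 3, 3, 3, 3, 2, 2)): 16000 input samples (1 second at 16 kHz) reduce to 49 output frames,
    # i.e. roughly one feature frame every 20 ms.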
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        # Effectively attention_mask.sum(-1), but not in-place so that it can run
        # in inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
        # these two operations make sure that all values before the output length indices are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
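        # illustrative example: an output length of 3 with feature_vector_length 5 yields [True, True, True, False, False]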
return attention_mask
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (UniSpeechEncoder, UniSpeechEncoderStableLayerNorm, UniSpeechFeatureEncoder)):
module.gradient_checkpointing = value
UNISPEECH_START_DOCSTRING = r"""
UniSpeech was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled
Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei,
Michael Zeng, Xuedong Huang.
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, etc.).
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
behavior.
Parameters:
config ([`UniSpeechConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UNISPEECH_INPUTS_DOCSTRING = r"""
Args:
input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file
into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install
soundfile*). To prepare the array into *input_values*, the [`UniSpeechProcessor`] should be used for
padding and conversion into a tensor of type *torch.FloatTensor*. See [`UniSpeechProcessor.__call__`] for
details.
attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
<Tip warning={true}>
`attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should
**not** be passed to avoid degraded performance when doing batched inference. For such models
`input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these
models also yield slightly different results depending on whether `input_values` is padded or not.
</Tip>
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare UniSpeech Model transformer outputting raw hidden-states without any specific head on top.",
UNISPEECH_START_DOCSTRING,
)
class UniSpeechModel(UniSpeechPreTrainedModel):
def __init__(self, config: UniSpeechConfig):
super().__init__(config)
self.config = config
self.feature_extractor = UniSpeechFeatureEncoder(config)
self.feature_projection = UniSpeechFeatureProjection(config)
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_())
if config.do_stable_layer_norm:
self.encoder = UniSpeechEncoderStableLayerNorm(config)
else:
self.encoder = UniSpeechEncoder(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
def _mask_hidden_states(
self,
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
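            # the per-channel feature mask is broadcast across all time steps before zeroing those channels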
hidden_states[mask_feature_indices] = 0
return hidden_states
@add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_PROCESSOR_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=Wav2Vec2BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, Wav2Vec2BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return Wav2Vec2BaseModelOutput(
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"""UniSpeech Model with a vector-quantization module and ctc loss for pre-training.""", UNISPEECH_START_DOCSTRING
)
class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
def __init__(self, config: UniSpeechConfig):
super().__init__(config)
self.unispeech = UniSpeechModel(config)
self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
self.quantizer = UniSpeechGumbelVectorQuantizer(config)
self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)
self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)
self.dropout = nn.Dropout(config.final_dropout)
# Initialize weights and apply final processing
self.post_init()
def set_gumbel_temperature(self, temperature: int):
"""
        Set the Gumbel softmax temperature to a given value. Only necessary for training.
"""
self.quantizer.temperature = temperature
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech.feature_extractor._freeze_parameters()
@staticmethod
def compute_contrastive_logits(
target_features: torch.FloatTensor,
negative_features: torch.FloatTensor,
predicted_features: torch.FloatTensor,
temperature: int = 1,
):
"""
        Compute logits for contrastive loss using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, a temperature can be applied.
"""
target_features = torch.cat([target_features, negative_features], dim=0)
logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
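        # broadcasting yields one similarity per (candidate, batch, time) position; when the positive target is passed
        # first, index 0 along the leading axis corresponds to it and the remaining entries to the sampled negatives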
logits = logits.type_as(target_features)
# apply temperature
logits = logits / temperature
return logits
@add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=UniSpeechForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, UniSpeechForPreTrainingOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices to mask extracted features for contrastive loss. When in training mode, the model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
sampled_negative_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_negatives)`, *optional*):
Indices indicating which quantized target vectors are used as negative sampled vectors in contrastive loss.
Required input for pre-training.
Returns:
Example:
```python
>>> import torch
>>> from transformers import Wav2Vec2FeatureExtractor, UniSpeechForPreTraining
>>> from transformers.models.unispeech.modeling_unispeech import _compute_mask_indices
>>> feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
... "hf-internal-testing/tiny-random-unispeech-sat"
... )
>>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv")
>>> # TODO: Add full pretraining example
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.unispeech(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
transformer_features = outputs[0]
# quantize all (unmasked) extracted features and project to final vq dim
extract_features = self.dropout_features(outputs[1])
quantized_features, codevector_perplexity = self.quantizer(extract_features)
# project quantized features twice
quantized_features = self.project_q(quantized_features)
quantized_features = self.project_hid(quantized_features)
prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(
self.config.replace_prob
)
prob_replace_matrix = prob_replace_matrix.transpose(0, 1)
sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)
sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)
sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)
logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (
quantized_features.masked_fill(~sampled_replace_matrix, 0.0)
)
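        # UniSpeech feature mixing: with probability `config.replace_prob` a frame's transformer feature is
        # replaced by its quantized counterpart before the CTC projection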
# project to ctc units
logits = self.dropout(logits)
logits = self.ctc_proj(logits)
# TODO(PVP) - add negative sampling & loss computation
loss = None
if not return_dict:
if loss is not None:
return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return UniSpeechForPreTrainingOutput(
loss=loss,
projected_states=transformer_features,
projected_quantized_states=quantized_features,
codevector_perplexity=codevector_perplexity,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
UNISPEECH_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH
class UniSpeechForCTC(UniSpeechPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.unispeech = UniSpeechModel(config)
self.dropout = nn.Dropout(config.final_dropout)
if config.vocab_size is None:
raise ValueError(
f"You are trying to instantiate {self.__class__} with a configuration that "
"does not define the vocabulary size of the language model head. Please "
"instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
"or define `vocab_size` of your model's configuration."
)
output_hidden_size = (
config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
)
self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_extractor(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech.feature_extractor._freeze_parameters()
@add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_PROCESSOR_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=CausalLMOutput,
config_class=_CONFIG_FOR_DOC,
expected_output=_CTC_EXPECTED_OUTPUT,
expected_loss=_CTC_EXPECTED_LOSS,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[Tuple, CausalLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
config.vocab_size - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.unispeech(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
if labels.max() >= self.config.vocab_size:
raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
# retrieve loss input_lengths from attention_mask
attention_mask = (
attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
)
input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
# assuming that padded tokens are filled with -100
# when not being attended to
labels_mask = labels >= 0
target_lengths = labels_mask.sum(-1)
flattened_targets = labels.masked_select(labels_mask)
# ctc_loss doesn't support fp16
log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)
with torch.backends.cudnn.flags(enabled=False):
loss = nn.functional.ctc_loss(
log_probs,
flattened_targets,
input_lengths,
target_lengths,
blank=self.config.pad_token_id,
reduction=self.config.ctc_loss_reduction,
zero_infinity=self.config.ctc_zero_infinity,
)
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutput(
loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
@add_start_docstrings(
"""
UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
SUPERB Keyword Spotting.
""",
UNISPEECH_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->UniSpeech, wav2vec2->unispeech, WAV_2_VEC_2->UNISPEECH
class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):
def __init__(self, config):
super().__init__(config)
if hasattr(config, "add_adapter") and config.add_adapter:
raise ValueError(
"Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)"
)
self.unispeech = UniSpeechModel(config)
num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings
if config.use_weighted_layer_sum:
self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech.feature_extractor._freeze_parameters()
def freeze_base_model(self):
"""
Calling this function will disable the gradient computation for the base model so that its parameters will not
be updated during training. Only the classification head will be updated.
"""
for param in self.unispeech.parameters():
param.requires_grad = False
@add_start_docstrings_to_model_forward(UNISPEECH_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_FEAT_EXTRACTOR_FOR_DOC,
checkpoint=_SEQ_CLASS_CHECKPOINT,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
modality="audio",
expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
)
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
) -> Union[Tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states
outputs = self.unispeech(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if self.config.use_weighted_layer_sum:
hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
hidden_states = torch.stack(hidden_states, dim=1)
norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
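            # weighted layer sum: a learned softmax over the layer weights combines the hidden states of the
            # embeddings and of every transformer layer into a single representation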
else:
hidden_states = outputs[0]
hidden_states = self.projector(hidden_states)
if attention_mask is None:
pooled_output = hidden_states.mean(dim=1)
else:
padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
hidden_states[~padding_mask] = 0.0
pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)
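            # masked mean pooling: padded frames are zeroed out and the sum is divided by the number of
            # valid frames per example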
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.nn.init.kaiming_normal_",
"torch.bmm",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.ctc_loss",
"torch.nn.LayerNorm",
"torch.nn.Conv1d",
"torch.nn.init.constant_",
"torch.FloatTensor",
"torch.tensor",
"torch.zeros",
"torch.nn.functional.dropout",
"torch.nn.GroupNorm",
"torch.nn.functional.log_softmax",
"torch.nn.functional.softmax",
"torch.nn.init.uniform_",
"torch.nn.utils.weight_norm",
"torch.log",
"torch.backends.cudnn.flags",
"torch.nn.Dropout",
"torch.arange",
"torch.bernoulli",
"torch.ones_like"
] | 1.0 | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 |
1.0 | # coding=utf-8
# Copyright Studio Ousia and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LUKE model."""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN, gelu
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_luke import LukeConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LukeConfig"
_TOKENIZER_FOR_DOC = "LukeTokenizer"
_CHECKPOINT_FOR_DOC = "studio-ousia/luke-base"
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = [
"studio-ousia/luke-base",
"studio-ousia/luke-large",
# See all LUKE models at https://huggingface.co/models?filter=luke
]
@dataclass
class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling):
"""
Base class for outputs of the LUKE model.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):
Sequence of entity hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length +
            entity_length, sequence_length + entity_length)`. Attention weights after the attention softmax, used to
compute the weighted average in the self-attention heads.
"""
entity_last_hidden_state: torch.FloatTensor = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseLukeModelOutput(BaseModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):
Sequence of entity hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
entity_last_hidden_state: torch.FloatTensor = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LukeMaskedLMOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
The sum of masked language modeling (MLM) loss and entity prediction loss.
mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked entity prediction (MEP) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
mlm_loss: Optional[torch.FloatTensor] = None
mep_loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
entity_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class EntityClassificationOutput(ModelOutput):
"""
Outputs of entity classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class EntityPairClassificationOutput(ModelOutput):
"""
Outputs of entity pair classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class EntitySpanClassificationOutput(ModelOutput):
"""
Outputs of entity span classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output of each
layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attention weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
class LukeEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self,
input_ids=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
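# For example, with padding_idx=1 and sequence_length=4 this yields [2, 3, 4, 5], which is then broadcast over the batch.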
return position_ids.unsqueeze(0).expand(input_shape)
class LukeEntityEmbeddings(nn.Module):
def __init__(self, config: LukeConfig):
super().__init__()
self.config = config
self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0)
if config.entity_emb_size != config.hidden_size:
self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: torch.LongTensor = None
):
if token_type_ids is None:
token_type_ids = torch.zeros_like(entity_ids)
entity_embeddings = self.entity_embeddings(entity_ids)
if self.config.entity_emb_size != self.config.hidden_size:
entity_embeddings = self.entity_embedding_dense(entity_embeddings)
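# Each entity mention can span several word positions; entries padded with -1 are clamped for the embedding lookup,
# masked out, and the remaining position embeddings are averaged to produce one position embedding per entity.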
position_embeddings = self.position_embeddings(position_ids.clamp(min=0))
position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1)
position_embeddings = position_embeddings * position_embedding_mask
position_embeddings = torch.sum(position_embeddings, dim=-2)
position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = entity_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LukeSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.use_entity_aware_attention = config.use_entity_aware_attention
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
if self.use_entity_aware_attention:
self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size)
self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size)
self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
word_hidden_states,
entity_hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
word_size = word_hidden_states.size(1)
if entity_hidden_states is None:
concat_hidden_states = word_hidden_states
else:
concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)
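# Keys and values are computed over the concatenation of word and entity tokens, so the two token types can attend to each other.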
key_layer = self.transpose_for_scores(self.key(concat_hidden_states))
value_layer = self.transpose_for_scores(self.value(concat_hidden_states))
if self.use_entity_aware_attention and entity_hidden_states is not None:
# compute query vectors using word-word (w2w), word-entity (w2e), entity-word (e2w), entity-entity (e2e)
# query layers
w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states))
w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states))
e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states))
e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states))
# compute w2w, w2e, e2w, and e2e key vectors used with the query vectors computed above
w2w_key_layer = key_layer[:, :, :word_size, :]
e2w_key_layer = key_layer[:, :, :word_size, :]
w2e_key_layer = key_layer[:, :, word_size:, :]
e2e_key_layer = key_layer[:, :, word_size:, :]
# compute attention scores based on the dot product between the query and key vectors
w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2))
w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2))
e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2))
e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2))
# combine attention scores to create the final attention score matrix
word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3)
entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3)
attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2)
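# The combined attention_scores matrix now covers the full word + entity sequence along both the query and key dimensions.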
else:
query_layer = self.transpose_for_scores(self.query(concat_hidden_states))
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the LukeModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
output_word_hidden_states = context_layer[:, :word_size, :]
if entity_hidden_states is None:
output_entity_hidden_states = None
else:
output_entity_hidden_states = context_layer[:, word_size:, :]
if output_attentions:
outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs)
else:
outputs = (output_word_hidden_states, output_entity_hidden_states)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LukeSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LukeAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LukeSelfAttention(config)
self.output = LukeSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
raise NotImplementedError("LUKE does not support the pruning of attention heads")
def forward(
self,
word_hidden_states,
entity_hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
word_size = word_hidden_states.size(1)
self_outputs = self.self(
word_hidden_states,
entity_hidden_states,
attention_mask,
head_mask,
output_attentions,
)
if entity_hidden_states is None:
concat_self_outputs = self_outputs[0]
concat_hidden_states = word_hidden_states
else:
concat_self_outputs = torch.cat(self_outputs[:2], dim=1)
concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)
attention_output = self.output(concat_self_outputs, concat_hidden_states)
word_attention_output = attention_output[:, :word_size, :]
if entity_hidden_states is None:
entity_attention_output = None
else:
entity_attention_output = attention_output[:, word_size:, :]
# add attentions if we output them
outputs = (word_attention_output, entity_attention_output) + self_outputs[2:]
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LukeIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class LukeOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LukeLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LukeAttention(config)
self.intermediate = LukeIntermediate(config)
self.output = LukeOutput(config)
def forward(
self,
word_hidden_states,
entity_hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
word_size = word_hidden_states.size(1)
self_attention_outputs = self.attention(
word_hidden_states,
entity_hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
)
if entity_hidden_states is None:
concat_attention_output = self_attention_outputs[0]
else:
concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1)
outputs = self_attention_outputs[2:] # add self attentions if we output attention weights
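# apply_chunking_to_forward optionally splits the feed-forward computation into chunks along the sequence
# dimension (seq_len_dim) to save memory when chunk_size_feed_forward > 0.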
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output
)
word_layer_output = layer_output[:, :word_size, :]
if entity_hidden_states is None:
entity_layer_output = None
else:
entity_layer_output = layer_output[:, word_size:, :]
outputs = (word_layer_output, entity_layer_output) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class LukeEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
word_hidden_states,
entity_hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_word_hidden_states = () if output_hidden_states else None
all_entity_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)
all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
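# Gradient checkpointing trades compute for memory: intermediate activations are recomputed during the backward pass instead of being stored.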
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
word_hidden_states,
entity_hidden_states,
attention_mask,
layer_head_mask,
)
else:
layer_outputs = layer_module(
word_hidden_states,
entity_hidden_states,
attention_mask,
layer_head_mask,
output_attentions,
)
word_hidden_states = layer_outputs[0]
if entity_hidden_states is not None:
entity_hidden_states = layer_outputs[1]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[2],)
if output_hidden_states:
all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)
all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)
if not return_dict:
return tuple(
v
for v in [
word_hidden_states,
all_word_hidden_states,
all_self_attentions,
entity_hidden_states,
all_entity_hidden_states,
]
if v is not None
)
return BaseLukeModelOutput(
last_hidden_state=word_hidden_states,
hidden_states=all_word_hidden_states,
attentions=all_self_attentions,
entity_last_hidden_state=entity_hidden_states,
entity_hidden_states=all_entity_hidden_states,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class LukePooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class EntityPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.entity_emb_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class EntityPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.transform = EntityPredictionHeadTransform(config)
self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False)
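# The decoder weight is tied to the entity embedding matrix in LukeForMaskedLM.tie_weights().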
self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class LukePreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LukeConfig
base_model_prefix = "luke"
supports_gradient_checkpointing = True
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
if module.embedding_dim == 1: # embedding for bias parameters
module.weight.data.zero_()
else:
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LukeEncoder):
module.gradient_checkpointing = value
LUKE_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`LukeConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
LUKE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`LukeTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
Indices of entity tokens in the entity vocabulary.
Indices can be obtained using [`LukeTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:
- 1 for entity tokens that are **not masked**,
- 0 for entity tokens that are **masked**.
entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Segment token indices to indicate first and second portions of the entity token inputs. Indices are
selected in `[0, 1]`:
- 0 corresponds to a *portion A* entity token,
- 1 corresponds to a *portion B* entity token.
entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
Indices of positions of each input entity in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any"
" specific head on top.",
LUKE_START_DOCSTRING,
)
class LukeModel(LukePreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config: LukeConfig, add_pooling_layer: bool = True):
super().__init__(config)
self.config = config
self.embeddings = LukeEmbeddings(config)
self.entity_embeddings = LukeEntityEmbeddings(config)
self.encoder = LukeEncoder(config)
self.pooler = LukePooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def get_entity_embeddings(self):
return self.entity_embeddings.entity_embeddings
def set_entity_embeddings(self, value):
self.entity_embeddings.entity_embeddings = value
def _prune_heads(self, heads_to_prune):
raise NotImplementedError("LUKE does not support the pruning of attention heads")
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BaseLukeModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.FloatTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseLukeModelOutputWithPooling]:
r"""
Returns:
Examples:
```python
>>> from transformers import LukeTokenizer, LukeModel
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
>>> model = LukeModel.from_pretrained("studio-ousia/luke-base")
# Compute the contextualized entity representation corresponding to the entity mention "Beyoncé"
>>> text = "Beyoncé lives in Los Angeles."
>>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
>>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
>>> outputs = model(**encoding)
>>> word_last_hidden_state = outputs.last_hidden_state
>>> entity_last_hidden_state = outputs.entity_last_hidden_state
# Input Wikipedia entities to obtain enriched contextualized representations of word tokens
>>> text = "Beyoncé lives in Los Angeles."
>>> entities = [
... "Beyoncé",
... "Los Angeles",
... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
>>> entity_spans = [
... (0, 7),
... (17, 28),
... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> encoding = tokenizer(
... text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt"
... )
>>> outputs = model(**encoding)
>>> word_last_hidden_state = outputs.last_hidden_state
>>> entity_last_hidden_state = outputs.entity_last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if entity_ids is not None:
entity_seq_length = entity_ids.size(1)
if entity_attention_mask is None:
entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device)
if entity_token_type_ids is None:
entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
# First, compute word embeddings
word_embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
# Second, compute extended attention mask
extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask)
# Third, compute entity embeddings and concatenate with word embeddings
if entity_ids is None:
entity_embedding_output = None
else:
entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids)
# Fourth, send embeddings through the model
encoder_outputs = self.encoder(
word_embedding_output,
entity_embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# Fifth, get the output. LukeModel outputs the same as BertModel, namely sequence_output of shape (batch_size, seq_len, hidden_size)
sequence_output = encoder_outputs[0]
# Sixth, compute the pooled_output from the sequence_output (the hidden state of the first token)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseLukeModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
entity_last_hidden_state=encoder_outputs.entity_last_hidden_state,
entity_hidden_states=encoder_outputs.entity_hidden_states,
)
def get_extended_attention_mask(
self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]
):
"""
Makes broadcastable attention masks over the word and entity tokens so that padded and masked tokens are ignored.
Arguments:
word_attention_mask (`torch.LongTensor`):
Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
entity_attention_mask (`torch.LongTensor`, *optional*):
Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
Returns:
`torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
"""
attention_mask = word_attention_mask
if entity_attention_mask is not None:
attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1)
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape})")
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
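# Turn the 1/0 keep/ignore mask into an additive bias: 0 for positions to attend to and -10000
# (effectively -inf before the softmax) for masked positions.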
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def create_position_ids_from_input_ids(input_ids, padding_idx):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
padding_idx: int
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
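# Example: input_ids=[[5, 7, 1, 1]] with padding_idx=1 gives mask=[1, 1, 0, 0], cumsum*mask=[1, 2, 0, 0],
# and final position ids [2, 3, 1, 1].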
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask
return incremental_indices.long() + padding_idx
# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead
class LukeLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
@add_start_docstrings(
"""
The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and
masked entity prediction.
""",
LUKE_START_DOCSTRING,
)
class LukeForMaskedLM(LukePreTrainedModel):
_keys_to_ignore_on_save = [
r"lm_head.decoder.weight",
r"lm_head.decoder.bias",
r"entity_predictions.decoder.weight",
]
_keys_to_ignore_on_load_missing = [
r"position_ids",
r"lm_head.decoder.weight",
r"lm_head.decoder.bias",
r"entity_predictions.decoder.weight",
]
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.lm_head = LukeLMHead(config)
self.entity_predictions = EntityPredictionHead(config)
self.loss_fn = nn.CrossEntropyLoss(ignore_index=-1)
# Initialize weights and apply final processing
self.post_init()
def tie_weights(self):
super().tie_weights()
self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings)
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LukeMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.LongTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
entity_labels: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, LukeMaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Labels for computing the masked entity prediction loss. Indices should be in `[-100, 0, ...,
config.entity_vocab_size]` (see `entity_ids` docstring). Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels in `[0, ..., config.entity_vocab_size]`
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
loss = None
mlm_loss = None
logits = self.lm_head(outputs.last_hidden_state)
if labels is not None:
mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1))
if loss is None:
loss = mlm_loss
mep_loss = None
entity_logits = self.entity_predictions(outputs.entity_last_hidden_state)
if entity_labels is not None:
mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1))
if loss is None:
loss = mep_loss
else:
loss = loss + mep_loss
if not return_dict:
output = (logits, entity_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions)
if mlm_loss is not None and mep_loss is not None:
return (loss, mlm_loss, mep_loss) + output
elif mlm_loss is not None:
return (loss, mlm_loss) + output
elif mep_loss is not None:
return (loss, mep_loss) + output
else:
return output
return LukeMaskedLMOutput(
loss=loss,
mlm_loss=mlm_loss,
mep_loss=mep_loss,
logits=logits,
entity_logits=entity_logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity
token) for entity classification tasks, such as Open Entity.
""",
LUKE_START_DOCSTRING,
)
class LukeForEntityClassification(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.FloatTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, EntityClassificationOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
used for the single-label classification. In this case, labels should contain the indices that should be in
`[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0
and 1 indicate false and true, respectively.
Returns:
Examples:
```python
>>> from transformers import LukeTokenizer, LukeForEntityClassification
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
>>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
>>> text = "Beyoncé lives in Los Angeles."
>>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: person
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
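# The classification feature is the hidden state of the first entity token, i.e. the entity inserted for the target mention.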
feature_vector = outputs.entity_last_hidden_state[:, 0, :]
feature_vector = self.dropout(feature_vector)
logits = self.classifier(feature_vector)
loss = None
if labels is not None:
# When the number of dimensions of `labels` is 1, cross entropy is used as the loss function. Binary cross
# entropy is used otherwise.
if labels.ndim == 1:
loss = nn.functional.cross_entropy(logits, labels)
else:
loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
if not return_dict:
output = (
logits,
outputs.hidden_states,
outputs.entity_hidden_states,
outputs.attentions,
)
return ((loss,) + output) if loss is not None else output
return EntityClassificationOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity
tokens) for entity pair classification tasks, such as TACRED.
""",
LUKE_START_DOCSTRING,
)
class LukeForEntityPairClassification(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.FloatTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, EntityPairClassificationOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss is
used for the single-label classification. In this case, labels should contain the indices that should be in
`[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary cross entropy
loss is used for the multi-label classification. In this case, labels should only contain `[0, 1]`, where 0
and 1 indicate false and true, respectively.
Returns:
Examples:
```python
>>> from transformers import LukeTokenizer, LukeForEntityPairClassification
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
>>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
>>> text = "Beyoncé lives in Los Angeles."
>>> entity_spans = [
... (0, 7),
... (17, 28),
... ] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: per:cities_of_residence
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
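# Concatenate the hidden states of the two entity tokens (the head and tail mentions), giving a
# 2 * hidden_size feature vector to match the classifier input.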
feature_vector = torch.cat(
[outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1
)
feature_vector = self.dropout(feature_vector)
logits = self.classifier(feature_vector)
loss = None
if labels is not None:
# When the number of dimensions of `labels` is 1, cross entropy is used as the loss function. Binary cross
# entropy is used otherwise.
if labels.ndim == 1:
loss = nn.functional.cross_entropy(logits, labels)
else:
loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
if not return_dict:
output = (
logits,
outputs.hidden_states,
outputs.entity_hidden_states,
outputs.attentions,
)
return ((loss,) + output) if loss is not None else output
return EntityPairClassificationOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks
such as named entity recognition.
""",
LUKE_START_DOCSTRING,
)
class LukeForEntitySpanClassification(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
entity_ids: Optional[torch.LongTensor] = None,
entity_attention_mask: Optional[torch.LongTensor] = None,
entity_token_type_ids: Optional[torch.LongTensor] = None,
entity_position_ids: Optional[torch.LongTensor] = None,
entity_start_positions: Optional[torch.LongTensor] = None,
entity_end_positions: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, EntitySpanClassificationOutput]:
r"""
entity_start_positions (`torch.LongTensor`):
The start positions of entities in the word token sequence.
entity_end_positions (`torch.LongTensor`):
The end positions of entities in the word token sequence.
labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*):
Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross
entropy loss is used for the single-label classification. In this case, labels should contain the indices
that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length,
num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case,
labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively.
Returns:
Examples:
```python
>>> from transformers import LukeTokenizer, LukeForEntitySpanClassification
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
>>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
>>> text = "Beyoncé lives in Los Angeles"
# List all possible entity spans in the text
>>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens
>>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens
>>> entity_spans = []
>>> for i, start_pos in enumerate(word_start_positions):
... for end_pos in word_end_positions[i:]:
... entity_spans.append((start_pos, end_pos))
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_indices = logits.argmax(-1).squeeze().tolist()
>>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices):
... if predicted_class_idx != 0:
... print(text[span[0] : span[1]], model.config.id2label[predicted_class_idx])
Beyoncé PER
Los Angeles LOC
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
hidden_size = outputs.last_hidden_state.size(-1)
entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions)
entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions)
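# Build each span representation by concatenating its start-token state, end-token state, and entity hidden
# state (3 * hidden_size features, matching the classifier input).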
feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2)
feature_vector = self.dropout(feature_vector)
logits = self.classifier(feature_vector)
loss = None
if labels is not None:
# When the number of dimensions of `labels` is 2, cross entropy is used as the loss function. Binary cross
# entropy is used otherwise.
if labels.ndim == 2:
loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
else:
loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
if not return_dict:
output = (
logits,
outputs.hidden_states,
outputs.entity_hidden_states,
outputs.attentions,
)
return ((loss,) + output) if loss is not None else output
return EntitySpanClassificationOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.arange",
"torch.gather",
"torch.nn.Tanh",
"torch.nn.CrossEntropyLoss",
"torch.ones",
"torch.nn.functional.cross_entropy",
"torch.nn.functional.softmax",
"torch.zeros_like",
"torch.matmul",
"torch.nn.Embedding",
"torch.cumsum",
"torch.sum"
] | 1.0 | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 |
1.0 | #!/usr/bin/env python3
# Copyright 2018 CMU and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bertology: this script shows how you can explore the internals of the models in the library to:
- compute the entropy of the head attentions
- compute the importance of each head
- prune (remove) the low importance heads.
Some parts of this script are adapted from the code of Michel et al. (http://arxiv.org/abs/1905.10650)
which is available at https://github.com/pmichel31415/are-16-heads-really-better-than-1
"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, SequentialSampler, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
GlueDataset,
default_data_collator,
glue_compute_metrics,
glue_output_modes,
glue_processors,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def entropy(p):
"""Compute the entropy of a probability distribution"""
plogp = p * torch.log(p)
plogp[p == 0] = 0
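# A uniform distribution over n outcomes gives entropy log(n); a one-hot distribution gives 0.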
return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
"""Print a 2D tensor"""
logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
for row in range(len(tensor)):
if tensor.dtype != torch.long:
logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
else:
logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
"""This method shows how to compute:
- head attention entropy
- head importance scores according to http://arxiv.org/abs/1905.10650
"""
# Prepare our tensors
n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
head_importance = torch.zeros(n_layers, n_heads).to(args.device)
attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
if head_mask is None:
head_mask = torch.ones(n_layers, n_heads).to(args.device)
head_mask.requires_grad_(requires_grad=True)
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
head_mask = None
preds = None
labels = None
tot_tokens = 0.0
for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
for k, v in inputs.items():
inputs[k] = v.to(args.device)
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
outputs = model(**inputs, head_mask=head_mask)
loss, logits, all_attentions = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
if compute_entropy:
for layer, attn in enumerate(all_attentions):
masked_entropy = entropy(attn.detach()) * inputs["attention_mask"].float().unsqueeze(1)
attn_entropy[layer] += masked_entropy.sum(-1).sum(0).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
# Also store our logits/labels if we want to compute metrics afterwards
if preds is None:
preds = logits.detach().cpu().numpy()
labels = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
labels = np.append(labels, inputs["labels"].detach().cpu().numpy(), axis=0)
tot_tokens += inputs["attention_mask"].float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
exponent = 2
norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
if not args.dont_normalize_global_importance:
head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print/save matrices
np.save(os.path.join(args.output_dir, "attn_entropy.npy"), attn_entropy.detach().cpu().numpy())
np.save(os.path.join(args.output_dir, "head_importance.npy"), head_importance.detach().cpu().numpy())
logger.info("Attention entropies")
print_2d_tensor(attn_entropy)
logger.info("Head importance scores")
print_2d_tensor(head_importance)
logger.info("Head ranked by importance scores")
head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
head_importance.numel(), device=args.device
)
head_ranks = head_ranks.view_as(head_importance)
print_2d_tensor(head_ranks)
return attn_entropy, head_importance, preds, labels
def mask_heads(args, model, eval_dataloader):
"""This method shows how to mask head (set some heads to zero), to test the effect on the network,
based on the head importance scores, as described in Michel et al. (http://arxiv.org/abs/1905.10650)
"""
_, head_importance, preds, labels = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
original_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)
new_head_mask = torch.ones_like(head_importance)
num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
current_score = original_score
while current_score >= original_score * args.masking_threshold:
head_mask = new_head_mask.clone() # save current head mask
# heads from least important to most - keep only not-masked heads
head_importance[head_mask == 0.0] = float("Inf")
current_heads_to_mask = head_importance.view(-1).sort()[1]
if len(current_heads_to_mask) <= num_to_mask:
break
# mask heads
current_heads_to_mask = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
new_head_mask = new_head_mask.view(-1)
new_head_mask[current_heads_to_mask] = 0.0
new_head_mask = new_head_mask.view_as(head_mask)
new_head_mask = new_head_mask.clone().detach()
print_2d_tensor(new_head_mask)
# Compute metric and head importance again
_, head_importance, preds, labels = compute_heads_importance(
args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
current_score = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)",
current_score,
new_head_mask.sum(),
new_head_mask.sum() / new_head_mask.numel() * 100,
)
logger.info("Final head mask")
print_2d_tensor(head_mask)
np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
"""This method shows how to prune head (remove heads weights) based on
the head importance scores as described in Michel et al. (http://arxiv.org/abs/1905.10650)
"""
# Try pruning and test time speedup
# Pruning is like masking but we actually remove the masked weights
before_time = datetime.now()
_, _, preds, labels = compute_heads_importance(
args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
score_masking = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
original_time = datetime.now() - before_time
original_num_params = sum(p.numel() for p in model.parameters())
heads_to_prune = dict(
(layer, (1 - head_mask[layer].long()).nonzero().squeeze().tolist()) for layer in range(len(head_mask))
)
assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
model.prune_heads(heads_to_prune)
pruned_num_params = sum(p.numel() for p in model.parameters())
before_time = datetime.now()
_, _, preds, labels = compute_heads_importance(
args,
model,
eval_dataloader,
compute_entropy=False,
compute_importance=False,
head_mask=None,
actually_pruned=True,
)
preds = np.argmax(preds, axis=1) if args.output_mode == "classification" else np.squeeze(preds)
score_pruning = glue_compute_metrics(args.task_name, preds, labels)[args.metric_name]
new_time = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
original_num_params,
pruned_num_params,
pruned_num_params / original_num_params * 100,
)
logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
logger.info("Pruning: speed ratio (new timing / original timing): %f percents", original_time / new_time * 100)
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models",
)
parser.add_argument(
"--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train selected in the list: " + ", ".join(glue_processors.keys()),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name_or_path",
)
parser.add_argument(
"--cache_dir",
default=None,
type=str,
help="Where do you want to store the pre-trained models downloaded from huggingface.co",
)
parser.add_argument(
"--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
)
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
)
parser.add_argument(
"--dont_normalize_global_importance",
action="store_true",
help="Don't normalize all importance scores between 0 and 1",
)
parser.add_argument(
"--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
)
parser.add_argument(
"--masking_threshold",
default=0.9,
type=float,
help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
)
parser.add_argument(
"--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
)
parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
),
)
parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
args.device = torch.device("cuda", args.local_rank)
args.n_gpu = 1
torch.distributed.init_process_group(backend="nccl") # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set seeds
set_seed(args.seed)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in glue_processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = glue_processors[args.task_name]()
args.output_mode = glue_output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
output_attentions=True,
cache_dir=args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
cache_dir=args.cache_dir,
)
model = AutoModelForSequenceClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
# Distributed and parallel training
model.to(args.device)
if args.local_rank != -1:
model = nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
elif args.n_gpu > 1:
model = nn.DataParallel(model)
# Print/save training arguments
os.makedirs(args.output_dir, exist_ok=True)
torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
logger.info("Training/evaluation parameters %s", args)
# Prepare dataset for the GLUE task
eval_dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev")
if args.data_subset > 0:
eval_dataset = Subset(eval_dataset, list(range(min(args.data_subset, len(eval_dataset)))))
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.batch_size, collate_fn=default_data_collator
)
# Compute head entropy and importance score
compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
head_mask = mask_heads(args, model, eval_dataloader)
prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| [
"torch.zeros",
"torch.device",
"torch.distributed.init_process_group",
"torch.utils.data.SequentialSampler",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.ones",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.ones_like",
"torch.utils.data.distributed.DistributedSampler",
"torch.log",
"torch.nn.DataParallel",
"torch.pow"
] | 1.0 | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 |
1.0 | # coding=utf-8
# Copyright 2018 Google T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_generation_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import ByT5Tokenizer, T5EncoderModel, T5ForConditionalGeneration, T5Model, T5Tokenizer
from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST
class T5ModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
encoder_seq_length=7,
decoder_seq_length=9,
# For common tests
is_training=True,
use_attention_mask=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
d_ff=37,
relative_attention_num_buckets=8,
dropout_rate=0.1,
initializer_factor=0.002,
eos_token_id=1,
pad_token_id=0,
decoder_start_token_id=0,
scope=None,
decoder_layers=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.scope = None
self.decoder_layers = decoder_layers
def get_large_model_config(self):
return T5Config.from_pretrained("t5-base")
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
decoder_attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = self.get_config()
return (
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
)
def get_pipeline_config(self):
return T5Config(
vocab_size=166, # t5 forces 100 extra tokens
d_model=self.hidden_size,
d_ff=self.d_ff,
d_kv=self.hidden_size // self.num_attention_heads,
num_layers=self.num_hidden_layers,
num_decoder_layers=self.decoder_layers,
num_heads=self.num_attention_heads,
relative_attention_num_buckets=self.relative_attention_num_buckets,
dropout_rate=self.dropout_rate,
initializer_factor=self.initializer_factor,
eos_token_id=self.eos_token_id,
bos_token_id=self.pad_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
)
def get_config(self):
return T5Config(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
d_ff=self.d_ff,
d_kv=self.hidden_size // self.num_attention_heads,
num_layers=self.num_hidden_layers,
num_decoder_layers=self.decoder_layers,
num_heads=self.num_attention_heads,
relative_attention_num_buckets=self.relative_attention_num_buckets,
dropout_rate=self.dropout_rate,
initializer_factor=self.initializer_factor,
eos_token_id=self.eos_token_id,
bos_token_id=self.pad_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
)
def check_prepare_lm_labels_via_shift_left(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = T5Model(config=config)
model.to(torch_device)
model.eval()
# make sure that lm_labels are correctly padded from the right
lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id)
        # add causal pad token mask
triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not()
lm_labels.masked_fill_(triangular_mask, self.pad_token_id)
decoder_input_ids = model._shift_right(lm_labels)
for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)):
# first item
self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id)
if i < decoder_input_ids_slice.shape[-1]:
if i < decoder_input_ids.shape[-1] - 1:
# items before diagonal
self.parent.assertListEqual(
decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist()
)
# pad items after diagonal
if i < decoder_input_ids.shape[-1] - 2:
self.parent.assertListEqual(
decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist()
)
else:
# all items after square
self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist())
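    # The check above relies on T5's shift-right convention for building decoder inputs from
    # labels (sketched here for reference):
    #   decoder_input_ids[:, 0]  == decoder_start_token_id
    #   decoder_input_ids[:, 1:] == lm_labels[:, :-1]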
def create_and_check_model(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = T5Model(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
decoder_output = result.last_hidden_state
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(decoder_past), config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]), 4)
def create_and_check_with_lm_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = T5ForConditionalGeneration(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
)
self.parent.assertEqual(len(outputs), 4)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_decoder_model_past(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = T5Model(config=config).get_decoder().to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
output, past_key_values = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = T5Model(config=config).get_decoder()
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple()
        # create a hypothetical next token and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = T5Model(config=config).get_decoder().to(torch_device).eval()
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and next_attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_generate_with_past_key_values(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = T5ForConditionalGeneration(config=config).to(torch_device).eval()
torch.manual_seed(0)
output_without_past_cache = model.generate(
input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False
)
torch.manual_seed(0)
output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True)
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
def create_and_check_model_fp16_forward(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = T5Model(config=config).to(torch_device).half().eval()
output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output).any().item())
def create_and_check_encoder_decoder_shared_weights(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
for model_class in [T5Model, T5ForConditionalGeneration]:
torch.manual_seed(0)
model = model_class(config=config).to(torch_device).eval()
            # loading the state dict copies weights but does not tie them
model.encoder.load_state_dict(model.decoder.state_dict(), strict=False)
torch.manual_seed(0)
tied_config = copy.deepcopy(config)
tied_config.tie_encoder_decoder = True
tied_model = model_class(config=tied_config).to(torch_device).eval()
model_result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
            # check that the tied model has fewer parameters
self.parent.assertLess(
sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())
)
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
# check that outputs are equal
self.parent.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4
)
)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
tied_model.save_pretrained(tmpdirname)
tied_model = model_class.from_pretrained(tmpdirname)
tied_model.to(torch_device)
tied_model.eval()
                # check that the tied model has fewer parameters
self.parent.assertLess(
sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters())
)
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
# check that outputs are equal
self.parent.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx],
tied_model_result[0][0, :, random_slice_idx],
atol=1e-4,
)
)
def check_resize_embeddings_t5_v1_1(
self,
config,
):
prev_vocab_size = config.vocab_size
config.tie_word_embeddings = False
model = T5ForConditionalGeneration(config=config).to(torch_device).eval()
model.resize_token_embeddings(prev_vocab_size - 10)
self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10)
self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10)
self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"use_cache": False,
}
return config, inputs_dict
@require_torch
class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (T5Model, T5ForConditionalGeneration) if is_torch_available() else ()
all_generative_model_classes = (T5ForConditionalGeneration,) if is_torch_available() else ()
all_parallelizable_model_classes = (T5Model, T5ForConditionalGeneration) if is_torch_available() else ()
fx_compatible = True
test_pruning = False
test_resize_embeddings = True
test_model_parallel = True
is_encoder_decoder = True
def setUp(self):
self.model_tester = T5ModelTester(self)
self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_shift_right(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_model_v1_1(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
# check that gated gelu feed forward and different word embeddings work
config = config_and_inputs[0]
config.tie_word_embeddings = False
config.feed_forward_proj = "gated-gelu"
self.model_tester.create_and_check_model(config, *config_and_inputs[1:])
def test_with_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_with_lm_head(*config_and_inputs)
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def test_decoder_model_past_with_attn_mask(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs)
def test_decoder_model_past_with_3d_attn_mask(self):
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = self.model_tester.prepare_config_and_inputs()
attention_mask = ids_tensor(
[self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length],
vocab_size=2,
)
decoder_attention_mask = ids_tensor(
[self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length],
vocab_size=2,
)
self.model_tester.create_and_check_decoder_model_attention_mask_past(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_generate_with_past_key_values(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs)
def test_encoder_decoder_shared_weights(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_model_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
def test_v1_1_resize_embeddings(self):
config = self.model_tester.prepare_config_and_inputs()[0]
self.model_tester.check_resize_embeddings_t5_v1_1(config)
@slow
def test_model_from_pretrained(self):
for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = T5Model.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip("Test has a segmentation fault on torch 1.8.0")
def test_export_to_onnx(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
model = T5Model(config_and_inputs[0]).to(torch_device)
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
model,
(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
f"{tmpdirname}/t5_test.onnx",
export_params=True,
opset_version=9,
input_names=["input_ids", "decoder_input_ids"],
)
def test_generate_with_head_masking(self):
attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
config_and_inputs = self.model_tester.prepare_config_and_inputs()
config = config_and_inputs[0]
max_length = config_and_inputs[1].shape[-1] + 3
model = T5ForConditionalGeneration(config).eval()
model.to(torch_device)
head_masking = {
"head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
"decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
}
for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required by the T5 model when head_mask is specified
if name == "head_mask":
head_masks["decoder_head_mask"] = torch.ones(
config.num_decoder_layers, config.num_heads, device=torch_device
)
out = model.generate(
config_and_inputs[1],
num_beams=1,
max_length=max_length,
output_attentions=True,
return_dict_in_generate=True,
**head_masks,
)
# We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
class T5EncoderOnlyModelTester:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
encoder_seq_length=7,
# For common tests
use_attention_mask=True,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
d_ff=37,
relative_attention_num_buckets=8,
is_training=False,
dropout_rate=0.1,
initializer_factor=0.002,
is_encoder_decoder=False,
eos_token_id=1,
pad_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
# For common tests
self.seq_length = self.encoder_seq_length
self.use_attention_mask = use_attention_mask
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.is_training = is_training
def get_large_model_config(self):
return T5Config.from_pretrained("t5-base")
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
config = T5Config(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
d_ff=self.d_ff,
d_kv=self.hidden_size // self.num_attention_heads,
num_layers=self.num_hidden_layers,
num_heads=self.num_attention_heads,
relative_attention_num_buckets=self.relative_attention_num_buckets,
dropout_rate=self.dropout_rate,
initializer_factor=self.initializer_factor,
eos_token_id=self.eos_token_id,
bos_token_id=self.pad_token_id,
pad_token_id=self.pad_token_id,
is_encoder_decoder=self.is_encoder_decoder,
)
return (
config,
input_ids,
attention_mask,
)
def create_and_check_model(
self,
config,
input_ids,
attention_mask,
):
model = T5EncoderModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
attention_mask=attention_mask,
)
result = model(input_ids=input_ids)
encoder_output = result.last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
def create_and_check_model_fp16_forward(
self,
config,
input_ids,
attention_mask,
):
model = T5EncoderModel(config=config).to(torch_device).half().eval()
output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output).any().item())
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
class T5EncoderOnlyModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (T5EncoderModel,) if is_torch_available() else ()
test_pruning = False
test_resize_embeddings = False
test_model_parallel = True
all_parallelizable_model_classes = (T5EncoderModel,) if is_torch_available() else ()
def setUp(self):
self.model_tester = T5EncoderOnlyModelTester(self)
self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_model_fp16_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
def use_task_specific_params(model, task):
model.config.update(model.config.task_specific_params[task])
@require_torch
@require_sentencepiece
@require_tokenizers
class T5ModelIntegrationTests(unittest.TestCase):
@cached_property
def model(self):
return T5ForConditionalGeneration.from_pretrained("t5-base").to(torch_device)
@cached_property
def tokenizer(self):
return T5Tokenizer.from_pretrained("t5-base")
@slow
def test_small_generation(self):
model = T5ForConditionalGeneration.from_pretrained("t5-small").to(torch_device)
model.config.max_length = 8
model.config.num_beams = 1
model.config.do_sample = False
tokenizer = T5Tokenizer.from_pretrained("t5-small")
input_ids = tokenizer("summarize: Hello there", return_tensors="pt").input_ids.to(torch_device)
sequences = model.generate(input_ids)
output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0]
self.assertTrue(output_str == "Hello there!")
@slow
def test_small_integration_test(self):
"""
        For comparison, run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = T5ForConditionalGeneration.from_pretrained("t5-small").to(torch_device)
tokenizer = T5Tokenizer.from_pretrained("t5-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -19.0845
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_small_v1_1_integration_test(self):
"""
        For comparison, run:
>>> import t5 # pip install t5==0.7.1
>>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary
>>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>'
>>> path_to_mtf_small_spm_model_path = '<fill_in>'
>>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None)
>>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100)
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small").to(torch_device)
tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -59.0293
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_small_byt5_integration_test(self):
"""
        For comparison, run:
>>> import t5 # pip install t5==0.9.1
>>> path_to_byt5_small_checkpoint = '<fill_in>'
        >>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None)
>>> vocab = t5.data.ByteVocabulary()
>>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab)
"""
model = T5ForConditionalGeneration.from_pretrained("google/byt5-small").to(torch_device)
tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
labels = tokenizer("Hi I am", return_tensors="pt").input_ids
loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
mtf_score = -(labels.shape[-1] * loss.item())
EXPECTED_SCORE = -60.7397
self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
@slow
def test_summarization(self):
model = self.model
tok = self.tokenizer
FRANCE_ARTICLE = ( # @noqa
"Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings"
" Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane."
' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."'
' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s'
" comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video"
" showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French"
" Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a"
" phone at the wreckage site. The two publications described the supposed video, but did not post it on"
" their websites. The publications said that they watched the video, which was found by a source close to"
" the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported."
' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the'
" cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the"
' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,'
" editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said"
" the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman"
" in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the"
' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,'
' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be'
" sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by"
" specialized technicians working hand-in-hand with investigators. But none of the cell phones found so"
" far have been sent to the institute, Menichini said. Asked whether staff involved in the search could"
' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin'
' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match'
' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered'
' cell phones from the crash site after Bild and Paris Match published their reports. "That is something'
" we did not know before. ... Overall we can say many things of the investigation weren't revealed by the"
' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline'
" Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the"
" controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the"
' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of'
' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school'
" discovered in an internal investigation, Lufthansa said, included medical documents he submitted in"
" connection with resuming his flight training. The announcement indicates that Lufthansa, the parent"
" company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and"
" ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%"
' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was'
" sharing the information and documents -- including training and medical records -- with public"
" prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the"
" past week to recover human remains and plane debris scattered across a steep mountainside. He saw the"
" crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash"
" site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late"
" Tuesday that no visible human remains were left at the site but recovery teams would keep searching."
" French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all"
" the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested."
" In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said."
" Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew"
" on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with"
" the flight school during his training were among several developments as investigators continued to"
" delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa"
" spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his"
' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in'
" Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at"
" some point before his aviation career and underwent psychotherapy before he got his pilot's license."
" Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the"
" crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to"
" lose his pilot's license, a European government official briefed on the investigation told CNN on"
' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being'
" considered. Another source, a law enforcement official briefed on the investigation, also told CNN that"
" authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would"
" not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had"
" seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded"
" he had psychological issues, the European government official said. But no matter what details emerge"
" about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic"
' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact'
" that maybe they weren't going to keep doing their job and they're upset about that and so they're"
' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to'
" also take that rage and turn it outward on 149 other people who had nothing to do with the person's"
' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight'
" 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura"
" Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine"
" Amiel and Anna-Maja Rappard contributed to this report."
)
SHORTER_ARTICLE = (
"(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on"
" Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The"
" formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based."
" The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its"
' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East'
' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the'
" situation in Palestinian territories, paving the way for possible war crimes investigations against"
" Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and"
" the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the"
" body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a"
' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the'
' world is also a step closer to ending a long era of impunity and injustice," he said, according to an'
' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge'
" Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the"
' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine'
" acquires all the rights as well as responsibilities that come with being a State Party to the Statute."
' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights'
' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should'
" immediately end their pressure, and countries that support universal acceptance of the court's treaty"
' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the'
" group. \"What's objectionable is the attempts to undermine international justice, not Palestine's"
' decision to join a treaty to which over 100 countries around the world are members." In January, when'
" the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an"
' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"'
" disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a"
' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in'
' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We'
' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"'
" it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the"
' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the'
" court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou"
' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war'
" between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry"
" will include alleged war crimes committed since June. The International Criminal Court was set up in"
" 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder"
" and Faith Karimi contributed to this report."
)
IRAN_ARTICLE = (
"(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran"
" in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively"
" block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger."
" Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli"
" Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a"
" letter to the Iranian leadership warning them away from a deal. The debate that has already begun since"
" the announcement of the new framework will likely result in more heat than light. It will not be helped"
" by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ."
" The most misleading assertion, despite universal rejection by experts, is that the negotiations'"
" objective at the outset was the total elimination of any nuclear program in Iran. That is the position"
" of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it"
" had been, there would have been no Iranian team at the negotiating table. Rather, the objective has"
" always been to structure an agreement or series of agreements so that Iran could not covertly develop a"
" nuclear arsenal before the United States and its allies could respond. The new framework has exceeded"
" expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by"
" two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another"
" dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite"
" sharp accusations by some in the United States and its allies, Iran denies having such a program, and"
" U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's"
" continued cooperation with International Atomic Energy Agency inspections is further evidence on this"
" point, and we'll know even more about Iran's program in the coming months and years because of the deal."
" In fact, the inspections provisions that are part of this agreement are designed to protect against any"
" covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that"
" the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter"
" warning that a deal might be killed by Congress or a future president). This of course is not the case."
" The talks were between Iran and the five permanent members of the U.N. Security Council (United States,"
" United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has"
" played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement"
" reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran"
" and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement"
" contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the"
" case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased"
" or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes"
" Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear"
" sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going"
" forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such"
" a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the"
' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not'
" suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New"
" START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement"
" with Iran will not be so balanced. The restrictions and obligations in the final framework agreement"
" will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove"
" most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally"
" some insist that any agreement must address Iranian missile programs, human rights violations or support"
" for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are"
" unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in"
" the negotiations would be a poison pill. This agreement should be judged on its merits and on how it"
" affects the security of our negotiating partners and allies, including Israel. Those judgments should be"
" fact-based, not based on questionable assertions or dubious assumptions."
)
ARTICLE_SUBWAY = (
"New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A"
" year later, she got married again in Westchester County, but to a different man and without divorcing"
" her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos"
' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married'
" once more, this time in the Bronx. In an application for a marriage license, she stated it was her"
' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false'
' instrument for filing in the first degree," referring to her false statements on the 2010 marriage'
" license application, according to court documents. Prosecutors said the marriages were part of an"
" immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to"
" her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was"
" arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New"
" York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total,"
" Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All"
" occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be"
" married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors"
" said the immigration scam involved some of her husbands, who filed for permanent residence status"
" shortly after the marriages. Any divorces happened only after such filings were approved. It was"
" unclear whether any of the men will be prosecuted. The case was referred to the Bronx District"
" Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's"
' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,'
" Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his"
" native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces"
" up to four years in prison. Her next court appearance is scheduled for May 18."
)
expected_summaries = [
'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a'
" cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one"
" magazine says .",
"the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a"
" preliminary examination into the situation in the occupied Palestinian territory . as members of the"
" court, Palestinians may be subject to counter-charges as well .",
"the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:"
" the debate that has already begun since the announcement of the new framework will likely result in more"
" heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and"
" implement a rigorous inspection regime .",
"prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two"
' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10'
" times, with nine of her marriages occurring between 1999 and 2002 .",
]
use_task_specific_params(model, "summarization")
dct = tok(
[model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]],
padding="max_length",
truncation=True,
return_tensors="pt",
).to(torch_device)
self.assertEqual(512, dct["input_ids"].shape[1])
hypotheses_batch = model.generate(
**dct,
num_beams=4,
length_penalty=2.0,
max_length=142,
min_length=56,
no_repeat_ngram_size=3,
do_sample=False,
early_stopping=True,
)
decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False)
self.assertListEqual(
expected_summaries,
decoded,
)
@slow
def test_translation_en_to_de(self):
model = self.model
tok = self.tokenizer
use_task_specific_params(model, "translation_en_to_de")
en_text = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.'
expected_translation = (
'"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.'
)
input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt")
input_ids = input_ids.to(torch_device)
output = model.generate(input_ids)
translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
self.assertEqual(translation, expected_translation)
@slow
def test_translation_en_to_fr(self):
model = self.model # t5-base
tok = self.tokenizer
use_task_specific_params(model, "translation_en_to_fr")
en_text = (
' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of'
" countless generations of stars: the oldest stars are seen as blue dots. "
)
input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt")
input_ids = input_ids.to(torch_device)
output = model.generate(
input_ids=input_ids,
num_beams=4,
length_penalty=2.0,
max_length=100,
no_repeat_ngram_size=3,
do_sample=False,
early_stopping=True,
)
translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
new_truncated_translation = (
"Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre "
"un "
"« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées "
"sous forme "
"de points bleus."
)
self.assertEqual(translation, new_truncated_translation)
@slow
def test_translation_en_to_ro(self):
model = self.model
tok = self.tokenizer
use_task_specific_params(model, "translation_en_to_ro")
en_text = "Taco Bell said it plans to add 2,000 locations in the US by 2022."
expected_translation = "Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022."
inputs = tok(model.config.prefix + en_text, return_tensors="pt").to(torch_device)
output = model.generate(**inputs)
translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
self.assertEqual(translation, expected_translation)
@require_torch
class TestAsymmetricT5(unittest.TestCase):
def build_model_and_check_forward_pass(self, **kwargs):
tester = T5ModelTester(self, **kwargs)
config, *inputs = tester.prepare_config_and_inputs()
(
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = inputs
model = T5ForConditionalGeneration(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
)
# outputs = model(*inputs)
assert len(outputs) == 4
assert outputs["logits"].size() == (tester.batch_size, tester.decoder_seq_length, tester.vocab_size)
assert outputs["loss"].size() == ()
return model
def test_small_decoder(self):
# num_hidden_layers is passed to T5Config as num_layers
model = self.build_model_and_check_forward_pass(decoder_layers=1, num_hidden_layers=2)
assert len(model.encoder.block) == 2
assert len(model.decoder.block) == 1
def test_defaulting_to_symmetry(self):
# num_hidden_layers is passed to T5Config as num_layers
model = self.build_model_and_check_forward_pass(num_hidden_layers=2)
assert len(model.decoder.block) == len(model.encoder.block) == 2
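# Hedged side note, not part of the tests above: the encoder/decoder asymmetry can also
# be seen directly on a config object. This sketch assumes the standard transformers
# T5Config fields num_layers (encoder depth) and num_decoder_layers (decoder depth,
# defaulting to num_layers when omitted); the tiny sizes are arbitrary example values.
from transformers import T5Config
cfg = T5Config(vocab_size=32, d_model=16, d_kv=8, d_ff=32, num_heads=2,
               num_layers=2, num_decoder_layers=1)
assert cfg.num_layers == 2 and cfg.num_decoder_layers == 1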
| [
"torch.zeros",
"torch.cat",
"torch.isnan",
"torch.ones",
"torch.manual_seed",
"torch.all",
"torch.onnx.export",
"torch.allclose"
] | 1.0 | bugface/transformers | ba286fe7d51db12ad663effac83bed8199dd7141 |
0.3 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Tuple
import torch
import torch.distributions as dist
from beanmachine.ppl.inference.proposer.single_site_ancestral_proposer import (
SingleSiteAncestralProposer,
)
from beanmachine.ppl.legacy.inference.proposer.newtonian_monte_carlo_utils import (
is_valid,
hessian_of_log_prob,
)
from beanmachine.ppl.model.rv_identifier import RVIdentifier
from beanmachine.ppl.utils import tensorops
from beanmachine.ppl.world import World
LOGGER = logging.getLogger("beanmachine")
class SingleSiteHalfSpaceNMCProposer(SingleSiteAncestralProposer):
"""
Single-Site Half Space Newtonian Monte Carlo Proposer.
See sec. 3.2 of [1]
[1] Arora, Nim, et al. `Newtonian Monte Carlo: single-site MCMC meets second-order gradient methods`
"""
def __init__(self, node: RVIdentifier):
super().__init__(node)
self._proposal_distribution = None
def compute_alpha_beta(
self, world: World
) -> Tuple[bool, torch.Tensor, torch.Tensor]:
"""
Computes alpha and beta of the Gamma proposal given the node.
alpha = 1 - hessian_diag * x^2
beta = -1 * x * hessian_diag - first_grad
"""
node_val = world[self.node]
first_gradient, hessian_diag = hessian_of_log_prob(
world, self.node, node_val, tensorops.halfspace_gradients
)
if not is_valid(first_gradient) or not is_valid(hessian_diag):
LOGGER.warning(
"Gradient or Hessian is invalid at node {n}.\n".format(n=str(self.node))
)
return False, torch.tensor(0.0), torch.tensor(0.0)
node_val_reshaped = node_val.reshape(-1)
predicted_alpha = (
1 - hessian_diag * (node_val_reshaped * node_val_reshaped)
).t()
predicted_beta = -1 * node_val_reshaped * hessian_diag - first_gradient
condition = (predicted_alpha > 0) & (predicted_beta > 0)
predicted_alpha = torch.where(
condition, predicted_alpha, torch.tensor(1.0).to(dtype=predicted_beta.dtype)
)
node_var = world.get_variable(self.node)
mean = (
node_var.distribution.mean.reshape(-1)
if is_valid(node_var.distribution.mean)
else torch.ones_like(predicted_beta)
)
predicted_beta = torch.where(condition, predicted_beta, mean)
predicted_alpha = predicted_alpha.reshape(node_val.shape)
predicted_beta = predicted_beta.reshape(node_val.shape)
return True, predicted_alpha, predicted_beta
def get_proposal_distribution(self, world: World) -> dist.Distribution:
"""
Returns the proposal distribution of the node.
Args:
world: the world in which we're proposing a new value for node.
Returns:
The proposal distribution.
"""
# if the number of variables in the world is 1 and proposal distribution
# has already been computed, we can use the old proposal distribution
# and skip re-computing the gradient, since there are no other variables
# in the world that may change the gradient and the old one is still
# correct.
if self._proposal_distribution is not None and len(world.latent_nodes) == 1:
return self._proposal_distribution
is_valid, alpha, beta = self.compute_alpha_beta(world)
if not is_valid:
LOGGER.warning(
"Node {n} has invalid proposal solution. ".format(n=self.node)
+ "Proposer falls back to SingleSiteAncestralProposer.\n"
)
return super().get_proposal_distribution(world)
self._proposal_distribution = dist.Gamma(alpha, beta)
return self._proposal_distribution
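# Illustrative sketch (hypothetical numbers, not part of the proposer above): for a scalar
# half-space value x with gradient g and Hessian diagonal h of the log probability,
# compute_alpha_beta builds the Gamma proposal parameters exactly as in its docstring,
# alpha = 1 - h * x^2 and beta = -x * h - g.
import torch
x = torch.tensor(0.5)    # current positive node value
g = torch.tensor(-1.2)   # d/dx log p(x)
h = torch.tensor(-2.0)   # d^2/dx^2 log p(x)
alpha = 1 - h * x * x    # -> 1.5
beta = -1 * x * h - g    # -> 2.2
# Both are positive here, so the proposal would be dist.Gamma(1.5, 2.2); where they are not,
# the code above substitutes alpha = 1 and the prior mean for beta, and an invalid gradient
# or Hessian triggers a fall back to the ancestral proposer.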
| [
"torch.distributions.Gamma",
"torch.tensor",
"torch.ones_like",
"torch.where"
] | 0.3 | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 |
0.3 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import beanmachine.ppl as bm
from beanmachine.ppl.inference import BMGInference
from torch import tensor
from torch.distributions import Bernoulli, Beta, Normal, Uniform, HalfCauchy, StudentT
@bm.random_variable
def beta(n):
return Beta(2.0, 2.0)
@bm.random_variable
def flip_beta():
return Bernoulli(tensor([beta(0), beta(1)]))
@bm.random_variable
def beta_2_2():
return Beta(2.0, tensor([3.0, 4.0]))
@bm.random_variable
def flip_beta_2_2():
return Bernoulli(beta_2_2())
@bm.random_variable
def uniform_2_2():
return Uniform(0.0, tensor([1.0, 1.0]))
@bm.random_variable
def flip_uniform_2_2():
return Bernoulli(uniform_2_2())
@bm.random_variable
def flip_logits():
return Bernoulli(logits=tensor([beta(0), beta(1)]))
@bm.random_variable
def flip_const():
return Bernoulli(tensor([0.25, 0.75]))
@bm.random_variable
def flip_const_4():
return Bernoulli(tensor([0.25, 0.75, 0.5, 0.5]))
@bm.random_variable
def flip_const_2_3():
return Bernoulli(tensor([[0.25, 0.75, 0.5], [0.125, 0.875, 0.625]]))
@bm.random_variable
def normal_2_3():
mus = flip_const_2_3() # 2 x 3 tensor of 0 or 1
sigmas = tensor([2.0, 3.0, 4.0])
return Normal(mus, sigmas)
@bm.random_variable
def hc_3():
return HalfCauchy(tensor([1.0, 2.0, 3.0]))
@bm.random_variable
def studentt_2_3():
return StudentT(hc_3(), normal_2_3(), hc_3())
@bm.functional
def operators():
# Note that we do NOT devectorize the multiplication; it gets
# turned into a MatrixScale.
return ((beta_2_2() + tensor([[5.0, 6.0], [7.0, 8.0]])) * 10.0).exp()
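# Reading aid (informal summary inferred from the expected graphs in the tests below):
# "devectorizing" a model replaces one tensor-valued Sample node with one scalar Sample per
# element plus a ToMatrix node that reassembles them, while a scalar multiplied by a matrix
# is kept as a single MatrixScale node instead of being split elementwise.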
class FixVectorizedModelsTest(unittest.TestCase):
def test_fix_vectorized_models_1(self) -> None:
self.maxDiff = None
observations = {flip_beta(): tensor([0.0, 1.0])}
queries = [flip_beta(), flip_const()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Sample];
N04[label=Tensor];
N05[label=Bernoulli];
N06[label=Sample];
N07[label="Observation tensor([0., 1.])"];
N08[label=Query];
N09[label="[0.25,0.75]"];
N10[label=Bernoulli];
N11[label=Sample];
N12[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N01 -> N03;
N02 -> N04;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N06 -> N07;
N06 -> N08;
N09 -> N10;
N10 -> N11;
N11 -> N12;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Sample];
N04[label=2];
N05[label=1];
N06[label=Bernoulli];
N07[label=Sample];
N08[label=Bernoulli];
N09[label=Sample];
N10[label=ToMatrix];
N11[label=Query];
N12[label=0.25];
N13[label=Bernoulli];
N14[label=Sample];
N15[label=0.75];
N16[label=Bernoulli];
N17[label=Sample];
N18[label=ToMatrix];
N19[label=Query];
N20[label="Observation False"];
N21[label="Observation True"];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N01 -> N03;
N02 -> N06;
N03 -> N08;
N04 -> N10;
N04 -> N18;
N05 -> N10;
N05 -> N18;
N06 -> N07;
N07 -> N10;
N07 -> N20;
N08 -> N09;
N09 -> N10;
N09 -> N21;
N10 -> N11;
N12 -> N13;
N13 -> N14;
N14 -> N18;
N15 -> N16;
N16 -> N17;
N17 -> N18;
N18 -> N19;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_2(self) -> None:
self.maxDiff = None
observations = {flip_const_4(): tensor([0.0, 1.0, 0.0, 1.0])}
queries = [flip_const_4()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label="[0.25,0.75,0.5,0.5]"];
N1[label=Bernoulli];
N2[label=Sample];
N3[label="Observation tensor([0., 1., 0., 1.])"];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N2 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
# Note that due to the order in which we do the rewriting we
# end up with a not-deduplicated Bernoulli(0.5) node here, which
# is slightly unfortunate but probably not worth fixing right now.
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=4];
N01[label=1];
N02[label=0.25];
N03[label=Bernoulli];
N04[label=Sample];
N05[label=0.75];
N06[label=Bernoulli];
N07[label=Sample];
N08[label=0.5];
N09[label=Bernoulli];
N10[label=Sample];
N11[label=Bernoulli];
N12[label=Sample];
N13[label=ToMatrix];
N14[label=Query];
N15[label="Observation False"];
N16[label="Observation True"];
N17[label="Observation False"];
N18[label="Observation True"];
N00 -> N13;
N01 -> N13;
N02 -> N03;
N03 -> N04;
N04 -> N13;
N04 -> N15;
N05 -> N06;
N06 -> N07;
N07 -> N13;
N07 -> N16;
N08 -> N09;
N08 -> N11;
N09 -> N10;
N10 -> N13;
N10 -> N17;
N11 -> N12;
N12 -> N13;
N12 -> N18;
N13 -> N14;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_3(self) -> None:
self.maxDiff = None
observations = {flip_const_2_3(): tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])}
queries = [flip_const_2_3()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"];
N1[label=Bernoulli];
N2[label=Sample];
N3[label="Observation tensor([[0., 0., 0.],\\n [1., 1., 1.]])"];
N4[label=Query];
N0 -> N1;
N1 -> N2;
N2 -> N3;
N2 -> N4;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=3];
N01[label=2];
N02[label=0.25];
N03[label=Bernoulli];
N04[label=Sample];
N05[label=0.75];
N06[label=Bernoulli];
N07[label=Sample];
N08[label=0.5];
N09[label=Bernoulli];
N10[label=Sample];
N11[label=0.125];
N12[label=Bernoulli];
N13[label=Sample];
N14[label=0.875];
N15[label=Bernoulli];
N16[label=Sample];
N17[label=0.625];
N18[label=Bernoulli];
N19[label=Sample];
N20[label=ToMatrix];
N21[label=Query];
N22[label="Observation False"];
N23[label="Observation False"];
N24[label="Observation False"];
N25[label="Observation True"];
N26[label="Observation True"];
N27[label="Observation True"];
N00 -> N20;
N01 -> N20;
N02 -> N03;
N03 -> N04;
N04 -> N20;
N04 -> N22;
N05 -> N06;
N06 -> N07;
N07 -> N20;
N07 -> N23;
N08 -> N09;
N09 -> N10;
N10 -> N20;
N10 -> N24;
N11 -> N12;
N12 -> N13;
N13 -> N20;
N13 -> N25;
N14 -> N15;
N15 -> N16;
N16 -> N20;
N16 -> N26;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N19 -> N27;
N20 -> N21;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_4(self) -> None:
# Demonstrate we can also do devectorizations on logits-style Bernoullis.
# (A logits Bernoulli with a beta prior is a likely mistake in a real model,
# but it is a convenient test case.)
self.maxDiff = None
observations = {}
queries = [flip_logits()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label=2.0];
N1[label=Beta];
N2[label=Sample];
N3[label=Sample];
N4[label=Tensor];
N5[label="Bernoulli(logits)"];
N6[label=Sample];
N7[label=Query];
N0 -> N1;
N0 -> N1;
N1 -> N2;
N1 -> N3;
N2 -> N4;
N3 -> N4;
N4 -> N5;
N5 -> N6;
N6 -> N7;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2.0];
N01[label=Beta];
N02[label=Sample];
N03[label=Sample];
N04[label=2];
N05[label=1];
N06[label=ToReal];
N07[label="Bernoulli(logits)"];
N08[label=Sample];
N09[label=ToReal];
N10[label="Bernoulli(logits)"];
N11[label=Sample];
N12[label=ToMatrix];
N13[label=Query];
N00 -> N01;
N00 -> N01;
N01 -> N02;
N01 -> N03;
N02 -> N06;
N03 -> N09;
N04 -> N12;
N05 -> N12;
N06 -> N07;
N07 -> N08;
N08 -> N12;
N09 -> N10;
N10 -> N11;
N11 -> N12;
N12 -> N13;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_5(self) -> None:
self.maxDiff = None
observations = {}
queries = [studentt_2_3()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite. Note that we have a size[3] stochastic input and
# a size[2, 3] stochastic input to the StudentT, and we broadcast the three
# HalfCauchy samples correctly
expected = """
digraph "graph" {
N00[label="[1.0,2.0,3.0]"];
N01[label=HalfCauchy];
N02[label=Sample];
N03[label="[[0.25,0.75,0.5],\\\\n[0.125,0.875,0.625]]"];
N04[label=Bernoulli];
N05[label=Sample];
N06[label="[2.0,3.0,4.0]"];
N07[label=Normal];
N08[label=Sample];
N09[label=StudentT];
N10[label=Sample];
N11[label=Query];
N00 -> N01;
N01 -> N02;
N02 -> N09;
N02 -> N09;
N03 -> N04;
N04 -> N05;
N05 -> N07;
N06 -> N07;
N07 -> N08;
N08 -> N09;
N09 -> N10;
N10 -> N11;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=3];
N01[label=2];
N02[label=1.0];
N03[label=HalfCauchy];
N04[label=Sample];
N05[label=0.25];
N06[label=Bernoulli];
N07[label=Sample];
N08[label=ToReal];
N09[label=2.0];
N10[label=Normal];
N11[label=Sample];
N12[label=StudentT];
N13[label=Sample];
N14[label=HalfCauchy];
N15[label=Sample];
N16[label=0.75];
N17[label=Bernoulli];
N18[label=Sample];
N19[label=ToReal];
N20[label=3.0];
N21[label=Normal];
N22[label=Sample];
N23[label=StudentT];
N24[label=Sample];
N25[label=HalfCauchy];
N26[label=Sample];
N27[label=0.5];
N28[label=Bernoulli];
N29[label=Sample];
N30[label=ToReal];
N31[label=4.0];
N32[label=Normal];
N33[label=Sample];
N34[label=StudentT];
N35[label=Sample];
N36[label=0.125];
N37[label=Bernoulli];
N38[label=Sample];
N39[label=ToReal];
N40[label=Normal];
N41[label=Sample];
N42[label=StudentT];
N43[label=Sample];
N44[label=0.875];
N45[label=Bernoulli];
N46[label=Sample];
N47[label=ToReal];
N48[label=Normal];
N49[label=Sample];
N50[label=StudentT];
N51[label=Sample];
N52[label=0.625];
N53[label=Bernoulli];
N54[label=Sample];
N55[label=ToReal];
N56[label=Normal];
N57[label=Sample];
N58[label=StudentT];
N59[label=Sample];
N60[label=ToMatrix];
N61[label=Query];
N00 -> N60;
N01 -> N60;
N02 -> N03;
N03 -> N04;
N04 -> N12;
N04 -> N12;
N04 -> N42;
N04 -> N42;
N05 -> N06;
N06 -> N07;
N07 -> N08;
N08 -> N10;
N09 -> N10;
N09 -> N14;
N09 -> N40;
N10 -> N11;
N11 -> N12;
N12 -> N13;
N13 -> N60;
N14 -> N15;
N15 -> N23;
N15 -> N23;
N15 -> N50;
N15 -> N50;
N16 -> N17;
N17 -> N18;
N18 -> N19;
N19 -> N21;
N20 -> N21;
N20 -> N25;
N20 -> N48;
N21 -> N22;
N22 -> N23;
N23 -> N24;
N24 -> N60;
N25 -> N26;
N26 -> N34;
N26 -> N34;
N26 -> N58;
N26 -> N58;
N27 -> N28;
N28 -> N29;
N29 -> N30;
N30 -> N32;
N31 -> N32;
N31 -> N56;
N32 -> N33;
N33 -> N34;
N34 -> N35;
N35 -> N60;
N36 -> N37;
N37 -> N38;
N38 -> N39;
N39 -> N40;
N40 -> N41;
N41 -> N42;
N42 -> N43;
N43 -> N60;
N44 -> N45;
N45 -> N46;
N46 -> N47;
N47 -> N48;
N48 -> N49;
N49 -> N50;
N50 -> N51;
N51 -> N60;
N52 -> N53;
N53 -> N54;
N54 -> N55;
N55 -> N56;
N56 -> N57;
N57 -> N58;
N58 -> N59;
N59 -> N60;
N60 -> N61;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_6(self) -> None:
self.maxDiff = None
observations = {}
queries = [flip_beta_2_2(), flip_uniform_2_2()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite: notice that here torch automatically
# broadcast the 2.0 to [2.0, 2.0] for us when the node was accumulated,
# and similarly for 0.0.
expected = """
digraph "graph" {
N00[label="[2.0,2.0]"];
N01[label="[3.0,4.0]"];
N02[label=Beta];
N03[label=Sample];
N04[label=Bernoulli];
N05[label=Sample];
N06[label=Query];
N07[label="[0.0,0.0]"];
N08[label="[1.0,1.0]"];
N09[label=Uniform];
N10[label=Sample];
N11[label=Bernoulli];
N12[label=Sample];
N13[label=Query];
N00 -> N02;
N01 -> N02;
N02 -> N03;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N07 -> N09;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N12;
N12 -> N13;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After: notice that we correctly generate two samples from a Flat distribution
# here.
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2];
N01[label=1];
N02[label=2.0];
N03[label=3.0];
N04[label=Beta];
N05[label=Sample];
N06[label=Bernoulli];
N07[label=Sample];
N08[label=4.0];
N09[label=Beta];
N10[label=Sample];
N11[label=Bernoulli];
N12[label=Sample];
N13[label=ToMatrix];
N14[label=Query];
N15[label=Flat];
N16[label=Sample];
N17[label=Bernoulli];
N18[label=Sample];
N19[label=Sample];
N20[label=Bernoulli];
N21[label=Sample];
N22[label=ToMatrix];
N23[label=Query];
N00 -> N13;
N00 -> N22;
N01 -> N13;
N01 -> N22;
N02 -> N04;
N02 -> N09;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N06 -> N07;
N07 -> N13;
N08 -> N09;
N09 -> N10;
N10 -> N11;
N11 -> N12;
N12 -> N13;
N13 -> N14;
N15 -> N16;
N15 -> N19;
N16 -> N17;
N17 -> N18;
N18 -> N22;
N19 -> N20;
N20 -> N21;
N21 -> N22;
N22 -> N23;
}
"""
self.assertEqual(expected.strip(), observed.strip())
def test_fix_vectorized_models_7(self) -> None:
self.maxDiff = None
observations = {}
queries = [operators()]
observed = BMGInference().to_dot(queries, observations, after_transform=False)
# The model before the rewrite:
expected = """
digraph "graph" {
N0[label="[2.0,2.0]"];
N1[label="[3.0,4.0]"];
N2[label=Beta];
N3[label=Sample];
N4[label="[[5.0,6.0],\\\\n[7.0,8.0]]"];
N5[label="+"];
N6[label=10.0];
N7[label="*"];
N8[label=Exp];
N9[label=Query];
N0 -> N2;
N1 -> N2;
N2 -> N3;
N3 -> N5;
N4 -> N5;
N5 -> N7;
N6 -> N7;
N7 -> N8;
N8 -> N9;
}
"""
self.assertEqual(expected.strip(), observed.strip())
# After:
observed = BMGInference().to_dot(queries, observations, after_transform=True)
expected = """
digraph "graph" {
N00[label=2];
N01[label=10.0];
N02[label=2.0];
N03[label=3.0];
N04[label=Beta];
N05[label=Sample];
N06[label=ToPosReal];
N07[label=5.0];
N08[label="+"];
N09[label=4.0];
N10[label=Beta];
N11[label=Sample];
N12[label=ToPosReal];
N13[label=6.0];
N14[label="+"];
N15[label=7.0];
N16[label="+"];
N17[label=8.0];
N18[label="+"];
N19[label=ToMatrix];
N20[label=MatrixScale];
N21[label=0];
N22[label=ColumnIndex];
N23[label=index];
N24[label=Exp];
N25[label=1];
N26[label=index];
N27[label=Exp];
N28[label=ColumnIndex];
N29[label=index];
N30[label=Exp];
N31[label=index];
N32[label=Exp];
N33[label=ToMatrix];
N34[label=Query];
N00 -> N19;
N00 -> N19;
N00 -> N33;
N00 -> N33;
N01 -> N20;
N02 -> N04;
N02 -> N10;
N03 -> N04;
N04 -> N05;
N05 -> N06;
N06 -> N08;
N06 -> N16;
N07 -> N08;
N08 -> N19;
N09 -> N10;
N10 -> N11;
N11 -> N12;
N12 -> N14;
N12 -> N18;
N13 -> N14;
N14 -> N19;
N15 -> N16;
N16 -> N19;
N17 -> N18;
N18 -> N19;
N19 -> N20;
N20 -> N22;
N20 -> N28;
N21 -> N22;
N21 -> N23;
N21 -> N29;
N22 -> N23;
N22 -> N26;
N23 -> N24;
N24 -> N33;
N25 -> N26;
N25 -> N28;
N25 -> N31;
N26 -> N27;
N27 -> N33;
N28 -> N29;
N28 -> N31;
N29 -> N30;
N30 -> N33;
N31 -> N32;
N32 -> N33;
N33 -> N34;
}
"""
self.assertEqual(expected.strip(), observed.strip())
| [
"torch.distributions.Normal",
"torch.tensor",
"torch.distributions.Beta"
] | 0.3 | horizon-blue/beanmachine-1 | b13e4e3e28ffb860947eb8046863b0cabb581222 |
1.6 | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""argparser configuration"""
import argparse
import os
import torch
import deepspeed
def add_model_config_args(parser):
"""Model arguments"""
group = parser.add_argument_group('model', 'model configuration')
group.add_argument('--pretrained-bert', action='store_true',
help='use a pretrained bert-large-uncased model instead '
'of initializing from scratch. See '
'--tokenizer-model-type to specify which pretrained '
'BERT model to use')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='dropout probability for attention weights')
group.add_argument('--num-attention-heads', type=int, default=16,
help='num of transformer attention heads')
group.add_argument('--hidden-size', type=int, default=1024,
help='transformer hidden size')
group.add_argument('--intermediate-size', type=int, default=None,
help='transformer embedding dimension for FFN. '
'set to 4*`--hidden-size` if it is None')
group.add_argument('--num-layers', type=int, default=24,
help='num decoder layers')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='layer norm epsilon')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='dropout probability for hidden state transformer')
group.add_argument('--max-position-embeddings', type=int, default=512,
help='maximum number of position embeddings to use')
group.add_argument('--vocab-size', type=int, default=30522,
help='vocab size to use for non-character-level '
'tokenization. This value will only be used when '
'creating a tokenizer')
group.add_argument('--deep-init', action='store_true',
help='initialize bert model similar to gpt2 model. '
'scales initialization of projection layers by a '
'factor of 1/sqrt(2N). Necessary to train bert '
'models larger than BERT-Large.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
help='Pad the vocab size to be divisible by this value.'
' This is added for computational efficiency reasons.')
group.add_argument('--cpu-optimizer', action='store_true',
help='Run optimizer on CPU')
group.add_argument('--cpu_torch_adam', action='store_true',
help='Use Torch Adam as optimizer on CPU.')
return parser
def add_fp16_config_args(parser):
"""Mixed precision arguments."""
group = parser.add_argument_group('fp16', 'fp16 configurations')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode')
group.add_argument('--fp32-embedding', action='store_true',
help='embedding in fp32')
group.add_argument('--fp32-layernorm', action='store_true',
help='layer norm in fp32')
group.add_argument('--fp32-tokentypes', action='store_true',
help='embedding token types in fp32')
group.add_argument('--fp32-allreduce', action='store_true',
help='all-reduce in fp32')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic '
'loss scaling is used.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale')
group.add_argument('--min-scale', type=float, default=1,
help='Minimum loss scale for dynamic loss scale')
return parser
def add_training_args(parser):
"""Training arguments."""
group = parser.add_argument_group('train', 'training configurations')
group.add_argument('--do_train', action='store_true',
help="Do training")
group.add_argument('--do_eval', action='store_true',
help="Do evaluation")
group.add_argument('--zero_shot', action="store_true",
help="do zero-shot")
group.add_argument('--batch-size', type=int, default=4,
help='Data Loader batch size')
group.add_argument('--weight-decay', type=float, default=0.01,
help='weight decay coefficient for L2 regularization')
group.add_argument('--checkpoint-activations', action='store_true',
help='checkpoint activation to allow for training '
'with larger models and sequences')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='uses activation checkpointing from deepspeed')
group.add_argument('--clip-grad', type=float, default=1.0,
help='gradient clipping')
group.add_argument('--epoch', type=int, default=10,
help='total number of epochs to train over all training runs')
group.add_argument('--log-interval', type=int, default=100,
help='report interval')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after this many new iterations.')
group.add_argument('--seed', type=int, default=1234,
help='random seed')
# Batch producer arguments
group.add_argument('--reset-position-ids', action='store_true',
help='Reset position ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
help='Reset self attention mask after '
'end-of-document token.')
# Learning rate.
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay LR over,'
' If None defaults to `--train-iters`*`--epochs`')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine', 'exponential'],
help='learning rate decay function')
group.add_argument('--lr', type=float, default=1.0e-4,
help='initial learning rate')
group.add_argument('--warmup', type=float, default=0.01,
help='percentage of data to warmup on (.01 = 1%% of all '
'training iters). Default 0.01')
# model checkpointing
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=5000,
help='number of iterations between saves')
group.add_argument('--no-save-optim', action='store_true',
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true',
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Path to a directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true',
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true',
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
# distributed training args
group.add_argument('--distributed-backend', default='nccl',
help='which backend to use for distributed '
'training. One of [gloo, nccl]')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--results_dir', type=str, default=None,
help='The dir to save the model.')
group.add_argument('--model_name', type=str, default="test",
help="The name you give to the model.")
# eval
group.add_argument('--eval_ckpt_path', type=str, default=None,
help='The checkpoint path used for evaluation')
return parser
def add_evaluation_args(parser):
"""Evaluation arguments."""
group = parser.add_argument_group('validation', 'validation configurations')
group.add_argument('--eval-batch-size', type=int, default=None,
help='Data Loader batch size for evaluation datasets. '
'Defaults to `--batch-size`')
group.add_argument('--eval-iters', type=int, default=100,
help='number of iterations to run for evaluation '
'validation/test for')
group.add_argument('--eval-interval', type=int, default=1000,
help='interval between running evaluation on validation set')
group.add_argument('--eval-seq-length', type=int, default=None,
help='Maximum sequence length to process for '
'evaluation. Defaults to `--seq-length`')
group.add_argument('--eval-max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use for '
'evaluation. Defaults to '
'math.ceil(`--eval-seq-length`*.15/10)*10')
group.add_argument('--overlapping-eval', type=int, default=32,
help='sliding window for overlapping eval ')
group.add_argument('--cloze-eval', action='store_true',
help='Evaluation dataset from `--valid-data` is a cloze task')
group.add_argument('--eval-hf', action='store_true',
help='perform evaluation with huggingface openai model. '
'use `--load` to specify weights path to be loaded')
group.add_argument('--load-openai', action='store_true',
help='load openai weights into our model. Use `--load` '
'to specify weights path to be loaded')
return parser
def add_text_generate_args(parser):
"""Text generate arguments."""
group = parser.add_argument_group('Text generation', 'configurations')
group.add_argument("--temperature", type=float, default=1.0)
group.add_argument("--top_p", type=float, default=0.0)
group.add_argument("--top_k", type=int, default=0)
group.add_argument("--out-seq-length", type=int, default=256)
return parser
def add_data_args(parser):
"""Train/valid/test data arguments."""
group = parser.add_argument_group('data', 'data configurations')
group.add_argument('--data_dir', type=str, required=True,
help="Training data dir")
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--model-parallel-size', type=int, default=1,
help='size of the model parallel.')
group.add_argument('--shuffle', action='store_true',
help='Shuffle data. Shuffling is deterministic '
'based on seed and current epoch.')
group.add_argument('--use-npy-data-loader', action='store_true',
help='Use the numpy data loader. If set, then '
'train-data-path, val-data-path, and test-data-path '
'should also be provided.')
group.add_argument('--num-workers', type=int, default=2,
help="""Number of workers to use for dataloading""")
group.add_argument('--tokenizer-model-type', type=str,
default='bert-large-uncased',
help="Model type to use for sentencepiece tokenization \
(one of ['bpe', 'char', 'unigram', 'word']) or \
bert vocab to use for BertWordPieceTokenizer (one of \
['bert-large-uncased', 'bert-large-cased', etc.])")
group.add_argument('--tokenizer-path', type=str, default='tokenizer.model',
help='path used to save/load sentencepiece tokenization '
'models')
group.add_argument('--tokenizer-type', type=str,
default='BertWordPieceTokenizer',
choices=['CharacterLevelTokenizer',
'SentencePieceTokenizer',
'BertWordPieceTokenizer',
'GPT2BPETokenizer'],
help='what type of tokenizer to use')
group.add_argument("--cache-dir", default=None, type=str,
help="Where to store pre-trained BERT downloads")
group.add_argument('--use-tfrecords', action='store_true',
help='load `--train-data`, `--valid-data`, '
'`--test-data` from BERT tf records instead of '
'normal data pipeline')
group.add_argument('--seq-length', type=int, default=512,
help="Maximum sequence length to process")
group.add_argument('--max-preds-per-seq', type=int, default=None,
help='Maximum number of predictions to use per sequence. '
'Defaults to math.ceil(`--seq-length`*.15/10)*10. '
'MUST BE SPECIFIED IF `--use-tfrecords` is True.')
return parser
def get_args():
"""Parse all the args."""
parser = argparse.ArgumentParser(description='PyTorch BERT Model')
parser = add_model_config_args(parser)
parser = add_fp16_config_args(parser)
parser = add_training_args(parser)
parser = add_evaluation_args(parser)
parser = add_text_generate_args(parser)
parser = add_data_args(parser)
# Include DeepSpeed configuration arguments
parser = deepspeed.add_config_arguments(parser)
args = parser.parse_args()
if not args.data_dir:
print('WARNING: No data specified')
args.cuda = torch.cuda.is_available()
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
if os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'):
# We are using (OpenMPI) mpirun for launching distributed data parallel processes
local_rank = int(os.getenv('OMPI_COMM_WORLD_LOCAL_RANK'))
local_size = int(os.getenv('OMPI_COMM_WORLD_LOCAL_SIZE'))
# Possibly running with Slurm
num_nodes = int(os.getenv('SLURM_JOB_NUM_NODES', '1'))
nodeid = int(os.getenv('SLURM_NODEID', '0'))
args.local_rank = local_rank
args.rank = nodeid*local_size + local_rank
args.world_size = num_nodes*local_size
args.model_parallel_size = min(args.model_parallel_size, args.world_size)
if args.rank == 0:
print('using world size: {} and model-parallel size: {} '.format(
args.world_size, args.model_parallel_size))
args.dynamic_loss_scale = False
if args.loss_scale is None:
args.dynamic_loss_scale = True
if args.rank == 0:
print(' > using dynamic loss scaling')
# The args fp32_* or fp16_* meant to be active when the
# args fp16 is set. So the default behaviour should all
# be false.
if not args.fp16:
args.fp32_embedding = False
args.fp32_tokentypes = False
args.fp32_layernorm = False
return args
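# Illustrative sketch (hypothetical values) of the mpirun/Slurm rank arithmetic used in
# get_args above: with 2 nodes and 4 local processes per node, the process that sees
# SLURM_NODEID=1 and OMPI_COMM_WORLD_LOCAL_RANK=3 gets global rank 1*4 + 3 = 7 out of a
# world size of 2*4 = 8.
num_nodes, local_size = 2, 4
nodeid, local_rank = 1, 3
rank = nodeid * local_size + local_rank   # 7
world_size = num_nodes * local_size       # 8
assert (rank, world_size) == (7, 8)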
| [
"torch.cuda.is_available"
] | 1.6.0 | wakafengfan/CPM-1-Finetune | b2c30bd94df31bcd6ee75ba90c347113563d4075 |
1.5 | """
This file handles the details of the loss function during training.
This includes: LossComputeBase and the standard NMTLossCompute, and
sharded loss compute stuff.
"""
from __future__ import division
from itertools import count
import torch
import torch.nn as nn
import random as rnd
import table
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CopyGeneratorLoss(nn.Module):
"""Copy generator criterion."""
def __init__(self, vocab_size, force_copy, only_disf_loss, unk_index=0,
ignore_index=-100, eps=1e-20):
super(CopyGeneratorLoss, self).__init__()
self.force_copy = force_copy
self.eps = eps
self.vocab_size = vocab_size
self.ignore_index = ignore_index
self.unk_index = unk_index
self.only_disf_loss=only_disf_loss
def forward(self, scores, tgt):
"""
Args:
scores (FloatTensor): ``(batch_size*tgt_len)`` x dynamic vocab size
whose sum along dim 1 is less than or equal to 1, i.e. cols
softmaxed.
tgt tuple (target, align)
align (LongTensor): ``(tgt_len, batch_size)``
target (LongTensor): ``(tgt_len, batch_size)``
tgt_loss_mask (LongTensor): ``(tgt_len, batch_size)``
"""
# probabilities assigned by the model to the gold targets
align=tgt[1]
target=tgt[0]
tgt_loss_mask=tgt[2]
#print(scores, target)
#print(scores.size(), target.size())
target = target.view(-1)
align = align.view(-1)
tgt_loss_mask = tgt_loss_mask.view(-1)
vocab_probs = scores.gather(1, target.unsqueeze(1)).squeeze(1)
# probability of tokens copied from source
copy_ix = align.unsqueeze(1) + self.vocab_size
copy_tok_probs = scores.gather(1, copy_ix).squeeze(1)
# Set scores for unk to 0 and add eps
copy_tok_probs[align == self.unk_index] = 0
copy_tok_probs += self.eps # to avoid -inf logs
# find the indices in which you do not use the copy mechanism
non_copy = align == self.unk_index
if not self.force_copy:
non_copy = non_copy | (target != self.unk_index)
probs = torch.where(
non_copy, copy_tok_probs + vocab_probs, copy_tok_probs
)
loss = - probs.log() # just NLLLoss; can the module be incorporated?
# Drop padding.
if self.only_disf_loss:
loss[tgt_loss_mask == 1] = 0
else:
loss[target == self.ignore_index] = 0
'''if self.normalize_by_length:
# Compute Loss as NLL divided by seq length
tgt_lens = batch.tgt[:, :, 0].ne(self.padding_idx).sum(0).float()
# Compute Total Loss per sequence in batch
loss = loss.view(-1, batch.batch_size).sum(0)
# Divide by length of each sequence and sum
loss = torch.div(loss, tgt_lens).sum()
else:'''
loss = loss.sum()
return loss
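# Minimal sketch (toy numbers, not taken from any model) of how the copy and generation
# probabilities are combined in CopyGeneratorLoss.forward above: positions whose alignment
# is <unk> had their copy score zeroed (plus eps), so torch.where leaves them with roughly
# the generation probability, while positions with a valid source copy keep the copy score
# (plus the vocab score when force_copy is off and the target word is in-vocab).
import torch
vocab_probs = torch.tensor([0.10, 0.40]) # p(gold word | generate)
copy_tok_probs = torch.tensor([1e-20, 0.25]) # p(gold word | copy); zeroed + eps at position 0
non_copy = torch.tensor([True, False]) # True where align == unk_index
probs = torch.where(non_copy, copy_tok_probs + vocab_probs, copy_tok_probs)
loss = -probs.log() # elementwise NLL; the module then masks and sums it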
class LossCompute(nn.Module):
def __init__(self, vocab, opt, fields,unk_index=0,
ignore_index=-100,smooth_eps=0):
super(LossCompute, self).__init__()
self.criterion = {}
self.label_weights=torch.ones(len(fields['src_label'].vocab),dtype=torch.float,requires_grad=False,device=device)
self.label_weights[fields['src_label'].vocab.stoi[table.IO.BOD_LABEL]]=opt.disf_label_weight
self.label_weights[fields['src_label'].vocab.stoi[table.IO.UNK_WORD]] = 0
self.label_weights[fields['src_label'].vocab.stoi[table.IO.PAD_WORD]] = 0
self.criterion['lay'] = nn.NLLLoss( weight=self.label_weights,
reduction='sum', ignore_index=ignore_index)
if opt.no_attention:
self.criterion['tgt'] = nn.NLLLoss(
reduction='sum', ignore_index=ignore_index)
else:
if opt.no_copy:
self.criterion['tgt'] = nn.NLLLoss(
reduction='sum', ignore_index=ignore_index)
else:
self.criterion['tgt'] = CopyGeneratorLoss(len(vocab),
opt.copy_attn_force, opt.only_disf_loss, unk_index=unk_index,
ignore_index=ignore_index)
def compute_loss(self, pred, gold):
loss_list = []
for loss_name in ('lay', 'tgt'):
if loss_name not in gold:
continue
'''print(loss_name)
print(pred[loss_name].size())
print(gold[loss_name].size())'''
loss = self.criterion[loss_name](pred[loss_name], gold[loss_name])
loss_list.append(loss)
# sum up the loss functions
return loss_list, self.label_weights[gold['lay']].sum()  # sum(loss_list)
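# Minimal sketch (toy numbers, not the real label vocabulary) of how the per-class weights
# built in LossCompute.__init__ above change nn.NLLLoss for the 'lay' targets: the
# disfluency-begin label gets a larger weight, while PAD/UNK rows are weighted 0 and
# therefore contribute nothing to the summed loss.
import torch
import torch.nn as nn
weights = torch.tensor([1.0, 5.0, 0.0]) # e.g. [other, BOD_LABEL, PAD]
log_probs = torch.log(torch.tensor([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]]))
targets = torch.tensor([1, 2]) # one disfluency-begin label, one PAD label
loss = nn.NLLLoss(weight=weights, reduction='sum')(log_probs, targets)
# Only the first position contributes: loss = -5.0 * log(0.2), roughly 8.05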
class SegLossCompute(nn.Module):
def __init__(self, vocab, opt, fields,unk_index=0,
ignore_index=-100,smooth_eps=0):
super(SegLossCompute, self).__init__()
self.criterion= nn.NLLLoss(
reduction='sum', ignore_index=ignore_index)
def compute_loss(self, pred, gold):
loss = self.criterion(pred, gold)
return loss
| [
"torch.nn.NLLLoss",
"torch.cuda.is_available",
"torch.where"
] | 1.5.0 | GT-SALT/Disfluency-Generation-and-Detection | 72126172b466aa74277f3cf0f73b915e5dbeefbb |
1.0 | import os
import shutil
import pickle
import traceback
import json
import logging
import math
import time
import psutil
from time import sleep
from copy import deepcopy
from multiprocess import Process, Manager, cpu_count
from multiprocess.queues import Queue
from multiprocess.synchronize import Lock
from typing import Union, List, Tuple, Optional, Dict, Iterable, Set
from itertools import islice, chain, repeat
from datetime import date
from tqdm.autonotebook import tqdm, trange
from spacy.tokens import Span, Doc, Token
from spacy.language import Language
from medcat import __version__
from medcat.preprocessing.tokenizers import spacy_split_all
from medcat.pipe import Pipe
from medcat.preprocessing.taggers import tag_skip_and_punct
from medcat.cdb import CDB
from medcat.utils.matutils import intersect_nonempty_set
from medcat.utils.data_utils import make_mc_train_test, get_false_positives
from medcat.utils.normalizers import BasicSpellChecker
from medcat.utils.checkpoint import Checkpoint, CheckpointConfig, CheckpointManager
from medcat.utils.helpers import tkns_from_doc, get_important_config_parameters
from medcat.utils.hasher import Hasher
from medcat.ner.vocab_based_ner import NER
from medcat.linking.context_based_linker import Linker
from medcat.utils.filters import get_project_filters, check_filters
from medcat.preprocessing.cleaners import prepare_name
from medcat.meta_cat import MetaCAT
from medcat.utils.meta_cat.data_utils import json_to_fake_spacy
from medcat.config import Config
from medcat.vocab import Vocab
from medcat.utils.decorators import deprecated
from medcat.ner.transformers_ner import TransformersNER
class CAT(object):
r"""
The main MedCAT class used to annotate documents, it is built on top of spaCy
and works as a spaCy pipline. Creates an instance of a spaCy pipline that can
be used as a spacy nlp model.
Args:
cdb (medcat.cdb.CDB):
The concept database that will be used for NER+L
config (medcat.config.Config):
Global configuration for medcat
vocab (medcat.vocab.Vocab, optional):
Vocabulary used for vector embeddings and spelling. Default: None
meta_cats (list of medcat.meta_cat.MetaCAT, optional):
A list of models that will be applied sequentially on each
detected annotation.
Attributes (limited):
cdb (medcat.cdb.CDB):
Concept database used with this CAT instance, please do not assign
this value directly.
config (medcat.config.Config):
The global configuration for medcat. Usually cdb.config will be used for this
field. WILL BE REMOVED - TEMPORARY PLACEHOLDER
vocab (medcat.utils.vocab.Vocab):
The vocabulary object used with this instance, please do not assign
this value directly.
Examples:
>>> cat = CAT(cdb, vocab)
>>> spacy_doc = cat("Put some text here")
>>> print(spacy_doc.ents) # Detected entities
"""
# Add file and console handlers
log = logging.getLogger(__package__)
DEFAULT_MODEL_PACK_NAME = "medcat_model_pack"
def __init__(self,
cdb: CDB,
vocab: Union[Vocab, None] = None,
config: Optional[Config] = None,
meta_cats: List[MetaCAT] = [],
addl_ner: Union[TransformersNER, List[TransformersNER]] = []) -> None:
self.cdb = cdb
self.vocab = vocab
if config is None:
# Take config from the cdb
self.config = cdb.config
else:
# Take the new config and assign it to the CDB also
self.config = config
self.cdb.config = config
self._meta_cats = meta_cats
self._addl_ner = addl_ner if isinstance(addl_ner, list) else [addl_ner]
self._create_pipeline(self.config)
def _create_pipeline(self, config):
# Set log level
self.log.setLevel(config.general['log_level'])
# Build the pipeline
self.pipe = Pipe(tokenizer=spacy_split_all, config=config)
self.pipe.add_tagger(tagger=tag_skip_and_punct,
name='skip_and_punct',
additional_fields=['is_punct'])
if self.vocab is not None:
spell_checker = BasicSpellChecker(cdb_vocab=self.cdb.vocab, config=config, data_vocab=self.vocab)
self.pipe.add_token_normalizer(spell_checker=spell_checker, config=config)
# Add NER
self.ner = NER(self.cdb, config)
self.pipe.add_ner(self.ner)
# Add LINKER
self.linker = Linker(self.cdb, self.vocab, config)
self.pipe.add_linker(self.linker)
# Add addl_ner if they exist
for ner in self._addl_ner:
self.pipe.add_addl_ner(ner, ner.config.general['name'])
# Add meta-annotation classes if they exist
for meta_cat in self._meta_cats:
self.pipe.add_meta_cat(meta_cat, meta_cat.config.general['category_name'])
# Set max document length
self.pipe.spacy_nlp.max_length = config.preprocessing.get('max_document_length', 1000000)
@deprecated(message="Replaced with cat.pipe.spacy_nlp.")
def get_spacy_nlp(self) -> Language:
""" Returns the spacy pipeline with MedCAT
"""
return self.pipe.spacy_nlp
def get_hash(self):
r""" Will not be a deep hash but will try to cactch all the changing parts during training.
"""
hasher = Hasher()
hasher.update(self.cdb.get_hash())
hasher.update(self.config.get_hash())
for mc in self._meta_cats:
hasher.update(mc.get_hash())
for trf in self._addl_ner:
hasher.update(trf.get_hash())
return hasher.hexdigest()
def get_model_card(self, as_dict=False):
"""
A minimal model card for MedCAT model packs.
Args:
as_dict: return the model card as a dictionary instead of a str.
Returns:
By default a str - indented JSON object.
"""
card = {
'Model ID': self.config.version['id'],
'Last Modified On': self.config.version['last_modified'],
'History (from least to most recent)': self.config.version['history'],
'Description': self.config.version['description'],
'Source Ontology': self.config.version['ontology'],
'Location': self.config.version['location'],
'MetaCAT models': self.config.version['meta_cats'],
'Basic CDB Stats': self.config.version['cdb_info'],
'Performance': self.config.version['performance'],
'Important Parameters (Partial view, all available in cat.config)': get_important_config_parameters(self.config),
'MedCAT Version': self.config.version['medcat_version']
}
if as_dict:
return card
else:
return json.dumps(card, indent=2, sort_keys=False)
def _versioning(self):
# Check version info and warn if it has not been filled in
if self.config.version['description'] == 'No description':
self.log.warning("Please consider populating the version information [description, performance, location, ontology] in cat.config.version")
# Automatically fill in the fields needed for versioning
m = self.get_hash()
version = self.config.version
if version['id'] is None or m != version['id']:
if version['id'] is not None:
version['history'].append(version['id'])
version['id'] = m
version['last_modified'] = date.today().strftime("%d %B %Y")
version['cdb_info'] = self.cdb._make_stats()
version['meta_cats'] = [meta_cat.get_model_card(as_dict=True) for meta_cat in self._meta_cats]
version['medcat_version'] = __version__
self.log.warning("Please consider updating [description, performance, location, ontology] in cat.config.version")
def create_model_pack(self, save_dir_path: str, model_pack_name: str = DEFAULT_MODEL_PACK_NAME) -> str:
r""" Will crete a .zip file containing all the models in the current running instance
of MedCAT. This is not the most efficient way, for sure, but good enough for now.
model_pack_name - an id will be appended to this name
returns:
Model pack name
"""
# Spacy model always should be just the name, but during loading it can be reset to path
self.config.general['spacy_model'] = os.path.basename(self.config.general['spacy_model'])
# Versioning
self._versioning()
model_pack_name += "_{}".format(self.config.version['id'])
self.log.warning("This will save all models into a zip file, can take some time and require quite a bit of disk space.")
_save_dir_path = save_dir_path
save_dir_path = os.path.join(save_dir_path, model_pack_name)
# expand user path to make this work with '~'
os.makedirs(os.path.expanduser(save_dir_path), exist_ok=True)
# Save the used spacy model
spacy_path = os.path.join(save_dir_path, self.config.general['spacy_model'])
if str(self.pipe.spacy_nlp._path) != spacy_path:
# First remove if something is there
shutil.rmtree(spacy_path, ignore_errors=True)
shutil.copytree(str(self.pipe.spacy_nlp._path), spacy_path)
# Save the CDB
cdb_path = os.path.join(save_dir_path, "cdb.dat")
self.cdb.save(cdb_path)
# Save the Vocab
vocab_path = os.path.join(save_dir_path, "vocab.dat")
if self.vocab is not None:
# We will allow creation of modelpacks without vocabs
self.vocab.save(vocab_path)
# Save addl_ner
for comp in self.pipe.spacy_nlp.components:
if isinstance(comp[1], TransformersNER):
trf_path = os.path.join(save_dir_path, "trf_" + comp[1].config.general['name'])
comp[1].save(trf_path)
# Save all meta_cats
for comp in self.pipe.spacy_nlp.components:
if isinstance(comp[1], MetaCAT):
name = comp[0]
meta_path = os.path.join(save_dir_path, "meta_" + name)
comp[1].save(meta_path)
# Add a model card also, why not
model_card_path = os.path.join(save_dir_path, "model_card.json")
json.dump(self.get_model_card(as_dict=True), open(model_card_path, 'w'), indent=2)
# Zip everything
shutil.make_archive(os.path.join(_save_dir_path, model_pack_name), 'zip', root_dir=save_dir_path)
# Log model card and return new name
self.log.info(self.get_model_card()) # Print the model card
return model_pack_name
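# Layout sketch for create_model_pack above (all names illustrative): the folder
# <save_dir_path>/<model_pack_name>_<hash>/ ends up holding the copied spaCy model folder,
# cdb.dat, vocab.dat (when a vocab is set), any trf_*/meta_* component folders and
# model_card.json, and the same content is archived next to it as <model_pack_name>_<hash>.zip.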
@classmethod
def load_model_pack(cls, zip_path: str, meta_cat_config_dict: Optional[Dict] = None) -> "CAT":
r"""Load everything within the 'model pack', i.e. the CDB, config, vocab and any MetaCAT models
(if present)
Args:
zip_path:
path to model pack zip.
meta_cat_config_dict:
A config dict that will overwrite existing configs in meta_cat.
e.g. meta_cat_config_dict = {'general': {'device': 'cpu'}}
"""
from medcat.cdb import CDB
from medcat.vocab import Vocab
from medcat.meta_cat import MetaCAT
base_dir = os.path.dirname(zip_path)
filename = os.path.basename(zip_path)
foldername = filename.replace(".zip", '')
model_pack_path = os.path.join(base_dir, foldername)
if os.path.exists(model_pack_path):
cls.log.info("Found an existing unziped model pack at: {}, the provided zip will not be touched.".format(model_pack_path))
else:
cls.log.info("Unziping the model pack and loading models.")
shutil.unpack_archive(zip_path, extract_dir=model_pack_path)
# Load the CDB
cdb_path = os.path.join(model_pack_path, "cdb.dat")
cdb = CDB.load(cdb_path)
# TODO load addl_ner
# Modify the config to contain full path to spacy model
cdb.config.general['spacy_model'] = os.path.join(model_pack_path, os.path.basename(cdb.config.general['spacy_model']))
# Load Vocab
vocab_path = os.path.join(model_pack_path, "vocab.dat")
if os.path.exists(vocab_path):
vocab = Vocab.load(vocab_path)
else:
vocab = None
# Find additional NER (trf) models in the model_pack
trf_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('trf_')]
addl_ner = []
for trf_path in trf_paths:
trf = TransformersNER.load(save_dir_path=trf_path)
trf.cdb = cdb # Set the cat.cdb to be the CDB of the TRF model
addl_ner.append(trf)
# Find meta models in the model_pack
meta_paths = [os.path.join(model_pack_path, path) for path in os.listdir(model_pack_path) if path.startswith('meta_')]
meta_cats = []
for meta_path in meta_paths:
meta_cats.append(MetaCAT.load(save_dir_path=meta_path,
config_dict=meta_cat_config_dict))
cat = cls(cdb=cdb, config=cdb.config, vocab=vocab, meta_cats=meta_cats, addl_ner=addl_ner)
cls.log.info(cat.get_model_card()) # Print the model card
return cat
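# --- Hedged usage sketch (editor's addition). The zip path below is hypothetical. ---
cat = CAT.load_model_pack("models/medcat_model_pack.zip")
doc = cat("The patient was diagnosed with chronic kidney disease.")  # returns a spacy Doc
entities = cat.get_entities("The patient was diagnosed with chronic kidney disease.")
print(entities['entities'])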
def __call__(self, text: Optional[str], do_train: bool = False) -> Optional[Doc]:
r"""
Push the text through the pipeline.
Args:
text (string):
The text to be annotated; if the text is longer than
self.config.preprocessing['max_document_length'] it will be trimmed to that length.
do_train (bool, defaults to `False`):
Whether to run online training on this text. Prefer the self.train() function
for training; this flag is kept here only for special cases.
Returns:
A single spacy document or multiple spacy documents with the extracted entities
"""
# Should we train - do not use this for training unless you know what you are doing;
# use the self.train() function instead
self.config.linking['train'] = do_train
if text is None:
self.log.error("The input text should be either a string or a sequence of strings but got %s", type(text))
return None
else:
text = self._get_trimmed_text(str(text))
return self.pipe(text)
def __repr__(self):
"""
Prints the model_card for this CAT instance.
Returns:
the 'Model Card' for this CAT instance. This includes NER+L config and any MetaCATs
"""
return self.get_model_card(as_dict=False)
def _print_stats(self,
data: Dict,
epoch: int = 0,
use_project_filters: bool = False,
use_overlaps: bool = False,
use_cui_doc_limit: bool = False,
use_groups: bool = False,
extra_cui_filter: Optional[Set] = None) -> Tuple:
r""" TODO: Refactor and make nice
Print metrics on a dataset (F1, P, R); it will also print the concepts that have the most FP, FN, TP.
Args:
data (list of dict):
The json object that we get from MedCATtrainer on export.
epoch (int):
Used during training, so we know which epoch it is.
use_project_filters (boolean):
Each project in MedCATtrainer can have filters; whether to respect those filters
when calculating metrics.
use_overlaps (boolean):
Allow overlapping entities; nearly always False as it is very difficult to annotate overlapping entities.
use_cui_doc_limit (boolean):
If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words
if the document was annotated for that CUI. Useful in very specific situations when during the annotation
process the set of CUIs changed.
use_groups (boolean):
If True concepts that have groups will be combined and stats will be reported on groups.
extra_cui_filter(Optional[Set]):
This filter will be intersected with all other filters, or if all others are not set then only this one will be used.
Returns:
fps (dict):
False positives for each CUI
fns (dict):
False negatives for each CUI
tps (dict):
True positives for each CUI
cui_prec (dict):
Precision for each CUI
cui_rec (dict):
Recall for each CUI
cui_f1 (dict):
F1 for each CUI
cui_counts (dict):
Number of occurrences for each CUI
examples (dict):
Examples for each of the fp, fn, tp. Format will be examples['fp']['cui'][<list_of_examples>]
"""
tp = 0
fp = 0
fn = 0
fps: Dict = {}
fns: Dict = {}
tps: Dict = {}
cui_prec: Dict = {}
cui_rec: Dict = {}
cui_f1: Dict = {}
cui_counts: Dict = {}
examples: Dict = {'fp': {}, 'fn': {}, 'tp': {}}
fp_docs: Set = set()
fn_docs: Set = set()
# reset and back up filters
_filters = deepcopy(self.config.linking['filters'])
filters = self.config.linking['filters']
for pind, project in tqdm(enumerate(data['projects']), desc="Stats project", total=len(data['projects']), leave=False):
filters['cuis'] = set()
# Add extra_cui_filter if set
if isinstance(extra_cui_filter, set):
filters['cuis'] = extra_cui_filter
if use_project_filters:
project_filter = get_project_filters(cuis=project.get('cuis', None),
type_ids=project.get('tuis', None),
cdb=self.cdb,
project=project)
# Intersect project filter with existing if it has something
if project_filter:
filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])
for dind, doc in tqdm(
enumerate(project["documents"]),
desc="Stats document",
total=len(project["documents"]),
leave=False,
):
anns = self._get_doc_annotations(doc)
# Apply document level filtering; in this case project_filter is ignored while the extra_cui_filter is still respected
if use_cui_doc_limit:
_cuis = set([ann['cui'] for ann in anns])
if _cuis:
filters['cuis'] = intersect_nonempty_set(_cuis, extra_cui_filter)
else:
filters['cuis'] = {'empty'}
spacy_doc: Doc = self(doc['text'])
if use_overlaps:
p_anns = spacy_doc._.ents
else:
p_anns = spacy_doc.ents
anns_norm = []
anns_norm_neg = []
anns_examples = []
anns_norm_cui = []
for ann in anns:
cui = ann['cui']
if check_filters(cui, filters):
if use_groups:
cui = self.cdb.addl_info['cui2group'].get(cui, cui)
if ann.get('validated', True) and (not ann.get('killed', False) and not ann.get('deleted', False)):
anns_norm.append((ann['start'], cui))
anns_examples.append({"text": doc['text'][max(0, ann['start']-60):ann['end']+60],
"cui": cui,
"source value": ann['value'],
"acc": 1,
"project index": pind,
"document inedex": dind})
elif ann.get('validated', True) and (ann.get('killed', False) or ann.get('deleted', False)):
anns_norm_neg.append((ann['start'], cui))
if ann.get("validated", True):
# This is used to test whether someone was annotating for this CUI in this document
anns_norm_cui.append(cui)
cui_counts[cui] = cui_counts.get(cui, 0) + 1
p_anns_norm = []
p_anns_examples = []
for ann in p_anns:
cui = ann._.cui
if use_groups:
cui = self.cdb.addl_info['cui2group'].get(cui, cui)
p_anns_norm.append((ann.start_char, cui))
p_anns_examples.append({"text": doc['text'][max(0, ann.start_char-60):ann.end_char+60],
"cui": cui,
"source value": ann.text,
"acc": float(ann._.context_similarity),
"project index": pind,
"document inedex": dind})
for iann, ann in enumerate(p_anns_norm):
cui = ann[1]
if ann in anns_norm:
tp += 1
tps[cui] = tps.get(cui, 0) + 1
example = p_anns_examples[iann]
examples['tp'][cui] = examples['tp'].get(cui, []) + [example]
else:
fp += 1
fps[cui] = fps.get(cui, 0) + 1
fp_docs.add(doc.get('name', 'unk'))
# Add example for this FP prediction
example = p_anns_examples[iann]
if ann in anns_norm_neg:
# Means that it really was annotated as negative
example['real_fp'] = True
examples['fp'][cui] = examples['fp'].get(cui, []) + [example]
for iann, ann in enumerate(anns_norm):
if ann not in p_anns_norm:
cui = ann[1]
fn += 1
fn_docs.add(doc.get('name', 'unk'))
fns[cui] = fns.get(cui, 0) + 1
examples['fn'][cui] = examples['fn'].get(cui, []) + [anns_examples[iann]]
try:
prec = tp / (tp + fp)
rec = tp / (tp + fn)
f1 = 2*(prec*rec) / (prec + rec)
print("Epoch: {}, Prec: {}, Rec: {}, F1: {}\n".format(epoch, prec, rec, f1))
print("Docs with false positives: {}\n".format("; ".join([str(x) for x in list(fp_docs)[0:10]])))
print("Docs with false negatives: {}\n".format("; ".join([str(x) for x in list(fn_docs)[0:10]])))
# Sort fns & prec
fps = {k: v for k, v in sorted(fps.items(), key=lambda item: item[1], reverse=True)}
fns = {k: v for k, v in sorted(fns.items(), key=lambda item: item[1], reverse=True)}
tps = {k: v for k, v in sorted(tps.items(), key=lambda item: item[1], reverse=True)}
# F1 per concept
for cui in tps.keys():
prec = tps[cui] / (tps.get(cui, 0) + fps.get(cui, 0))
rec = tps[cui] / (tps.get(cui, 0) + fns.get(cui, 0))
f1 = 2*(prec*rec) / (prec + rec)
cui_prec[cui] = prec
cui_rec[cui] = rec
cui_f1[cui] = f1
# Get top 10
pr_fps = [(self.cdb.cui2preferred_name.get(cui,
list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fps[cui]) for cui in list(fps.keys())[0:10]]
pr_fns = [(self.cdb.cui2preferred_name.get(cui,
list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, fns[cui]) for cui in list(fns.keys())[0:10]]
pr_tps = [(self.cdb.cui2preferred_name.get(cui,
list(self.cdb.cui2names.get(cui, [cui]))[0]), cui, tps[cui]) for cui in list(tps.keys())[0:10]]
print("\n\nFalse Positives\n")
for one in pr_fps:
print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
print("\n\nFalse Negatives\n")
for one in pr_fns:
print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
print("\n\nTrue Positives\n")
for one in pr_tps:
print("{:70} - {:20} - {:10}".format(str(one[0])[0:69], str(one[1])[0:19], one[2]))
print("*"*110 + "\n")
except Exception:
traceback.print_exc()
# restore filters to original state
self.config.linking['filters'] = _filters
return fps, fns, tps, cui_prec, cui_rec, cui_f1, cui_counts, examples
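# --- Editor's sketch of the micro-averaged metrics printed above (plain arithmetic,
# independent of MedCAT): precision = tp/(tp+fp), recall = tp/(tp+fn), F1 = harmonic mean. ---
def micro_prf1(tp: int, fp: int, fn: int):
    prec = tp / (tp + fp)
    rec = tp / (tp + fn)
    return prec, rec, 2 * prec * rec / (prec + rec)

print(micro_prf1(8, 2, 4))  # -> (0.8, 0.666..., 0.727...)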
def _init_ckpts(self, is_resumed, checkpoint):
if self.config.general['checkpoint']['steps'] is not None or checkpoint is not None:
checkpoint_config = CheckpointConfig(**self.config.general.get('checkpoint', {}))
checkpoint_manager = CheckpointManager('cat_train', checkpoint_config)
if is_resumed:
# TODO: probably remove is_resumed mark and always resume if a checkpoint is provided,
# but I'll leave it for now
checkpoint = checkpoint or checkpoint_manager.get_latest_checkpoint()
self.log.info(f"Resume training on the most recent checkpoint at {checkpoint.dir_path}...")
self.cdb = checkpoint.restore_latest_cdb()
self.cdb.config.merge_config(self.config.__dict__)
self.config = self.cdb.config
self._create_pipeline(self.config)
else:
checkpoint = checkpoint or checkpoint_manager.create_checkpoint()
self.log.info(f"Start new training and checkpoints will be saved at {checkpoint.dir_path}...")
return checkpoint
def train(self,
data_iterator: Iterable,
nepochs: int = 1,
fine_tune: bool = True,
progress_print: int = 1000,
checkpoint: Optional[Checkpoint] = None,
is_resumed: bool = False) -> None:
""" Runs training on the data, note that the maximum length of a line
or document is 1M characters. Anything longer will be trimmed.
Args:
data_iterator (Iterable):
Simple iterator over sentences/documents, e.g. an open file
or an array or anything that we can use in a for loop.
nepochs (int):
Number of epochs for which to run the training.
fine_tune (bool):
If False old training will be removed.
progress_print (int):
Print progress after N lines.
checkpoint (Optional[medcat.utils.checkpoint.CheckpointUT]):
The MedCAT checkpoint object
is_resumed (bool):
If True resume the previous training; If False, start a fresh new training.
"""
if not fine_tune:
self.log.info("Removing old training data!")
self.cdb.reset_training()
checkpoint = self._init_ckpts(is_resumed, checkpoint)
latest_trained_step = checkpoint.count if checkpoint is not None else 0
epochal_data_iterator = chain.from_iterable(repeat(data_iterator, nepochs))
for line in islice(epochal_data_iterator, latest_trained_step, None):
if line is not None and line:
# Convert to string
line = str(line).strip()
try:
_ = self(line, do_train=True)
except Exception as e:
self.log.warning("LINE: '%s...' \t WAS SKIPPED", line[0:100])
self.log.warning("BECAUSE OF: %s", str(e))
else:
self.log.warning("EMPTY LINE WAS DETECTED AND SKIPPED")
latest_trained_step += 1
if latest_trained_step % progress_print == 0:
self.log.info("DONE: %s", str(latest_trained_step))
if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:
checkpoint.save(cdb=self.cdb, count=latest_trained_step)
self.config.linking['train'] = False
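# --- Hedged usage sketch (editor's addition). Assumes `cat` is a CAT instance and
# "corpus.txt" is a hypothetical file with one document per line. ---
with open("corpus.txt") as f:
    cat.train(f, nepochs=1, progress_print=1000)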
def add_cui_to_group(self, cui: str, group_name: str) -> None:
r"""
Adds a CUI to a group, will appear in cdb.addl_info['cui2group']
Args:
cui (str):
The concept to be added
group_name (str):
The group to which the concept will be added
Examples:
>>> cat.add_cui_to_group("S-17", 'pain')
"""
# Add group_name
self.cdb.addl_info['cui2group'][cui] = group_name
def unlink_concept_name(self, cui: str, name: str, preprocessed_name: bool = False) -> None:
r"""
Unlink a concept name from the CUI (or all CUIs if full_unlink), removes the link from
the Concept Database (CDB). As a consequence medcat will never again link the `name`
to this CUI - meaning the name will not be detected as a concept in the future.
Args:
cui (str):
The CUI from which the `name` will be removed
name (str):
The span of text to be removed from the linking dictionary
Examples:
>>> # To never again link C0020538 to HTN
>>> cat.unlink_concept_name('C0020538', 'htn', False)
"""
cuis = [cui]
if preprocessed_name:
names = {name: 'nothing'}
else:
names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)
# If full unlink find all CUIs
if self.config.general.get('full_unlink', False):
for n in names:
cuis.extend(self.cdb.name2cuis.get(n, []))
# Remove name from all CUIs
for c in cuis:
self.cdb.remove_names(cui=c, names=names)
def add_and_train_concept(self,
cui: str,
name: str,
spacy_doc: Optional[Doc] = None,
spacy_entity: Optional[Union[List[Token], Span]] = None,
ontologies: Set = set(),
name_status: str = 'A',
type_ids: Set = set(),
description: str = '',
full_build: bool = True,
negative: bool = False,
devalue_others: bool = False,
do_add_concept: bool = True) -> None:
r""" Add a name to an existing concept, or add a new concept, or do not do anything if the name or concept already exists. Perform
training if spacy_entity and spacy_doc are set.
Args:
cui (str):
CUI of the concept
name (str):
Name to be linked to the concept (in the case of MedCATtrainer this is simply the
selected value in text, no preprocessing or anything needed).
spacy_doc (spacy.tokens.Doc):
spaCy representation of the document that was manually annotated.
spacy_entity (Optional[Union[List[Token], Span]]):
Given the spacy document, this is the annotated span of text - list of annotated tokens that are marked with this CUI.
negative (bool):
Is this a negative or positive example.
devalue_others:
If set, CUIs to which this name is assigned, other than `cui`, will receive negative training,
given that negative=False.
\*\*other:
Refer to medcat.cat.cdb.CDB.add_concept
"""
names = prepare_name(name, self.pipe.spacy_nlp, {}, self.config)
# Only add the new name if the example is not negative; a negative example means the name should not be detected
if do_add_concept and not negative:
self.cdb.add_concept(cui=cui, names=names, ontologies=ontologies, name_status=name_status, type_ids=type_ids, description=description,
full_build=full_build)
if spacy_entity is not None and spacy_doc is not None:
# Train Linking
self.linker.context_model.train(cui=cui, entity=spacy_entity, doc=spacy_doc, negative=negative, names=names)
if not negative and devalue_others:
# Find all cuis
cuis = set()
for n in names:
cuis.update(self.cdb.name2cuis.get(n, []))
# Remove the cui for which we just added positive training
if cui in cuis:
cuis.remove(cui)
# Add negative training for all other CUIs that link to these names
for _cui in cuis:
self.linker.context_model.train(cui=_cui, entity=spacy_entity, doc=spacy_doc, negative=True)
def train_supervised(self,
data_path: str,
reset_cui_count: bool = False,
nepochs: int = 1,
print_stats: int = 0,
use_filters: bool = False,
terminate_last: bool = False,
use_overlaps: bool = False,
use_cui_doc_limit: bool = False,
test_size: float = 0,
devalue_others: bool = False,
use_groups: bool = False,
never_terminate: bool = False,
train_from_false_positives: bool = False,
extra_cui_filter: Optional[Set] = None,
checkpoint: Optional[Checkpoint] = None,
is_resumed: bool = False) -> Tuple:
r""" TODO: Refactor, left from old
Run supervised training on a dataset from MedCATtrainer. Please take care that this is more a simulated
online training than supervised.
Args:
data_path (str):
The path to the json file that we get from MedCATtrainer on export.
reset_cui_count (boolean):
Used for training with weight_decay (annealing). Each concept has a count that is there
from the beginning of the CDB, that count is used for annealing. Resetting the count will
significantly increase the training impact. This will reset the count only for concepts
that exist in the training data.
nepochs (int):
Number of epochs for which to run the training.
print_stats (int):
If > 0 it will print stats every print_stats epochs.
use_filters (boolean):
Each project in medcattrainer can have filters, do we want to respect those filters
when calculating metrics.
terminate_last (boolean):
If true, concept termination will be done after all training.
use_overlaps (boolean):
Allow overlapping entities, nearly always False as it is very difficult to annotate overlapping entities.
use_cui_doc_limit (boolean):
If True the metrics for a CUI will be only calculated if that CUI appears in a document, in other words
if the document was annotated for that CUI. Useful in very specific situations when during the annotation
process the set of CUIs changed.
test_size (float):
If > 0 the data set will be split into train/test based on this ratio. Should be between 0 and 1.
Usually 0.1 is fine.
devalue_others(bool):
Check add_name for more details.
use_groups (boolean):
If True concepts that have groups will be combined and stats will be reported on groups.
never_terminate (boolean):
If True no termination will be applied
train_from_false_positives (boolean):
If True it will use false positive examples detected by medcat and train from them as negative examples.
extra_cui_filter(Optional[Set]):
This filter will be intersected with all other filters, or if all others are not set then only this one will be used.
checkpoint (Optional[medcat.utils.checkpoint.CheckpointST]):
The MedCAT CheckpointST object
is_resumed (bool):
If True resume the previous training; If False, start a fresh new training.
Returns:
fp (dict):
False positives for each CUI
fn (dict):
False negatives for each CUI
tp (dict):
True positives for each CUI
p (dict):
Precision for each CUI
r (dict):
Recall for each CUI
f1 (dict):
F1 for each CUI
cui_counts (dict):
Number of occurrences for each CUI
examples (dict):
FP/FN examples of sentences for each CUI
"""
checkpoint = self._init_ckpts(is_resumed, checkpoint)
# Backup filters
_filters = deepcopy(self.config.linking['filters'])
filters = self.config.linking['filters']
fp = fn = tp = p = r = f1 = examples = {}
with open(data_path) as f:
data = json.load(f)
cui_counts = {}
if test_size == 0:
self.log.info("Running without a test set, or train==test")
test_set = data
train_set = data
else:
train_set, test_set, _, _ = make_mc_train_test(data, self.cdb, test_size=test_size)
if print_stats > 0:
fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,
use_project_filters=use_filters,
use_cui_doc_limit=use_cui_doc_limit,
use_overlaps=use_overlaps,
use_groups=use_groups,
extra_cui_filter=extra_cui_filter)
if reset_cui_count:
# Get all CUIs
cuis = []
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
cuis.append(ann['cui'])
for cui in set(cuis):
if cui in self.cdb.cui2count_train:
self.cdb.cui2count_train[cui] = 100
# Remove entities that were terminated
if not never_terminate:
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if ann.get('killed', False):
self.unlink_concept_name(ann['cui'], ann['value'])
latest_trained_step = checkpoint.count if checkpoint is not None else 0
current_epoch, current_project, current_document = self._get_training_start(train_set, latest_trained_step)
for epoch in trange(current_epoch, nepochs, initial=current_epoch, total=nepochs, desc='Epoch', leave=False):
# Print acc before training
for idx_project in trange(current_project, len(train_set['projects']), initial=current_project, total=len(train_set['projects']), desc='Project', leave=False):
project = train_set['projects'][idx_project]
# Set filters in case we are using the train_from_fp
filters['cuis'] = set()
if isinstance(extra_cui_filter, set):
filters['cuis'] = extra_cui_filter
if use_filters:
project_filter = get_project_filters(cuis=project.get('cuis', None),
type_ids=project.get('tuis', None),
cdb=self.cdb,
project=project)
if project_filter:
filters['cuis'] = intersect_nonempty_set(project_filter, filters['cuis'])
for idx_doc in trange(current_document, len(project['documents']), initial=current_document, total=len(project['documents']), desc='Document', leave=False):
doc = project['documents'][idx_doc]
spacy_doc: Doc = self(doc['text'])
# Compatibility with old output where annotations are a list
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if not ann.get('killed', False):
cui = ann['cui']
start = ann['start']
end = ann['end']
spacy_entity = tkns_from_doc(spacy_doc=spacy_doc, start=start, end=end)
deleted = ann.get('deleted', False)
self.add_and_train_concept(cui=cui,
name=ann['value'],
spacy_doc=spacy_doc,
spacy_entity=spacy_entity,
negative=deleted,
devalue_others=devalue_others)
if train_from_false_positives:
fps: List[Span] = get_false_positives(doc, spacy_doc)
for fp in fps:
fp_: Span = fp
self.add_and_train_concept(cui=fp_._.cui,
name=fp_.text,
spacy_doc=spacy_doc,
spacy_entity=fp_,
negative=True,
do_add_concept=False)
latest_trained_step += 1
if checkpoint is not None and checkpoint.steps is not None and latest_trained_step % checkpoint.steps == 0:
checkpoint.save(self.cdb, latest_trained_step)
if terminate_last and not never_terminate:
# Remove entities that were terminated, but after all training is done
for project in train_set['projects']:
for doc in project['documents']:
doc_annotations = self._get_doc_annotations(doc)
for ann in doc_annotations:
if ann.get('killed', False):
self.unlink_concept_name(ann['cui'], ann['value'])
if print_stats > 0 and (epoch + 1) % print_stats == 0:
fp, fn, tp, p, r, f1, cui_counts, examples = self._print_stats(test_set,
epoch=epoch + 1,
use_project_filters=use_filters,
use_cui_doc_limit=use_cui_doc_limit,
use_overlaps=use_overlaps,
use_groups=use_groups,
extra_cui_filter=extra_cui_filter)
# Set the filters again
self.config.linking['filters'] = _filters
return fp, fn, tp, p, r, f1, cui_counts, examples
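# --- Hedged usage sketch (editor's addition). "MedCAT_Export.json" stands in for a
# MedCATtrainer export; the keyword arguments mirror the signature above. ---
fp, fn, tp, p, r, f1, cui_counts, examples = cat.train_supervised(
    data_path="MedCAT_Export.json",
    nepochs=1,
    use_filters=True,
    print_stats=1,
)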
def get_entities(self,
text: str,
only_cui: bool = False,
addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed']) -> Dict:
doc = self(text)
out = self._doc_to_out(doc, only_cui, addl_info)
return out
def get_entities_multi_texts(self,
texts: Union[Iterable[str], Iterable[Tuple]],
only_cui: bool = False,
addl_info: List[str] = ['cui2icd10', 'cui2ontologies', 'cui2snomed'],
n_process: Optional[int] = None,
batch_size: Optional[int] = None) -> List[Dict]:
r""" Get entities
text: text to be annotated
return: entities
"""
out: List[Dict] = []
if n_process is None:
texts_ = self._generate_trimmed_texts(texts)
for text in texts_:
out.append(self._doc_to_out(self(text), only_cui, addl_info))
else:
self.pipe.set_error_handler(self._pipe_error_handler)
try:
texts_ = self._get_trimmed_texts(texts)
docs = self.pipe.batch_multi_process(texts_, n_process, batch_size)
for doc in tqdm(docs, total=len(texts_)):
doc = None if doc.text.strip() == '' else doc
out.append(self._doc_to_out(doc, only_cui, addl_info, out_with_text=True))
# Currently spaCy cannot mark which pieces of text failed within the pipe, hence this workaround,
# which also assumes the texts are different from each other.
if len(out) < len(texts_):
self.log.warning("Found at least one failed batch and set output for enclosed texts to empty")
for i, text in enumerate(texts_):
if i == len(out):
out.append(self._doc_to_out(None, only_cui, addl_info))
elif out[i].get('text', '') != text:
out.insert(i, self._doc_to_out(None, only_cui, addl_info))
cnf_annotation_output = getattr(self.config, 'annotation_output', {})
if not cnf_annotation_output.get('include_text_in_output', False):
for o in out:
if o is not None:
o.pop('text', None)
finally:
self.pipe.reset_error_handler()
return out
def get_json(self, text: str, only_cui: bool = False, addl_info=['cui2icd10', 'cui2ontologies']) -> str:
""" Get output in json format
text: text to be annotated
return: json with fields {'entities': <>, 'text': text}
"""
ents = self.get_entities(text, only_cui, addl_info=addl_info)['entities']
out = {'annotations': ents, 'text': text}
return json.dumps(out)
@staticmethod
def _get_training_start(train_set, latest_trained_step):
total_steps_per_epoch = sum([1 for project in train_set['projects'] for _ in project['documents']])
if total_steps_per_epoch == 0:
raise ValueError("MedCATtrainer export contains no documents")
current_epoch, last_step_in_epoch = divmod(latest_trained_step, total_steps_per_epoch)
document_count = 0
current_project = 0
current_document = 0
for idx_project, project in enumerate(train_set['projects']):
for idx_doc, _ in enumerate(project['documents']):
document_count += 1
if document_count == last_step_in_epoch:
current_project = idx_project
current_document = idx_doc
break
if current_project > 0:
break
current_document = 0
return current_epoch, current_project, current_document
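# --- Editor's sketch of the resume arithmetic above: with 2 projects of 3 documents each
# (6 steps per epoch), resuming from step 8 lands in epoch 1, 2 documents into that epoch. ---
total_steps_per_epoch = 6
current_epoch, last_step_in_epoch = divmod(8, total_steps_per_epoch)
print(current_epoch, last_step_in_epoch)  # -> 1 2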
def _separate_nn_components(self):
# Loop through the pipe components and check whether there are NN (GPU) components
nn_components = []
for component in self.pipe.spacy_nlp.components:
if isinstance(component[1], MetaCAT) or isinstance(component[1], TransformersNER):
self.pipe.spacy_nlp.disable_pipe(component[0])
nn_components.append(component)
return nn_components
def _run_nn_components(self, docs: Dict, nn_components: List, id2text: Dict) -> None:
r""" This will add meta_anns in-place to the docs dict.
"""
self.log.debug("Running GPU components separately")
# First convert the docs into the fake spacy doc format
spacy_docs = json_to_fake_spacy(docs, id2text=id2text)
# Disable component locks also
for name, component in nn_components:
component.config.general['disable_component_lock'] = True
# For meta_cat components
for name, component in [c for c in nn_components if isinstance(c[1], MetaCAT)]:
spacy_docs = component.pipe(spacy_docs)
for spacy_doc in spacy_docs:
for ent in spacy_doc.ents:
docs[spacy_doc.id]['entities'][ent._.id]['meta_anns'].update(ent._.meta_anns)
def _batch_generator(self, data: Iterable, batch_size_chars: int, skip_ids: Set = set()):
docs = []
char_count = 0
for doc in data:
if doc[0] not in skip_ids:
char_count += len(str(doc[1]))
docs.append(doc)
if char_count < batch_size_chars:
continue
yield docs
docs = []
char_count = 0
if len(docs) > 0:
yield docs
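# --- Editor's sketch of the character-count batching above, written as a standalone
# generator over hypothetical (id, text) pairs. ---
def batch_by_chars(data, batch_size_chars):
    batch, chars = [], 0
    for doc_id, text in data:
        batch.append((doc_id, text))
        chars += len(str(text))
        if chars >= batch_size_chars:
            yield batch
            batch, chars = [], 0
    if batch:
        yield batch

print(list(batch_by_chars([(1, "ab"), (2, "cdef"), (3, "g")], batch_size_chars=5)))
# -> [[(1, 'ab'), (2, 'cdef')], [(3, 'g')]]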
def _save_docs_to_file(self, docs: Iterable, annotated_ids: List[str], save_dir_path: str, annotated_ids_path: Optional[str], part_counter: int = 0) -> int:
path = os.path.join(save_dir_path, 'part_{}.pickle'.format(part_counter))
pickle.dump(docs, open(path, "wb"))
self.log.info("Saved part: %s, to: %s", part_counter, path)
part_counter = part_counter + 1 # Increase for save, as it should be what is the next part
if annotated_ids_path is not None:
pickle.dump((annotated_ids, part_counter), open(annotated_ids_path, 'wb'))
return part_counter
def multiprocessing(self,
data: Union[List[Tuple], Iterable[Tuple]],
nproc: int = 2,
batch_size_chars: int = 5000 * 1000,
only_cui: bool = False,
addl_info: List[str] = [],
separate_nn_components: bool = True,
out_split_size_chars: Optional[int] = None,
save_dir_path: str = os.path.abspath(os.getcwd()),
min_free_memory=0.1) -> Dict:
r""" Run multiprocessing for inference, if out_save_path and out_split_size_chars is used this will also continue annotating
documents if something is saved in that directory.
Args:
data:
Iterator or array with format: [(id, text), (id, text), ...]
nproc (`int`, defaults to 2):
Number of processors
batch_size_chars (`int`, defaults to 5000000):
Size of a batch in number of characters; this should be around: NPROC * average_document_length * 200
separate_nn_components (`bool`, defaults to True):
If set the medcat pipe will be broken up into NN and not-NN components and
they will be run sequentially. This is useful as the NN components
have batching and like to process many docs at once, while the rest of the pipeline
runs the documents one by one.
out_split_size_chars (`int`, defaults to None):
If set, once more than out_split_size_chars characters have been annotated
they will be saved to a file (save_dir_path) and the memory cleared. Recommended
value is 20*batch_size_chars.
save_dir_path(`str`, defaults to the current working directory):
Where to save the annotated documents if splitting.
min_free_memory(`float`, defaults to 0.1):
If set, a process will not start unless at least this fraction of RAM is free,
a value in the range [0, 1] meaning how much of the memory has to be free. Helps when annotating
very large datasets because spacy is not the best with memory management and multiprocessing.
Returns:
A dictionary: {id: doc_json, id2: doc_json2, ...}, in case out_split_size_chars is used
the last batch will be returned while that and all previous batches will be
written to disk (save_dir_path).
"""
for comp in self.pipe.spacy_nlp.components:
if isinstance(comp[1], TransformersNER):
raise Exception("Please do not use multiprocessing when running a transformer model for NER, run sequentially.")
# Set max document length
self.pipe.spacy_nlp.max_length = self.config.preprocessing.get('max_document_length', 1000000)
if self._meta_cats and not separate_nn_components:
# Hack for torch using multithreading, which is not good when not using
# separate_nn_components; needed for CPU runs only
import torch
torch.set_num_threads(1)
nn_components = []
if separate_nn_components:
nn_components = self._separate_nn_components()
if save_dir_path is not None:
os.makedirs(save_dir_path, exist_ok=True)
# "5" looks like a magic number here so better with comment about why the choice was made.
internal_batch_size_chars = batch_size_chars // (5 * nproc)
annotated_ids_path = os.path.join(save_dir_path, 'annotated_ids.pickle') if save_dir_path is not None else None
if annotated_ids_path is not None and os.path.exists(annotated_ids_path):
annotated_ids, part_counter = pickle.load(open(annotated_ids_path, 'rb'))
else:
annotated_ids = []
part_counter = 0
docs = {}
_start_time = time.time()
_batch_counter = 0 # Used for splitting the output, counts batches in between saves
for batch in self._batch_generator(data, batch_size_chars, skip_ids=set(annotated_ids)):
self.log.info("Annotated until now: %s docs; Current BS: %s docs; Elapsed time: %.2f minutes",
len(annotated_ids),
len(batch),
(time.time() - _start_time)/60)
try:
_docs = self._multiprocessing_batch(data=batch,
nproc=nproc,
only_cui=only_cui,
batch_size_chars=internal_batch_size_chars,
addl_info=addl_info,
nn_components=nn_components,
min_free_memory=min_free_memory)
docs.update(_docs)
annotated_ids.extend(_docs.keys())
_batch_counter += 1
del _docs
if out_split_size_chars is not None and (_batch_counter * batch_size_chars) > out_split_size_chars:
# Save to file and reset the docs
part_counter = self._save_docs_to_file(docs=docs,
annotated_ids=annotated_ids,
save_dir_path=save_dir_path,
annotated_ids_path=annotated_ids_path,
part_counter=part_counter)
del docs
docs = {}
_batch_counter = 0
except Exception as e:
self.log.warning("Failed an outer batch in the multiprocessing script")
self.log.warning(e, exc_info=True, stack_info=True)
# Save the last batch
if out_split_size_chars is not None and len(docs) > 0:
# Save to file and reset the docs
self._save_docs_to_file(docs=docs,
annotated_ids=annotated_ids,
save_dir_path=save_dir_path,
annotated_ids_path=annotated_ids_path,
part_counter=part_counter)
# Enable the GPU Components again
if separate_nn_components:
for name, _ in nn_components:
# No need to do anything else as it was already in the pipe
self.pipe.spacy_nlp.enable_pipe(name)
return docs
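# --- Hedged usage sketch (editor's addition). `cat` is assumed to be a loaded CAT
# instance; the (id, text) pairs are hypothetical. ---
data = [(1, "Patient presents with hypertension."), (2, "No evidence of diabetes.")]
docs = cat.multiprocessing(data, nproc=2, batch_size_chars=500 * 1000)
print(docs[1]['entities'])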
def _multiprocessing_batch(self,
data: Union[List[Tuple], Iterable[Tuple]],
nproc: int = 8,
batch_size_chars: int = 1000000,
only_cui: bool = False,
addl_info: List[str] = [],
nn_components: List = [],
min_free_memory: int = 0) -> Dict:
r""" Run multiprocessing on one batch
Args:
data:
Iterator or array with format: [(id, text), (id, text), ...]
nproc (`int`, defaults to 8):
Number of processors
batch_size_chars (`int`, defaults to 1000000):
Size of a batch in number of characters
Returns:
A dictionary: {id: doc_json, id2: doc_json2, ...}
"""
# Create the input output for MP
with Manager() as manager:
out_list = manager.list()
lock = manager.Lock()
in_q = manager.Queue(maxsize=10*nproc)
id2text = {}
for batch in self._batch_generator(data, batch_size_chars):
if nn_components:
# We need this for the json_to_fake_spacy
id2text.update({k:v for k,v in batch})
in_q.put(batch)
# Final data point for workers
for _ in range(nproc):
in_q.put(None)
sleep(2)
# Create processes
procs = []
for i in range(nproc):
p = Process(target=self._mp_cons,
kwargs={'in_q': in_q,
'out_list': out_list,
'pid': i,
'only_cui': only_cui,
'addl_info': addl_info,
'min_free_memory': min_free_memory,
'lock': lock})
p.start()
procs.append(p)
# Join processes
for p in procs:
p.join()
docs = {}
# Converts the list of (id, doc) tuples into a dict
docs.update({k:v for k,v in out_list})
# If we have separate GPU components now we pipe that
if nn_components:
try:
self._run_nn_components(docs, nn_components, id2text=id2text)
except Exception as e:
self.log.warning(e, exc_info=True, stack_info=True)
return docs
def multiprocessing_pipe(self,
in_data: Union[List[Tuple], Iterable[Tuple]],
nproc: Optional[int] = None,
batch_size: Optional[int] = None,
only_cui: bool = False,
addl_info: List[str] = [],
return_dict: bool = True,
batch_factor: int = 2) -> Union[List[Tuple], Dict]:
r""" Run multiprocessing NOT FOR TRAINING
in_data: a list with format: [(id, text), (id, text), ...]
nproc: the number of processors
batch_size: the number of texts to buffer
return_dict: a flag for returning either a dict or a list of tuples
return: a dict: {id: doc_json, id: doc_json, ...} or if return_dict is False, a list of tuples: [(id, doc_json), (id, doc_json), ...]
"""
out: Union[Dict, List[Tuple]]
if nproc == 0:
raise ValueError("nproc cannot be set to zero")
in_data = list(in_data) if isinstance(in_data, Iterable) else in_data
n_process = nproc if nproc is not None else min(max(cpu_count() - 1, 1), math.ceil(len(in_data) / batch_factor))
batch_size = batch_size if batch_size is not None else math.ceil(len(in_data) / (batch_factor * abs(n_process)))
start_method = None
try:
if self._meta_cats:
import torch
if torch.multiprocessing.get_start_method() != "spawn":
start_method = torch.multiprocessing.get_start_method()
torch.multiprocessing.set_start_method("spawn", force=True)
entities = self.get_entities_multi_texts(texts=in_data, only_cui=only_cui, addl_info=addl_info,
n_process=n_process, batch_size=batch_size)
finally:
if start_method is not None:
import torch
torch.multiprocessing.set_start_method(start_method, force=True)
if return_dict:
out = {}
for idx, data in enumerate(in_data):
out[data[0]] = entities[idx]
else:
out = []
for idx, data in enumerate(in_data):
out.append((data[0], entities[idx]))
return out
def _mp_cons(self, in_q: Queue, out_list: List, min_free_memory: int, lock: Lock, pid: int = 0, only_cui: bool = False, addl_info: List = []) -> None:
out: List = []
while True:
if not in_q.empty():
if psutil.virtual_memory().available / psutil.virtual_memory().total < min_free_memory:
with lock:
out_list.extend(out)
# Stop a process if there is not enough memory left
break
data = in_q.get()
if data is None:
with lock:
out_list.extend(out)
break
for i_text, text in data:
try:
# Annotate document
doc = self.get_entities(text=text, only_cui=only_cui, addl_info=addl_info)
out.append((i_text, doc))
except Exception as e:
self.log.warning("PID: %s failed one document in _mp_cons, running will continue normally. \n" +
"Document length in chars: %s, and ID: %s", pid, len(str(text)), i_text)
self.log.warning(str(e))
sleep(2)
def _doc_to_out(self,
doc: Doc,
only_cui: bool,
addl_info: List[str],
out_with_text: bool = False) -> Dict:
out: Dict = {'entities': {}, 'tokens': []}
cnf_annotation_output = getattr(self.config, 'annotation_output', {})
if doc is not None:
out_ent: Dict = {}
if self.config.general.get('show_nested_entities', False):
_ents = []
for _ent in doc._.ents:
entity = Span(doc, _ent['start'], _ent['end'], label=_ent['label'])
entity._.cui = _ent['cui']
entity._.detected_name = _ent['detected_name']
entity._.context_similarity = _ent['context_similarity']
entity._.id = _ent['id']
if 'meta_anns' in _ent:
entity._.meta_anns = _ent['meta_anns']
_ents.append(entity)
else:
_ents = doc.ents
if cnf_annotation_output.get("lowercase_context", True):
doc_tokens = [tkn.text_with_ws.lower() for tkn in list(doc)]
else:
doc_tokens = [tkn.text_with_ws for tkn in list(doc)]
if cnf_annotation_output.get('doc_extended_info', False):
# Add tokens if extended info
out['tokens'] = doc_tokens
context_left = cnf_annotation_output.get('context_left', -1)
context_right = cnf_annotation_output.get('context_right', -1)
doc_extended_info = cnf_annotation_output.get('doc_extended_info', False)
for _, ent in enumerate(_ents):
cui = str(ent._.cui)
if not only_cui:
out_ent['pretty_name'] = self.cdb.get_name(cui)
out_ent['cui'] = cui
out_ent['type_ids'] = list(self.cdb.cui2type_ids.get(cui, ''))
out_ent['types'] = [self.cdb.addl_info['type_id2name'].get(tui, '') for tui in out_ent['type_ids']]
out_ent['source_value'] = ent.text
out_ent['detected_name'] = str(ent._.detected_name)
out_ent['acc'] = float(ent._.context_similarity)
out_ent['context_similarity'] = float(ent._.context_similarity)
out_ent['start'] = ent.start_char
out_ent['end'] = ent.end_char
for addl in addl_info:
tmp = self.cdb.addl_info.get(addl, {}).get(cui, [])
out_ent[addl.split("2")[-1]] = list(tmp) if type(tmp) == set else tmp
out_ent['id'] = ent._.id
out_ent['meta_anns'] = {}
if doc_extended_info:
out_ent['start_tkn'] = ent.start
out_ent['end_tkn'] = ent.end
if context_left > 0 and context_right > 0:
out_ent['context_left'] = doc_tokens[max(ent.start - context_left, 0):ent.start]
out_ent['context_right'] = doc_tokens[ent.end:min(ent.end + context_right, len(doc_tokens))]
out_ent['context_center'] = doc_tokens[ent.start:ent.end]
if hasattr(ent._, 'meta_anns') and ent._.meta_anns:
out_ent['meta_anns'] = ent._.meta_anns
out['entities'][out_ent['id']] = dict(out_ent)
else:
out['entities'][ent._.id] = cui
if cnf_annotation_output.get('include_text_in_output', False) or out_with_text:
out['text'] = doc.text
return out
def _get_trimmed_text(self, text: Optional[str]) -> str:
return text[0:self.config.preprocessing.get('max_document_length')] if text is not None and len(text) > 0 else ""
def _generate_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> Iterable[str]:
text_: str
for text in texts:
text_ = text[1] if isinstance(text, tuple) else text
yield self._get_trimmed_text(text_)
def _get_trimmed_texts(self, texts: Union[Iterable[str], Iterable[Tuple]]) -> List[str]:
trimmed: List = []
text_: str
for text in texts:
text_ = text[1] if isinstance(text, tuple) else text
trimmed.append(self._get_trimmed_text(text_))
return trimmed
@staticmethod
def _pipe_error_handler(proc_name: str, proc: "Pipe", docs: List[Doc], e: Exception) -> None:
CAT.log.warning("Exception raised when applying component %s to a batch of docs.", proc_name)
CAT.log.warning(e, exc_info=True, stack_info=True)
if docs is not None:
CAT.log.warning("Docs contained in the batch:")
for doc in docs:
if hasattr(doc, "text"):
CAT.log.warning("%s...", doc.text[:50])
@staticmethod
def _get_doc_annotations(doc: Doc):
if type(doc['annotations']) == list:
return doc['annotations']
if type(doc['annotations']) == dict:
return doc['annotations'].values()
return None
def destroy_pipe(self):
self.pipe.destroy()
| [
"torch.multiprocessing.set_start_method",
"torch.multiprocessing.get_start_method",
"torch.set_num_threads"
] | 1.0 | CogStack/CAT | 5ac04d2676aede13f8e8d0ab408472c3c6d46a86 |
1.6 | import os
import re
import shutil
from subprocess import check_output, run, PIPE
import numpy as np
import torch
import logging
logger = logging.getLogger(__name__)
def get_gpu_memory_map():
result = check_output(
["nvidia-smi", "--query-gpu=memory.used", "--format=csv,nounits,noheader"]
)
return [int(x) for x in result.split()]
def least_used_device():
"""Get the GPU device with most available memory."""
if not torch.cuda.is_available():
raise RuntimeError("cuda unavailable")
if shutil.which("nvidia-smi") is None:
raise RuntimeError(
"nvidia-smi unavailable: \
cannot select the device with the least memory used."
)
memory_map = get_gpu_memory_map()
device_id = np.argmin(memory_map)
logger.info(
f"Choosing GPU device: {device_id}, " f"memory used: {memory_map[device_id]}"
)
return torch.device("cuda:{}".format(device_id))
def choose_device(preferred_device, default_device="cpu"):
if preferred_device == "cuda:best":
try:
preferred_device = least_used_device()
except RuntimeError:
logger.info(
f"Could not find least used device (nvidia-smi might be missing), use cuda:0 instead"
)
if torch.cuda.is_available():
return choose_device("cuda:0")
else:
return choose_device("cpu")
try:
torch.zeros((1,), device=preferred_device) # Test availability
except (RuntimeError, AssertionError) as e:
logger.info(
f"Preferred device {preferred_device} unavailable ({e})."
f"Switching to default {default_device}"
)
return default_device
return preferred_device
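# --- Hedged usage sketch (editor's addition): pick a device with the helpers above
# and allocate a tensor on it; falls back to CPU when CUDA/nvidia-smi is unavailable. ---
device = choose_device("cuda:best")
x = torch.zeros((2, 2), device=device)
print(device, x.device)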
def get_memory(pid=None):
if not pid:
pid = os.getpid()
command = "nvidia-smi"
result = run(
command, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True
).stdout
m = re.findall(
"\| *[0-9] *" + str(pid) + " *C *.*python.*? +([0-9]+).*\|",
result,
re.MULTILINE,
)
return [int(mem) for mem in m]
| [
"torch.zeros",
"torch.cuda.is_available"
] | 1.6.0 | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf |
1.1 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Modifications copyright (C) 2020 Zi-Yi Dou
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
import itertools
import os
import numpy as np
import torch
from tqdm import trange
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, SequentialSampler
import modeling
from configuration_bert import BertConfig
from modeling import BertForMaskedLM
from tokenization_bert import BertTokenizer
from tokenization_utils import PreTrainedTokenizer
from modeling_utils import PreTrainedModel
def set_seed(args):
if args.seed >= 0:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
class LineByLineTextDataset(Dataset):
def __init__(self, tokenizer: PreTrainedTokenizer, args, file_path):
assert os.path.isfile(file_path)
print('Loading the dataset...')
self.examples = []
with open(file_path, encoding="utf-8") as f:
for idx, line in enumerate(f.readlines()):
if len(line) == 0 or line.isspace() or not len(line.split(' ||| ')) == 2:
raise ValueError(f'Line {idx+1} is not in the correct format!')
src, tgt = line.split(' ||| ')
if src.rstrip() == '' or tgt.rstrip() == '':
raise ValueError(f'Line {idx+1} is not in the correct format!')
sent_src, sent_tgt = src.strip().split(), tgt.strip().split()
token_src, token_tgt = [tokenizer.tokenize(word) for word in sent_src], [tokenizer.tokenize(word) for word in sent_tgt]
wid_src, wid_tgt = [tokenizer.convert_tokens_to_ids(x) for x in token_src], [tokenizer.convert_tokens_to_ids(x) for x in token_tgt]
ids_src, ids_tgt = tokenizer.prepare_for_model(list(itertools.chain(*wid_src)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids'], tokenizer.prepare_for_model(list(itertools.chain(*wid_tgt)), return_tensors='pt', max_length=tokenizer.max_len)['input_ids']
bpe2word_map_src = []
for i, word_list in enumerate(token_src):
bpe2word_map_src += [i for x in word_list]
bpe2word_map_tgt = []
for i, word_list in enumerate(token_tgt):
bpe2word_map_tgt += [i for x in word_list]
self.examples.append( (ids_src[0], ids_tgt[0], bpe2word_map_src, bpe2word_map_tgt) )
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return self.examples[i]
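# --- Editor's sketch of the parallel-data format expected by the dataset above:
# one "source ||| target" pair per line (this sample line is hypothetical). ---
line = "we love music ||| wir lieben Musik"
src, tgt = line.split(' ||| ')
sent_src, sent_tgt = src.strip().split(), tgt.strip().split()
print(sent_src, sent_tgt)  # -> ['we', 'love', 'music'] ['wir', 'lieben', 'Musik']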
def word_align(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, output_word_alignments = False):
def collate(examples):
ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = zip(*examples)
ids_src = pad_sequence(ids_src, batch_first=True, padding_value=tokenizer.pad_token_id)
ids_tgt = pad_sequence(ids_tgt, batch_first=True, padding_value=tokenizer.pad_token_id)
return ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt
dataset = LineByLineTextDataset(tokenizer, args, file_path=args.data_file)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(
dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate
)
model.to(args.device)
model.eval()
tqdm_iterator = trange(len(dataset), desc="Extracting")
with open(args.output_file, 'w') as writer:
for batch in dataloader:
with torch.no_grad():
ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt = batch
word_aligns_list = model.get_aligned_word(ids_src, ids_tgt, bpe2word_map_src, bpe2word_map_tgt, args.device, 0, 0, align_layer=args.align_layer, extraction=args.extraction, softmax_threshold=args.softmax_threshold, test=True)
for word_aligns in word_aligns_list:
output_str = []
for word_align in word_aligns:
output_str.append(f'{word_align[0]}-{word_align[1]}')
writer.write(' '.join(output_str)+'\n')
tqdm_iterator.update(len(ids_src))
if output_word_alignments:
with open(args.output_file, 'r') as fh:
outputf = (fh.read()).split("\n")
with open(args.data_file, 'r') as fh:
datalines = (fh.read()).split("\n")
with open(args.output_file+".outtxt", 'w') as fwriter:
for indices, line in zip(outputf, datalines):
srcline, tgtline = line.split(' ||| ')
indices = indices.split()
srcwrds = srcline.split()
tgtwrds = tgtline.split()
output_wrds = []
for wrd in indices:
srcix,tgtix = wrd.split("-")
srcix, tgtix = int(srcix), int(tgtix)
output_wrds.append(f"{srcwrds[srcix]}-{tgtwrds[tgtix]}")
fwriter.write(' '.join(output_wrds)+'\n')
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_file", default=None, type=str, required=True, help="The input data file (a text file)."
)
parser.add_argument(
"--output_file",
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--align_layer", type=int, default=8, help="layer for alignment extraction")
parser.add_argument(
"--extraction", default='softmax', type=str, help='softmax or entmax15'
)
parser.add_argument(
"--softmax_threshold", type=float, default=0.001
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
help="The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.",
)
parser.add_argument(
"--config_name",
default=None,
type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.",
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path. If both are None, initialize a new tokenizer.",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument(
"--cache_dir",
default='cache_dir',
type=str,
help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.device = device
# Set seed
set_seed(args)
config_class, model_class, tokenizer_class = BertConfig, BertForMaskedLM, BertTokenizer
if args.config_name:
config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
else:
config = config_class()
if args.tokenizer_name:
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)
elif args.model_name_or_path:
tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
else:
raise ValueError(
"You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,"
"and load it from here, using --tokenizer_name".format(tokenizer_class.__name__)
)
modeling.PAD_ID = tokenizer.pad_token_id
modeling.CLS_ID = tokenizer.cls_token_id
modeling.SEP_ID = tokenizer.sep_token_id
if args.model_name_or_path:
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir,
)
else:
model = model_class(config=config)
word_align(args, model, tokenizer)
if __name__ == "__main__":
main()
| [
"torch.cuda.manual_seed_all",
"torch.nn.utils.rnn.pad_sequence",
"torch.no_grad",
"torch.utils.data.SequentialSampler",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.1.0 | gitlost-murali/awesome-align | 39fb45ca85a98e005447bddb52c48e65ce7d399b |
1.8 | import copy
import os
import pdb
import random
from typing import Dict, List, Text, TypeVar
import torch
import torch.nn as nn
import torch.nn.functional as F
from elvis.modeling.models import build_net
from elvis.modeling.models.layers import FC, MLP
from elvis.utils.vlp_objectives import optimal_transport_dist
from .base import MetaArch
from .build import ARCH_REGISTRY
Tensor = TypeVar('torch.tensor')
__all__ = ['AlignmentVLP',
'build_align_vlp']
class AlignmentVLP(MetaArch):
"""Meta architecture for Visual Language Pretraining (VLP) based on image-caption alignment
"""
def __init__(self, model, max_visual, max_tokens, tasks_dict) -> None:
super().__init__()
self.model = model
self.max_visual = max_visual
self.max_tokens = max_tokens+2 #take into account [CLS] and [SEP]
self.tasks_dict = tasks_dict
self.lm_mlp = MLP(in_features=self.model.embed_dim,
hidden_dim=self.model.embed_dim,
out_features=len(self.model.tokenizer)-1,
dropout_p=.1)
self.itm_fc = FC(in_features=self.model.embed_dim, out_features=2)
def forward(self, vis_in, txt_in, vis_mask, txt_mask, **kwargs) -> Dict:
cntx_emb = self.model(vis_in=vis_in, vis_mask=vis_mask, txt_in=txt_in, txt_mask=txt_mask)
txt_emb = cntx_emb[:, :self.max_tokens]
itm_logits = self.itm_fc(txt_emb[:, 0, :]) #pass everything but use only [CLS]: better parallelization of loss computation
lm_logits = self.lm_mlp(txt_emb[:, 1:, :])
#? exclude special tokens from ot computation
vis_mask = torch.cat(
(torch.ones((vis_mask.shape[0], 1), device=vis_mask.device), vis_mask),
dim=-1) #add attention for [IMG]
ot_dist = optimal_transport_dist(txt_emb=cntx_emb[:, :self.max_tokens, :].float(),
img_emb=cntx_emb[:, self.max_tokens:, :].float(),
txt_pad=~txt_mask.bool(),
img_pad=~vis_mask.bool()
)
return {'lm_logits': lm_logits, 'itm_logits': itm_logits, 'ot_dist': ot_dist}
def compute_loss(self, lm_logits, itm_logits, lm_targets, itm_targets, **kwargs) -> Dict:
B = lm_logits.shape[0]
n_mlm = sum([t == 'MLM' for t in kwargs['tasks']])
n_itm = len(kwargs['tasks']) - n_mlm
loss_dict = {}
# Compute lm loss (compute it even when n_mlm == 0, otherwise DDP would raise an exception)
lm_loss = F.cross_entropy(lm_logits.transpose(1, 2), lm_targets[:, 1:], reduction='sum')
if n_mlm > 0:
lm_loss /= n_mlm
loss_dict['lm_loss'] = lm_loss
# Compute itm loss (compute it even when n_itm == 0, otherwise DDP would raise an exception)
itm_loss = F.cross_entropy(itm_logits, itm_targets[:, 0], reduction='sum')
ot_pos = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 1)
ot_neg = kwargs['ot_dist'].masked_select(itm_targets[:, 0] == 0)
#we want to maximize the OT distance for negative pairs and minimize OT distance for positive ones
ot_loss = ot_pos.sum() - ot_neg.sum()
itm_loss = (itm_loss + 0.1 * ot_loss)
if n_itm > 0:
itm_loss /= n_itm
loss_dict['itm_loss'] = itm_loss
loss_dict['loss'] = sum(loss_dict.values())
return loss_dict
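# --- Editor's sketch (dummy tensors, torch only) of the masked_select split used in
# compute_loss above: OT distances of positive pairs are minimised, negatives maximised. ---
_ot_dist = torch.tensor([0.2, 0.9, 0.4])
_itm_targets = torch.tensor([1, 0, 1])
_ot_pos = _ot_dist.masked_select(_itm_targets == 1)
_ot_neg = _ot_dist.masked_select(_itm_targets == 0)
print(_ot_pos.sum() - _ot_neg.sum())  # tensor(-0.3000)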
def save_on_disk(self, path):
state_dict = copy.deepcopy(self).cpu().state_dict()
ckp_file = os.path.join(path, 'state_dict.pt')
torch.save(state_dict, ckp_file)
@ARCH_REGISTRY.register()
def build_align_vlp(cfg):
model, data_interface = build_net(cfg.MODEL, get_interface='vlp')
vlp = AlignmentVLP(model,
max_visual=cfg.MODEL.MAX_N_VISUAL,
max_tokens=cfg.MODEL.MAX_N_TOKENS,
tasks_dict=cfg.MODEL.TASKS.get_as_dict())
return vlp, data_interface
| [
"torch.nn.functional.cross_entropy",
"torch.save",
"torch.ones"
] | 1.8.1 | seo-95/elvis | a89c759acdf6ce64c7e6863aeb68dc0ba3293fed |
1.4 | import numpy as np
import math
import functools
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
import layers
from sync_batchnorm import SynchronizedBatchNorm2d as SyncBatchNorm2d
# Architectures for G
# Attention is passed in in the format '32_64' to mean applying an attention
# block at both resolution 32x32 and 64x64. Just '64' will apply at 64x64.
def G_arch(ch=64, attention='64', ksize='333333', dilation='111111'):
arch = {}
arch[512] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2, 1]],
'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1, 1]],
'upsample' : [True] * 7,
'resolution' : [8, 16, 32, 64, 128, 256, 512],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,10)}}
arch[256] = {'in_channels' : [ch * item for item in [16, 16, 8, 8, 4, 2]],
'out_channels' : [ch * item for item in [16, 8, 8, 4, 2, 1]],
'upsample' : [True] * 6,
'resolution' : [8, 16, 32, 64, 128, 256],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,9)}}
arch[128] = {'in_channels' : [ch * item for item in [16, 16, 8, 4, 2]],
'out_channels' : [ch * item for item in [16, 8, 4, 2, 1]],
'upsample' : [True] * 5,
'resolution' : [8, 16, 32, 64, 128],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,8)}}
arch[64] = {'in_channels' : [ch * item for item in [16, 16, 8, 4]],
'out_channels' : [ch * item for item in [16, 8, 4, 2]],
'upsample' : [True] * 4,
'resolution' : [8, 16, 32, 64],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,7)}}
arch[32] = {'in_channels' : [ch * item for item in [4, 4, 4]],
'out_channels' : [ch * item for item in [4, 4, 4]],
'upsample' : [True] * 3,
'resolution' : [8, 16, 32],
'attention' : {2**i: (2**i in [int(item) for item in attention.split('_')])
for i in range(3,6)}}
return arch
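# --- Editor's sketch of reading the architecture dict above: '32_64' enables attention
# at the 32x32 and 64x64 stages of the 128-resolution generator. ---
_arch = G_arch(ch=64, attention='32_64')[128]
print(_arch['resolution'])                                    # [8, 16, 32, 64, 128]
print([res for res, on in _arch['attention'].items() if on])  # [32, 64]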
class Generator(nn.Module):
def __init__(self, G_ch=64, dim_z=128, bottom_width=4, resolution=128,
G_kernel_size=3, G_attn='64', n_classes=1000,
num_G_SVs=1, num_G_SV_itrs=1,
G_shared=True, shared_dim=0, hier=False,
cross_replica=False, mybn=False,
G_activation=nn.ReLU(inplace=False),
G_lr=5e-5, G_B1=0.0, G_B2=0.999, adam_eps=1e-8,
BN_eps=1e-5, SN_eps=1e-12, G_mixed_precision=False, G_fp16=False,
G_init='ortho', skip_init=False, no_optim=False,
G_param='SN', norm_style='bn',
**kwargs):
super(Generator, self).__init__()
    # Channel width multiplier
self.ch = G_ch
# Dimensionality of the latent space
self.dim_z = dim_z
# The initial spatial dimensions
self.bottom_width = bottom_width
# Resolution of the output
self.resolution = resolution
# Kernel size?
self.kernel_size = G_kernel_size
# Attention?
self.attention = G_attn
# number of classes, for use in categorical conditional generation
self.n_classes = n_classes
# Use shared embeddings?
self.G_shared = G_shared
# Dimensionality of the shared embedding? Unused if not using G_shared
self.shared_dim = shared_dim if shared_dim > 0 else dim_z
# Hierarchical latent space?
self.hier = hier
# Cross replica batchnorm?
self.cross_replica = cross_replica
# Use my batchnorm?
self.mybn = mybn
# nonlinearity for residual blocks
self.activation = G_activation
# Initialization style
self.init = G_init
# Parameterization style
self.G_param = G_param
# Normalization style
self.norm_style = norm_style
# Epsilon for BatchNorm?
self.BN_eps = BN_eps
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# fp16?
self.fp16 = G_fp16
# Architecture dict
self.arch = G_arch(self.ch, self.attention)[resolution]
# If using hierarchical latents, adjust z
if self.hier:
# Number of places z slots into
self.num_slots = len(self.arch['in_channels']) + 1
self.z_chunk_size = (self.dim_z // self.num_slots)
# Recalculate latent dimensionality for even splitting into chunks
self.dim_z = self.z_chunk_size * self.num_slots
else:
self.num_slots = 1
self.z_chunk_size = 0
# Which convs, batchnorms, and linear layers to use
if self.G_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_G_SVs, num_itrs=num_G_SV_itrs,
eps=self.SN_eps)
else:
self.which_conv = functools.partial(nn.Conv2d, kernel_size=3, padding=1)
self.which_linear = nn.Linear
# We use a non-spectral-normed embedding here regardless;
# For some reason applying SN to G's embedding seems to randomly cripple G
self.which_embedding = nn.Embedding
# bn_linear = (functools.partial(self.which_linear, bias=False) if self.G_shared
# else self.which_embedding)
self.which_bn = functools.partial(layers.bn,
# which_linear=bn_linear,
cross_replica=self.cross_replica,
mybn=self.mybn,
# input_size=(self.shared_dim + self.z_chunk_size if self.G_shared
# else self.n_classes),
# norm_style=self.norm_style,
eps=self.BN_eps)
# Prepare model
# If not using shared embeddings, self.shared is just a passthrough
self.shared = (self.which_embedding(n_classes, self.shared_dim) if G_shared
else layers.identity())
# First linear layer
self.linear = self.which_linear(self.dim_z // self.num_slots,
self.arch['in_channels'][0] * (self.bottom_width **2))
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
# while the inner loop is over a given block
self.blocks = []
for index in range(len(self.arch['out_channels'])):
self.blocks += [[layers.GBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
which_bn=self.which_bn,
activation=self.activation,
upsample=(functools.partial(F.interpolate, scale_factor=2)
if self.arch['upsample'][index] else None))]]
# If attention on this block, attach it to the end
if self.arch['attention'][self.arch['resolution'][index]]:
print('Adding attention layer in G at resolution %d' % self.arch['resolution'][index])
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index], self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
# output layer: batchnorm-relu-conv.
# Consider using a non-spectral conv here
self.output_layer = nn.Sequential(layers.bn(self.arch['out_channels'][-1],
cross_replica=self.cross_replica,
mybn=self.mybn),
self.activation,
self.which_conv(self.arch['out_channels'][-1], 3))
# Prepare spatial modulation model
# If not using shared embeddings, self.shared is just a passthrough
self.spatial_modulation_shared = (self.which_embedding(n_classes, self.shared_dim))
# First linear layer
self.spatial_modulation_linear = self.which_linear(self.dim_z + self.shared_dim,
self.arch['in_channels'][0] * (self.bottom_width **2))
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
# while the inner loop is over a given block
self.spatial_modulation_blocks = []
for index in range(len(self.arch['out_channels'])):
self.spatial_modulation_blocks += [[layers.SpatialModulationGBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
which_bn=self.which_bn,
activation=self.activation,
upsample=(functools.partial(F.interpolate, scale_factor=2)
if self.arch['upsample'][index] else None))]]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.spatial_modulation_blocks = nn.ModuleList([nn.ModuleList(block) for block in self.spatial_modulation_blocks])
# Initialize weights. Optionally skip init for testing.
if not skip_init:
self.init_weights()
# Set up optimizer
# If this is an EMA copy, no need for an optim, so just return now
if no_optim:
return
self.lr, self.B1, self.B2, self.adam_eps = G_lr, G_B1, G_B2, adam_eps
if G_mixed_precision:
print('Using fp16 adam in G...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0,
eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print("Param count for G's initialized parameters: %d" % self.param_count)
# Note on this forward function: we pass in a y vector which has
# already been passed through G.shared to enable easy class-wise
# interpolation later. If we passed in the one-hot and then ran it through
# G.shared in this forward function, it would be harder to handle.
def forward(self, z, y):
# If hierarchical, concatenate zs and ys
if self.hier:
zs = torch.split(z, self.z_chunk_size, 1)
z = zs[0]
ys = [torch.cat([y, item], 1) for item in zs[1:]]
# Class embedding layer
# spatial_c = self.spatial_modulation_shared(y)
# Mixing layer
spatial_h = self.spatial_modulation_linear(torch.cat([y, z], 1))
# Reshape
spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)
else:
ys = [y] * len(self.blocks)
# Class embedding layer
spatial_c = self.spatial_modulation_shared(y)
# Mixing layer
if len(spatial_c.shape) == 3:
spatial_c = torch.squeeze(spatial_c, dim=1)
spatial_h = self.spatial_modulation_linear(torch.cat([spatial_c, z], 1))
# Reshape
spatial_h = spatial_h.view(spatial_h.size(0), -1, self.bottom_width, self.bottom_width)
# First linear layer
h = self.linear(z)
# Reshape
h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
# Loop over blocks
for index, blocklist in enumerate(self.blocks):
# Spatial modulation calculation
spatial_h, voxelwise_a_mod, voxelwise_b_mod = self.spatial_modulation_blocks[index][0](spatial_h)
# Second inner loop in case block has multiple layers
for block in blocklist:
# Main layer forward
h = block(h, ys[index])
# Most coarse modulation
# h = (h - torch.mean(h, dim=(2, 3), keepdim=True)) / torch.std(h, dim=(2, 3), keepdim=True)
# h = h * (1 + global_a_mod.repeat(1, 1, h.shape[2], h.shape[3])) + global_b_mod.repeat(1, 1, h.shape[2], h.shape[3])
# Most fine modulation
h = (h - torch.mean(h, dim=(1, 2, 3), keepdim=True)) / torch.std(h, dim=(1, 2, 3), keepdim=True)
h = h * (1 + voxelwise_a_mod) + voxelwise_b_mod
# Apply batchnorm-relu-conv-tanh at output
return torch.tanh(self.output_layer(h))
# Discriminator architecture, same paradigm as G's above
def D_arch(ch=64, attention='64',ksize='333333', dilation='111111'):
arch = {}
arch[256] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 8, 16]],
'out_channels' : [item * ch for item in [1, 2, 4, 8, 8, 16, 16]],
'downsample' : [True] * 6 + [False],
'resolution' : [128, 64, 32, 16, 8, 4, 4 ],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,8)}}
arch[128] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8, 16]],
'out_channels' : [item * ch for item in [1, 2, 4, 8, 16, 16]],
'downsample' : [True] * 5 + [False],
'resolution' : [64, 32, 16, 8, 4, 4],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,8)}}
arch[64] = {'in_channels' : [3] + [ch*item for item in [1, 2, 4, 8]],
'out_channels' : [item * ch for item in [1, 2, 4, 8, 16]],
'downsample' : [True] * 4 + [False],
'resolution' : [32, 16, 8, 4, 4],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,7)}}
arch[32] = {'in_channels' : [3] + [item * ch for item in [4, 4, 4]],
'out_channels' : [item * ch for item in [4, 4, 4, 4]],
'downsample' : [True, True, False, False],
'resolution' : [16, 16, 16, 16],
'attention' : {2**i: 2**i in [int(item) for item in attention.split('_')]
for i in range(2,6)}}
return arch
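# Example: D_arch(ch=64, attention='64')[128]['attention'][64] is True, so a single
# self-attention block is attached after the 64x64 stage of the 128x128 discriminator.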
class Discriminator(nn.Module):
def __init__(self, D_ch=64, D_wide=True, resolution=128,
D_kernel_size=3, D_attn='64', n_classes=1000,
num_D_SVs=1, num_D_SV_itrs=1, D_activation=nn.ReLU(inplace=False),
D_lr=2e-4, D_B1=0.0, D_B2=0.999, adam_eps=1e-8,
SN_eps=1e-12, output_dim=1, D_mixed_precision=False, D_fp16=False,
D_init='ortho', skip_init=False, D_param='SN', **kwargs):
super(Discriminator, self).__init__()
# Width multiplier
self.ch = D_ch
# Use Wide D as in BigGAN and SA-GAN or skinny D as in SN-GAN?
self.D_wide = D_wide
# Resolution
self.resolution = resolution
# Kernel size
self.kernel_size = D_kernel_size
# Attention?
self.attention = D_attn
# Number of classes
self.n_classes = n_classes
# Activation
self.activation = D_activation
# Initialization style
self.init = D_init
# Parameterization style
self.D_param = D_param
# Epsilon for Spectral Norm?
self.SN_eps = SN_eps
# Fp16?
self.fp16 = D_fp16
# Architecture
self.arch = D_arch(self.ch, self.attention)[resolution]
# Which convs, batchnorms, and linear layers to use
# No option to turn off SN in D right now
if self.D_param == 'SN':
self.which_conv = functools.partial(layers.SNConv2d,
kernel_size=3, padding=1,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
self.which_linear = functools.partial(layers.SNLinear,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
self.which_embedding = functools.partial(layers.SNEmbedding,
num_svs=num_D_SVs, num_itrs=num_D_SV_itrs,
eps=self.SN_eps)
# Prepare model
# self.blocks is a doubly-nested list of modules, the outer loop intended
# to be over blocks at a given resolution (resblocks and/or self-attention)
self.blocks = []
for index in range(len(self.arch['out_channels'])):
self.blocks += [[layers.DBlock(in_channels=self.arch['in_channels'][index],
out_channels=self.arch['out_channels'][index],
which_conv=self.which_conv,
wide=self.D_wide,
activation=self.activation,
preactivation=(index > 0),
downsample=(nn.AvgPool2d(2) if self.arch['downsample'][index] else None))]]
# If attention on this block, attach it to the end
if self.arch['attention'][self.arch['resolution'][index]]:
print('Adding attention layer in D at resolution %d' % self.arch['resolution'][index])
self.blocks[-1] += [layers.Attention(self.arch['out_channels'][index],
self.which_conv)]
# Turn self.blocks into a ModuleList so that it's all properly registered.
self.blocks = nn.ModuleList([nn.ModuleList(block) for block in self.blocks])
# Linear output layer. The output dimension is typically 1, but may be
# larger if we're e.g. turning this into a VAE with an inference output
self.linear = self.which_linear(self.arch['out_channels'][-1], output_dim)
# Embedding for projection discrimination
self.embed = self.which_embedding(self.n_classes, self.arch['out_channels'][-1])
# Initialize weights
if not skip_init:
self.init_weights()
# Set up optimizer
self.lr, self.B1, self.B2, self.adam_eps = D_lr, D_B1, D_B2, adam_eps
if D_mixed_precision:
print('Using fp16 adam in D...')
import utils
self.optim = utils.Adam16(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
else:
self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)
# LR scheduling, left here for forward compatibility
# self.lr_sched = {'itr' : 0}# if self.progressive else {}
# self.j = 0
# Initialize
def init_weights(self):
self.param_count = 0
for module in self.modules():
if (isinstance(module, nn.Conv2d)
or isinstance(module, nn.Linear)
or isinstance(module, nn.Embedding)):
if self.init == 'ortho':
init.orthogonal_(module.weight)
elif self.init == 'N02':
init.normal_(module.weight, 0, 0.02)
elif self.init in ['glorot', 'xavier']:
init.xavier_uniform_(module.weight)
else:
print('Init style not recognized...')
self.param_count += sum([p.data.nelement() for p in module.parameters()])
    print("Param count for D's initialized parameters: %d" % self.param_count)
def forward(self, x, y=None):
# Stick x into h for cleaner for loops without flow control
h = x
# Loop over blocks
for index, blocklist in enumerate(self.blocks):
for block in blocklist:
h = block(h)
# Apply global sum pooling as in SN-GAN
h = torch.sum(self.activation(h), [2, 3])
# Get initial class-unconditional output
out = self.linear(h)
# Get projection of final featureset onto class vectors and add to evidence
out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)
return out
# Parallelized G_D to minimize cross-gpu communication
# Without this, Generator outputs would get all-gathered and then rebroadcast.
class G_D(nn.Module):
def __init__(self, G, D):
super(G_D, self).__init__()
self.G = G
self.D = D
def forward(self, z, gy, x=None, dy=None, train_G=False, return_G_z=False,
split_D=False):
# If training G, enable grad tape
with torch.set_grad_enabled(train_G):
# Get Generator output given noise
G_z = self.G(z, self.G.shared(gy))
# Cast as necessary
if self.G.fp16 and not self.D.fp16:
G_z = G_z.float()
if self.D.fp16 and not self.G.fp16:
G_z = G_z.half()
# Split_D means to run D once with real data and once with fake,
# rather than concatenating along the batch dimension.
if split_D:
D_fake = self.D(G_z, gy)
if x is not None:
D_real = self.D(x, dy)
return D_fake, D_real
else:
if return_G_z:
return D_fake, G_z
else:
return D_fake
# If real data is provided, concatenate it with the Generator's output
# along the batch dimension for improved efficiency.
else:
D_input = torch.cat([G_z, x], 0) if x is not None else G_z
D_class = torch.cat([gy, dy], 0) if dy is not None else gy
# Get Discriminator output
D_out = self.D(D_input, D_class)
if x is not None:
return torch.split(D_out, [G_z.shape[0], x.shape[0]]) # D_fake, D_real
else:
if return_G_z:
return D_out, G_z
else:
return D_out
| [
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.AvgPool2d",
"torch.split",
"torch.std",
"torch.nn.init.xavier_uniform_",
"torch.nn.ReLU",
"torch.mean",
"torch.squeeze",
"torch.nn.init.normal_",
"torch.nn.init.orthogonal_",
"torch.set_grad_enabled"
] | 1.4 | twice154/Spatial-Self-modulation-on-BigGAN | 6ca691231bf7e8fd388a08b5ce6b4e30a50dd57b |
1.7 | from collections import OrderedDict
import numpy as np
import torch
from torch import nn as nn
import torch.nn.functional as F
import torch.optim as optim
import itertools
import rlkit.torch.utils.pytorch_util as ptu
from rlkit.core.trainer import Trainer
from rlkit.core.eval_util import create_stats_ordered_dict
class SoftActorCritic(Trainer):
"""
version that:
- uses reparameterization trick
- has two Q functions
- has auto-tuned alpha
"""
def __init__(
self,
policy,
qf1,
qf2,
reward_scale=1.0,
discount=0.99,
policy_lr=1e-3,
qf_lr=1e-3,
alpha_lr=3e-4,
soft_target_tau=1e-2,
alpha=0.2,
train_alpha=True,
policy_mean_reg_weight=1e-3,
policy_std_reg_weight=1e-3,
optimizer_class=optim.Adam,
beta_1=0.9,
**kwargs
):
self.policy = policy
self.qf1 = qf1
self.qf2 = qf2
self.reward_scale = reward_scale
self.discount = discount
self.soft_target_tau = soft_target_tau
self.policy_mean_reg_weight = policy_mean_reg_weight
self.policy_std_reg_weight = policy_std_reg_weight
self.train_alpha = train_alpha
self.log_alpha = torch.tensor(np.log(alpha), requires_grad=train_alpha)
self.alpha = self.log_alpha.detach().exp()
assert "env" in kwargs.keys(), "env info should be taken into SAC alpha"
self.target_entropy = -np.prod(kwargs["env"].action_space.shape)
self.target_qf1 = qf1.copy()
self.target_qf2 = qf2.copy()
self.eval_statistics = None
self.policy_optimizer = optimizer_class(
self.policy.parameters(), lr=policy_lr, betas=(beta_1, 0.999)
)
self.qf1_optimizer = optimizer_class(
self.qf1.parameters(), lr=qf_lr, betas=(beta_1, 0.999)
)
self.qf2_optimizer = optimizer_class(
self.qf2.parameters(), lr=qf_lr, betas=(beta_1, 0.999)
)
self.alpha_optimizer = optimizer_class(
[self.log_alpha], lr=alpha_lr, betas=(beta_1, 0.999)
)
def train_step(self, batch):
# q_params = itertools.chain(self.qf1.parameters(), self.qf2.parameters())
# policy_params = itertools.chain(self.policy.parameters())
rewards = self.reward_scale * batch["rewards"]
terminals = batch["terminals"]
obs = batch["observations"]
actions = batch["actions"]
next_obs = batch["next_observations"]
"""
QF Loss
"""
# Only unfreeze parameter of Q
# for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):
# p.requires_grad = True
# for p in self.policy.parameters():
# p.requires_grad = False
self.qf1_optimizer.zero_grad()
self.qf2_optimizer.zero_grad()
q1_pred = self.qf1(obs, actions)
q2_pred = self.qf2(obs, actions)
# Make sure policy accounts for squashing functions like tanh correctly!
next_policy_outputs = self.policy(next_obs, return_log_prob=True)
# in this part, we only need new_actions and log_pi with no grad
(
next_new_actions,
next_policy_mean,
next_policy_log_std,
next_log_pi,
) = next_policy_outputs[:4]
        target_qf1_values = self.target_qf1(
            next_obs, next_new_actions
        )  # no grad needed; shared by both target-Q computations
        target_qf2_values = self.target_qf2(
            next_obs, next_new_actions
        )  # no grad needed; shared by both target-Q computations
min_target_value = torch.min(target_qf1_values, target_qf2_values)
q_target = rewards + (1.0 - terminals) * self.discount * (
min_target_value - self.alpha * next_log_pi
) ## original implementation has detach
q_target = q_target.detach()
qf1_loss = 0.5 * torch.mean((q1_pred - q_target) ** 2)
qf2_loss = 0.5 * torch.mean((q2_pred - q_target) ** 2)
# freeze parameter of Q
# for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):
# p.requires_grad = False
qf1_loss.backward()
qf2_loss.backward()
self.qf1_optimizer.step()
self.qf2_optimizer.step()
"""
Policy Loss
"""
# for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):
# p.requires_grad = False
# for p in self.policy.parameters():
# p.requires_grad = True
policy_outputs = self.policy(obs, return_log_prob=True)
new_actions, policy_mean, policy_log_std, log_pi = policy_outputs[:4]
q1_new_acts = self.qf1(obs, new_actions)
q2_new_acts = self.qf2(obs, new_actions) ## error
q_new_actions = torch.min(q1_new_acts, q2_new_acts)
self.policy_optimizer.zero_grad()
policy_loss = torch.mean(self.alpha * log_pi - q_new_actions) ##
mean_reg_loss = self.policy_mean_reg_weight * (policy_mean ** 2).mean()
std_reg_loss = self.policy_std_reg_weight * (policy_log_std ** 2).mean()
policy_reg_loss = mean_reg_loss + std_reg_loss
policy_loss = policy_loss + policy_reg_loss
policy_loss.backward()
self.policy_optimizer.step()
"""
Update alpha
"""
if self.train_alpha:
log_prob = log_pi.detach() + self.target_entropy
alpha_loss = -(self.log_alpha * log_prob).mean()
self.alpha_optimizer.zero_grad()
alpha_loss.backward()
self.alpha_optimizer.step()
self.alpha = self.log_alpha.detach().exp()
"""
Update networks
"""
# unfreeze all -> initial states
# for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):
# p.requires_grad = True
# for p in self.policy.parameters():
# p.requires_grad = True
# unfreeze parameter of Q
# for p in itertools.chain(self.qf1.parameters(), self.qf2.parameters()):
# p.requires_grad = True
self._update_target_network()
"""
Save some statistics for eval
"""
if self.eval_statistics is None:
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
self.eval_statistics = OrderedDict()
self.eval_statistics["Reward Scale"] = self.reward_scale
self.eval_statistics["QF1 Loss"] = np.mean(ptu.get_numpy(qf1_loss))
self.eval_statistics["QF2 Loss"] = np.mean(ptu.get_numpy(qf2_loss))
if self.train_alpha:
self.eval_statistics["Alpha Loss"] = np.mean(ptu.get_numpy(alpha_loss))
self.eval_statistics["Policy Loss"] = np.mean(ptu.get_numpy(policy_loss))
self.eval_statistics.update(
create_stats_ordered_dict(
"Q1 Predictions",
ptu.get_numpy(q1_pred),
)
)
self.eval_statistics.update(
create_stats_ordered_dict(
"Q2 Predictions",
ptu.get_numpy(q2_pred),
)
)
self.eval_statistics.update(
create_stats_ordered_dict(
"Alpha",
[ptu.get_numpy(self.alpha)],
)
)
self.eval_statistics.update(
create_stats_ordered_dict(
"Log Pis",
ptu.get_numpy(log_pi),
)
)
self.eval_statistics.update(
create_stats_ordered_dict(
"Policy mu",
ptu.get_numpy(policy_mean),
)
)
self.eval_statistics.update(
create_stats_ordered_dict(
"Policy log std",
ptu.get_numpy(policy_log_std),
)
)
@property
def networks(self):
return [
self.policy,
self.qf1,
self.qf2,
self.target_qf1,
self.target_qf2,
]
def _update_target_network(self):
ptu.soft_update_from_to(self.qf1, self.target_qf1, self.soft_target_tau)
ptu.soft_update_from_to(self.qf2, self.target_qf2, self.soft_target_tau)
def get_snapshot(self):
return dict(
qf1=self.qf1,
qf2=self.qf2,
policy=self.policy,
target_qf1=self.target_qf1,
target_qf2=self.target_qf2,
log_alpha=self.log_alpha,
policy_optimizer=self.policy_optimizer,
qf1_optimizer=self.qf1_optimizer,
qf2_optimizer=self.qf2_optimizer,
alpha_optimizer=self.alpha_optimizer,
)
def load_snapshot(self, snapshot):
self.qf1 = snapshot["qf1"]
self.qf2 = snapshot["qf2"]
self.policy = snapshot["policy"]
self.target_qf1 = snapshot["target_qf1"]
self.target_qf2 = snapshot["target_qf2"]
self.log_alpha = snapshot["log_alpha"]
self.policy_optimizer = snapshot["policy_optimizer"]
self.qf1_optimizer = snapshot["qf1_optimizer"]
self.qf2_optimizer = snapshot["qf2_optimizer"]
self.alpha_optimizer = snapshot["alpha_optimizer"]
def get_eval_statistics(self):
return self.eval_statistics
def end_epoch(self):
self.eval_statistics = None
    def to(self, device):
        # move log_alpha in place so the alpha optimizer keeps referencing the same tensor
        self.log_alpha.data = self.log_alpha.data.to(device)
        super().to(device)
| [
"torch.mean",
"torch.min"
] | 1.7.0 | Ericonaldo/ILSwiss | efd25d457fd1578005c6fbc45cae29e9ab64a99d |
1.0 | import argparse
import os
import os.path as osp
import shutil
import tempfile
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import load_checkpoint, get_dist_info
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmdet.apis import init_dist
from mmdet.core import results2json
# , coco_eval,
from txt_val import txt_eval
from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from mmdet import datasets
def single_gpu_test(model, data_loader, show=False):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=not show, **data)
results.append(result)
if show:
model.module.show_result(data, result, dataset.img_norm_cfg)
# batch_size = data['img'][0].size(0)
batch_size = 1
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
results = collect_results(results, len(dataset), tmpdir)
return results
def collect_results(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
# parser.add_argument(
# '--eval',
# type=str,
# nargs='+',
# choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
# help='eval types')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--iou_thr', type=float, default=0.5)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
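# Example invocation (the script name and paths below are placeholders):
#   python test.py configs/some_config.py work_dirs/some_ckpt.pth --out results.pkl --iou_thr 0.5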
def main():
#os.environ["CUDA_VISIBLE_DEVICES"] = "1"
args = parse_args()
assert args.out or args.show, \
('Please specify at least one operation (save or show the results) '
'with the argument "--out" or "--show"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
rank, _ = get_dist_info()
if args.out and rank == 0:
print('\nwriting results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
result_file = args.out
# args = parser.parse_args()
# cfg = mmcv.Config.fromfile(args.config)
# test_dataset = mmcv.runner.obj_from_dict(cfg.data.test, datasets)
# txt_eval(args.result, test_dataset, args.iou_thr)
txt_eval(result_file, dataset, iou_thr=args.iou_thr)
# eval_types = args.eval
# if eval_types:
# print('Starting evaluate {}'.format(' and '.join(eval_types)))
# if eval_types == ['proposal_fast']:
# result_file = args.out
# coco_eval(result_file, eval_types, dataset.coco)
# else:
# if not isinstance(outputs[0], dict):
# result_files = results2json(dataset, outputs, args.out)
# coco_eval(result_files, eval_types, dataset.coco)
# else:
# for name in outputs[0]:
# print('\nEvaluating {}'.format(name))
# outputs_ = [out[name] for out in outputs]
# result_file = args.out + '.{}'.format(name)
# result_files = results2json(dataset, outputs_,
# result_file)
# coco_eval(result_files, eval_types, dataset.coco)
if __name__ == '__main__':
main()
| [
"torch.no_grad",
"torch.distributed.barrier",
"torch.full",
"torch.distributed.broadcast"
] | 1.0 | lizhe960118/CenterNet | d1a0d13974e2316c6d127ca7860866cdd93bcfa7 |
1.0 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from ..registry import LOSSES
# def gaussian_radius(det_size, min_overlap=0.7):
# height, width = det_size
# a1 = 1
# b1 = (height + width)
# c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
# sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
# r1 = (b1 + sq1) / 2
# a2 = 4
# b2 = 2 * (height + width)
# c2 = (1 - min_overlap) * width * height
# sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
# r2 = (b2 + sq2) / 2
# a3 = 4 * min_overlap
# b3 = -2 * min_overlap * (height + width)
# c3 = (min_overlap - 1) * width * height
# sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
# r3 = (b3 + sq3) / 2
# return min(r1, r2, r3)
# def gaussian2D(shape, sigma=1):
# m, n = [(ss - 1.) / 2. for ss in shape]
# y, x = np.ogrid[-m:m+1,-n:n+1]
# h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
# h[h < np.finfo(h.dtype).eps * h.max()] = 0
# return h
# def draw_umich_gaussian(heatmap, center, radius, k=1):
# diameter = 2 * radius + 1
# gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
# x, y = int(center[0]), int(center[1])
# height, width = heatmap.shape[0:2]
# left, right = min(x, radius), min(width - x, radius + 1)
# top, bottom = min(y, radius), min(height - y, radius + 1)
# masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
# masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
# if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
# np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
# return heatmap
def _neg_loss(pred, gt):
''' Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w)
gt_regr (batch x c x h x w)
'''
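    # Written out, the loss computed below is
    #   L = -1/N * [ sum_{y=1} (1 - p)^2 * log(p) + sum_{y<1} (1 - y)^4 * p^2 * log(1 - p) ]
    # where N is the number of positive (y == 1) locations; if N == 0 only the negative term is kept.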
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
    # print(pred)  # almost all values are 0
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
# print("num_pos:", num_pos)
# print("pos_loss:", pos_loss)
# print("neg_loss:", neg_loss)
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
class FocalLoss(nn.Module):
'''nn.Module warpper for focal loss'''
def __init__(self):
super(FocalLoss, self).__init__()
self.neg_loss = _neg_loss
def forward(self, out, target):
return self.neg_loss(out, target)
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _tranpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
class RegL1Loss(nn.Module):
def __init__(self):
super(RegL1Loss, self).__init__()
def forward(self, output, mask, ind, target):
pred = _tranpose_and_gather_feat(output, ind)
mask = mask.unsqueeze(2).expand_as(pred).float()
# print(target)
# import pdb; pdb.set_trace()
# loss = F.l1_loss(pred * mask, target * mask, reduction='elementwise_mean')
# loss = F.l1_loss(pred * mask, target * mask, size_average=False)
loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
loss = loss / (mask.sum() + 1e-4)
return loss
@LOSSES.register_module
class CtdetLoss(torch.nn.Module):
def __init__(self):
super(CtdetLoss, self).__init__()
# self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()
# self.crit_reg = RegL1Loss() if opt.reg_loss == 'l1' else \
# RegLoss() if opt.reg_loss == 'sl1' else None
# self.crit_wh = torch.nn.L1Loss(reduction='sum') if opt.dense_wh else \
# NormRegL1Loss() if opt.norm_wh else \
# RegWeightedL1Loss() if opt.cat_spec_wh else self.crit_reg
self.crit = FocalLoss()
self.crit_reg = RegL1Loss()
# self.crit_wh = self.crit_reg
# self.opt = opt
# opts
self.num_stacks = 1
self.wh_weight = 0.1
self.off_weight = 1
self.hm_weight = 1
def forward(self, outputs, **kwargs):
batch = kwargs
hm_loss, wh_loss, off_loss = 0, 0, 0
for s in range(self.num_stacks):
output = outputs[s]
# for key, value in output.items():
# print(key, value.shape)
# if not opt.mse_loss:
output['hm'] = torch.clamp(output['hm'].sigmoid_(), min=1e-4, max=1-1e-4)
# output['hm'] = output['hm'].sigmoid_()
# if opt.eval_oracle_hm:
# output['hm'] = batch['hm']
# if opt.eval_oracle_wh:
# output['wh'] = torch.from_numpy(gen_oracle_map(
# batch['wh'].detach().cpu().numpy(),
# batch['ind'].detach().cpu().numpy(),
# output['wh'].shape[3], output['wh'].shape[2])).to(opt.device)
# if opt.eval_oracle_offset:
# output['reg'] = torch.from_numpy(gen_oracle_map(
# batch['reg'].detach().cpu().numpy(),
# batch['ind'].detach().cpu().numpy(),
# output['reg'].shape[3], output['reg'].shape[2])).to(opt.device)
hm_loss += self.crit(output['hm'], batch['hm']) / self.num_stacks
if self.wh_weight > 0:
wh_loss += self.crit_reg(
output['wh'], batch['reg_mask'],
batch['ind'], batch['wh']) / self.num_stacks
if self.off_weight > 0:
off_loss += self.crit_reg(output['reg'], batch['reg_mask'],
batch['ind'], batch['reg']) / self.num_stacks
# loss = self.hm_weight * hm_loss + self.wh_weight * wh_loss + \
# self.off_weight * off_loss
losses = {'hm_loss': self.hm_weight * hm_loss,
'wh_loss': self.wh_weight * wh_loss, 'off_loss': self.off_weight * off_loss}
# loss_stats = {'loss': loss, 'hm_loss': hm_loss,
# 'wh_loss': wh_loss, 'off_loss': off_loss}
# return loss, loss_stats
        return losses
| [
"torch.nn.functional.l1_loss",
"torch.log",
"torch.pow"
] | 1.0 | lizhe960118/CenterNet | d1a0d13974e2316c6d127ca7860866cdd93bcfa7 |
1.0 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
import numpy as np
import cv2
import math
#import torch.nn.functional as F
from mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob, Scale, ConvModule
INF = 1e8
@HEADS.register_module
class WeightCenterHead(nn.Module):
def __init__(self,
num_classes, # init 80
in_channels,
feat_channels=256,
stacked_convs=1,
strides=(4, 8, 16, 32, 64),
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
use_cross = False,
loss_hm = dict(
type="CenterFocalLoss"
                 ),  # CenterFocalLoss is implemented here
loss_wh = dict(
type="L1Loss",
loss_weight=0.1
),
loss_offset = dict(
type="L1Loss",
loss_weight=1.0
),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(WeightCenterHead, self).__init__()
self.num_classes = num_classes
# self.cls_out_channels = num_classes - 1
self.cls_out_channels = num_classes
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.featmap_sizes = None
self.loss_hm = build_loss(loss_hm)
self.loss_wh = build_loss(loss_wh)
self.loss_offset = build_loss(loss_offset)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.use_cross = use_cross
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.wh_convs = nn.ModuleList()
self.offset_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.wh_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.offset_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.center_hm = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1, bias=True)
self.center_wh = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)
self.center_offset = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
# for m in self.cls_convs:
# normal_init(m.conv, std=0.01)
# for m in self.wh_convs:
# normal_init(m.conv, std=0.01)
# for m in self.offset_convs:
# normal_init(m.conv, std=0.01)
        #bias_hm = bias_init_with_prob(0.01)  # which initialization to use here?
#normal_init(self.center_hm, std=0.01, bias=bias_hm)
self.center_hm.bias.data.fill_(-2.19)
nn.init.constant_(self.center_wh.bias, 0)
nn.init.constant_(self.center_offset.bias, 0)
# normal_init(self.center_hm, std=0.01)
# normal_init(self.center_wh, std=0.01)
# normal_init(self.center_offset, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
wh_feat = x
offset_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.center_hm(cls_feat)
for wh_layer in self.wh_convs:
wh_feat = wh_layer(wh_feat)
wh_pred = self.center_wh(wh_feat)
for offset_layer in self.offset_convs:
offset_feat = offset_layer(offset_feat)
offset_pred = self.center_offset(offset_feat)
return cls_score, wh_pred, offset_pred
@force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))
def loss(self,
cls_scores,
wh_preds,
offset_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(wh_preds) == len(offset_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
self.featmap_sizes = featmap_sizes
all_level_points = self.get_points(featmap_sizes, offset_preds[0].dtype,
offset_preds[0].device)
#print(img_metas)
#self.c = img_metas['c']
#self.s = img_metas['s']
self.tensor_dtype = offset_preds[0].dtype
self.tensor_device = offset_preds[0].device
        heatmaps, wh_targets, offset_targets = self.center_target(gt_bboxes, gt_labels, img_metas, all_level_points)  # concatenated over all levels, one set per image
num_imgs = cls_scores[0].size(0) # batch_size
#print(num_imgs)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
] # cls_scores(num_levels, batch_size, 80, h, w) => (num_levels, batch_size * w * h, 80)
flatten_wh_preds = [
wh_pred.permute(0, 2, 3, 1).reshape(-1, 2) # batchsize, h, w, 2 => batchsize, h, w, 2
for wh_pred in wh_preds
]
flatten_offset_preds = [
offset_pred.permute(0, 2, 3, 1).reshape(-1, 2)
for offset_pred in offset_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_wh_preds = torch.cat(flatten_wh_preds)
flatten_offset_preds = torch.cat(flatten_offset_preds)
# targets
flatten_heatmaps = torch.cat(heatmaps)
flatten_wh_targets = torch.cat(wh_targets) # torch.Size([all_level_points, 2])
flatten_offset_targets = torch.cat(offset_targets)
# repeat points to align with bbox_preds
# flatten_points = torch.cat(
# [points.repeat(num_imgs, 1) for points in all_level_points])
# pos_inds = flatten_labels.nonzero().reshape(-1)
#print(flatten_wh_targets.shape)
#print(flatten_wh_targets.nonzero())
center_inds = flatten_wh_targets[...,0].nonzero().reshape(-1)
#print(center_inds)
num_center = len(center_inds)
#print(num_center)
# what about use the centerness * labels to indict an object
# loss_cls = self.loss_cls(
# flatten_cls_scores, flatten_labels, # labels gt is small area
# avg_factor=num_pos + num_imgs) # avoid num_pos is 0
flatten_cls_scores = torch.clamp(flatten_cls_scores.sigmoid_(), min=1e-4, max=1-1e-4)
loss_hm = self.loss_hm(flatten_cls_scores, flatten_heatmaps)
pos_wh_targets = flatten_wh_targets[center_inds]
#print(pos_wh_targets.shape)
pos_wh_preds = flatten_wh_preds[center_inds]
pos_offset_preds = flatten_offset_preds[center_inds]
pos_offset_targets = flatten_offset_targets[center_inds]
if num_center > 0:
# TODO: use the iou loss
# center_points = flatten_points[center_inds]
# center_decoded_bbox_preds = wh_offset2bbox(center_points, pos_wh_preds, pos_offset_preds)
# center_decoded_bbox_targets = wh_offset2bbox(center_points, pos_wh_targets, pos_offset_targets)
loss_wh = self.loss_wh(pos_wh_preds, pos_wh_targets, avg_factor=num_center + num_imgs)
#loss_wh = F.l1_loss(pos_wh_preds, pos_wh_targets, reduction='sum') / (num_center + num_imgs)
#loss_wh = 0.1 * loss_wh
loss_offset = self.loss_offset(pos_offset_preds, pos_offset_targets, avg_factor=num_center + num_imgs)
else:
loss_wh = pos_wh_preds.sum()
loss_offset = pos_offset_preds.sum()
return dict(
loss_hm = loss_hm,
loss_wh = loss_wh,
loss_offset = loss_offset)
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
            0, w * stride, stride, dtype=dtype, device=device)  # sample x coordinates at the given stride
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range)  # all grid points of the feature map
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def center_target(self, gt_bboxes_list, gt_labels_list, img_metas, all_level_points):
assert len(self.featmap_sizes) == len(self.regress_ranges)
# get heatmaps and targets of each image
# heatmaps in heatmaps_list: [num_points, 80]
# wh_targets: [num_points, 2] => [batch_size, num_points, 2]
heatmaps_list, wh_targets_list, offset_targets_list = multi_apply(
self.center_target_single,
gt_bboxes_list,
gt_labels_list,
img_metas
)
# split to per img, per level
        num_points = [center.size(0) for center in all_level_points]  # number of points per level; all_level_points e.g. [[12414, 2], ...]
heatmaps_list = [heatmaps.split(num_points, 0) for heatmaps in heatmaps_list]
wh_targets_list = [wh_targets.split(num_points, 0) for wh_targets in wh_targets_list]
offset_targets_list = [offset_targets.split(num_points, 0) for offset_targets in offset_targets_list]
        # concat per level across images, i.e. concatenate entries of the same level  # [(batch_size, featmap_size[i]), ...]
concat_lvl_heatmaps = []
concat_lvl_wh_targets = []
concat_lvl_offset_targets = []
num_levels = len(self.featmap_sizes)
for i in range(num_levels):
concat_lvl_heatmaps.append(
torch.cat([heatmaps[i] for heatmaps in heatmaps_list])) # (num_levels, batch_size * w * h, 80)
concat_lvl_wh_targets.append(
torch.cat(
[wh_targets[i] for wh_targets in wh_targets_list]))
concat_lvl_offset_targets.append(
torch.cat(
[offset_targets[i] for offset_targets in offset_targets_list]))
return concat_lvl_heatmaps, concat_lvl_wh_targets, concat_lvl_offset_targets
def center_target_single(self, gt_bboxes, gt_labels, img_meta):
"""
single image
gt_bboxes:torch.Size([6, 4])
gt_labels:torch.Size([6]) tensor([34, 34, 34, 34, 34, 34], device='cuda:0')
featmap_sizes:(list[tuple]): Multi-level feature map sizes.
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),(512, INF))
"""
# transform the gt_bboxes, gt_labels to numpy
gt_bboxes = gt_bboxes.data.cpu().numpy()
gt_labels = gt_labels.data.cpu().numpy()
#print(gt_bboxes, gt_labels)
num_objs = gt_labels.shape[0]
#print(num_objs)
# heatmaps [level1, level2, level3, level4, level5]
num_levels = len(self.featmap_sizes)
heatmaps_targets = []
wh_targets = []
offset_targets = []
# get the target shape for each image
for i in range(num_levels):
h, w = self.featmap_sizes[i]
hm = np.zeros((self.cls_out_channels, h, w), dtype=np.float32)
heatmaps_targets.append(hm)
wh = np.zeros((h, w, 2), dtype=np.float32)
wh_targets.append(wh)
offset = np.zeros((h, w, 2), dtype=np.float32)
offset_targets.append(offset)
for k in range(num_objs):
bbox = gt_bboxes[k]
cls_id = gt_labels[k]
if img_meta['flipped']:
bbox[[0, 2]] = img_meta['width'] - bbox[[2, 0]] - 1
# condition: in the regress_ranges
origin_h, origin_w = bbox[3] - bbox[1], bbox[2] - bbox[0]
            #max_h_w = max(h, w) / 2
            max_h_w = max(origin_h, origin_w)
            #max_h_w = max(origin_h, origin_w) * 2 # a longest side of 32 would then go to P2
            # decide which level(s) should handle this object based on max_h_w
index_levels = []
#index_level = 0
for i in range(num_levels):
min_regress_distance, max_regress_distance = self.regress_ranges[i]
if not self.use_cross and (max_h_w > min_regress_distance) and (max_h_w <= max_regress_distance):
index_levels.append(i)
break
if self.use_cross:
min_regress_distance = min_regress_distance * 0.8
max_regress_distance = max_regress_distance * 1.3
if (max_h_w > min_regress_distance) and (max_h_w <= max_regress_distance):
index_levels.append(i)
for index_level in index_levels:
output_h, output_w = self.featmap_sizes[index_level]
#print(output_h, output_w)
hm = heatmaps_targets[index_level]
wh = wh_targets[index_level]
offset = offset_targets[index_level]
# c, s is passed by meta
trans_output = get_affine_transform(img_meta['c'], img_meta['s'], 0, [output_w, output_h])
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1) #x1, x2
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
#print(h, w)
                # coordinates are now in the scale of the current level
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
#print(ct)
ct_int = ct.astype(np.int32)
#hm[cls_id, ct_int[1], ct_int[0]] = 1
#if (ct_int[1] - 1) > 0:
# hm[cls_id, ct_int[1] - 1, ct_int[0]] = 0.5
#if (ct_int[0] - 1) > 0:
# hm[cls_id, ct_int[1], ct_int[0] - 1] = 0.5
#if (ct_int[1] + 1) < output_h:
# hm[cls_id, ct_int[1] + 1, ct_int[0]] = 0.5
#if (ct_int[0] + 1) < output_w:
# hm[cls_id, ct_int[1], ct_int[0] + 1] = 0.5
draw_umich_gaussian(hm[cls_id], ct_int, radius)
h, w = 1. * h, 1. * w
offset_count = ct - ct_int # h, w
                    # ct_int gives the location on the feature map: ct_int[1] * output_w + ct_int[0]
                    # TODO: if another object center already falls on this location, it is simply overwritten
                    # supervision targets: the first channel is w, the second channel is h
                    # should these be rescaled relative to the feature map?
# wh[ct_int[1], ct_int[0], 0] = w / output_w# output_h, output_w <= y, x
# wh[ct_int[1], ct_int[0], 1] = h / output_h
# offset[ct_int[1], ct_int[0], 0] = offset_count[0] / output_w
# offset[ct_int[1], ct_int[0], 0] = offset_count[1] / output_h
wh[ct_int[1], ct_int[0], 0] = w * (2 ** index_level) # baseline is P2
wh[ct_int[1], ct_int[0], 1] = h * (2 ** index_level)
offset[ct_int[1], ct_int[0], 0] = offset_count[0] * (2 ** index_level)
                    offset[ct_int[1], ct_int[0], 1] = offset_count[1] * (2 ** index_level)
heatmaps_targets[index_level] = hm
wh_targets[index_level] = wh
offset_targets[index_level] = offset
flatten_heatmaps_targets = [
hm.transpose(1, 2, 0).reshape(-1, self.cls_out_channels)
for hm in heatmaps_targets
]
#for i in range(len(flatten_heatmaps_targets)):
# print(flatten_heatmaps_targets[i].shape)
heatmaps_targets = np.concatenate(flatten_heatmaps_targets, axis=0)
#print(heatmaps_targets.shape) # (13343, 80)
#print(heatmaps_targets)
flatten_wh_targets = [
wh.reshape(-1, 2) for wh in wh_targets
]
wh_targets = np.concatenate(flatten_wh_targets)
flatten_offset_targets = [
offset.reshape(-1, 2) for offset in offset_targets
]
offset_targets = np.concatenate(flatten_offset_targets)
# transform the heatmaps_targets, wh_targets, offset_targets into tensor
heatmaps_targets = torch.from_numpy(np.stack(heatmaps_targets))
heatmaps_targets = torch.tensor(heatmaps_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)
wh_targets = torch.from_numpy(np.stack(wh_targets))
wh_targets = torch.tensor(wh_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)
offset_targets = torch.from_numpy(np.stack(offset_targets))
offset_targets = torch.tensor(offset_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)
return heatmaps_targets, wh_targets, offset_targets
# test use
@force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))
def get_bboxes(self,
cls_scores,
wh_preds,
offset_preds,
img_metas,
cfg):
assert len(cls_scores) == len(wh_preds) == len(offset_preds)
# cls_scores => [num_levels] => [batch featmap] => [batch, 80, h, w]
# wh_preds => [num_levels] => [featmap] => [2, h, w]
# offset_preds => [num_levels] => [featmap] => [2, h, w]
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
result_list = []
#print(cls_scores[0].shape) # torch.Size([1, 80, 84, 56])
#print(img_metas)
        for img_id in range(len(img_metas)):  # each image in the batch
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
] # =>[num_levels] => [80, h, w]
wh_pred_list = [
wh_preds[i][img_id].detach() for i in range(num_levels)
]
offset_pred_list = [
offset_preds[i][img_id].detach() for i in range(num_levels)
]
#img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
c = img_metas[img_id]['c']
s = img_metas[img_id]['s']
det_bboxes = self.get_bboxes_single(cls_score_list, wh_pred_list,
offset_pred_list,
featmap_sizes, c, s,
                                                 scale_factor, cfg)  # decode boxes for each image separately
result_list.append(det_bboxes)
return result_list # [batch_size]
def get_bboxes_single(self,
cls_scores,
wh_preds,
offset_preds,
featmap_sizes,
c,
s,
scale_factor,
cfg):
assert len(cls_scores) == len(wh_preds) == len(offset_preds) == len(featmap_sizes)
detections = []
for cls_score, wh_pred, offset_pred, featmap_size in zip(
                cls_scores, wh_preds, offset_preds, featmap_sizes):  # iterate over the points of each level
assert cls_score.size()[-2:] == wh_pred.size()[-2:] == offset_pred.size()[-2:] == featmap_size
output_h, output_w = featmap_size
            # recover the FPN level index (assumes a 512 input and a stride-4 base level)
            index_level = int(math.log2((512 / 4) / output_h))
            wh_pred = wh_pred / (2 ** index_level)
            offset_pred = offset_pred / (2 ** index_level)
            # this effectively gives the hm, wh and offset predictions of each level
            hm = torch.clamp(cls_score.sigmoid_(), min=1e-4, max=1-1e-4).unsqueeze(0)  # add a batch dimension
#wh_pred[0, :, :] = wh_pred[0, :, :] * output_w
#wh_pred[1, :, :] = wh_pred[1, :, :] * output_h # 2, output_h, output_w
            wh = wh_pred.unsqueeze(0)  # needs to be scaled to the feature-map scale here
#offset_pred[0, : ,:] = offset_pred[0, : ,:] * output_w
#offset_pred[1, : ,:] = offset_pred[1, : ,:] * output_h
reg = offset_pred.unsqueeze(0)
dets = ctdet_decode(hm, wh, reg=reg, K=40)
dets = post_process(dets, c, s, output_h, output_w, scale=scale_factor, num_classes=self.num_classes)
detections.append(dets)
        results = merge_outputs(detections, self.num_classes)  # merged results for a single image
return results
#num_classes = 80
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
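# gaussian_radius follows the CornerNet derivation: each of the three quadratics bounds the
# corner shift so that a box perturbed by up to r still overlaps the ground truth with
# IoU >= min_overlap; the smallest of the three radii is returned.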
def gaussian_small_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 - sq1) / (2 * a1)
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 - sq2) / (2 * a2)
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / (2 * a3)
return min(r1, r2, r3)
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=40):
batch, cat, height, width = heat.size() # 1, 80, 128, 128
#print("batch, cat, height, width\n", batch, cat, height, width)
if height * width <= K:
K = height * width
#print("k:", K)
heat = _nms(heat)
scores, inds, clses, ys, xs = _topk(heat, K=K)
if reg is not None:
reg = _tranpose_and_gather_feat(reg, inds)
reg = reg.view(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1) + 0.5
ys = ys.view(batch, K, 1) + 0.5
    wh = _tranpose_and_gather_feat(wh, inds)  # inds index into the h*w grid
if cat_spec_wh:
wh = wh.view(batch, K, cat, 2)
clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
wh = wh.gather(2, clses_ind).view(batch, K, 2)
else:
wh = wh.view(batch, K, 2)
clses = clses.view(batch, K, 1).float()
scores = scores.view(batch, K, 1) # 0, 1, 2
bboxes = torch.cat([xs - wh[..., 0:1] / 2,
ys - wh[..., 1:2] / 2,
xs + wh[..., 0:1] / 2,
ys + wh[..., 1:2] / 2], dim=2)
detections = torch.cat([bboxes, scores, clses], dim=2)
return detections
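# A minimal usage sketch, assuming this module's torch import: ctdet_decode on random
# stand-in outputs. heat plays the role of the post-sigmoid class heatmap.
def _example_ctdet_decode(batch=1, num_classes=80, h=128, w=128):
    heat = torch.rand(batch, num_classes, h, w)   # stand-in class heatmap in (0, 1)
    wh = torch.rand(batch, 2, h, w) * 10.0        # stand-in width/height predictions
    reg = torch.rand(batch, 2, h, w)              # stand-in sub-pixel offsets
    dets = ctdet_decode(heat, wh, reg=reg, K=100)
    return dets  # (batch, K, 6): [x1, y1, x2, y2, score, class] on the feature-map scale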
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _tranpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
def _topk(scores, K=40):
batch, cat, height, width = scores.size() # 1, 80,height, width
#print("batch, cat, height, width\n", batch, cat, height, width)
#print("k:", K)
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float() # y-> h, x-> w
topk_xs = (topk_inds % width).int().float()
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(
topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
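# A minimal usage sketch of _topk above: the second top-k runs over the flattened
# (cat * K) per-class candidates, so the class id is recovered as topk_ind / K.
def _example_topk():
    scores = torch.zeros(1, 3, 8, 8)
    scores[0, 1, 4, 5] = 0.9  # strongest peak: class 1 at (y=4, x=5)
    scores[0, 2, 2, 3] = 0.7  # second peak: class 2 at (y=2, x=3)
    score, inds, clses, ys, xs = _topk(scores, K=4)
    return clses[0, 0], ys[0, 0], xs[0, 0]  # -> (1, 4., 5.)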
def post_process(dets, c, s, out_height, out_width, scale, num_classes):
dets = dets.detach().cpu().numpy()
# print("dets", dets) # (1, 100, 6)
dets = dets.reshape(1, -1, dets.shape[2]) # (x1, y1, x2, y2)
dets = ctdet_post_process(
dets.copy(), [c], [s],
out_height, out_width, num_classes)
for j in range(1, num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
def ctdet_post_process(dets, c, s, h, w, num_classes):
ret = []
# print(dets.shape) # (1, 100, 6)
# print(c)
for i in range(dets.shape[0]):
top_preds = {}
dets[i, :, :2] = transform_preds(
dets[i, :, 0:2], c[i], s[i], (w, h))
dets[i, :, 2:4] = transform_preds(
dets[i, :, 2:4], c[i], s[i], (w, h))
        classes = dets[i, :, -1] # 80 classes here
for j in range(num_classes):
inds = (classes == j)
top_preds[j + 1] = np.concatenate([
dets[i, inds, :4].astype(np.float32),
                dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist() # group the boxes by class
ret.append(top_preds)
return ret
def merge_outputs(detections, num_classes):
# print(detections)
results = {}
max_per_image = 100
for j in range(1, num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
# if len(self.scales) > 1 or self.opt.nms:
results[j] = soft_nms(results[j], Nt=0.5, method=2, threshold=0.01)
# print(results)
scores = np.hstack([results[j][:, 4] for j in range(1, num_classes + 1)])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
# print("after merge out\n", results)
return results2coco_boxes(results, num_classes)
def results2coco_boxes(results, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 5)
labels (Tensor): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): bbox results of each class
"""
bboxes = [0 for i in range(num_classes)]
for j in range(1, num_classes + 1):
if len(results[j]) == 0:
bboxes[j - 1] = np.zeros((0, 5), dtype=np.float32)
continue
bboxes[j - 1] = results[j]
# print(bboxes) # xyxy
return bboxes
def soft_nms(boxes, sigma=0.5, Nt=0.3, threshold=0.01, method=0):
N = boxes.shape[0]
# cdef float iw, ih, box_area
# cdef float ua
# cdef int
pos = 0
# cdef float
maxscore = 0
# cdef int
maxpos = 0
# cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
for i in range(N):
maxscore = boxes[i, 4]
maxpos = i
tx1 = boxes[i,0]
ty1 = boxes[i,1]
tx2 = boxes[i,2]
ty2 = boxes[i,3]
ts = boxes[i,4]
pos = i + 1
# get max box
while pos < N:
if maxscore < boxes[pos, 4]:
maxscore = boxes[pos, 4]
maxpos = pos
pos = pos + 1
# add max box as a detection
boxes[i,0] = boxes[maxpos,0]
boxes[i,1] = boxes[maxpos,1]
boxes[i,2] = boxes[maxpos,2]
boxes[i,3] = boxes[maxpos,3]
boxes[i,4] = boxes[maxpos,4]
# swap ith box with position of max box
boxes[maxpos,0] = tx1
boxes[maxpos,1] = ty1
boxes[maxpos,2] = tx2
boxes[maxpos,3] = ty2
boxes[maxpos,4] = ts
tx1 = boxes[i,0]
ty1 = boxes[i,1]
tx2 = boxes[i,2]
ty2 = boxes[i,3]
ts = boxes[i,4]
pos = i + 1
# NMS iterations, note that N changes if detection boxes fall below threshold
while pos < N:
x1 = boxes[pos, 0]
y1 = boxes[pos, 1]
x2 = boxes[pos, 2]
y2 = boxes[pos, 3]
s = boxes[pos, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
iw = (min(tx2, x2) - max(tx1, x1) + 1)
if iw > 0:
ih = (min(ty2, y2) - max(ty1, y1) + 1)
if ih > 0:
ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
ov = iw * ih / ua #iou between max box and detection box
if method == 1: # linear
if ov > Nt:
weight = 1 - ov
else:
weight = 1
elif method == 2: # gaussian
weight = np.exp(-(ov * ov)/sigma)
else: # original NMS
if ov > Nt:
weight = 0
else:
weight = 1
boxes[pos, 4] = weight*boxes[pos, 4]
# if box score falls below threshold, discard the box by swapping with last box
# update N
if boxes[pos, 4] < threshold:
boxes[pos,0] = boxes[N-1, 0]
boxes[pos,1] = boxes[N-1, 1]
boxes[pos,2] = boxes[N-1, 2]
boxes[pos,3] = boxes[N-1, 3]
boxes[pos,4] = boxes[N-1, 4]
N = N - 1
pos = pos - 1
pos = pos + 1
keep = [i for i in range(N)]
boxes = boxes[keep]
return boxes
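# A minimal usage sketch of soft_nms above on hand-made boxes [x1, y1, x2, y2, score];
# method=2 is the Gaussian variant that merge_outputs in this file uses.
def _example_soft_nms():
    boxes = np.array([
        [10., 10., 50., 50., 0.9],
        [12., 12., 52., 52., 0.8],      # heavy overlap with the first box -> score decayed
        [100., 100., 140., 140., 0.7],  # far away -> score kept
    ], dtype=np.float32)
    return soft_nms(boxes, Nt=0.5, method=2, threshold=0.01)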
def transform_preds(coords, center, scale, output_size):
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
| [
"torch.cat",
"torch.nn.ModuleList",
"torch.meshgrid",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.arange",
"torch.nn.functional.max_pool2d"
] | 1.0 | lizhe960118/CenterNet | d1a0d13974e2316c6d127ca7860866cdd93bcfa7 |
1.0 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob, Scale, ConvModule
INF = 1e8
@HEADS.register_module
class FCOSHead(nn.Module):
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(FCOSHead, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_centerness = build_loss(loss_centerness)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fcos_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.fcos_centerness, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.fcos_cls(cls_feat)
centerness = self.fcos_centerness(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
return cls_score, bbox_pred, centerness
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
            avg_factor=num_pos + num_imgs)  # avoid num_pos being 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
pos_centerness_targets = self.centerness_target(pos_bbox_targets)
if num_pos > 0:
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
# centerness weighted iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_centerness=loss_centerness)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
centerness_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
centernesses,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
mlvl_centerness = []
for cls_score, bbox_pred, centerness, points in zip(
cls_scores, bbox_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
centerness = centerness[topk_inds]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
mlvl_centerness = torch.cat(mlvl_centerness)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness)
return det_bboxes, det_labels
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
#for i in range(num_levels):
# print(i)
# print("points.shape:",points[i].shape) # torch.Size([15200, 2])
# print("regress_ranges shape:", self.regress_ranges[i])
# print("expanded_regress_ranges shape", expanded_regress_ranges[i].shape)
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
#print("concat_regress_ranges.shape", concat_regress_ranges.shape)
#print("concat_points.shape", concat_points.shape)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
print("gt_bboxes", gt_bboxes.shape)
print("gt_labels", gt_labels.shape)
print("points", points.shape)
print(points[:10])
print("regress_ranges", regress_ranges.shape)
print(regress_ranges[:10])
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
def centerness_target(self, pos_bbox_targets):
# only calculate pos centerness targets, otherwise there may be nan
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness_targets = (
left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness_targets)
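# A minimal worked example of the centerness target above: for a location whose
# (left, top, right, bottom) regression targets are (3, 8, 9, 4) the target is
# sqrt((3 / 9) * (4 / 8)) ~= 0.41; it reaches 1 only at the exact box center.
def _example_centerness_target():
    targets = torch.tensor([[3.0, 8.0, 9.0, 4.0]])  # (left, top, right, bottom)
    lr, tb = targets[:, [0, 2]], targets[:, [1, 3]]
    return torch.sqrt((lr.min(-1)[0] / lr.max(-1)[0]) * (tb.min(-1)[0] / tb.max(-1)[0]))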
| [
"torch.cat",
"torch.sqrt",
"torch.stack",
"torch.arange",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.meshgrid"
] | 1.0 | lizhe960118/CenterNet | d1a0d13974e2316c6d127ca7860866cdd93bcfa7 |
1.0 | import torch
import torch.nn as nn
from ..registry import LOSSES
def _neg_loss(pred, gt):
''' Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred (batch x c x h x w) => (batch, c, num_points)
      gt (batch x c x h x w)
'''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
neg_weights = torch.pow(1 - gt, 4)
loss = 0
pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
count = int(num_pos.cpu().detach())
if count == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
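# A minimal usage sketch of _neg_loss above on a tiny 4x4 heatmap with one positive:
# predictions must already be probabilities in (0, 1), and every non-peak location is
# down-weighted by (1 - gt)^4 before contributing to the negative term.
def _example_neg_loss():
    gt = torch.zeros(1, 1, 4, 4)
    gt[0, 0, 2, 2] = 1.0                 # a single ground-truth center
    pred = torch.full_like(gt, 0.1)
    pred[0, 0, 2, 2] = 0.9               # confident at the center, low elsewhere
    return _neg_loss(pred, gt)           # scalar loss, normalized by the one positive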
@LOSSES.register_module
class CenterFocalLoss(nn.Module):
    '''nn.Module wrapper for focal loss'''
    def __init__(self):
        super(CenterFocalLoss, self).__init__()
        self.neg_loss = _neg_loss
    def forward(self, out, target):
        return self.neg_loss(out, target) | [
"torch.log",
"torch.pow"
] | 1.0 | lizhe960118/CenterNet | d1a0d13974e2316c6d127ca7860866cdd93bcfa7 |
1.0 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
import numpy as np
import cv2
import math
#import torch.nn.functional as F
from mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob, Scale, ConvModule
INF = 1e8
@HEADS.register_module
class CenterHead(nn.Module):
def __init__(self,
num_classes, # init 80
in_channels,
feat_channels=256,
stacked_convs=1,
strides=(4, 8, 16, 32, 64),
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
use_cross = False,
loss_hm = dict(
type="CenterFocalLoss"
                 ), # implemented by CenterFocalLoss
loss_wh = dict(
type="L1Loss",
loss_weight=0.1
),
loss_offset = dict(
type="L1Loss",
loss_weight=1.0
),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(CenterHead, self).__init__()
self.num_classes = num_classes
# self.cls_out_channels = num_classes - 1
self.cls_out_channels = num_classes
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.featmap_sizes = None
self.loss_hm = build_loss(loss_hm)
self.loss_wh = build_loss(loss_wh)
self.loss_offset = build_loss(loss_offset)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.use_cross = use_cross
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.wh_convs = nn.ModuleList()
self.offset_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.wh_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.offset_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.center_hm = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1, bias=True)
self.center_wh = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)
self.center_offset = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
# for m in self.cls_convs:
# normal_init(m.conv, std=0.01)
# for m in self.wh_convs:
# normal_init(m.conv, std=0.01)
# for m in self.offset_convs:
# normal_init(m.conv, std=0.01)
        #bias_hm = bias_init_with_prob(0.01) # how should this be initialized?
#normal_init(self.center_hm, std=0.01, bias=bias_hm)
self.center_hm.bias.data.fill_(-2.19)
nn.init.constant_(self.center_wh.bias, 0)
nn.init.constant_(self.center_offset.bias, 0)
# normal_init(self.center_hm, std=0.01)
# normal_init(self.center_wh, std=0.01)
# normal_init(self.center_offset, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
wh_feat = x
offset_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.center_hm(cls_feat)
for wh_layer in self.wh_convs:
wh_feat = wh_layer(wh_feat)
wh_pred = self.center_wh(wh_feat)
for offset_layer in self.offset_convs:
offset_feat = offset_layer(offset_feat)
offset_pred = self.center_offset(offset_feat)
return cls_score, wh_pred, offset_pred
@force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))
def loss(self,
cls_scores,
wh_preds,
offset_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(wh_preds) == len(offset_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
self.featmap_sizes = featmap_sizes
all_level_points = self.get_points(featmap_sizes, offset_preds[0].dtype,
offset_preds[0].device)
#print(img_metas)
#self.c = img_metas['c']
#self.s = img_metas['s']
self.tensor_dtype = offset_preds[0].dtype
self.tensor_device = offset_preds[0].device
        heatmaps, wh_targets, offset_targets = self.center_target(gt_bboxes, gt_labels, img_metas, all_level_points) # concatenated over all levels, one target set per image
num_imgs = cls_scores[0].size(0) # batch_size
#print(num_imgs)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
] # cls_scores(num_levels, batch_size, 80, h, w) => (num_levels, batch_size * w * h, 80)
flatten_wh_preds = [
            wh_pred.permute(0, 2, 3, 1).reshape(-1, 2) # (batch, 2, h, w) => (batch * h * w, 2)
for wh_pred in wh_preds
]
flatten_offset_preds = [
offset_pred.permute(0, 2, 3, 1).reshape(-1, 2)
for offset_pred in offset_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_wh_preds = torch.cat(flatten_wh_preds)
flatten_offset_preds = torch.cat(flatten_offset_preds)
# targets
flatten_heatmaps = torch.cat(heatmaps)
flatten_wh_targets = torch.cat(wh_targets) # torch.Size([all_level_points, 2])
flatten_offset_targets = torch.cat(offset_targets)
# repeat points to align with bbox_preds
# flatten_points = torch.cat(
# [points.repeat(num_imgs, 1) for points in all_level_points])
# pos_inds = flatten_labels.nonzero().reshape(-1)
#print(flatten_wh_targets.shape)
#print(flatten_wh_targets.nonzero())
center_inds = flatten_wh_targets[...,0].nonzero().reshape(-1)
#print(center_inds)
num_center = len(center_inds)
#print(num_center)
        # what about using the centerness * labels to indicate an object
# loss_cls = self.loss_cls(
# flatten_cls_scores, flatten_labels, # labels gt is small area
# avg_factor=num_pos + num_imgs) # avoid num_pos is 0
flatten_cls_scores = torch.clamp(flatten_cls_scores.sigmoid_(), min=1e-4, max=1-1e-4)
loss_hm = self.loss_hm(flatten_cls_scores, flatten_heatmaps)
pos_wh_targets = flatten_wh_targets[center_inds]
#print(pos_wh_targets.shape)
pos_wh_preds = flatten_wh_preds[center_inds]
pos_offset_preds = flatten_offset_preds[center_inds]
pos_offset_targets = flatten_offset_targets[center_inds]
if num_center > 0:
# TODO: use the iou loss
# center_points = flatten_points[center_inds]
# center_decoded_bbox_preds = wh_offset2bbox(center_points, pos_wh_preds, pos_offset_preds)
# center_decoded_bbox_targets = wh_offset2bbox(center_points, pos_wh_targets, pos_offset_targets)
loss_wh = self.loss_wh(pos_wh_preds, pos_wh_targets, avg_factor=num_center + num_imgs)
#loss_wh = F.l1_loss(pos_wh_preds, pos_wh_targets, reduction='sum') / (num_center + num_imgs)
#loss_wh = 0.1 * loss_wh
loss_offset = self.loss_offset(pos_offset_preds, pos_offset_targets, avg_factor=num_center + num_imgs)
else:
loss_wh = pos_wh_preds.sum()
loss_offset = pos_offset_preds.sum()
return dict(
loss_hm = loss_hm,
loss_wh = loss_wh,
loss_offset = loss_offset)
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
            0, w * stride, stride, dtype=dtype, device=device) # sample x values at stride intervals
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
        y, x = torch.meshgrid(y_range, x_range) # all points of the feature map
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def center_target(self, gt_bboxes_list, gt_labels_list, img_metas, all_level_points):
assert len(self.featmap_sizes) == len(self.regress_ranges)
# get heatmaps and targets of each image
# heatmaps in heatmaps_list: [num_points, 80]
# wh_targets: [num_points, 2] => [batch_size, num_points, 2]
heatmaps_list, wh_targets_list, offset_targets_list = multi_apply(
self.center_target_single,
gt_bboxes_list,
gt_labels_list,
img_metas
)
# split to per img, per level
        num_points = [center.size(0) for center in all_level_points] # number of points at each level; all_level_points like [[12414, 2], ...]
heatmaps_list = [heatmaps.split(num_points, 0) for heatmaps in heatmaps_list]
wh_targets_list = [wh_targets.split(num_points, 0) for wh_targets in wh_targets_list]
offset_targets_list = [offset_targets.split(num_points, 0) for offset_targets in offset_targets_list]
        # concat per level across images (entries of the same level concatenated together) # [(batch_size, featmap_size[i]), ...)
concat_lvl_heatmaps = []
concat_lvl_wh_targets = []
concat_lvl_offset_targets = []
num_levels = len(self.featmap_sizes)
for i in range(num_levels):
concat_lvl_heatmaps.append(
torch.cat([heatmaps[i] for heatmaps in heatmaps_list])) # (num_levels, batch_size * w * h, 80)
concat_lvl_wh_targets.append(
torch.cat(
[wh_targets[i] for wh_targets in wh_targets_list]))
concat_lvl_offset_targets.append(
torch.cat(
[offset_targets[i] for offset_targets in offset_targets_list]))
return concat_lvl_heatmaps, concat_lvl_wh_targets, concat_lvl_offset_targets
def center_target_single(self, gt_bboxes, gt_labels, img_meta):
"""
single image
gt_bboxes:torch.Size([6, 4])
gt_labels:torch.Size([6]) tensor([34, 34, 34, 34, 34, 34], device='cuda:0')
featmap_sizes:(list[tuple]): Multi-level feature map sizes.
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),(512, INF))
"""
# transform the gt_bboxes, gt_labels to numpy
gt_bboxes = gt_bboxes.data.cpu().numpy()
gt_labels = gt_labels.data.cpu().numpy()
#print(gt_bboxes, gt_labels)
num_objs = gt_labels.shape[0]
#print(num_objs)
# heatmaps [level1, level2, level3, level4, level5]
num_levels = len(self.featmap_sizes)
heatmaps_targets = []
wh_targets = []
offset_targets = []
# get the target shape for each image
for i in range(num_levels):
h, w = self.featmap_sizes[i]
hm = np.zeros((self.cls_out_channels, h, w), dtype=np.float32)
heatmaps_targets.append(hm)
wh = np.zeros((h, w, 2), dtype=np.float32)
wh_targets.append(wh)
offset = np.zeros((h, w, 2), dtype=np.float32)
offset_targets.append(offset)
for k in range(num_objs):
bbox = gt_bboxes[k]
cls_id = gt_labels[k]
if img_meta['flipped']:
bbox[[0, 2]] = img_meta['width'] - bbox[[2, 0]] - 1
# condition: in the regress_ranges
origin_h, origin_w = bbox[3] - bbox[1], bbox[2] - bbox[0]
#max_h_w = max(h, w) / 2
max_h_w = max(origin_h, origin_w)
            #max_h_w = max(origin_h, origin_w) * 2 # a longest side of 32 would land on P2
            # pick the output level for this object according to max_h_w
index_levels = []
#index_level = 0
for i in range(num_levels):
min_regress_distance, max_regress_distance = self.regress_ranges[i]
if not self.use_cross and (max_h_w > min_regress_distance) and (max_h_w <= max_regress_distance):
index_levels.append(i)
break
if self.use_cross:
min_regress_distance = min_regress_distance * 0.8
max_regress_distance = max_regress_distance * 1.3
if (max_h_w > min_regress_distance) and (max_h_w <= max_regress_distance):
index_levels.append(i)
for index_level in index_levels:
output_h, output_w = self.featmap_sizes[index_level]
#print(output_h, output_w)
hm = heatmaps_targets[index_level]
wh = wh_targets[index_level]
offset = offset_targets[index_level]
# c, s is passed by meta
trans_output = get_affine_transform(img_meta['c'], img_meta['s'], 0, [output_w, output_h])
bbox[:2] = affine_transform(bbox[:2], trans_output)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1) #x1, x2
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
#print(h, w)
                # transform to the current level
if h > 0 and w > 0:
radius = gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
#print(ct)
ct_int = ct.astype(np.int32)
#hm[cls_id, ct_int[1], ct_int[0]] = 1
#if (ct_int[1] - 1) > 0:
# hm[cls_id, ct_int[1] - 1, ct_int[0]] = 0.5
#if (ct_int[0] - 1) > 0:
# hm[cls_id, ct_int[1], ct_int[0] - 1] = 0.5
#if (ct_int[1] + 1) < output_h:
# hm[cls_id, ct_int[1] + 1, ct_int[0]] = 0.5
#if (ct_int[0] + 1) < output_w:
# hm[cls_id, ct_int[1], ct_int[0] + 1] = 0.5
draw_umich_gaussian(hm[cls_id], ct_int, radius)
h, w = 1. * h, 1. * w
offset_count = ct - ct_int # h, w
                    # ct_int is the location on the featmap, i.e. index ct_int[1] * output_w + ct_int[0]
                    # TODO: if another object center already occupies this location it is simply overwritten
                    # supervision signal: channel 0 stores w, channel 1 stores h
                    # should the targets be rescaled to the featmap size here?
# wh[ct_int[1], ct_int[0], 0] = w / output_w# output_h, output_w <= y, x
# wh[ct_int[1], ct_int[0], 1] = h / output_h
# offset[ct_int[1], ct_int[0], 0] = offset_count[0] / output_w
# offset[ct_int[1], ct_int[0], 0] = offset_count[1] / output_h
wh[ct_int[1], ct_int[0], 0] = w
wh[ct_int[1], ct_int[0], 1] = h
offset[ct_int[1], ct_int[0], 0] = offset_count[0]
                    offset[ct_int[1], ct_int[0], 1] = offset_count[1]  # y-offset goes into channel 1
heatmaps_targets[index_level] = hm
wh_targets[index_level] = wh
offset_targets[index_level] = offset
flatten_heatmaps_targets = [
hm.transpose(1, 2, 0).reshape(-1, self.cls_out_channels)
for hm in heatmaps_targets
]
#for i in range(len(flatten_heatmaps_targets)):
# print(flatten_heatmaps_targets[i].shape)
heatmaps_targets = np.concatenate(flatten_heatmaps_targets, axis=0)
#print(heatmaps_targets.shape) # (13343, 80)
#print(heatmaps_targets)
flatten_wh_targets = [
wh.reshape(-1, 2) for wh in wh_targets
]
wh_targets = np.concatenate(flatten_wh_targets)
flatten_offset_targets = [
offset.reshape(-1, 2) for offset in offset_targets
]
offset_targets = np.concatenate(flatten_offset_targets)
# transform the heatmaps_targets, wh_targets, offset_targets into tensor
heatmaps_targets = torch.from_numpy(np.stack(heatmaps_targets))
heatmaps_targets = torch.tensor(heatmaps_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)
wh_targets = torch.from_numpy(np.stack(wh_targets))
wh_targets = torch.tensor(wh_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)
offset_targets = torch.from_numpy(np.stack(offset_targets))
offset_targets = torch.tensor(offset_targets.detach(), dtype=self.tensor_dtype, device=self.tensor_device)
return heatmaps_targets, wh_targets, offset_targets
# test use
@force_fp32(apply_to=('cls_scores', 'wh_preds', 'offset_preds'))
def get_bboxes(self,
cls_scores,
wh_preds,
offset_preds,
img_metas,
cfg):
assert len(cls_scores) == len(wh_preds) == len(offset_preds)
# cls_scores => [num_levels] => [batch featmap] => [batch, 80, h, w]
# wh_preds => [num_levels] => [featmap] => [2, h, w]
# offset_preds => [num_levels] => [featmap] => [2, h, w]
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
result_list = []
#print(cls_scores[0].shape) # torch.Size([1, 80, 84, 56])
#print(img_metas)
        for img_id in range(len(img_metas)): # each image in the batch
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
] # =>[num_levels] => [80, h, w]
wh_pred_list = [
wh_preds[i][img_id].detach() for i in range(num_levels)
]
offset_pred_list = [
offset_preds[i][img_id].detach() for i in range(num_levels)
]
#img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
c = img_metas[img_id]['c']
s = img_metas[img_id]['s']
det_bboxes = self.get_bboxes_single(cls_score_list, wh_pred_list,
offset_pred_list,
featmap_sizes, c, s,
                                                scale_factor, cfg) # decode each image separately
result_list.append(det_bboxes)
return result_list # [batch_size]
def get_bboxes_single(self,
cls_scores,
wh_preds,
offset_preds,
featmap_sizes,
c,
s,
scale_factor,
cfg):
assert len(cls_scores) == len(wh_preds) == len(offset_preds) == len(featmap_sizes)
detections = []
for cls_score, wh_pred, offset_pred, featmap_size in zip(
                cls_scores, wh_preds, offset_preds, featmap_sizes): # iterate over the predictions of each level
assert cls_score.size()[-2:] == wh_pred.size()[-2:] == offset_pred.size()[-2:] == featmap_size
output_h, output_w = featmap_size
            # this actually yields the hm, wh, offset of each level
            hm = torch.clamp(cls_score.sigmoid_(), min=1e-4, max=1-1e-4).unsqueeze(0) # add a batch dimension
#wh_pred[0, :, :] = wh_pred[0, :, :] * output_w
#wh_pred[1, :, :] = wh_pred[1, :, :] * output_h # 2, output_h, output_w
            wh = wh_pred.unsqueeze(0) # would need to be multiplied by the featmap scale if normalized targets were used
#offset_pred[0, : ,:] = offset_pred[0, : ,:] * output_w
#offset_pred[1, : ,:] = offset_pred[1, : ,:] * output_h
reg = offset_pred.unsqueeze(0)
dets = ctdet_decode(hm, wh, reg=reg, K=100)
dets = post_process(dets, c, s, output_h, output_w, scale=scale_factor, num_classes=self.num_classes)
detections.append(dets)
        results = merge_outputs(detections, self.num_classes) # results for a single image
return results
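# A minimal sketch of the level-assignment rule used in center_target_single above
# (without use_cross): an object is routed to the pyramid level whose regress range
# contains its longest side, e.g. a 100 px box falls into (64, 128], i.e. level 1.
def _example_level_assignment(max_h_w=100.0,
                              regress_ranges=((-1, 64), (64, 128), (128, 256),
                                              (256, 512), (512, INF))):
    for level, (low, high) in enumerate(regress_ranges):
        if low < max_h_w <= high:
            return level  # -> 1 for max_h_w == 100
    return None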
#num_classes = 80
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
def gaussian_small_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 - sq1) / (2 * a1)
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 - sq2) / (2 * a2)
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = np.sqrt(b3 ** 2 - 4 * a3 * c3)
r3 = (b3 + sq3) / (2 * a3)
return min(r1, r2, r3)
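# A minimal usage sketch of the two radius helpers above: both map a (height, width)
# box to a Gaussian radius meant to keep boxes whose centers fall inside that radius
# above min_overlap IoU; callers later truncate the value with max(0, int(radius)).
def _example_gaussian_radius(height=24.0, width=32.0):
    return gaussian_radius((height, width)), gaussian_small_radius((height, width))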
def gaussian2D(shape, sigma=1):
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_umich_gaussian(heatmap, center, radius, k=1):
diameter = 2 * radius + 1
gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian[radius - top:radius + bottom, radius - left:radius + right]
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def ctdet_decode(heat, wh, reg=None, cat_spec_wh=False, K=40):
batch, cat, height, width = heat.size() # 1, 80, 128, 128
#print("batch, cat, height, width\n", batch, cat, height, width)
if height * width <= K:
K = height * width
#print("k:", K)
heat = _nms(heat)
scores, inds, clses, ys, xs = _topk(heat, K=K)
if reg is not None:
reg = _tranpose_and_gather_feat(reg, inds)
reg = reg.view(batch, K, 2)
xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
else:
xs = xs.view(batch, K, 1) + 0.5
ys = ys.view(batch, K, 1) + 0.5
    wh = _tranpose_and_gather_feat(wh, inds) # inds index locations at the h, w feature-map scale
if cat_spec_wh:
wh = wh.view(batch, K, cat, 2)
clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
wh = wh.gather(2, clses_ind).view(batch, K, 2)
else:
wh = wh.view(batch, K, 2)
clses = clses.view(batch, K, 1).float()
scores = scores.view(batch, K, 1) # 0, 1, 2
bboxes = torch.cat([xs - wh[..., 0:1] / 2,
ys - wh[..., 1:2] / 2,
xs + wh[..., 0:1] / 2,
ys + wh[..., 1:2] / 2], dim=2)
detections = torch.cat([bboxes, scores, clses], dim=2)
return detections
def _nms(heat, kernel=3):
pad = (kernel - 1) // 2
hmax = nn.functional.max_pool2d(
heat, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def _gather_feat(feat, ind, mask=None):
dim = feat.size(2)
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _tranpose_and_gather_feat(feat, ind):
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = _gather_feat(feat, ind)
return feat
def _topk(scores, K=40):
batch, cat, height, width = scores.size() # 1, 80,height, width
#print("batch, cat, height, width\n", batch, cat, height, width)
#print("k:", K)
topk_scores, topk_inds = torch.topk(scores.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float() # y-> h, x-> w
topk_xs = (topk_inds % width).int().float()
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
topk_clses = (topk_ind / K).int()
topk_inds = _gather_feat(
topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
def post_process(dets, c, s, out_height, out_width, scale, num_classes):
dets = dets.detach().cpu().numpy()
# print("dets", dets) # (1, 100, 6)
dets = dets.reshape(1, -1, dets.shape[2]) # (x1, y1, x2, y2)
dets = ctdet_post_process(
dets.copy(), [c], [s],
out_height, out_width, num_classes)
for j in range(1, num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
def ctdet_post_process(dets, c, s, h, w, num_classes):
ret = []
# print(dets.shape) # (1, 100, 6)
# print(c)
for i in range(dets.shape[0]):
top_preds = {}
dets[i, :, :2] = transform_preds(
dets[i, :, 0:2], c[i], s[i], (w, h))
dets[i, :, 2:4] = transform_preds(
dets[i, :, 2:4], c[i], s[i], (w, h))
        classes = dets[i, :, -1] # 80 classes here
for j in range(num_classes):
inds = (classes == j)
top_preds[j + 1] = np.concatenate([
dets[i, inds, :4].astype(np.float32),
                dets[i, inds, 4:5].astype(np.float32)], axis=1).tolist() # group the boxes by class
ret.append(top_preds)
return ret
def merge_outputs(detections, num_classes):
# print(detections)
results = {}
max_per_image = 100
for j in range(1, num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
# if len(self.scales) > 1 or self.opt.nms:
results[j] = soft_nms(results[j], Nt=0.5, method=2, threshold=0.01)
# print(results)
scores = np.hstack([results[j][:, 4] for j in range(1, num_classes + 1)])
if len(scores) > max_per_image:
kth = len(scores) - max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, num_classes + 1):
keep_inds = (results[j][:, 4] >= thresh)
results[j] = results[j][keep_inds]
# print("after merge out\n", results)
return results2coco_boxes(results, num_classes)
def results2coco_boxes(results, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 5)
labels (Tensor): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): bbox results of each class
"""
bboxes = [0 for i in range(num_classes)]
for j in range(1, num_classes + 1):
if len(results[j]) == 0:
bboxes[j - 1] = np.zeros((0, 5), dtype=np.float32)
continue
bboxes[j - 1] = results[j]
# print(bboxes) # xyxy
return bboxes
def soft_nms(boxes, sigma=0.5, Nt=0.3, threshold=0.01, method=0):
N = boxes.shape[0]
# cdef float iw, ih, box_area
# cdef float ua
# cdef int
pos = 0
# cdef float
maxscore = 0
# cdef int
maxpos = 0
# cdef float x1,x2,y1,y2,tx1,tx2,ty1,ty2,ts,area,weight,ov
for i in range(N):
maxscore = boxes[i, 4]
maxpos = i
tx1 = boxes[i,0]
ty1 = boxes[i,1]
tx2 = boxes[i,2]
ty2 = boxes[i,3]
ts = boxes[i,4]
pos = i + 1
# get max box
while pos < N:
if maxscore < boxes[pos, 4]:
maxscore = boxes[pos, 4]
maxpos = pos
pos = pos + 1
# add max box as a detection
boxes[i,0] = boxes[maxpos,0]
boxes[i,1] = boxes[maxpos,1]
boxes[i,2] = boxes[maxpos,2]
boxes[i,3] = boxes[maxpos,3]
boxes[i,4] = boxes[maxpos,4]
# swap ith box with position of max box
boxes[maxpos,0] = tx1
boxes[maxpos,1] = ty1
boxes[maxpos,2] = tx2
boxes[maxpos,3] = ty2
boxes[maxpos,4] = ts
tx1 = boxes[i,0]
ty1 = boxes[i,1]
tx2 = boxes[i,2]
ty2 = boxes[i,3]
ts = boxes[i,4]
pos = i + 1
# NMS iterations, note that N changes if detection boxes fall below threshold
while pos < N:
x1 = boxes[pos, 0]
y1 = boxes[pos, 1]
x2 = boxes[pos, 2]
y2 = boxes[pos, 3]
s = boxes[pos, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
iw = (min(tx2, x2) - max(tx1, x1) + 1)
if iw > 0:
ih = (min(ty2, y2) - max(ty1, y1) + 1)
if ih > 0:
ua = float((tx2 - tx1 + 1) * (ty2 - ty1 + 1) + area - iw * ih)
ov = iw * ih / ua #iou between max box and detection box
if method == 1: # linear
if ov > Nt:
weight = 1 - ov
else:
weight = 1
elif method == 2: # gaussian
weight = np.exp(-(ov * ov)/sigma)
else: # original NMS
if ov > Nt:
weight = 0
else:
weight = 1
boxes[pos, 4] = weight*boxes[pos, 4]
# if box score falls below threshold, discard the box by swapping with last box
# update N
if boxes[pos, 4] < threshold:
boxes[pos,0] = boxes[N-1, 0]
boxes[pos,1] = boxes[N-1, 1]
boxes[pos,2] = boxes[N-1, 2]
boxes[pos,3] = boxes[N-1, 3]
boxes[pos,4] = boxes[N-1, 4]
N = N - 1
pos = pos - 1
pos = pos + 1
keep = [i for i in range(N)]
boxes = boxes[keep]
return boxes
def transform_preds(coords, center, scale, output_size):
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
| [
"torch.cat",
"torch.nn.ModuleList",
"torch.meshgrid",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.arange",
"torch.nn.functional.max_pool2d"
] | 1.0 | lizhe960118/CenterNet | d1a0d13974e2316c6d127ca7860866cdd93bcfa7 |
3 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from common_testing import TestCaseMixin, get_random_cuda_device
from pytorch3d.ops import cot_laplacian, laplacian, norm_laplacian
from pytorch3d.structures.meshes import Meshes
class TestLaplacianMatrices(TestCaseMixin, unittest.TestCase):
def setUp(self) -> None:
super().setUp()
torch.manual_seed(1)
def init_mesh(self) -> Meshes:
V, F = 32, 64
device = get_random_cuda_device()
# random vertices
verts = torch.rand((V, 3), dtype=torch.float32, device=device)
        # random valid faces (no degenerate faces such as (v0, v0, v1))
faces = torch.stack([torch.randperm(V) for f in range(F)], dim=0)[:, :3]
faces = faces.to(device=device)
return Meshes(verts=[verts], faces=[faces])
def test_laplacian(self):
mesh = self.init_mesh()
verts = mesh.verts_packed()
edges = mesh.edges_packed()
V, E = verts.shape[0], edges.shape[0]
L = laplacian(verts, edges)
Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)
for e in range(E):
e0, e1 = edges[e]
Lnaive[e0, e1] = 1
            # symmetric
Lnaive[e1, e0] = 1
deg = Lnaive.sum(1).view(-1, 1)
deg[deg > 0] = 1.0 / deg[deg > 0]
Lnaive = Lnaive * deg
diag = torch.eye(V, dtype=torch.float32, device=mesh.device)
Lnaive.masked_fill_(diag > 0, -1)
self.assertClose(L.to_dense(), Lnaive)
def test_cot_laplacian(self):
mesh = self.init_mesh()
verts = mesh.verts_packed()
faces = mesh.faces_packed()
V = verts.shape[0]
eps = 1e-12
L, inv_areas = cot_laplacian(verts, faces, eps=eps)
Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)
inv_areas_naive = torch.zeros((V, 1), dtype=torch.float32, device=verts.device)
for f in faces:
v0 = verts[f[0], :]
v1 = verts[f[1], :]
v2 = verts[f[2], :]
A = (v1 - v2).norm()
B = (v0 - v2).norm()
C = (v0 - v1).norm()
s = 0.5 * (A + B + C)
face_area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=1e-12).sqrt()
inv_areas_naive[f[0]] += face_area
inv_areas_naive[f[1]] += face_area
inv_areas_naive[f[2]] += face_area
A2, B2, C2 = A * A, B * B, C * C
cota = (B2 + C2 - A2) / face_area / 4.0
cotb = (A2 + C2 - B2) / face_area / 4.0
cotc = (A2 + B2 - C2) / face_area / 4.0
Lnaive[f[1], f[2]] += cota
Lnaive[f[2], f[0]] += cotb
Lnaive[f[0], f[1]] += cotc
            # symmetric
Lnaive[f[2], f[1]] += cota
Lnaive[f[0], f[2]] += cotb
Lnaive[f[1], f[0]] += cotc
idx = inv_areas_naive > 0
inv_areas_naive[idx] = 1.0 / inv_areas_naive[idx]
self.assertClose(inv_areas, inv_areas_naive)
self.assertClose(L.to_dense(), Lnaive)
def test_norm_laplacian(self):
mesh = self.init_mesh()
verts = mesh.verts_packed()
edges = mesh.edges_packed()
V, E = verts.shape[0], edges.shape[0]
eps = 1e-12
L = norm_laplacian(verts, edges, eps=eps)
Lnaive = torch.zeros((V, V), dtype=torch.float32, device=verts.device)
for e in range(E):
e0, e1 = edges[e]
v0 = verts[e0]
v1 = verts[e1]
w01 = 1.0 / ((v0 - v1).norm() + eps)
Lnaive[e0, e1] += w01
Lnaive[e1, e0] += w01
self.assertClose(L.to_dense(), Lnaive)
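# A minimal sketch, outside the test class above, of the uniform Laplacian that
# test_laplacian verifies: for a single triangle every vertex has degree 2, so the
# dense matrix has 0.5 on connected off-diagonals and -1 on the diagonal.
def _example_uniform_laplacian():
    verts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    edges = torch.tensor([[0, 1], [1, 2], [0, 2]])
    return laplacian(verts, edges).to_dense()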
| [
"torch.zeros",
"torch.rand",
"torch.randperm",
"torch.manual_seed",
"torch.eye"
] | 3 | jkxing/pytorch3d | 71dbebe8010a0dac3e56be464778aa48fbd3bcd3 |
3 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import functools
import unittest
import torch
from common_testing import TestCaseMixin, get_random_cuda_device
from pytorch3d import _C
from pytorch3d.renderer import FoVPerspectiveCameras, look_at_view_transform
from pytorch3d.renderer.mesh import MeshRasterizer, RasterizationSettings
from pytorch3d.renderer.mesh.rasterize_meshes import (
rasterize_meshes,
rasterize_meshes_python,
)
from pytorch3d.renderer.mesh.utils import (
_clip_barycentric_coordinates,
_interpolate_zbuf,
)
from pytorch3d.structures import Meshes
from pytorch3d.utils import ico_sphere
class TestRasterizeMeshes(TestCaseMixin, unittest.TestCase):
def test_simple_python(self):
device = torch.device("cpu")
self._simple_triangle_raster(rasterize_meshes_python, device, bin_size=-1)
self._simple_blurry_raster(rasterize_meshes_python, device, bin_size=-1)
self._test_behind_camera(rasterize_meshes_python, device, bin_size=-1)
self._test_perspective_correct(rasterize_meshes_python, device, bin_size=-1)
self._test_barycentric_clipping(rasterize_meshes_python, device, bin_size=-1)
self._test_back_face_culling(rasterize_meshes_python, device, bin_size=-1)
def test_simple_cpu_naive(self):
device = torch.device("cpu")
self._simple_triangle_raster(rasterize_meshes, device, bin_size=0)
self._simple_blurry_raster(rasterize_meshes, device, bin_size=0)
self._test_behind_camera(rasterize_meshes, device, bin_size=0)
self._test_perspective_correct(rasterize_meshes, device, bin_size=0)
self._test_back_face_culling(rasterize_meshes, device, bin_size=0)
def test_simple_cuda_naive(self):
device = get_random_cuda_device()
self._simple_triangle_raster(rasterize_meshes, device, bin_size=0)
self._simple_blurry_raster(rasterize_meshes, device, bin_size=0)
self._test_behind_camera(rasterize_meshes, device, bin_size=0)
self._test_perspective_correct(rasterize_meshes, device, bin_size=0)
self._test_back_face_culling(rasterize_meshes, device, bin_size=0)
def test_simple_cuda_binned(self):
device = get_random_cuda_device()
self._simple_triangle_raster(rasterize_meshes, device, bin_size=5)
self._simple_blurry_raster(rasterize_meshes, device, bin_size=5)
self._test_behind_camera(rasterize_meshes, device, bin_size=5)
self._test_perspective_correct(rasterize_meshes, device, bin_size=5)
self._test_back_face_culling(rasterize_meshes, device, bin_size=5)
def test_python_vs_cpu_vs_cuda(self):
torch.manual_seed(231)
device = torch.device("cpu")
image_size = 32
blur_radius = 0.1 ** 2
faces_per_pixel = 3
for d in ["cpu", get_random_cuda_device()]:
device = torch.device(d)
compare_grads = True
# Mesh with a single face.
verts1 = torch.tensor(
[[0.0, 0.6, 0.1], [-0.7, -0.4, 0.5], [0.7, -0.4, 0.7]],
dtype=torch.float32,
requires_grad=True,
device=device,
)
faces1 = torch.tensor([[0, 1, 2]], dtype=torch.int64, device=device)
meshes1 = Meshes(verts=[verts1], faces=[faces1])
args1 = (meshes1, image_size, blur_radius, faces_per_pixel)
verts2 = verts1.detach().clone()
verts2.requires_grad = True
meshes2 = Meshes(verts=[verts2], faces=[faces1])
args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
self._compare_impls(
rasterize_meshes_python,
rasterize_meshes,
args1,
args2,
verts1,
verts2,
compare_grads=compare_grads,
)
# Mesh with multiple faces.
# fmt: off
verts1 = torch.tensor(
[
[ -0.5, 0.0, 0.1], # noqa: E241, E201
[ 0.0, 0.6, 0.5], # noqa: E241, E201
[ 0.5, 0.0, 0.7], # noqa: E241, E201
[-0.25, 0.0, 0.9], # noqa: E241, E201
[ 0.26, 0.5, 0.8], # noqa: E241, E201
[ 0.76, 0.0, 0.8], # noqa: E241, E201
[-0.41, 0.0, 0.5], # noqa: E241, E201
[ 0.61, 0.6, 0.6], # noqa: E241, E201
[ 0.41, 0.0, 0.5], # noqa: E241, E201
[ -0.2, 0.0, -0.5], # noqa: E241, E201
[ 0.3, 0.6, -0.5], # noqa: E241, E201
[ 0.4, 0.0, -0.5], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
requires_grad=True
)
faces1 = torch.tensor(
[
[ 1, 0, 2], # noqa: E241, E201
[ 4, 3, 5], # noqa: E241, E201
[ 7, 6, 8], # noqa: E241, E201
[10, 9, 11] # noqa: E241, E201
],
dtype=torch.int64,
device=device,
)
# fmt: on
meshes = Meshes(verts=[verts1], faces=[faces1])
args1 = (meshes, image_size, blur_radius, faces_per_pixel)
verts2 = verts1.clone().detach()
verts2.requires_grad = True
meshes2 = Meshes(verts=[verts2], faces=[faces1])
args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
self._compare_impls(
rasterize_meshes_python,
rasterize_meshes,
args1,
args2,
verts1,
verts2,
compare_grads=compare_grads,
)
# Icosphere
meshes = ico_sphere(device=device)
verts1, faces1 = meshes.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes = Meshes(verts=[verts1], faces=[faces1])
args1 = (meshes, image_size, blur_radius, faces_per_pixel)
verts2 = verts1.detach().clone()
verts2.requires_grad = True
meshes2 = Meshes(verts=[verts2], faces=[faces1])
args2 = (meshes2, image_size, blur_radius, faces_per_pixel)
self._compare_impls(
rasterize_meshes_python,
rasterize_meshes,
args1,
args2,
verts1,
verts2,
compare_grads=compare_grads,
)
def test_cpu_vs_cuda_naive(self):
"""
Compare naive versions of cuda and cpp
"""
torch.manual_seed(231)
image_size = 64
radius = 0.1 ** 2
faces_per_pixel = 3
device = torch.device("cpu")
meshes_cpu = ico_sphere(0, device)
verts1, faces1 = meshes_cpu.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes_cpu = Meshes(verts=[verts1], faces=[faces1])
device = get_random_cuda_device()
meshes_cuda = ico_sphere(0, device)
verts2, faces2 = meshes_cuda.get_mesh_verts_faces(0)
verts2.requires_grad = True
meshes_cuda = Meshes(verts=[verts2], faces=[faces2])
barycentric_clip = True
args_cpu = (
meshes_cpu,
image_size,
radius,
faces_per_pixel,
None,
None,
False,
barycentric_clip,
False,
)
args_cuda = (
meshes_cuda,
image_size,
radius,
faces_per_pixel,
0,
0,
False,
barycentric_clip,
False,
)
self._compare_impls(
rasterize_meshes,
rasterize_meshes,
args_cpu,
args_cuda,
verts1,
verts2,
compare_grads=True,
)
def test_coarse_cpu(self):
return self._test_coarse_rasterize(torch.device("cpu"))
def test_coarse_cuda(self):
return self._test_coarse_rasterize(get_random_cuda_device())
def test_cpp_vs_cuda_naive_vs_cuda_binned(self):
# Make sure that the backward pass runs for all pathways
image_size = 64 # test is too slow for very large images.
N = 1
radius = 0.1 ** 2
faces_per_pixel = 3
grad_zbuf = torch.randn(N, image_size, image_size, faces_per_pixel)
grad_dist = torch.randn(N, image_size, image_size, faces_per_pixel)
grad_bary = torch.randn(N, image_size, image_size, faces_per_pixel, 3)
device = torch.device("cpu")
meshes = ico_sphere(0, device)
verts, faces = meshes.get_mesh_verts_faces(0)
verts.requires_grad = True
meshes = Meshes(verts=[verts], faces=[faces])
# Option I: CPU, naive
args = (meshes, image_size, radius, faces_per_pixel)
idx1, zbuf1, bary1, dist1 = rasterize_meshes(*args)
loss = (
(zbuf1 * grad_zbuf).sum()
+ (dist1 * grad_dist).sum()
+ (bary1 * grad_bary).sum()
)
loss.backward()
idx1 = idx1.data.cpu().clone()
zbuf1 = zbuf1.data.cpu().clone()
dist1 = dist1.data.cpu().clone()
grad1 = verts.grad.data.cpu().clone()
# Option II: CUDA, naive
device = get_random_cuda_device()
meshes = ico_sphere(0, device)
verts, faces = meshes.get_mesh_verts_faces(0)
verts.requires_grad = True
meshes = Meshes(verts=[verts], faces=[faces])
args = (meshes, image_size, radius, faces_per_pixel, 0, 0)
idx2, zbuf2, bary2, dist2 = rasterize_meshes(*args)
grad_zbuf = grad_zbuf.to(device)
grad_dist = grad_dist.to(device)
grad_bary = grad_bary.to(device)
loss = (
(zbuf2 * grad_zbuf).sum()
+ (dist2 * grad_dist).sum()
+ (bary2 * grad_bary).sum()
)
loss.backward()
idx2 = idx2.data.cpu().clone()
zbuf2 = zbuf2.data.cpu().clone()
dist2 = dist2.data.cpu().clone()
grad2 = verts.grad.data.cpu().clone()
# Option III: CUDA, binned
meshes = ico_sphere(0, device)
verts, faces = meshes.get_mesh_verts_faces(0)
verts.requires_grad = True
meshes = Meshes(verts=[verts], faces=[faces])
args = (meshes, image_size, radius, faces_per_pixel, 32, 500)
idx3, zbuf3, bary3, dist3 = rasterize_meshes(*args)
loss = (
(zbuf3 * grad_zbuf).sum()
+ (dist3 * grad_dist).sum()
+ (bary3 * grad_bary).sum()
)
loss.backward()
idx3 = idx3.data.cpu().clone()
zbuf3 = zbuf3.data.cpu().clone()
dist3 = dist3.data.cpu().clone()
grad3 = verts.grad.data.cpu().clone()
# Make sure everything was the same
self.assertTrue((idx1 == idx2).all().item())
self.assertTrue((idx1 == idx3).all().item())
self.assertClose(zbuf1, zbuf2, atol=1e-6)
self.assertClose(zbuf1, zbuf3, atol=1e-6)
self.assertClose(dist1, dist2, atol=1e-6)
self.assertClose(dist1, dist3, atol=1e-6)
self.assertClose(grad1, grad2, rtol=5e-3) # flaky test
self.assertClose(grad1, grad3, rtol=5e-3)
self.assertClose(grad2, grad3, rtol=5e-3)
def test_compare_coarse_cpu_vs_cuda(self):
torch.manual_seed(231)
N = 1
image_size = (512, 512)
blur_radius = 0.0
bin_size = 32
max_faces_per_bin = 20
device = torch.device("cpu")
meshes = ico_sphere(2, device)
faces = meshes.faces_packed()
verts = meshes.verts_packed()
faces_verts = verts[faces]
num_faces_per_mesh = meshes.num_faces_per_mesh()
mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
bin_faces_cpu = _C._rasterize_meshes_coarse(
faces_verts,
mesh_to_face_first_idx,
num_faces_per_mesh,
image_size,
blur_radius,
bin_size,
max_faces_per_bin,
)
device = get_random_cuda_device()
meshes = meshes.clone().to(device)
faces = meshes.faces_packed()
verts = meshes.verts_packed()
faces_verts = verts[faces]
num_faces_per_mesh = meshes.num_faces_per_mesh()
mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
bin_faces_cuda = _C._rasterize_meshes_coarse(
faces_verts,
mesh_to_face_first_idx,
num_faces_per_mesh,
image_size,
blur_radius,
bin_size,
max_faces_per_bin,
)
# Bin faces might not be the same: CUDA version might write them in
# any order. But if we sort the non-(-1) elements of the CUDA output
# then they should be the same.
for n in range(N):
for by in range(bin_faces_cpu.shape[1]):
for bx in range(bin_faces_cpu.shape[2]):
K = (bin_faces_cuda[n, by, bx] != -1).sum().item()
idxs_cpu = bin_faces_cpu[n, by, bx].tolist()
idxs_cuda = bin_faces_cuda[n, by, bx].tolist()
idxs_cuda[:K] = sorted(idxs_cuda[:K])
self.assertEqual(idxs_cpu, idxs_cuda)
def test_python_vs_cpp_bary_clip(self):
torch.manual_seed(232)
N = 2
V = 10
F = 5
verts1 = torch.randn(N, V, 3, requires_grad=True)
verts2 = verts1.detach().clone().requires_grad_(True)
faces = torch.randint(V, size=(N, F, 3))
meshes1 = Meshes(verts1, faces)
meshes2 = Meshes(verts2, faces)
kwargs = {"image_size": 24, "clip_barycentric_coords": True}
fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)
fn2 = functools.partial(rasterize_meshes_python, meshes2, **kwargs)
args = ()
self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)
def test_cpp_vs_cuda_bary_clip(self):
meshes = ico_sphere(2, device=torch.device("cpu"))
verts1, faces1 = meshes.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes1 = Meshes(verts=[verts1], faces=[faces1])
device = get_random_cuda_device()
verts2 = verts1.detach().to(device).requires_grad_(True)
faces2 = faces1.detach().clone().to(device)
meshes2 = Meshes(verts=[verts2], faces=[faces2])
kwargs = {"image_size": 64, "clip_barycentric_coords": True}
fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)
fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=0, **kwargs)
args = ()
self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)
def test_python_vs_cpp_perspective_correct(self):
torch.manual_seed(232)
N = 2
V = 10
F = 5
verts1 = torch.randn(N, V, 3, requires_grad=True)
verts2 = verts1.detach().clone().requires_grad_(True)
faces = torch.randint(V, size=(N, F, 3))
meshes1 = Meshes(verts1, faces)
meshes2 = Meshes(verts2, faces)
kwargs = {"image_size": 24, "perspective_correct": True}
fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)
fn2 = functools.partial(rasterize_meshes_python, meshes2, **kwargs)
args = ()
self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)
def test_cpp_vs_cuda_perspective_correct(self):
meshes = ico_sphere(2, device=torch.device("cpu"))
verts1, faces1 = meshes.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes1 = Meshes(verts=[verts1], faces=[faces1])
device = get_random_cuda_device()
verts2 = verts1.detach().to(device).requires_grad_(True)
faces2 = faces1.detach().clone().to(device)
meshes2 = Meshes(verts=[verts2], faces=[faces2])
kwargs = {"image_size": 64, "perspective_correct": True}
fn1 = functools.partial(rasterize_meshes, meshes1, **kwargs)
fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=0, **kwargs)
args = ()
self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)
def test_cuda_naive_vs_binned_perspective_correct(self):
device = get_random_cuda_device()
meshes = ico_sphere(2, device=device)
verts1, faces1 = meshes.get_mesh_verts_faces(0)
verts1.requires_grad = True
meshes1 = Meshes(verts=[verts1], faces=[faces1])
verts2 = verts1.detach().clone().requires_grad_(True)
faces2 = faces1.detach().clone()
meshes2 = Meshes(verts=[verts2], faces=[faces2])
kwargs = {"image_size": 64, "perspective_correct": True}
fn1 = functools.partial(rasterize_meshes, meshes1, bin_size=0, **kwargs)
fn2 = functools.partial(rasterize_meshes, meshes2, bin_size=8, **kwargs)
args = ()
self._compare_impls(fn1, fn2, args, args, verts1, verts2, compare_grads=True)
def test_bin_size_error(self):
meshes = ico_sphere(2)
image_size = 1024
bin_size = 16
with self.assertRaisesRegex(ValueError, "bin_size too small"):
rasterize_meshes(meshes, image_size, 0.0, 2, bin_size)
def _test_back_face_culling(self, rasterize_meshes_fn, device, bin_size):
# Square based pyramid mesh.
# fmt: off
verts = torch.tensor([
[-0.5, 0.0, 0.5], # noqa: E241 E201 Front right
[ 0.5, 0.0, 0.5], # noqa: E241 E201 Front left
[ 0.5, 0.0, 1.5], # noqa: E241 E201 Back left
[-0.5, 0.0, 1.5], # noqa: E241 E201 Back right
[ 0.0, 1.0, 1.0] # noqa: E241 E201 Top point of pyramid
], dtype=torch.float32, device=device)
faces = torch.tensor([
[2, 1, 0], # noqa: E241 E201 Square base
[3, 2, 0], # noqa: E241 E201 Square base
[1, 0, 4], # noqa: E241 E201 Triangle on front
[2, 4, 3], # noqa: E241 E201 Triangle on back
[3, 4, 0], # noqa: E241 E201 Triangle on left side
[1, 4, 2] # noqa: E241 E201 Triangle on right side
], dtype=torch.int64, device=device)
# fmt: on
mesh = Meshes(verts=[verts], faces=[faces])
kwargs = {
"meshes": mesh,
"image_size": 10,
"faces_per_pixel": 2,
"blur_radius": 0.0,
"perspective_correct": False,
"cull_backfaces": False,
}
if bin_size != -1:
kwargs["bin_size"] = bin_size
# fmt: off
pix_to_face_frontface = torch.tensor([
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, 2, 2, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, 2, 2, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241 E201
], dtype=torch.int64, device=device)
pix_to_face_backface = torch.tensor([
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, 3, 3, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, 3, 3, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, 3, 3, 3, 3, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, 3, 3, 3, 3, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241 E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241 E201
], dtype=torch.int64, device=device)
# fmt: on
pix_to_face_padded = -(torch.ones_like(pix_to_face_frontface))
# Run with and without culling
# Without culling, for k=0, the front face (i.e. face 2) is
# rasterized and for k=1, the back face (i.e. face 3) is
# rasterized.
idx_f, zbuf_f, bary_f, dists_f = rasterize_meshes_fn(**kwargs)
self.assertTrue(torch.all(idx_f[..., 0].squeeze() == pix_to_face_frontface))
self.assertTrue(torch.all(idx_f[..., 1].squeeze() == pix_to_face_backface))
# With culling, for k=0, the front face (i.e. face 2) is
# rasterized and for k=1, there are no faces rasterized
kwargs["cull_backfaces"] = True
idx_t, zbuf_t, bary_t, dists_t = rasterize_meshes_fn(**kwargs)
self.assertTrue(torch.all(idx_t[..., 0].squeeze() == pix_to_face_frontface))
self.assertTrue(torch.all(idx_t[..., 1].squeeze() == pix_to_face_padded))
def _compare_impls(
self,
fn1,
fn2,
args1,
args2,
grad_var1=None,
grad_var2=None,
compare_grads=False,
):
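        # Run both implementations forward and compare every rasterizer output
        # (pix_to_face indices, zbuf, barycentric coords and distances).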
idx1, zbuf1, bary1, dist1 = fn1(*args1)
idx2, zbuf2, bary2, dist2 = fn2(*args2)
self.assertTrue((idx1.cpu() == idx2.cpu()).all().item())
self.assertClose(zbuf1.cpu(), zbuf2.cpu(), rtol=1e-4)
self.assertClose(dist1.cpu(), dist2.cpu(), rtol=6e-3)
self.assertClose(bary1.cpu(), bary2.cpu(), rtol=1e-3)
if not compare_grads:
return
# Compare gradients.
torch.manual_seed(231)
grad_zbuf = torch.randn_like(zbuf1)
grad_dist = torch.randn_like(dist1)
grad_bary = torch.randn_like(bary1)
loss1 = (
(dist1 * grad_dist).sum()
+ (zbuf1 * grad_zbuf).sum()
+ (bary1 * grad_bary).sum()
)
# avoid gradient error if rasterize_meshes_python() culls all triangles
loss1 += grad_var1.sum() * 0.0
loss1.backward()
grad_verts1 = grad_var1.grad.data.clone().cpu()
grad_zbuf = grad_zbuf.to(zbuf2)
grad_dist = grad_dist.to(dist2)
grad_bary = grad_bary.to(bary2)
loss2 = (
(dist2 * grad_dist).sum()
+ (zbuf2 * grad_zbuf).sum()
+ (bary2 * grad_bary).sum()
)
# avoid gradient error if rasterize_meshes_python() culls all triangles
loss2 += grad_var2.sum() * 0.0
grad_var1.grad.data.zero_()
loss2.backward()
grad_verts2 = grad_var2.grad.data.clone().cpu()
self.assertClose(grad_verts1, grad_verts2, rtol=2e-3)
def _test_perspective_correct(self, rasterize_meshes_fn, device, bin_size=None):
# fmt: off
verts = torch.tensor([
[-0.4, -0.4, 10], # noqa: E241, E201
[ 0.4, -0.4, 10], # noqa: E241, E201
[ 0.0, 0.4, 20], # noqa: E241, E201
], dtype=torch.float32, device=device)
# fmt: on
faces = torch.tensor([[0, 1, 2]], device=device)
meshes = Meshes(verts=[verts], faces=[faces])
kwargs = {
"meshes": meshes,
"image_size": 11,
"faces_per_pixel": 1,
"blur_radius": 0.2,
"perspective_correct": False,
}
if bin_size != -1:
kwargs["bin_size"] = bin_size
# Run with and without perspective correction
idx_f, zbuf_f, bary_f, dists_f = rasterize_meshes_fn(**kwargs)
kwargs["perspective_correct"] = True
idx_t, zbuf_t, bary_t, dists_t = rasterize_meshes_fn(**kwargs)
# Expected output tensors in the format with axes +X left, +Y up, +Z in
        # idx and dists should be the same with or without perspective correction
# fmt: off
idx_expected = torch.tensor([
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, 0, 0, 0, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241, E201
], dtype=torch.int64, device=device).view(1, 11, 11, 1)
dists_expected = torch.tensor([
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., -1., 0.1402, 0.1071, 0.1402, -1., -1., -1., -1.], # noqa: E241, E201, B950
            [-1., -1., -1., 0.1523, 0.0542, 0.0212, 0.0542, 0.1523, -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., 0.0955, 0.0214, -0.0003, 0.0214, 0.0955, -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., 0.1523, 0.0518, 0.0042, -0.0095, 0.0042, 0.0518, 0.1523, -1., -1.], # noqa: E241, E201, B950
[-1., -1., 0.0955, 0.0214, -0.0003, -0.032, -0.0003, 0.0214, 0.0955, -1., -1.], # noqa: E241, E201, B950
[-1., 0.1523, 0.0518, 0.0042, -0.0095, -0.0476, -0.0095, 0.0042, 0.0518, 0.1523, -1.], # noqa: E241, E201, B950
[-1., 0.1084, 0.0225, -0.0003, -0.0013, -0.0013, -0.0013, -0.0003, 0.0225, 0.1084, -1.], # noqa: E241, E201, B950
[-1., 0.1283, 0.0423, 0.0212, 0.0212, 0.0212, 0.0212, 0.0212, 0.0423, 0.1283, -1.], # noqa: E241, E201, B950
[-1., -1., 0.1283, 0.1071, 0.1071, 0.1071, 0.1071, 0.1071, 0.1283, -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.] # noqa: E241, E201, B950
], dtype=torch.float32, device=device).view(1, 11, 11, 1)
# zbuf and barycentric will be different with perspective correction
zbuf_f_expected = torch.tensor([
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., -1., 24.0909, 24.0909, 24.0909, -1., -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., 21.8182, 21.8182, 21.8182, 21.8182, 21.8182, -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., 19.5455, 19.5455, 19.5455, 19.5455, 19.5455, -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, 17.2727, -1., -1.], # noqa: E241, E201, B950
[-1., -1., 15., 15., 15., 15., 15., 15., 15., -1., -1.], # noqa: E241, E201, B950
[-1., 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, 12.7273, -1.], # noqa: E241, E201, B950
[-1., 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, 10.4545, -1.], # noqa: E241, E201, B950
[-1., 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, 8.1818, -1.], # noqa: E241, E201, B950
[-1., -1., 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, 5.9091, -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201, B950
], dtype=torch.float32, device=device).view(1, 11, 11, 1)
zbuf_t_expected = torch.tensor([
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., -1., 33.8461, 33.8462, 33.8462, -1., -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., 24.4444, 24.4444, 24.4444, 24.4444, 24.4444, -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., 19.1304, 19.1304, 19.1304, 19.1304, 19.1304, -1., -1., -1.], # noqa: E241, E201, B950
[-1., -1., 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, 15.7143, -1., -1.], # noqa: E241, E201, B950
[-1., -1., 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, 13.3333, -1., -1.], # noqa: E241, E201, B950
[-1., 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, 11.5789, -1.], # noqa: E241, E201, B950
[-1., 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, 10.2326, -1.], # noqa: E241, E201, B950
[-1., 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, 9.1667, -1.], # noqa: E241, E201, B950
[-1., -1., 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, 8.3019, -1., -1.], # noqa: E241, E201, B950
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1., -1.] # noqa: E241, E201, B950
], dtype=torch.float32, device=device).view(1, 11, 11, 1)
# fmt: on
self.assertTrue(torch.all(idx_f == idx_expected).item())
self.assertTrue(torch.all(idx_t == idx_expected).item())
dists_t_max_diff = (dists_t - dists_expected).abs().max().item()
dists_f_max_diff = (dists_f - dists_expected).abs().max().item()
self.assertLess(dists_t_max_diff, 1e-4)
self.assertLess(dists_f_max_diff, 1e-4)
zbuf_f_max_diff = (zbuf_f - zbuf_f_expected).abs().max().item()
zbuf_t_max_diff = (zbuf_t - zbuf_t_expected).abs().max().item()
self.assertLess(zbuf_f_max_diff, 1e-4)
self.assertLess(zbuf_t_max_diff, 1e-4)
# Check barycentrics by using them to re-compute zbuf
z0 = verts[0, 2]
z1 = verts[1, 2]
z2 = verts[2, 2]
w0_f, w1_f, w2_f = bary_f.unbind(dim=4)
w0_t, w1_t, w2_t = bary_t.unbind(dim=4)
zbuf_f_bary = w0_f * z0 + w1_f * z1 + w2_f * z2
zbuf_t_bary = w0_t * z0 + w1_t * z1 + w2_t * z2
mask = idx_expected != -1
zbuf_f_bary_diff = (zbuf_f_bary[mask] - zbuf_f_expected[mask]).abs().max()
zbuf_t_bary_diff = (zbuf_t_bary[mask] - zbuf_t_expected[mask]).abs().max()
self.assertLess(zbuf_f_bary_diff, 1e-4)
self.assertLess(zbuf_t_bary_diff, 1e-4)
def _test_barycentric_clipping(self, rasterize_meshes_fn, device, bin_size=None):
# fmt: off
verts = torch.tensor([
[-0.4, -0.4, 10], # noqa: E241, E201
[ 0.4, -0.4, 10], # noqa: E241, E201
[ 0.0, 0.4, 20], # noqa: E241, E201
], dtype=torch.float32, device=device)
# fmt: on
faces = torch.tensor([[0, 1, 2]], device=device)
meshes = Meshes(verts=[verts], faces=[faces])
kwargs = {
"meshes": meshes,
"image_size": 5,
"faces_per_pixel": 1,
"blur_radius": 0.2,
"perspective_correct": False,
"clip_barycentric_coords": False, # Initially set this to false
}
if bin_size != -1:
kwargs["bin_size"] = bin_size
# Run with and without perspective correction
idx_f, zbuf_f, bary_f, dists_f = rasterize_meshes_fn(**kwargs)
# fmt: off
expected_bary = torch.tensor([
[
[-1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-0.2500, -0.2500, 1.5000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000] # noqa: E241, E201
],
[
[-1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-0.5000, 0.5000, 1.0000], # noqa: E241, E201
[-0.0000, -0.0000, 1.0000], # noqa: E241, E201
[ 0.5000, -0.5000, 1.0000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000] # noqa: E241, E201
],
[
[-1.0000, -1.0000, -1.0000], # noqa: E241, E201
[-0.2500, 0.7500, 0.5000], # noqa: E241, E201
[ 0.2500, 0.2500, 0.5000], # noqa: E241, E201
[ 0.7500, -0.2500, 0.5000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000] # noqa: E241, E201
],
[
[-0.5000, 1.5000, -0.0000], # noqa: E241, E201
[-0.0000, 1.0000, -0.0000], # noqa: E241, E201
[ 0.5000, 0.5000, -0.0000], # noqa: E241, E201
[ 1.0000, -0.0000, -0.0000], # noqa: E241, E201
[ 1.5000, -0.5000, 0.0000] # noqa: E241, E201
],
[
[-1.0000, -1.0000, -1.0000], # noqa: E241, E201
[ 0.2500, 1.2500, -0.5000], # noqa: E241, E201
[ 0.7500, 0.7500, -0.5000], # noqa: E241, E201
[ 1.2500, 0.2500, -0.5000], # noqa: E241, E201
[-1.0000, -1.0000, -1.0000] # noqa: E241, E201
]
], dtype=torch.float32, device=device).view(1, 5, 5, 1, 3)
# fmt: on
self.assertClose(expected_bary, bary_f, atol=1e-4)
# calculate the expected clipped barycentrics and zbuf
expected_bary_clipped = _clip_barycentric_coordinates(expected_bary)
expected_z_clipped = _interpolate_zbuf(idx_f, expected_bary_clipped, meshes)
kwargs["clip_barycentric_coords"] = True
idx_t, zbuf_t, bary_t, dists_t = rasterize_meshes_fn(**kwargs)
self.assertClose(expected_bary_clipped, bary_t, atol=1e-4)
self.assertClose(expected_z_clipped, zbuf_t, atol=1e-4)
def _test_behind_camera(self, rasterize_meshes_fn, device, bin_size=None):
"""
All verts are behind the camera so nothing should get rasterized.
"""
N = 1
# fmt: off
verts = torch.tensor(
[
[ -0.5, 0.0, -0.1], # noqa: E241, E201
[ 0.0, 0.6, -0.1], # noqa: E241, E201
[ 0.5, 0.0, -0.1], # noqa: E241, E201
[-0.25, 0.0, -0.9], # noqa: E241, E201
[ 0.25, 0.5, -0.9], # noqa: E241, E201
[ 0.75, 0.0, -0.9], # noqa: E241, E201
[ -0.4, 0.0, -0.5], # noqa: E241, E201
[ 0.6, 0.6, -0.5], # noqa: E241, E201
[ 0.8, 0.0, -0.5], # noqa: E241, E201
[ -0.2, 0.0, -0.5], # noqa: E241, E201
[ 0.3, 0.6, -0.5], # noqa: E241, E201
[ 0.4, 0.0, -0.5], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
# fmt: on
faces = torch.tensor(
[[1, 0, 2], [4, 3, 5], [7, 6, 8], [10, 9, 11]],
dtype=torch.int64,
device=device,
)
meshes = Meshes(verts=[verts], faces=[faces])
image_size = 16
faces_per_pixel = 1
radius = 0.2
idx_expected = torch.full(
(N, image_size, image_size, faces_per_pixel),
fill_value=-1,
dtype=torch.int64,
device=device,
)
bary_expected = torch.full(
(N, image_size, image_size, faces_per_pixel, 3),
fill_value=-1,
dtype=torch.float32,
device=device,
)
zbuf_expected = torch.full(
(N, image_size, image_size, faces_per_pixel),
fill_value=-1,
dtype=torch.float32,
device=device,
)
dists_expected = zbuf_expected.clone()
if bin_size == -1:
# naive python version with no binning
idx, zbuf, bary, dists = rasterize_meshes_fn(
meshes, image_size, radius, faces_per_pixel
)
else:
idx, zbuf, bary, dists = rasterize_meshes_fn(
meshes, image_size, radius, faces_per_pixel, bin_size
)
idx_same = (idx == idx_expected).all().item()
zbuf_same = (zbuf == zbuf_expected).all().item()
self.assertTrue(idx_same)
self.assertTrue(zbuf_same)
self.assertClose(bary, bary_expected)
self.assertClose(dists, dists_expected)
def _simple_triangle_raster(self, raster_fn, device, bin_size=None):
image_size = 10
# Mesh with a single non-symmetrical face - this will help
# check that the XY directions are correctly oriented.
verts0 = torch.tensor(
[[-0.3, -0.4, 0.1], [0.0, 0.6, 0.1], [0.9, -0.4, 0.1]],
dtype=torch.float32,
device=device,
)
faces0 = torch.tensor([[1, 0, 2]], dtype=torch.int64, device=device)
# Mesh with two overlapping faces.
# fmt: off
verts1 = torch.tensor(
[
[-0.9, -0.2, 0.1], # noqa: E241, E201
[ 0.0, 0.6, 0.1], # noqa: E241, E201
[ 0.7, -0.4, 0.1], # noqa: E241, E201
[-0.7, 0.4, 0.5], # noqa: E241, E201
[ 0.0, -0.6, 0.5], # noqa: E241, E201
[ 0.7, 0.4, 0.5], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
        # fmt: on
faces1 = torch.tensor(
[[1, 0, 2], [3, 4, 5]], dtype=torch.int64, device=device
)
# Expected output tensors in the format with axes +X left, +Y up, +Z in
# k = 0, closest point.
        # fmt: off
expected_p2face_k0 = torch.tensor(
[
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, 1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 2, 2, 1, 1, 1, 2, -1, -1], # noqa: E241, E201
[-1, -1, -1, 1, 1, 1, 1, 1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 1, 1, 1, 1, 1, 1, -1], # noqa: E241, E201
[-1, -1, 1, 1, 1, 2, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
],
dtype=torch.int64,
device=device,
)
expected_zbuf_k0 = torch.tensor(
[
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 0.1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.1, 0.1, 0.1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0.1, 0.1, 0.1, 0.1, -1, -1, -1, -1], # noqa: E241, E201
[-1, 0.1, 0.1, 0.1, 0.1, 0.1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241, E201
],
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, 0.1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, 0.5, 0.5, 0.1, 0.1, 0.1, 0.5, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.1, 0.1, 0.1, 0.1, 0.1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -1], # noqa: E241, E201
[-1, -1, 0.1, 0.1, 0.1, 0.5, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241, E201
]
],
device=device,
)
# fmt: on
meshes = Meshes(verts=[verts0, verts1], faces=[faces0, faces1])
# k = 1, second closest point.
expected_p2face_k1 = expected_p2face_k0.clone()
expected_p2face_k1[0, :] = torch.ones_like(expected_p2face_k1[0, :]) * -1
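        # The first mesh has only a single face, so its k=1 (second-closest) entries stay -1.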
# fmt: off
expected_p2face_k1[1, :] = torch.tensor(
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 2, 2, 2, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, 2, 2, 2, 2, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, 2, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1] # noqa: E241, E201
],
dtype=torch.int64,
device=device,
)
expected_zbuf_k1 = expected_zbuf_k0.clone()
expected_zbuf_k1[0, :] = torch.ones_like(expected_zbuf_k1[0, :]) * -1
expected_zbuf_k1[1, :] = torch.tensor(
[
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., 0.5, 0.5, 0.5, -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., 0.5, 0.5, 0.5, 0.5, -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., 0.5, 0.5, 0.5, 0.5, -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., 0.5, -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.] # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
# fmt: on
# Coordinate conventions +Y up, +Z in, +X left
if bin_size == -1:
# simple python, no bin_size
p2face, zbuf, bary, pix_dists = raster_fn(meshes, image_size, 0.0, 2)
else:
p2face, zbuf, bary, pix_dists = raster_fn(
meshes, image_size, 0.0, 2, bin_size
)
self.assertClose(p2face[..., 0], expected_p2face_k0)
self.assertClose(zbuf[..., 0], expected_zbuf_k0)
self.assertClose(p2face[..., 1], expected_p2face_k1)
self.assertClose(zbuf[..., 1], expected_zbuf_k1)
def _simple_blurry_raster(self, raster_fn, device, bin_size=None):
"""
Check that pix_to_face, dist and zbuf values are invariant to the
ordering of faces.
"""
image_size = 10
blur_radius = 0.12 ** 2
faces_per_pixel = 1
# fmt: off
verts = torch.tensor(
[
[ -0.3, 0.0, 0.1], # noqa: E241, E201
[ 0.0, 0.6, 0.1], # noqa: E241, E201
[ 0.8, 0.0, 0.1], # noqa: E241, E201
[-0.25, 0.0, 0.9], # noqa: E241, E201
[0.25, 0.5, 0.9], # noqa: E241, E201
[0.75, 0.0, 0.9], # noqa: E241, E201
[-0.4, 0.0, 0.5], # noqa: E241, E201
[ 0.6, 0.6, 0.5], # noqa: E241, E201
[ 0.8, 0.0, 0.5], # noqa: E241, E201
[-0.2, 0.0, -0.5], # noqa: E241, E201 face behind the camera
[ 0.3, 0.6, -0.5], # noqa: E241, E201
[ 0.4, 0.0, -0.5], # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
        # Face with index 0 is non-symmetric about the X and Y axes to
# test that the positive Y and X directions are correct in the output.
faces_packed = torch.tensor(
[[1, 0, 2], [4, 3, 5], [7, 6, 8], [10, 9, 11]],
dtype=torch.int64,
device=device,
)
expected_p2f = torch.tensor(
[
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, 2, 2, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201
[-1, 2, 0, 0, 0, 0, -1, -1, -1, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201
[-1, 0, 0, 0, 0, 0, 0, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
[-1, -1, -1, -1, -1, -1, -1, -1, -1, -1], # noqa: E241, E201
],
dtype=torch.int64,
device=device,
)
expected_zbuf = torch.tensor(
[
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., 0.5, 0.5, 0.1, 0.1, 0.1, -1., -1., -1., -1.], # noqa: E241, E201
[-1., 0.5, 0.1, 0.1, 0.1, 0.1, -1., -1., -1., -1.], # noqa: E241, E201
[-1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -1., -1., -1.], # noqa: E241, E201
[-1., 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.], # noqa: E241, E201
[-1., -1., -1., -1., -1., -1., -1., -1., -1., -1.] # noqa: E241, E201
],
dtype=torch.float32,
device=device,
)
# fmt: on
for i, order in enumerate([[0, 1, 2], [1, 2, 0], [2, 0, 1]]):
faces = faces_packed[order] # rearrange order of faces.
mesh = Meshes(verts=[verts], faces=[faces])
if bin_size == -1:
# simple python, no bin size arg
pix_to_face, zbuf, bary_coords, dists = raster_fn(
mesh, image_size, blur_radius, faces_per_pixel
)
else:
pix_to_face, zbuf, bary_coords, dists = raster_fn(
mesh, image_size, blur_radius, faces_per_pixel, bin_size
)
if i == 0:
expected_dists = dists
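            # Remap the expected face indices to account for the permuted face ordering.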
p2f = expected_p2f.clone()
p2f[expected_p2f == 0] = order.index(0)
p2f[expected_p2f == 1] = order.index(1)
p2f[expected_p2f == 2] = order.index(2)
self.assertClose(pix_to_face.squeeze(), p2f)
self.assertClose(zbuf.squeeze(), expected_zbuf, rtol=1e-5)
self.assertClose(dists, expected_dists)
def _test_coarse_rasterize(self, device):
image_size = (16, 16)
# No blurring. This test checks that the XY directions are
# correctly oriented.
blur_radius = 0.0
bin_size = 8
max_faces_per_bin = 3
# fmt: off
verts = torch.tensor(
[
[-0.5, 0.1, 0.1], # noqa: E241, E201
[-0.3, 0.6, 0.1], # noqa: E241, E201
[-0.1, 0.1, 0.1], # noqa: E241, E201
[-0.3, -0.1, 0.4], # noqa: E241, E201
[ 0.3, 0.5, 0.4], # noqa: E241, E201
[0.75, -0.1, 0.4], # noqa: E241, E201
[ 0.2, -0.3, 0.9], # noqa: E241, E201
[ 0.3, -0.7, 0.9], # noqa: E241, E201
[ 0.6, -0.3, 0.9], # noqa: E241, E201
[-0.4, 0.0, -1.5], # noqa: E241, E201
[ 0.6, 0.6, -1.5], # noqa: E241, E201
[ 0.8, 0.0, -1.5], # noqa: E241, E201
],
device=device,
)
        # Expected faces using axes convention +Y down, +X right, +Z in
        # Non-symmetrical triangles, i.e. faces 0 and 3 are in one bin only
faces = torch.tensor(
[
[ 1, 0, 2], # noqa: E241, E201 bin 01 only
[ 4, 3, 5], # noqa: E241, E201 all bins
[ 7, 6, 8], # noqa: E241, E201 bin 10 only
[10, 9, 11], # noqa: E241, E201 negative z, should not appear.
],
dtype=torch.int64,
device=device,
)
# fmt: on
meshes = Meshes(verts=[verts], faces=[faces])
faces_verts = verts[faces]
num_faces_per_mesh = meshes.num_faces_per_mesh()
mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx()
        # Expected faces using axes convention +Y down, +X right, +Z in
bin_faces_expected = (
torch.ones((1, 2, 2, max_faces_per_bin), dtype=torch.int32, device=device)
* -1
)
bin_faces_expected[0, 1, 1, 0] = torch.tensor([1])
bin_faces_expected[0, 0, 1, 0:2] = torch.tensor([1, 2])
bin_faces_expected[0, 1, 0, 0:2] = torch.tensor([0, 1])
bin_faces_expected[0, 0, 0, 0] = torch.tensor([1])
# +Y up, +X left, +Z in
bin_faces = _C._rasterize_meshes_coarse(
faces_verts,
mesh_to_face_first_idx,
num_faces_per_mesh,
image_size,
blur_radius,
bin_size,
max_faces_per_bin,
)
bin_faces_same = (bin_faces.squeeze() == bin_faces_expected).all()
self.assertTrue(bin_faces_same.item() == 1)
def test_order_of_ties(self):
# Tied faces are rasterized in index order
# We rasterize a mesh with many faces.
device = torch.device("cuda:0")
verts = -5 * torch.eye(3, dtype=torch.float32, device=device)[None]
faces = torch.arange(3, device=device, dtype=torch.int64).expand(1, 100, 3)
mesh = Meshes(verts=verts, faces=faces)
R, T = look_at_view_transform(2.7, 0.0, 0.0)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T)
raster_settings = RasterizationSettings(
image_size=28, faces_per_pixel=100, bin_size=0
)
rasterizer = MeshRasterizer(raster_settings=raster_settings)
out = rasterizer(mesh, cameras=cameras)
self.assertClose(
out.pix_to_face[0, 14:, :14],
torch.arange(100, device=device).expand(14, 14, 100),
)
@staticmethod
def rasterize_meshes_python_with_init(
num_meshes: int,
ico_level: int,
image_size: int,
blur_radius: float,
faces_per_pixel: int,
):
device = torch.device("cpu")
meshes = ico_sphere(ico_level, device)
meshes_batch = meshes.extend(num_meshes)
def rasterize():
rasterize_meshes_python(
meshes_batch, image_size, blur_radius, faces_per_pixel
)
return rasterize
@staticmethod
def rasterize_meshes_cpu_with_init(
num_meshes: int,
ico_level: int,
image_size: int,
blur_radius: float,
faces_per_pixel: int,
):
meshes = ico_sphere(ico_level, torch.device("cpu"))
meshes_batch = meshes.extend(num_meshes)
def rasterize():
rasterize_meshes(
meshes_batch,
image_size,
blur_radius,
faces_per_pixel=faces_per_pixel,
bin_size=0,
)
return rasterize
@staticmethod
def rasterize_meshes_cuda_with_init(
num_meshes: int,
ico_level: int,
image_size: int,
blur_radius: float,
faces_per_pixel: int,
):
device = get_random_cuda_device()
meshes = ico_sphere(ico_level, device)
meshes_batch = meshes.extend(num_meshes)
torch.cuda.synchronize(device)
def rasterize():
rasterize_meshes(meshes_batch, image_size, blur_radius, faces_per_pixel)
torch.cuda.synchronize(device)
return rasterize
@staticmethod
def bm_rasterize_meshes_with_clipping(
num_meshes: int,
ico_level: int,
image_size: int,
blur_radius: float,
faces_per_pixel: int,
dist: float,
):
device = get_random_cuda_device()
meshes = ico_sphere(ico_level, device)
meshes_batch = meshes.extend(num_meshes)
settings = RasterizationSettings(
image_size=image_size,
blur_radius=blur_radius,
faces_per_pixel=faces_per_pixel,
z_clip_value=1e-2,
perspective_correct=True,
cull_to_frustum=True,
)
# The camera is positioned so that the image plane intersects
# the mesh and some faces are partially behind the image plane.
R, T = look_at_view_transform(dist, 0, 0)
cameras = FoVPerspectiveCameras(device=device, R=R, T=T, fov=90)
rasterizer = MeshRasterizer(raster_settings=settings, cameras=cameras)
        # Transform the meshes to project them onto the image plane
meshes_screen = rasterizer.transform(meshes_batch)
torch.cuda.synchronize(device)
def rasterize():
# Only measure rasterization speed (including clipping)
rasterize_meshes(
meshes_screen,
image_size,
blur_radius,
faces_per_pixel,
z_clip_value=1e-2,
perspective_correct=True,
cull_to_frustum=True,
)
torch.cuda.synchronize(device)
return rasterize
| [
"torch.device",
"torch.cuda.synchronize",
"torch.arange",
"torch.ones",
"torch.manual_seed",
"torch.randint",
"torch.randn_like",
"torch.tensor",
"torch.full",
"torch.ones_like",
"torch.eye",
"torch.all",
"torch.randn"
] | 3 | jkxing/pytorch3d | 71dbebe8010a0dac3e56be464778aa48fbd3bcd3 |
1.9 | '''
CrossLink Network
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
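# Activation helpers: swish(x) = x * sigmoid(x), mish(x) = x * tanh(softplus(x)).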
def swish(x):
return x * x.sigmoid()
def mish(x):
return x * torch.tanh(F.softplus(x))
class CrossLinkBlock(nn.Module):
'''Cross-Link Block'''
def __init__(self, in_channels, out_channels, kernel_size, pool_enable):
super(CrossLinkBlock, self).__init__()
self.pool_enable = pool_enable
self.ReLU = nn.ReLU()
# basic blocks
self.dconv1_1 = nn.Conv2d(in_channels,
in_channels,
kernel_size=kernel_size[0],
stride=1,
padding='same',
groups=1,
bias=False)
self.dconv1_2 = nn.Conv2d(in_channels,
in_channels,
kernel_size=kernel_size[1],
stride=1,
padding='same',
groups=1,
bias=False)
self.bn1 = nn.BatchNorm2d(in_channels)
self.bn2 = nn.BatchNorm2d(in_channels)
self.pconv = nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding='same',
groups=1,
bias=False)
self.bn3 = nn.BatchNorm2d(out_channels)
self.maxpool = nn.MaxPool2d(2, 2)
def forward(self, x):
        '''Cross-linked forward pass: two parallel conv branches gate each other, then their batch-normed sum is projected (and optionally pooled).'''
out1 = self.dconv1_1(x)
out2 = self.dconv1_2(x)
out1 = torch.mul(out1, self.ReLU(out1))
out2 = torch.mul(out1, self.ReLU(out2))
out = self.bn1(out1) + self.bn2(out2)
out = self.bn3(self.pconv(out))
if self.pool_enable:
out = self.maxpool(out)
return out
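# Shape sketch for CrossLinkBlock (illustrative note, not part of the original source):
# x: (N, C_in, H, W) -> two parallel convs with swish-like cross-gating -> batch-normed sum
# -> 3x3 projection conv giving (N, C_out, H, W) -> optional 2x2 max-pool halving H and W.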
class CLNET(nn.Module):
def __init__(self, cfg, num_classes=10):
super(CLNET, self).__init__()
self.cfg = cfg
self.conv1 = nn.Conv2d(3,
32,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(32)
self.pool1 = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32,
32,
kernel_size=3,
stride=1,
padding=1,
groups=1,
bias=False)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32,
16,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.layers = self._make_layers(in_channels=16)
self.linear = nn.Linear(cfg['out_channels'][-1], num_classes)
def _make_layers(self, in_channels):
layers = []
cfg = [self.cfg[k] for k in ['out_channels', 'kernel_size', 'pool_enable']]
for out_channels, kernel_size, pool_enable in zip(*cfg):
layers.append(
CrossLinkBlock(in_channels,
out_channels,
kernel_size,
pool_enable))
in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
out = mish(self.bn1(self.pool1(self.conv1(x)))) # conv block
out = self.conv3(swish(self.bn2(self.conv2(out)))) # sep block
out = self.layers(out)
out = F.adaptive_avg_pool2d(out, 1)
out = out.view(out.size(0), -1)
dropout_rate = self.cfg['dropout_rate']
if self.training and dropout_rate > 0:
out = F.dropout(out, p=dropout_rate)
out = self.linear(out)
return out
def CLNet_V0(num_classes):
cfg = {
'out_channels': [24, 40, 80, 112, 160],
'kernel_size': [(5, 3), (3, 5), (3, 3), (5, 5), (3, 3)],
'pool_enable': [True, True, True, True, False],
'dropout_rate': 0.2
}
return CLNET(cfg, num_classes=num_classes)
import torchinfo
def test():
    net = CLNet_V0(10).to('cuda')  # keep the model on the same device as the CUDA test input below
torchinfo.summary(net, (1, 3, 32, 32))
x = torch.randn(3, 3, 32, 32, device='cuda')
y = net(x)
print(y.shape)
if __name__ == '__main__':
test()
| [
"torch.nn.Linear",
"torch.nn.functional.softplus",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.functional.dropout",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.randn"
] | 1.9.1 | angseung/torch_cifar10 | 3160f749f3bffd941d6c0fb98ddaad63d4e5641d |
1.6 | import os
import torch
import numpy as np
import unittest
import timeit
import functools
from tinygrad.tensor import Tensor, DEFAULT_DEVICE, Device
def helper_test_op(shps, torch_fxn, tinygrad_fxn, atol=1e-6, rtol=1e-3, grad_atol=1e-6, grad_rtol=1e-3, forward_only=False, vals=None, a=-0.5, b=20):
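  # Compare torch_fxn against its tinygrad counterpart: check forward outputs,
  # optionally gradients, and print rough per-call forward/backward timings for both.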
torch.manual_seed(0)
if shps is None:
ts = [torch.tensor(x, requires_grad=True) for x in vals]
else:
ts = [torch.tensor((np.random.random(size=x).astype(np.float32)+a)*b, requires_grad=True) for x in shps]
tst = [Tensor(x.detach().numpy()) for x in ts]
out = torch_fxn(*ts)
ret = tinygrad_fxn(*tst)
np.testing.assert_allclose(ret.cpu().data, out.detach().numpy(), atol=atol, rtol=rtol)
if not forward_only:
out.mean().backward()
ret.mean().backward()
for t, tt in zip(ts, tst):
np.testing.assert_allclose(t.grad, tt.cpu().grad.data, atol=grad_atol, rtol=grad_rtol)
# speed
torch_fp = timeit.Timer(functools.partial(torch_fxn, *ts)).timeit(5) * 1000/5
tinygrad_fp = timeit.Timer(functools.partial(tinygrad_fxn, *tst)).timeit(5) * 1000/5
if not forward_only:
torch_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), torch_fxn, ts)).timeit(5) * 1000/5
tinygrad_fbp = timeit.Timer(functools.partial(lambda f,x: f(*x).mean().backward(), tinygrad_fxn, tst)).timeit(5) * 1000/5
else:
torch_fbp, tinygrad_fbp = np.nan, np.nan
print("testing %30r torch/tinygrad fp: %.2f / %.2f ms bp: %.2f / %.2f ms" % (shps, torch_fp, tinygrad_fp, torch_fbp-torch_fp, tinygrad_fbp-tinygrad_fp))
class TestOps(unittest.TestCase):
def test_add(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x+y, Tensor.add)
def test_sub(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x-y, Tensor.sub)
def test_mul(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x*y, Tensor.mul)
def test_div(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x/y, Tensor.div)
def test_pow(self):
helper_test_op([(45,65), (45,65)], lambda x,y: x**y, Tensor.pow, a=0)
def test_sqrt(self):
helper_test_op([(45,65)], lambda x: x.sqrt(), Tensor.sqrt, a=0)
def test_relu(self):
helper_test_op([(45,65)], lambda x: x.relu(), Tensor.relu)
def test_leakyrelu(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.leaky_relu(x,0.01), Tensor.leakyrelu)
def test_abs(self):
helper_test_op([(45,65)], lambda x: torch.abs(x), Tensor.abs)
def test_log(self):
helper_test_op([(45,65)], lambda x: torch.log(x), Tensor.log)
def test_exp(self):
helper_test_op([(45,65)], lambda x: torch.exp(x), Tensor.exp)
def test_sign(self):
helper_test_op([(45,65)], lambda x: torch.sign(x), Tensor.sign)
def test_sigmoid(self):
helper_test_op([(45,65)], lambda x: x.sigmoid(), Tensor.sigmoid)
def test_softplus(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.softplus(x), Tensor.softplus, atol=1e-6, grad_atol=1e-6)
def test_relu6(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.relu6(x), Tensor.relu6)
def test_hardswish(self):
helper_test_op([(45,65)], lambda x: torch.nn.functional.hardswish(x), Tensor.hardswish, atol=1e-6, grad_atol=1e-6)
def test_mish(self):
def _mish_pytorch(x):
return x*torch.tanh(torch.nn.functional.softplus(x))
helper_test_op([(45,65)], _mish_pytorch, Tensor.mish, atol=1e-4)
def test_dot(self):
helper_test_op([(45,65), (65,100)], lambda x,y: x.matmul(y), Tensor.dot, atol=1e-4)
def test_multidot(self):
helper_test_op([(10,45,65), (10,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)
helper_test_op([(3,3,45,65), (3,3,65,45)], lambda x,y: x @ y, Tensor.dot, atol=1e-4)
def test_sum(self):
helper_test_op([(45,3)], lambda x: x.sum(), Tensor.sum)
helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=(1,2)), lambda x: Tensor.sum(x, axis=(1,2)))
helper_test_op([(3,4,5,6)], lambda x: x.sum(axis=1), lambda x: Tensor.sum(x, axis=1))
def test_max(self):
helper_test_op([(45,3)], lambda x: x.max(), Tensor.max)
helper_test_op([(45,3)], lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5))
helper_test_op(None, lambda x: x.max().mul(0.5), lambda x: Tensor.max(x).mul(0.5),
vals=[
[[1.0,1.0,0.0,1.0]],
])
helper_test_op([(3,4,5,6)], lambda x: x.max(axis=1)[0], lambda x: Tensor.max(x, axis=1))
def test_mean_axis(self):
helper_test_op([(3,4,5,6)], lambda x: x.mean(axis=(1,2)), lambda x: Tensor.mean(x, axis=(1,2)))
def test_logsoftmax(self):
helper_test_op([(45,65)], lambda x: torch.nn.LogSoftmax(dim=1)(x), Tensor.logsoftmax, atol=1e-7, grad_atol=1e-7)
def test_tanh(self):
helper_test_op([(45,65)], lambda x: x.tanh(), Tensor.tanh, atol=1e-6, grad_atol=1e-6)
def test_topo_sort(self):
helper_test_op([(45,65)], lambda x: (x+x)*x, lambda x: x.add(x).mul(x), atol=1e-6, grad_atol=1e-6)
def test_scalar_mul(self):
helper_test_op([(45,65)], lambda x: x*2, lambda x: x*2)
def test_scalar_rmul(self):
helper_test_op([(45,65)], lambda x: 2*x, lambda x: 2*x)
def test_scalar_sub(self):
helper_test_op([(45,65)], lambda x: x-2, lambda x: x-2)
def test_scalar_rsub(self):
helper_test_op([(45,65)], lambda x: 2-x, lambda x: 2-x)
def test_broadcast_full(self):
for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
(torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
for shapes in [((5,13,24,16), (5,1,24,1)), ((1,3,1,7,1), (2,1,5,1,8))]:
with self.subTest(op=torch_op.__name__, shapes=shapes):
helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)
def test_broadcast_partial(self):
for torch_op, tinygrad_op in [(torch.add, Tensor.add), (torch.sub, Tensor.sub), (torch.mul, Tensor.mul),
(torch.div, Tensor.div), (torch.pow, Tensor.pow)]:
for shapes in [((1,32,32,32), (1,32,1,1)), ((5,13,24,16,2), (1,13,24,1,1)),
((4,1), (4,5)), ((1,4), (5,4))]:
with self.subTest(op=torch_op.__name__, shapes=shapes):
# NOTE: ANE backwards?
helper_test_op(shapes, torch_op, tinygrad_op, a=-0.5 if tinygrad_op != Tensor.pow else 0.0)
def test_slice(self):
helper_test_op([(3,3,3,3)], lambda x: x[1:2], lambda x: x[1:2])
helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2], lambda x: x[1:2, 1:2])
helper_test_op([(3,3,3,3)], lambda x: x[1:2, 1:2, 0:-1], lambda x: x[1:2, 1:2, 0:-1])
def test_pad2d(self):
helper_test_op([(3,3,3,3)], lambda x: torch.nn.functional.pad(x, (1,2,3,4)), lambda x: x.pad2d(padding=(1,2,3,4)))
def test_transpose(self):
helper_test_op([(3,3,3)], lambda x: x.transpose(1,2), lambda x: x.transpose(order=(0,2,1)))
# This is failing on GPU because the dim is too large
#helper_test_op([(21,22,23,24)], lambda x: x.movedim((3,0,2,1),(0,1,2,3)), lambda x: x.transpose(order=(3,0,2,1)))
helper_test_op([(3,4,5,6)], lambda x: x.movedim((3,2,1,0),(0,1,2,3)), lambda x: x.transpose(order=(3,2,1,0)))
def test_reshape(self):
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,3,6,6)), lambda x: x.reshape(shape=(-1,3,6,6)))
helper_test_op([(4,3,6,6)], lambda x: torch.reshape(x, (-1,1,6,6)), lambda x: x.reshape(shape=(-1,1,6,6)))
def test_detach(self):
helper_test_op([(4,3,6,6)], lambda x: x.detach(), lambda x: x.detach(), forward_only=True)
def test_conv2d(self):
for bs in [1,8]:
for cin in [1,3]:
for groups in [1,3] if cin == 3 else [1]:
for H in [1,2,5]:
for W in [1,2,3,5]:
with self.subTest(batch_size=bs, channels=cin, groups=groups, height=H, width=W):
helper_test_op([(bs,cin,11,28), (6,cin//groups,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,groups=groups).relu(),
lambda x,w: Tensor.conv2d(x,w,groups=groups).relu(), atol=1e-4, grad_rtol=1e-5)
def test_strided_conv2d(self):
bs = 4
cin = 3
H,W = 3,3
with self.subTest(stride := 2):
helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,stride=2).relu(),
lambda x,w: Tensor.conv2d(x,w,stride=stride).relu(), atol=1e-4)
with self.subTest(stride := (2,1)):
helper_test_op([(bs,cin,11,28), (4,cin,H,W)],
lambda x,w: torch.nn.functional.conv2d(x,w,stride=stride).relu(),
lambda x,w: Tensor.conv2d(x,w,stride=(2,1)).relu(), atol=1e-4)
def test_maxpool2d(self):
for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1)]:
with self.subTest(kernel_size=ksz):
helper_test_op([(32,2,110,28)],
lambda x: torch.nn.functional.max_pool2d(x, kernel_size=ksz),
# TODO: why is this tolerance so high?
lambda x: Tensor.max_pool2d(x, kernel_size=ksz), grad_atol=1e-4)
def test_avgpool2d(self):
shape = (32,2,111,28)
for ksz in [(2,2), (3,3), (3,2), (5,5), (5,1), shape[2:]]:
with self.subTest(kernel_size=ksz):
helper_test_op([shape],
lambda x: torch.nn.functional.avg_pool2d(x, kernel_size=ksz),
lambda x: Tensor.avg_pool2d(x, kernel_size=ksz), rtol=1e-5)
def test_upsample2d_nearest(self):
for sf in [1, 2, 3, 4, 5]:
with self.subTest(scale_factor=sf):
helper_test_op([(32,2,110,28)],
lambda x: torch.nn.functional.interpolate(x, scale_factor=sf, mode='nearest'),
lambda x: Tensor.upsample_nearest2d(x, scale_factor=sf), forward_only=True)
if __name__ == '__main__':
unittest.main(verbosity=2)
| [
"torch.reshape",
"torch.nn.LogSoftmax",
"torch.nn.functional.relu6",
"torch.nn.functional.avg_pool2d",
"torch.nn.functional.softplus",
"torch.nn.functional.interpolate",
"torch.sign",
"torch.manual_seed",
"torch.abs",
"torch.tensor",
"torch.nn.functional.hardswish",
"torch.nn.functional.pad",
"torch.log",
"torch.nn.functional.max_pool2d",
"torch.exp",
"torch.nn.functional.conv2d",
"torch.nn.functional.leaky_relu"
] | 1.6.0 | baheytharwat/tinygrad | acf652c3c524ee3214e9ce58d41113738cb833ae |
1.0 | #coding:utf-8
import torch
from learner_util import get_ner_BIO
class Metric(object):
def __call__(self,
predictions,
gold_labels,
mask=None):
"""
        Abstract call interface for a metric.
        :params predictions tensor of predicted results
        :params gold_labels tensor of ground-truth labels
        :mask mask
"""
raise NotImplementedError
def get_metric(self, reset=False):
"""
        Return the metric values.
"""
raise NotImplementedError
def reset(self):
"""
        Reset internal state.
"""
raise NotImplementedError
@staticmethod
def unwrap_to_tensors(*tensors):
"""
        Safely copy tensors to CPU before further processing, to avoid GPU OOM.
"""
return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)
@classmethod
def from_option(cls, conf):
return cls(**conf)
class F1Measure(Metric):
def __init__(self, positive_label):
"""
        Precision / recall / F1 evaluation metric.
"""
super(F1Measure, self).__init__()
self._positive_label = positive_label
self._true_positives = 0.0
self._true_negatives = 0.0
self._false_positives = 0.0
self._false_negatives = 0.0
def __call__(self,
predictions,
gold_labels,
mask=None):
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
num_classes = predictions.size(-1)
if (gold_labels >= num_classes).any():
raise Exception("A gold label passed to F1Measure contains an id >= {}, "
"the number of classes.".format(num_classes))
if mask is None:
mask = torch.ones_like(gold_labels)
mask = mask.float()
gold_labels = gold_labels.float()
self.update(predictions, gold_labels, mask)
def update(self, predictions, gold_labels, mask):
positive_label_mask = gold_labels.eq(self._positive_label).float()
negative_label_mask = 1.0 - positive_label_mask
argmax_predictions = predictions.max(-1)[1].float().squeeze(-1)
# True Negatives: correct non-positive predictions.
correct_null_predictions = (argmax_predictions !=
self._positive_label).float() * negative_label_mask
self._true_negatives += (correct_null_predictions.float() * mask).sum()
# True Positives: correct positively labeled predictions.
correct_non_null_predictions = (argmax_predictions ==
self._positive_label).float() * positive_label_mask
self._true_positives += (correct_non_null_predictions * mask).sum()
# False Negatives: incorrect negatively labeled predictions.
incorrect_null_predictions = (argmax_predictions !=
self._positive_label).float() * positive_label_mask
self._false_negatives += (incorrect_null_predictions * mask).sum()
# False Positives: incorrect positively labeled predictions
incorrect_non_null_predictions = (argmax_predictions ==
self._positive_label).float() * negative_label_mask
self._false_positives += (incorrect_non_null_predictions * mask).sum()
def get_metric(self, reset=False):
"""
        Return precision, recall and F1.
"""
# print('TP',self._true_positives,'TN',self._true_negatives,'FP',self._false_positives,'FN',self._false_negatives)
precision = float(self._true_positives) / float(self._true_positives + self._false_positives + 1e-13)
recall = float(self._true_positives) / float(self._true_positives + self._false_negatives + 1e-13)
f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
if reset:
self.reset()
return {"precision":precision, "recall": recall, "f1_measure":f1_measure}
def reset(self):
self._true_positives = 0.0
self._true_negatives = 0.0
self._false_positives = 0.0
self._false_negatives = 0.0
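# Minimal usage sketch for F1Measure (illustrative, not from the original source):
#   metric = F1Measure(positive_label=1)
#   metric(predictions, gold_labels, mask)   # accumulate batch counts
#   stats = metric.get_metric(reset=True)    # {"precision", "recall", "f1_measure"}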
class NerF1Measure(Metric):
def __init__(self, label_vocab):
self.golden_num = 0.0
self.predict_num = 0.0
self.right_num = 0.0
self.label_vocab = label_vocab
def reset(self):
"""
        Reset internal state.
"""
self.golden_num = 0.0
self.predict_num = 0.0
self.right_num = 0.0
def get_metric(self, reset=False):
"""
        Return the metric values.
"""
if self.predict_num == 0.0:
precision = -1
else:
precision = (self.right_num+0.0)/self.predict_num
if self.golden_num == 0.0:
recall = -1
else:
recall = (self.right_num+0.0)/self.golden_num
if (precision == -1) or (recall == -1) or (precision+recall) <= 0.:
f_measure = -1
else:
f_measure = 2*precision*recall/(precision+recall)
if reset:
self.reset()
return {"precision":precision, "recall": recall, "f1_measure":f_measure}
def update(self, gold_matrix, pred_matrix):
right_ner = list(set(gold_matrix).intersection(set(pred_matrix)))
self.golden_num += len(gold_matrix)
self.predict_num += len(pred_matrix)
self.right_num += len(right_ner)
def __call__(self,
predictions,
gold_labels,
mask=None):
"""
        Accumulate span-level NER statistics for one batch.
        :params predictions tensor of predicted label ids
        :params gold_labels tensor of gold label ids
        :mask mask
"""
batch_size = gold_labels.size(0)
seq_len = gold_labels.size(1)
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels,
mask)
predictions = predictions.tolist()
gold_labels = gold_labels.tolist()
mask = mask.tolist()
for idx in range(batch_size):
pred = [self.label_vocab[predictions[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [self.label_vocab[gold_labels[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]
gold_matrix = get_ner_BIO(gold)
pred_matrix = get_ner_BIO(pred)
self.update(gold_matrix, pred_matrix)
| [
"torch.ones_like"
] | 1.0.0 | waterzxj/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 |
1.0 | #coding:utf-8
"""
Abstractions for Embedding classes
"""
import os
import sys
import torch
from torch import nn
import torch.nn.functional as F
from modules.module_util import init_tensor
from modules.base_type import InitType, FAN_MODE, ActivationType
class BaseEmbedding(nn.Module):
"""
    Base class for embedding modules
    :params dim int, dimensionality of the embedding
    :params vocab_size int
    :params device string or [string1, string2], compute backend, defaults to cpu
    :params init_type string, weight initialization scheme, defaults to uniform initialization
    :params dropout float
"""
def __init__(self, dim, vocab_size,
device=None, dropout=0.0):
super(BaseEmbedding, self).__init__()
self.dim = dim
self.vocab_size = vocab_size
self.device = device
self.dropout = nn.Dropout(p=dropout)
@classmethod
def from_dict(cls, params):
return cls(**params)
def forward(self, input):
raise Exception("BaseEmbedding forward method not implemented!")
class TokenEmbedding(BaseEmbedding):
def __init__(self, dim, vocab_size, device=None,
dropout=0.0,
init_type=InitType.XAVIER_NORMAL,
low=0, high=1, mean=0, std=1,
activation_type=ActivationType.NONE,
fan_mode=FAN_MODE.FAN_IN, negative_slope=0
):
"""
        Standard token embedding layer
        :params dim int, dimensionality of the embedding
        :params vocab_size int
        :params device string or [string1, string2], compute backend, defaults to cpu
        :params init_type string, weight initialization scheme (xavier normal by default)
        :params dropout float
"""
super(TokenEmbedding, self).__init__(dim, vocab_size, device,
dropout)
self.embeddings = nn.Embedding(vocab_size, dim)
embedding_lookup_table = init_tensor(tensor=torch.empty(vocab_size, dim),
init_type=init_type, low=low, high=high, mean=mean, std=std,
activation_type=activation_type, fan_mode=fan_mode,
negative_slope=negative_slope)
self.embeddings.weight.data.copy_(embedding_lookup_table)
def forward(self, input):
embedding = self.embeddings(input)
return self.dropout(embedding)
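    # Usage sketch (illustrative): emb = TokenEmbedding(dim=128, vocab_size=10000);
    # token ids of shape (batch, seq_len) map to embeddings of shape (batch, seq_len, 128).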
    def from_pretrained(self, vectors, vocab_map=None):
        """
        Copy pre-trained word vectors loaded by the dataloader for each field
        :params vectors Vector type
        """
        if isinstance(vectors, str):
            raise Exception("Load embedding from path not implemented!")
        self.embeddings.weight.data.copy_(vectors)
| [
"torch.nn.Dropout",
"torch.empty",
"torch.nn.Embedding"
] | 1.0.0 | waterzxj/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 |
1.0 | #coding:utf-8
import os
import json
import torch
from torch import nn
import torch.nn.functional as F
from models.model_util import Config
from models.dpcnn import DpCnn
from models.fasttext import FastText
from models.leam import LEAM
from models.self_attention import SelfAttention
from models.textcnn import TextCnn
class Predictor(nn.Module):
def __init__(self, model_save_path, device=None, model_type=None):
super(Predictor, self).__init__()
model_conf = os.path.join(model_save_path, "conf.json")
vocab_path = os.path.join(model_save_path, "vocab.txt")
target_path = os.path.join(model_save_path, "target.txt")
self.model_type = model_type
self.model = self.model_loader(Config.from_json_file(model_conf))
self.model.load_state_dict(torch.load(os.path.join(model_save_path, "best.th")))
self.model.eval()
self.device = device
if self.device is not None:
self.model.to(device)
self.vocab = self.load_vocab(vocab_path)
self.target = self.load_vocab(target_path, reverse=True)
def model_loader(self, conf):
name = self.model_type.lower()
if name == "textcnn":
model = TextCnn(**conf.__dict__)
        elif name == "fasttext":
model = FastText(**conf.__dict__)
elif name == "dpcnn":
model = DpCnn(**conf.__dict__)
elif name == "leam":
model = LEAM(**conf.__dict__)
elif name == "self-attention":
model = SelfAttention(**conf.__dict__)
else:
raise Exception("name:%s model not implemented!" % (name))
return model
def predict(self, input, **kwargs):
input = input.split()
input_ids = [self.vocab.get(item, 0) for item in input]
input_ids = torch.LongTensor(input_ids)
if self.device is not None:
input_ids = input_ids.to(self.device)
mask = (input_ids != 1).long()
res = self.model.predict(input_ids, mask)
res = res.detach().cpu().tolist()[0]
return res
def load_vocab(self, path, reverse=False):
res = {}
tmp = json.load(open(path))
for index, word in enumerate(tmp):
if reverse:
res[index] = word
else:
res[word] = index
return res
| [
"torch.LongTensor"
] | 1.0.0 | waterzxj/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 |
1.3 | from functools import partial
from typing import List, Union, Callable
import torch
from pytorch_toolbelt.modules import ABN, ACT_RELU, ACT_SWISH
from pytorch_toolbelt.modules import encoders as E
from pytorch_toolbelt.modules.decoders import DecoderModule
from pytorch_toolbelt.modules.encoders import EncoderModule
from torch import nn
from torch.nn import functional as F
from .common import disaster_type_classifier, damage_types_classifier
from ..dataset import OUTPUT_MASK_KEY, DISASTER_TYPE_KEY, DISASTER_TYPES, DAMAGE_TYPE_KEY, DAMAGE_TYPES
__all__ = ["UnetV2SegmentationModel"]
class ConvBottleneck(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.seq = nn.Sequential(nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.ReLU(inplace=True))
def forward(self, dec, enc):
x = torch.cat([dec, enc], dim=1)
return self.seq(x)
class UnetDecoderBlock(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels):
super().__init__()
self.layer = nn.Sequential(
nn.Upsample(scale_factor=2), nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.ReLU(inplace=True)
)
def forward(self, x):
return self.layer(x)
class UNetDecoderV2(DecoderModule):
def __init__(
self,
feature_maps: List[int],
decoder_features: List[int],
mask_channels: int,
last_upsample_filters=None,
dropout=0.0,
abn_block=ABN,
):
super().__init__()
if not isinstance(decoder_features, list):
decoder_features = [decoder_features * (2 ** i) for i in range(len(feature_maps))]
if last_upsample_filters is None:
last_upsample_filters = decoder_features[0]
self.encoder_features = feature_maps
self.decoder_features = decoder_features
self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_features))])
self.bottlenecks = nn.ModuleList(
[
ConvBottleneck(self.encoder_features[-i - 2] + f, f)
for i, f in enumerate(reversed(self.decoder_features[:]))
]
)
self.output_filters = decoder_features
self.last_upsample = UnetDecoderBlock(decoder_features[0], last_upsample_filters, last_upsample_filters)
self.final = nn.Conv2d(last_upsample_filters, mask_channels, kernel_size=1)
def get_decoder(self, layer):
in_channels = (
self.encoder_features[layer + 1]
if layer + 1 == len(self.decoder_features)
else self.decoder_features[layer + 1]
)
return UnetDecoderBlock(in_channels, self.decoder_features[layer], self.decoder_features[max(layer, 0)])
def forward(self, feature_maps):
last_dec_out = feature_maps[-1]
x = last_dec_out
for idx, bottleneck in enumerate(self.bottlenecks):
rev_idx = -(idx + 1)
decoder = self.decoder_stages[rev_idx]
x = decoder(x)
x = bottleneck(x, feature_maps[rev_idx - 1])
x = self.last_upsample(x)
f = self.final(x)
return f
class UnetV2SegmentationModel(nn.Module):
def __init__(
self,
encoder: EncoderModule,
num_classes: int,
disaster_type_classes: int,
damage_type_classes: int,
unet_channels: List[int],
dropout=0.25,
abn_block: Union[ABN, Callable[[int], nn.Module]] = ABN,
full_size_mask=True,
):
super().__init__()
self.encoder = encoder
feature_maps = [2 * fm for fm in encoder.output_filters]
self.decoder = UNetDecoderV2(
feature_maps=feature_maps,
decoder_features=unet_channels,
mask_channels=num_classes,
dropout=dropout,
abn_block=abn_block,
)
self.full_size_mask = full_size_mask
if disaster_type_classes is not None:
self.disaster_type_classifier = disaster_type_classifier(
feature_maps[-1], disaster_type_classes, dropout=dropout
)
else:
self.disaster_type_classifier = None
if damage_type_classes is not None:
self.damage_types_classifier = damage_types_classifier(
feature_maps[-1], damage_type_classes, dropout=dropout
)
else:
self.damage_types_classifier = None
def forward(self, x):
batch_size = x.size(0)
pre, post = x[:, 0:3, ...], x[:, 3:6, ...]
if self.training:
x = torch.cat([pre, post], dim=0)
features = self.encoder(x)
features = [torch.cat([f[0:batch_size], f[batch_size : batch_size * 2]], dim=1) for f in features]
else:
pre_features, post_features = self.encoder(pre), self.encoder(post)
features = [torch.cat([pre, post], dim=1) for pre, post in zip(pre_features, post_features)]
# Decode mask
mask = self.decoder(features)
if self.full_size_mask:
mask = F.interpolate(mask, size=x.size()[2:], mode="bilinear", align_corners=False)
output = {OUTPUT_MASK_KEY: mask}
if self.disaster_type_classifier is not None:
disaster_type = self.disaster_type_classifier(features[-1])
output[DISASTER_TYPE_KEY] = disaster_type
if self.damage_types_classifier is not None:
damage_types = self.damage_types_classifier(features[-1])
output[DAMAGE_TYPE_KEY] = damage_types
return output
def efficientb3_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.EfficientNetB3Encoder(pretrained=pretrained,
layers=[0, 1, 2, 4, 6],
abn_params={"activation": ACT_RELU})
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[64, 128, 256, 256],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
def densenet121_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.DenseNet121Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[64, 128, 256, 256],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
def densenet169_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.DenseNet169Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[128, 128, 256, 256],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
def resnet18_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.Resnet18Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[64, 128, 256, 256],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
def resnet34_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.Resnet34Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[64, 128, 256, 256],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
def resnet50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.Resnet50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[96, 128, 256, 256],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
def resnet101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.Resnet101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[64, 128, 256, 384],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
def seresnext50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.SEResNeXt50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[64, 128, 256, 256],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
def seresnext101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
encoder = E.SEResNeXt101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
return UnetV2SegmentationModel(
encoder,
num_classes=num_classes,
disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
unet_channels=[128, 128, 256, 384],
dropout=dropout,
abn_block=partial(ABN, activation=ACT_RELU),
)
| [
"torch.nn.ReLU",
"torch.cat",
"torch.nn.Upsample",
"torch.nn.Conv2d"
] | 1.3 | mayankj/xView2-Solution | 804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e |
1.3 | from __future__ import absolute_import
import argparse
import collections
import gc
import json
import os
from datetime import datetime
import torch
from catalyst.dl import SupervisedRunner, OptimizerCallback, SchedulerCallback
from catalyst.dl.callbacks import CriterionAggregatorCallback, AccuracyCallback
from catalyst.utils import load_checkpoint, unpack_checkpoint
from pytorch_toolbelt.optimization.functional import get_lr_decay_parameters
from pytorch_toolbelt.utils import fs, torch_utils
from pytorch_toolbelt.utils.catalyst import ShowPolarBatchesCallback, ConfusionMatrixCallback
from pytorch_toolbelt.utils.random import set_manual_seed
from pytorch_toolbelt.utils.torch_utils import count_parameters, transfer_weights, get_optimizable_parameters
from torch import nn
from torch.optim.lr_scheduler import CyclicLR
from torch.utils.data import DataLoader
from xview.dataset import (
INPUT_IMAGE_KEY,
OUTPUT_MASK_KEY,
INPUT_MASK_KEY,
get_datasets,
OUTPUT_MASK_4_KEY,
UNLABELED_SAMPLE,
get_pseudolabeling_dataset,
DISASTER_TYPE_KEY,
UNKNOWN_DISASTER_TYPE_CLASS,
DISASTER_TYPES,
OUTPUT_EMBEDDING_KEY,
DAMAGE_TYPE_KEY,
OUTPUT_MASK_8_KEY, OUTPUT_MASK_16_KEY, OUTPUT_MASK_32_KEY)
from xview.metric import CompetitionMetricCallback
from xview.models import get_model
from xview.optim import get_optimizer
from xview.pseudo import CEOnlinePseudolabelingCallback2d
from xview.scheduler import get_scheduler
from xview.train_utils import clean_checkpoint, report_checkpoint, get_criterion_callback
from xview.visualization import draw_predictions
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-acc", "--accumulation-steps", type=int, default=1, help="Number of batches to process")
parser.add_argument("--seed", type=int, default=42, help="Random seed")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("--fast", action="store_true")
parser.add_argument(
"-dd", "--data-dir", type=str, required=True, help="Data directory for INRIA sattelite dataset"
)
parser.add_argument("-m", "--model", type=str, default="resnet34_fpncat128", help="")
parser.add_argument("-b", "--batch-size", type=int, default=8, help="Batch Size during training, e.g. -b 64")
parser.add_argument("-e", "--epochs", type=int, default=100, help="Epoch to run")
# parser.add_argument('-es', '--early-stopping', type=int, default=None, help='Maximum number of epochs without improvement')
# parser.add_argument('-fe', '--freeze-encoder', type=int, default=0, help='Freeze encoder parameters for N epochs')
# parser.add_argument('-ft', '--fine-tune', action='store_true')
parser.add_argument("-lr", "--learning-rate", type=float, default=1e-3, help="Initial learning rate")
parser.add_argument(
"--disaster-type-loss",
type=str,
default=None, # [["ce", 1.0]],
action="append",
nargs="+",
help="Criterion for classifying disaster type",
)
parser.add_argument(
"--damage-type-loss",
type=str,
default=None, # [["bce", 1.0]],
action="append",
nargs="+",
help="Criterion for classifying presence of building with particular damage type",
)
parser.add_argument("-l", "--criterion", type=str, default=None, action="append", nargs="+", help="Criterion")
parser.add_argument("--mask4", type=str, default=None, action="append", nargs="+", help="Criterion for mask with stride 4")
parser.add_argument("--mask8", type=str, default=None, action="append", nargs="+", help="Criterion for mask with stride 8")
parser.add_argument("--mask16", type=str, default=None, action="append", nargs="+", help="Criterion for mask with stride 16")
parser.add_argument("--mask32", type=str, default=None, action="append", nargs="+", help="Criterion for mask with stride 32")
parser.add_argument("--embedding", type=str, default=None)
parser.add_argument("-o", "--optimizer", default="RAdam", help="Name of the optimizer")
parser.add_argument(
"-c", "--checkpoint", type=str, default=None, help="Checkpoint filename to use as initial model weights"
)
parser.add_argument("-w", "--workers", default=8, type=int, help="Num workers")
parser.add_argument("-a", "--augmentations", default="safe", type=str, help="Level of image augmentations")
parser.add_argument("--transfer", default=None, type=str, help="")
parser.add_argument("--fp16", action="store_true")
parser.add_argument("--size", default=512, type=int)
parser.add_argument("--fold", default=0, type=int)
parser.add_argument("-s", "--scheduler", default="multistep", type=str, help="")
parser.add_argument("-x", "--experiment", default=None, type=str, help="")
parser.add_argument("-d", "--dropout", default=0.0, type=float, help="Dropout before head layer")
parser.add_argument("--opl", action="store_true")
parser.add_argument(
"--warmup", default=0, type=int, help="Number of warmup epochs with reduced LR on encoder parameters"
)
parser.add_argument("-wd", "--weight-decay", default=0, type=float, help="L2 weight decay")
parser.add_argument("--show", action="store_true")
parser.add_argument("--dsv", action="store_true")
parser.add_argument("--balance", action="store_true")
parser.add_argument("--only-buildings", action="store_true")
parser.add_argument("--freeze-bn", action="store_true")
parser.add_argument("--crops", action="store_true", help="Train on random crops")
parser.add_argument("--post-transform", action="store_true")
args = parser.parse_args()
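    # Editor's note: the invocation below is an added, illustrative example and is not taken
    # from the original repository; the script name, data path, and criterion name are
    # hypothetical, while the flags and the default model name come from the parser above.
    #   python train.py -dd /data/xview2 -m resnet34_fpncat128 -b 16 -e 50 \
    #       -lr 1e-3 --criterion ce 1.0 --fold 0 --fp16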
set_manual_seed(args.seed)
data_dir = args.data_dir
num_workers = args.workers
num_epochs = args.epochs
learning_rate = args.learning_rate
model_name = args.model
optimizer_name = args.optimizer
image_size = args.size, args.size
fast = args.fast
augmentations = args.augmentations
fp16 = args.fp16
scheduler_name = args.scheduler
experiment = args.experiment
dropout = args.dropout
online_pseudolabeling = args.opl
segmentation_losses = args.criterion
verbose = args.verbose
warmup = args.warmup
show = args.show
accumulation_steps = args.accumulation_steps
weight_decay = args.weight_decay
fold = args.fold
balance = args.balance
only_buildings = args.only_buildings
freeze_bn = args.freeze_bn
train_on_crops = args.crops
enable_post_image_transform = args.post_transform
disaster_type_loss = args.disaster_type_loss
train_batch_size = args.batch_size
embedding_criterion = args.embedding
damage_type_loss = args.damage_type_loss
    # Compute batch size for validation
if train_on_crops:
valid_batch_size = max(1, (train_batch_size * (image_size[0] * image_size[1])) // (1024 ** 2))
else:
valid_batch_size = train_batch_size
run_train = num_epochs > 0
model: nn.Module = get_model(model_name, dropout=dropout).cuda()
if args.transfer:
transfer_checkpoint = fs.auto_file(args.transfer)
print("Transfering weights from model checkpoint", transfer_checkpoint)
checkpoint = load_checkpoint(transfer_checkpoint)
pretrained_dict = checkpoint["model_state_dict"]
transfer_weights(model, pretrained_dict)
if args.checkpoint:
checkpoint = load_checkpoint(fs.auto_file(args.checkpoint))
unpack_checkpoint(checkpoint, model=model)
print("Loaded model weights from:", args.checkpoint)
report_checkpoint(checkpoint)
if freeze_bn:
torch_utils.freeze_bn(model)
print("Freezing bn params")
runner = SupervisedRunner(input_key=INPUT_IMAGE_KEY, output_key=None)
main_metric = "weighted_f1"
cmd_args = vars(args)
current_time = datetime.now().strftime("%b%d_%H_%M")
checkpoint_prefix = f"{current_time}_{args.model}_{args.size}_fold{fold}"
if fp16:
checkpoint_prefix += "_fp16"
if fast:
checkpoint_prefix += "_fast"
if online_pseudolabeling:
checkpoint_prefix += "_opl"
if train_on_crops:
checkpoint_prefix += "_crops"
if experiment is not None:
checkpoint_prefix = experiment
log_dir = os.path.join("runs", checkpoint_prefix)
os.makedirs(log_dir, exist_ok=False)
config_fname = os.path.join(log_dir, f"{checkpoint_prefix}.json")
with open(config_fname, "w") as f:
train_session_args = vars(args)
f.write(json.dumps(train_session_args, indent=2))
default_callbacks = [
CompetitionMetricCallback(input_key=INPUT_MASK_KEY, output_key=OUTPUT_MASK_KEY, prefix="weighted_f1"),
ConfusionMatrixCallback(
input_key=INPUT_MASK_KEY,
output_key=OUTPUT_MASK_KEY,
class_names=["land", "no_damage", "minor_damage", "major_damage", "destroyed"],
ignore_index=UNLABELED_SAMPLE,
),
]
if show:
default_callbacks += [
ShowPolarBatchesCallback(draw_predictions, metric=main_metric + "_batch", minimize=False)
]
train_ds, valid_ds, train_sampler = get_datasets(
data_dir=data_dir,
image_size=image_size,
augmentation=augmentations,
fast=fast,
fold=fold,
balance=balance,
only_buildings=only_buildings,
train_on_crops=train_on_crops,
enable_post_image_transform=enable_post_image_transform,
)
# Pretrain/warmup
if warmup:
callbacks = default_callbacks.copy()
criterions_dict = {}
losses = []
for criterion in segmentation_losses:
if isinstance(criterion, (list, tuple)):
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion, 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name, input_key=INPUT_MASK_KEY, output_key=OUTPUT_MASK_KEY, loss_weight=float(loss_weight)
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print("Using loss", loss_name, loss_weight)
if args.mask4 is not None:
for criterion in args.mask4:
if isinstance(criterion, (list, tuple)):
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion, 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name, input_key=INPUT_MASK_KEY, output_key=OUTPUT_MASK_4_KEY, loss_weight=float(loss_weight)
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print("Using loss", loss_name, loss_weight)
callbacks += [
CriterionAggregatorCallback(prefix="loss", loss_keys=losses),
OptimizerCallback(accumulation_steps=accumulation_steps, decouple_weight_decay=False),
]
parameters = get_lr_decay_parameters(model.named_parameters(), learning_rate, {"encoder": 0.1})
optimizer = get_optimizer("RAdam", parameters, learning_rate=learning_rate * 0.1)
loaders = collections.OrderedDict()
loaders["train"] = DataLoader(
train_ds,
batch_size=train_batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
shuffle=train_sampler is None,
sampler=train_sampler,
)
loaders["valid"] = DataLoader(valid_ds, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True)
runner.train(
fp16=fp16,
model=model,
criterion=criterions_dict,
optimizer=optimizer,
scheduler=None,
callbacks=callbacks,
loaders=loaders,
logdir=os.path.join(log_dir, "warmup"),
num_epochs=warmup,
verbose=verbose,
main_metric=main_metric,
minimize_metric=False,
checkpoint_data={"cmd_args": cmd_args},
)
del optimizer, loaders
best_checkpoint = os.path.join(log_dir, "warmup", "checkpoints", "best.pth")
model_checkpoint = os.path.join(log_dir, "warmup", "checkpoints", f"{checkpoint_prefix}_warmup.pth")
clean_checkpoint(best_checkpoint, model_checkpoint)
torch.cuda.empty_cache()
gc.collect()
if run_train:
loaders = collections.OrderedDict()
callbacks = default_callbacks.copy()
criterions_dict = {}
losses = []
if online_pseudolabeling:
unlabeled_label = get_pseudolabeling_dataset(
data_dir, include_masks=False, image_size=image_size, augmentation=None
)
unlabeled_train = get_pseudolabeling_dataset(
data_dir,
include_masks=True,
image_size=image_size,
augmentation=augmentations,
train_on_crops=train_on_crops,
enable_post_image_transform=enable_post_image_transform,
)
loaders["label"] = DataLoader(
unlabeled_label, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True
)
train_ds = train_ds + unlabeled_train
train_sampler = None
callbacks += [
CEOnlinePseudolabelingCallback2d(
unlabeled_train,
pseudolabel_loader="label",
prob_threshold=0.75,
output_key=OUTPUT_MASK_KEY,
unlabeled_class=UNLABELED_SAMPLE,
label_frequency=5,
)
]
print("Using online pseudolabeling with ", len(unlabeled_label), "samples")
loaders["train"] = DataLoader(
train_ds,
batch_size=train_batch_size,
num_workers=num_workers,
pin_memory=True,
drop_last=True,
shuffle=train_sampler is None,
sampler=train_sampler,
)
loaders["valid"] = DataLoader(valid_ds, batch_size=valid_batch_size, num_workers=num_workers, pin_memory=True)
# Create losses
for criterion in segmentation_losses:
if isinstance(criterion, (list, tuple)) and len(criterion) == 2:
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion[0], 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name,
prefix="segmentation",
input_key=INPUT_MASK_KEY,
output_key=OUTPUT_MASK_KEY,
loss_weight=float(loss_weight),
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print(INPUT_MASK_KEY, "Using loss", loss_name, loss_weight)
if args.mask4 is not None:
for criterion in args.mask4:
if isinstance(criterion, (list, tuple)):
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion, 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name,
prefix="mask4",
input_key=INPUT_MASK_KEY,
output_key=OUTPUT_MASK_4_KEY,
loss_weight=float(loss_weight),
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print(OUTPUT_MASK_4_KEY, "Using loss", loss_name, loss_weight)
if args.mask8 is not None:
for criterion in args.mask8:
if isinstance(criterion, (list, tuple)):
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion, 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name,
prefix="mask8",
input_key=INPUT_MASK_KEY,
output_key=OUTPUT_MASK_8_KEY,
loss_weight=float(loss_weight),
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print(OUTPUT_MASK_8_KEY, "Using loss", loss_name, loss_weight)
if args.mask16 is not None:
for criterion in args.mask16:
if isinstance(criterion, (list, tuple)):
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion, 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name,
prefix="mask16",
input_key=INPUT_MASK_KEY,
output_key=OUTPUT_MASK_16_KEY,
loss_weight=float(loss_weight),
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print(OUTPUT_MASK_16_KEY, "Using loss", loss_name, loss_weight)
if args.mask32 is not None:
for criterion in args.mask32:
if isinstance(criterion, (list, tuple)):
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion, 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name,
prefix="mask32",
input_key=INPUT_MASK_KEY,
output_key=OUTPUT_MASK_32_KEY,
loss_weight=float(loss_weight),
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print(OUTPUT_MASK_32_KEY, "Using loss", loss_name, loss_weight)
if disaster_type_loss is not None:
callbacks += [
ConfusionMatrixCallback(
input_key=DISASTER_TYPE_KEY,
output_key=DISASTER_TYPE_KEY,
class_names=DISASTER_TYPES,
ignore_index=UNKNOWN_DISASTER_TYPE_CLASS,
prefix=f"{DISASTER_TYPE_KEY}/confusion_matrix",
),
AccuracyCallback(
input_key=DISASTER_TYPE_KEY,
output_key=DISASTER_TYPE_KEY,
prefix=f"{DISASTER_TYPE_KEY}/accuracy",
activation="Softmax",
),
]
for criterion in disaster_type_loss:
if isinstance(criterion, (list, tuple)):
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion, 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name,
prefix=DISASTER_TYPE_KEY,
input_key=DISASTER_TYPE_KEY,
output_key=DISASTER_TYPE_KEY,
loss_weight=float(loss_weight),
ignore_index=UNKNOWN_DISASTER_TYPE_CLASS,
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print(DISASTER_TYPE_KEY, "Using loss", loss_name, loss_weight)
if damage_type_loss is not None:
callbacks += [
# MultilabelConfusionMatrixCallback(
# input_key=DAMAGE_TYPE_KEY,
# output_key=DAMAGE_TYPE_KEY,
# class_names=DAMAGE_TYPES,
# prefix=f"{DAMAGE_TYPE_KEY}/confusion_matrix",
# ),
AccuracyCallback(
input_key=DAMAGE_TYPE_KEY,
output_key=DAMAGE_TYPE_KEY,
prefix=f"{DAMAGE_TYPE_KEY}/accuracy",
activation="Sigmoid",
threshold=0.5,
)
]
for criterion in damage_type_loss:
if isinstance(criterion, (list, tuple)):
loss_name, loss_weight = criterion
else:
loss_name, loss_weight = criterion, 1.0
cd, criterion, criterion_name = get_criterion_callback(
loss_name,
prefix=DAMAGE_TYPE_KEY,
input_key=DAMAGE_TYPE_KEY,
output_key=DAMAGE_TYPE_KEY,
loss_weight=float(loss_weight),
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print(DAMAGE_TYPE_KEY, "Using loss", loss_name, loss_weight)
if embedding_criterion is not None:
cd, criterion, criterion_name = get_criterion_callback(
embedding_criterion,
prefix="embedding",
input_key=INPUT_MASK_KEY,
output_key=OUTPUT_EMBEDDING_KEY,
loss_weight=1.0,
)
criterions_dict.update(cd)
callbacks.append(criterion)
losses.append(criterion_name)
print(OUTPUT_EMBEDDING_KEY, "Using loss", embedding_criterion)
callbacks += [
CriterionAggregatorCallback(prefix="loss", loss_keys=losses),
OptimizerCallback(accumulation_steps=accumulation_steps, decouple_weight_decay=False),
]
optimizer = get_optimizer(
optimizer_name, get_optimizable_parameters(model), learning_rate, weight_decay=weight_decay
)
scheduler = get_scheduler(
scheduler_name, optimizer, lr=learning_rate, num_epochs=num_epochs, batches_in_epoch=len(loaders["train"])
)
if isinstance(scheduler, CyclicLR):
callbacks += [SchedulerCallback(mode="batch")]
print("Train session :", checkpoint_prefix)
print(" FP16 mode :", fp16)
print(" Fast mode :", args.fast)
print(" Epochs :", num_epochs)
print(" Workers :", num_workers)
print(" Data dir :", data_dir)
print(" Log dir :", log_dir)
print("Data ")
print(" Augmentations :", augmentations)
print(" Train size :", len(loaders["train"]), len(train_ds))
print(" Valid size :", len(loaders["valid"]), len(valid_ds))
print(" Image size :", image_size)
print(" Train on crops :", train_on_crops)
print(" Balance :", balance)
print(" Buildings only :", only_buildings)
print(" Post transform :", enable_post_image_transform)
print("Model :", model_name)
print(" Parameters :", count_parameters(model))
print(" Dropout :", dropout)
print("Optimizer :", optimizer_name)
print(" Learning rate :", learning_rate)
print(" Weight decay :", weight_decay)
print(" Scheduler :", scheduler_name)
print(" Batch sizes :", train_batch_size, valid_batch_size)
print(" Criterion :", segmentation_losses)
print(" Damage type :", damage_type_loss)
print(" Disaster type :", disaster_type_loss)
print(" Embedding :", embedding_criterion)
# model training
runner.train(
fp16=fp16,
model=model,
criterion=criterions_dict,
optimizer=optimizer,
scheduler=scheduler,
callbacks=callbacks,
loaders=loaders,
logdir=os.path.join(log_dir, "main"),
num_epochs=num_epochs,
verbose=verbose,
main_metric=main_metric,
minimize_metric=False,
checkpoint_data={"cmd_args": vars(args)},
)
# Training is finished. Let's run predictions using best checkpoint weights
best_checkpoint = os.path.join(log_dir, "main", "checkpoints", "best.pth")
model_checkpoint = os.path.join(log_dir, "main", "checkpoints", f"{checkpoint_prefix}.pth")
clean_checkpoint(best_checkpoint, model_checkpoint)
del optimizer, loaders
if __name__ == "__main__":
main()
| [
"torch.cuda.empty_cache",
"torch.utils.data.DataLoader"
] | 1.3 | mayankj/xView2-Solution | 804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e |
1.10 | import torch
from torch import cat
from torch.nn import Conv2d
from torch.nn import Linear
from torch.nn import Module
from torch.nn import ConvTranspose2d
from torch.nn import LeakyReLU
from torch.nn import Tanh
from torch.nn import MaxPool2d
from torch import zeros_like
class ConvMPN(Module):
def __init__(self):
super().__init__()
self.conv1 = Conv2d(in_channels=3*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv2 = Conv2d(in_channels=2*16, out_channels=2*16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.conv3 = Conv2d(in_channels=2*16, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=1)
self.leaky_relu = LeakyReLU(0.1)
def get_nodes(self, feature_vectors, edges, include_neighbours=True):
device = feature_vectors.device
nodes = zeros_like(feature_vectors, device=device)
if include_neighbours:
index = torch.where(edges[:, 1] > 0)
else:
index = torch.where(edges[:, 1] < 0)
src = torch.cat([edges[index[0], 0], edges[index[0], 2]]).long()
dst = torch.cat([edges[index[0], 2], edges[index[0], 0]]).long()
src = feature_vectors[src.contiguous()]
dst = dst.view(-1, 1, 1, 1).expand_as(src).to(device)
return nodes.scatter_add(0, dst, src)
def cat_nodes(self, feature_vectors, edges):
neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=True, )
non_neighbouring_nodes = self.get_nodes(feature_vectors, edges, include_neighbours=False)
encoding = torch.cat([feature_vectors, neighbouring_nodes, non_neighbouring_nodes], 1)
return encoding
def forward(self, x, edges):
x = self.cat_nodes(x, edges)
x = self.conv1(x)
x = self.leaky_relu(x)
x = self.conv2(x)
x = self.leaky_relu(x)
x = self.conv3(x)
x = self.leaky_relu(x)
return x
class Generator(Module):
def __init__(self):
super().__init__()
self.linear_reshape_1 = Linear(138, 1024)
self.conv_mpn_1 = ConvMPN()
self.upsample_1 = ConvTranspose2d(16, 16, 4, 2, 1)
self.conv_mpn_2 = ConvMPN()
self.upsample_2 = ConvTranspose2d(16, 16, 4, 2, 1)
self.conv_1 = Conv2d(16, 256, 3, 1, 1)
self.leaky_relu = LeakyReLU(0.1)
self.conv_2 = Conv2d(256, 128, 3, 1, 1)
self.conv_3 = Conv2d(128, 1, 3, 1, 1)
self.tanh = Tanh()
def forward(self, z, t, edges):
z = z.view(-1, 128)#
t = t.view(-1, 10) #
x = cat([z, t], 1)
x = self.linear_reshape_1(x)
x = x.view(-1, 16, 8, 8)
x = self.conv_mpn_1(x, edges).view(-1, *x.shape[1:])
x = self.upsample_1(x)
x = self.leaky_relu(x)
x = self.conv_mpn_2(x, edges).view(-1, *x.shape[1:])
x = self.upsample_2(x)
x = self.leaky_relu(x)
x = self.conv_1(x.view(-1, x.shape[1], *x.shape[2:]))
x = self.leaky_relu(x)
x = self.conv_2(x)
x = self.leaky_relu(x)
x = self.conv_3(x)
x = self.tanh(x)
x = x.view(-1, *x.shape[2:])
return x
class Discriminator(Module):
def __init__(self):
super().__init__()
self.linear_reshape_1 = Linear(10, 8192)
self.leaky_relu = LeakyReLU(0.1)
self.conv_1 = Conv2d(9, 16, 3, 1, 1, bias=True)
self.conv_2 = Conv2d(16, 16, 3, 1, 1)
self.conv_3 = Conv2d(16, 16, 3, 1, 1)
self.conv_mpn_1 = ConvMPN()
self.downsample_1 = Conv2d(16, 16, 3, 2, 1)
self.conv_mpn_2 = ConvMPN()
self.downsample_2 = Conv2d(16, 16, 3, 2, 1)
self.dec_conv_1 = Conv2d(16, 256, 3, 2, 1)
self.dec_conv_2 = Conv2d(256, 128, 3, 2, 1)
self.dec_conv_3 = Conv2d(128, 128, 3, 2, 1)
self.pool_reshape_linear = Linear(128, 1)
def add_pool(self, x, nd_to_sample):
dtype, device = x.dtype, x.device
batch_size = torch.max(nd_to_sample) + 1
pooled_x = torch.zeros(batch_size, x.shape[-1], device=device).float()
pool_to = nd_to_sample.view(-1, 1).expand_as(x).to(device)
pooled_x = pooled_x.scatter_add(0, pool_to, x)
return pooled_x
def forward(self, x, t, edges, nd_to_sample):
x = x.view(-1, 1, 32, 32)
t = self.linear_reshape_1(t)
t = t.view(-1, 8, 32, 32)
x = cat([x, t], 1)
x = self.conv_1(x)
x = self.leaky_relu(x)
x = self.conv_2(x)
x = self.leaky_relu(x)
x = self.conv_3(x)
x = self.leaky_relu(x)
x = self.conv_mpn_1(x, edges)
x = self.downsample_1(x)
x = self.leaky_relu(x)
x = self.conv_mpn_2(x, edges)
x = self.downsample_2(x)
x = self.leaky_relu(x)
x = self.dec_conv_1(x)
x = self.leaky_relu(x)
x = self.dec_conv_2(x)
x = self.leaky_relu(x)
x = self.dec_conv_3(x)
x = self.leaky_relu(x)
x = x.view(-1, x.shape[1])
x = self.add_pool(x, nd_to_sample)
x = self.pool_reshape_linear(x)
return x
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.max",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.where"
] | 1.10.0 | athatheo/House-GANs-Reproduction | 00cc807f1e74f88eef5ed81615bfd87a39c52f94 |
1.5 | # ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
import copy
from typing import Optional, List
import math
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from util.misc import inverse_sigmoid
from models.row_column_decoupled_attention import MultiheadRCDA
class Transformer(nn.Module):
def __init__(self, d_model=256, nhead=8,
num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.,
activation="relu", num_feature_levels=3,num_query_position = 300,num_query_pattern=3,
spatial_prior="learned",attention_type="RCDA"):
super().__init__()
self.d_model = d_model
self.nhead = nhead
self.attention_type = attention_type
encoder_layer = TransformerEncoderLayerSpatial(d_model, dim_feedforward,
dropout, activation, nhead , attention_type)
encoder_layer_level = TransformerEncoderLayerLevel(d_model, dim_feedforward,
dropout, activation, nhead)
decoder_layer = TransformerDecoderLayer(d_model, dim_feedforward,
dropout, activation, nhead,
num_feature_levels, attention_type)
if num_feature_levels == 1:
self.num_encoder_layers_level = 0
else:
self.num_encoder_layers_level = num_encoder_layers // 2
self.num_encoder_layers_spatial = num_encoder_layers - self.num_encoder_layers_level
self.encoder_layers = _get_clones(encoder_layer, self.num_encoder_layers_spatial)
self.encoder_layers_level = _get_clones(encoder_layer_level, self.num_encoder_layers_level)
self.decoder_layers = _get_clones(decoder_layer, num_decoder_layers)
self.spatial_prior=spatial_prior
if num_feature_levels>1:
self.level_embed = nn.Embedding(num_feature_levels, d_model)
self.num_pattern = num_query_pattern
self.pattern = nn.Embedding(self.num_pattern, d_model)
self.num_position = num_query_position
if self.spatial_prior == "learned":
self.position = nn.Embedding(self.num_position, 2)
self.adapt_pos2d = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model),
)
self.adapt_pos1d = nn.Sequential(
nn.Linear(d_model, d_model),
nn.ReLU(),
nn.Linear(d_model, d_model),
)
self.num_layers = num_decoder_layers
num_classes = 91
self.class_embed = nn.Linear(d_model, num_classes)
self.bbox_embed = MLP(d_model, d_model, 4, 3)
self._reset_parameters()
def _reset_parameters(self):
num_pred = self.num_layers
num_classes = 91
prior_prob = 0.01
bias_value = -math.log((1 - prior_prob) / prior_prob)
self.class_embed.bias.data = torch.ones(num_classes) * bias_value
nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
if self.spatial_prior == "learned":
nn.init.uniform_(self.position.weight.data, 0, 1)
nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
def forward(self, srcs, masks):
# prepare input for decoder
bs, l, c, h, w = srcs.shape
if self.spatial_prior == "learned":
reference_points = self.position.weight.unsqueeze(0).repeat(bs, self.num_pattern, 1)
elif self.spatial_prior == "grid":
nx=ny=round(math.sqrt(self.num_position))
self.num_position=nx*ny
x = (torch.arange(nx) + 0.5) / nx
y = (torch.arange(ny) + 0.5) / ny
xy=torch.meshgrid(x,y)
reference_points=torch.cat([xy[0].reshape(-1)[...,None],xy[1].reshape(-1)[...,None]],-1).cuda()
reference_points = reference_points.unsqueeze(0).repeat(bs, self.num_pattern, 1)
else:
raise ValueError(f'unknown {self.spatial_prior} spatial prior')
tgt = self.pattern.weight.reshape(1, self.num_pattern, 1, c).repeat(bs, 1, self.num_position, 1).reshape(
bs, self.num_pattern * self.num_position, c)
mask = masks[-1].unsqueeze(1).repeat(1,l,1,1).reshape(bs*l,h,w)
pos_col, pos_row = mask2pos(mask)
if self.attention_type=="RCDA":
posemb_row = self.adapt_pos1d(pos2posemb1d(pos_row))
posemb_col = self.adapt_pos1d(pos2posemb1d(pos_col))
posemb_2d = None
else:
pos_2d = torch.cat([pos_row.unsqueeze(1).repeat(1, h, 1).unsqueeze(-1), pos_col.unsqueeze(2).repeat(1, 1, w).unsqueeze(-1)],dim=-1)
posemb_2d = self.adapt_pos2d(pos2posemb2d(pos_2d))
posemb_row = posemb_col = None
outputs = srcs.reshape(bs * l, c, h, w)
for idx in range(len(self.encoder_layers)):
outputs = self.encoder_layers[idx](outputs, mask, posemb_row, posemb_col,posemb_2d)
if idx < self.num_encoder_layers_level:
outputs = self.encoder_layers_level[idx](outputs, level_emb=self.level_embed.weight.unsqueeze(1).unsqueeze(0).repeat(bs,1,1,1).reshape(bs*l,1,c))
srcs = outputs.reshape(bs, l, c, h, w)
output = tgt
outputs_classes = []
outputs_coords = []
for lid, layer in enumerate(self.decoder_layers):
output = layer(output, reference_points, srcs, mask, adapt_pos2d=self.adapt_pos2d,
adapt_pos1d=self.adapt_pos1d, posemb_row=posemb_row, posemb_col=posemb_col,posemb_2d=posemb_2d)
reference = inverse_sigmoid(reference_points)
outputs_class = self.class_embed[lid](output)
tmp = self.bbox_embed[lid](output)
if reference.shape[-1] == 4:
tmp += reference
else:
assert reference.shape[-1] == 2
tmp[..., :2] += reference
outputs_coord = tmp.sigmoid()
outputs_classes.append(outputs_class[None,])
outputs_coords.append(outputs_coord[None,])
output = torch.cat(outputs_classes, dim=0), torch.cat(outputs_coords, dim=0)
return output
class TransformerEncoderLayerSpatial(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0., activation="relu",
n_heads=8, attention_type="RCDA"):
super().__init__()
self.attention_type = attention_type
if attention_type=="RCDA":
attention_module=MultiheadRCDA
elif attention_type == "nn.MultiheadAttention":
attention_module=nn.MultiheadAttention
else:
raise ValueError(f'unknown {attention_type} attention_type')
# self attention
self.self_attn = attention_module(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, src, padding_mask=None, posemb_row=None, posemb_col=None,posemb_2d=None):
# self attention
bz, c, h, w = src.shape
src = src.permute(0, 2, 3, 1)
if self.attention_type=="RCDA":
posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)
posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)
src2 = self.self_attn((src + posemb_row).reshape(bz, h * w, c), (src + posemb_col).reshape(bz, h * w, c),
src + posemb_row, src + posemb_col,
src, key_padding_mask=padding_mask)[0].transpose(0, 1).reshape(bz, h, w, c)
else:
src2 = self.self_attn((src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),
(src + posemb_2d).reshape(bz, h * w, c).transpose(0, 1),
src.reshape(bz, h * w, c).transpose(0, 1))[0].transpose(0, 1).reshape(bz, h, w, c)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.ffn(src)
src = src.permute(0, 3, 1, 2)
return src
class TransformerEncoderLayerLevel(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0., activation="relu",
n_heads=8):
super().__init__()
# self attention
self.self_attn_level = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, src, level_emb=0):
# self attention
bz, c, h, w = src.shape
src = src.permute(0, 2, 3, 1)
src2 = self.self_attn_level(src.reshape(bz, h * w, c) + level_emb, src.reshape(bz, h * w, c) + level_emb,
src.reshape(bz, h * w, c))[0].reshape(bz, h, w, c)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.ffn(src)
src = src.permute(0, 3, 1, 2)
return src
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model=256, d_ffn=1024,
dropout=0., activation="relu", n_heads=8,
n_levels=3, attention_type="RCDA"):
super().__init__()
        self.attention_type = attention_type
if attention_type=="RCDA":
attention_module=MultiheadRCDA
elif attention_type == "nn.MultiheadAttention":
attention_module=nn.MultiheadAttention
else:
raise ValueError(f'unknown {attention_type} attention_type')
# cross attention
self.cross_attn = attention_module(d_model, n_heads, dropout=dropout)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# level combination
if n_levels>1:
self.level_fc = nn.Linear(d_model * n_levels, d_model)
# ffn
self.ffn = FFN(d_model, d_ffn, dropout, activation)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, tgt, reference_points, srcs, src_padding_masks=None, adapt_pos2d=None,
adapt_pos1d=None, posemb_row=None, posemb_col=None, posemb_2d=None):
tgt_len = tgt.shape[1]
query_pos = pos2posemb2d(reference_points.squeeze(2))
query_pos = adapt_pos2d(query_pos)
# self attention
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), tgt.transpose(0, 1))[0].transpose(0, 1)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
bz, l, c, h, w = srcs.shape
srcs = srcs.reshape(bz * l, c, h, w).permute(0, 2, 3, 1)
if self.attention_type == "RCDA":
query_pos_x = adapt_pos1d(pos2posemb1d(reference_points[..., 0]))
query_pos_y = adapt_pos1d(pos2posemb1d(reference_points[..., 1]))
posemb_row = posemb_row.unsqueeze(1).repeat(1, h, 1, 1)
posemb_col = posemb_col.unsqueeze(2).repeat(1, 1, w, 1)
src_row = src_col = srcs
k_row = src_row + posemb_row
k_col = src_col + posemb_col
tgt2 = self.cross_attn((tgt + query_pos_x).repeat(l, 1, 1), (tgt + query_pos_y).repeat(l, 1, 1), k_row, k_col,
srcs, key_padding_mask=src_padding_masks)[0].transpose(0, 1)
else:
tgt2 = self.cross_attn((tgt + query_pos).repeat(l, 1, 1).transpose(0, 1),
(srcs + posemb_2d).reshape(bz * l, h * w, c).transpose(0,1),
srcs.reshape(bz * l, h * w, c).transpose(0, 1))[0].transpose(0,1)
if l > 1:
tgt2 = self.level_fc(tgt2.reshape(bz, l, tgt_len, c).permute(0, 2, 3, 1).reshape(bz, tgt_len, c * l))
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# ffn
tgt = self.ffn(tgt)
return tgt
class FFN(nn.Module):
def __init__(self, d_model=256, d_ffn=1024, dropout=0., activation='relu'):
super().__init__()
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
class MLP(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
nhead=args.nheads,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
dim_feedforward=args.dim_feedforward,
dropout=args.dropout,
activation="relu",
num_feature_levels=args.num_feature_levels,
num_query_position=args.num_query_position,
num_query_pattern=args.num_query_pattern,
spatial_prior=args.spatial_prior,
attention_type=args.attention_type,
)
def pos2posemb2d(pos, num_pos_feats=128, temperature=10000):
scale = 2 * math.pi
pos = pos * scale
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
pos_x = pos[..., 0, None] / dim_t
pos_y = pos[..., 1, None] / dim_t
pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=-1).flatten(-2)
posemb = torch.cat((pos_y, pos_x), dim=-1)
return posemb
def pos2posemb1d(pos, num_pos_feats=256, temperature=10000):
scale = 2 * math.pi
pos = pos * scale
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos.device)
dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)
pos_x = pos[..., None] / dim_t
posemb = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=-1).flatten(-2)
return posemb
def mask2pos(mask):
not_mask = ~mask
y_embed = not_mask[:, :, 0].cumsum(1, dtype=torch.float32)
x_embed = not_mask[:, 0, :].cumsum(1, dtype=torch.float32)
y_embed = (y_embed - 0.5) / y_embed[:, -1:]
x_embed = (x_embed - 0.5) / x_embed[:, -1:]
return y_embed, x_embed
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.arange",
"torch.nn.init.constant_",
"torch.ones",
"torch.nn.MultiheadAttention",
"torch.nn.ReLU",
"torch.meshgrid",
"torch.nn.init.uniform_",
"torch.nn.Embedding"
] | 1.5.0 | Honghe/AnchorDETR | fc3d45441241cd689b28878d3aa4b0bffb33a8b8 |
1.3 | import logging
from pathlib import Path
from typing import Any, Optional, Tuple, Union
import gym
import torch
import pickle as pkl
from rltoolkit import config, utils
from rltoolkit.buffer import Memory
from rltoolkit.stats_logger import StatsLogger
from rltoolkit.tensorboard_logger import TensorboardWriter
logger = logging.getLogger(__name__)
class MetaLearner:
def __init__(
self,
env_name: str,
use_gpu: bool,
debug_mode: bool = config.DEBUG_MODE,
tensorboard_dir: Union[str, None] = config.TENSORBOARD_DIR,
tensorboard_comment: str = config.TENSORBOARD_COMMENT,
):
f"""Class with parameters common for RL and other interactions with environment
Args:
env_name (str): Name of the gym environment.
use_gpu (bool): Use CUDA.
debug_mode (bool, optional): Log additional info.
Defaults to { config.DEBUG_MODE }
tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.
Defaults to { config.TENSORBOARD_DIR }.
tensorboard_comment (str, optional): Comment for tensorboard files.
Defaults to { config.TENSORBOARD_COMMENT }.
"""
self.env_name = env_name
if use_gpu and torch.cuda.is_available():
self.device = torch.device("cuda")
else:
self.device = torch.device("cpu")
self.env = gym.make(self.env_name)
self.discrete = isinstance(self.env.action_space, gym.spaces.Discrete)
self.ob_dim = self.env.observation_space.shape[0]
if self.discrete:
self.ac_dim = self.env.action_space.n
self.ac_lim = None
else:
self.ac_dim = self.env.action_space.shape[0]
self.ac_lim = torch.tensor(self.env.action_space.high, device=self.device)
self.obs_mean = torch.zeros(self.ob_dim, device=self.device)
self.obs_std = torch.ones(self.ob_dim, device=self.device)
self.iteration = 0 # used in tensorboard
self.opt = torch.optim.Adam
self.loss = {}
self.debug_mode = debug_mode
self.tensorboard_writer = None
self.tensorboard_comment = (
"_" + tensorboard_comment if tensorboard_comment else ""
)
self.tensorboard_dir = tensorboard_dir
def run_tensorboard_if_needed(self):
if self.tensorboard_writer is None and (self.tensorboard_dir is not None):
self.tensorboard_writer = TensorboardWriter(
env_name=self.env_name,
log_dir=self.tensorboard_dir,
filename=self.filename,
render=self.render,
)
def log_obs_mean_std_tensorboard(self):
"""
Log mean and std of observations in the tensorboard.
"""
self.run_tensorboard_if_needed()
self.tensorboard_writer.log_obs_mean_std(
self.iteration, self.obs_mean, self.obs_std
)
def update_obs_mean_std(self, buffer: Memory) -> Memory:
"""
Update running average of mean and stds based on the buffer.
Args:
buffer (Memory)
Returns:
Memory
"""
buffer.update_obs_mean_std()
self.obs_mean = buffer.obs_mean
self.obs_std = buffer.obs_std
if self.debug_mode and self.tensorboard_dir is not None:
self.log_obs_mean_std_tensorboard()
return buffer
class RL(MetaLearner):
def __init__(
self,
env_name: str = config.ENV_NAME,
gamma: float = config.GAMMA,
stats_freq: int = config.STATS_FREQ,
test_episodes: int = config.TEST_EPISODES,
batch_size: int = config.BATCH_SIZE,
iterations: int = config.ITERATIONS,
max_frames: int = None,
return_done: Union[int, None] = config.RETURN_DONE,
log_dir: str = config.LOG_DIR,
use_gpu: bool = config.USE_GPU,
verbose: int = config.VERBOSE,
render: bool = config.RENDER,
*args,
**kwargs,
):
f"""Basic parent class for reinforcement learning algorithms.
Args:
env_name (str, optional): Name of the gym environment.
Defaults to { config.ENV_NAME }.
gamma (float, optional): Discount factor. Defaults to { config.GAMMA }.
stats_freq (int, optional): Frequency of logging the progress.
Defaults to { config.STATS_FREQ }.
batch_size (int, optional): Number of frames used for one algorithm step
(could be higher because batch collection stops when rollout ends).
Defaults to { config.BATCH_SIZE }.
iterations (int, optional): Number of algorithms iterations.
Defaults to { config.ITERATIONS }.
max_frames (int, optional): Limit of frames for training. Defaults to
{ None }.
return_done (Union[int, None], optional): target return, which will stop
training if reached. Defaults to { config.RETURN_DONE }.
log_dir (str, optional): Path for basic logs which includes final model.
Defaults to { config.LOG_DIR }.
use_gpu (bool, optional): Use CUDA. Defaults to { config.USE_GPU }.
verbose (int, optional): Verbose level. Defaults to { config.VERBOSE }.
render (bool, optional): Render rollouts to tensorboard.
Defaults to { config.RENDER }.
debug_mode (bool, optional): Log additional info.
Defaults to { config.DEBUG_MODE }
tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.
Defaults to { config.TENSORBOARD_DIR }.
tensorboard_comment (str, optional): Comment for tensorboard files.
Defaults to { config.TENSORBOARD_COMMENT }.
"""
super().__init__(env_name, use_gpu, *args, **kwargs)
        assert iterations > 0, f"Iterations must be positive, not {iterations}"
if max_frames is not None:
assert (
max_frames <= iterations * batch_size
), "max_frames should be smaller or equal than iterations * batch_size"
self.max_frames = max_frames
self.gamma = gamma
self.stats_freq = stats_freq
self.test_episodes = test_episodes
self.batch_size = batch_size
self.iterations = iterations
self.return_done = return_done
if log_dir is not None:
self.log_dir = Path(log_dir)
self.log_dir.mkdir(parents=True, exist_ok=True)
else:
self.log_dir = log_dir
self.verbose = verbose
self.render = render
self.max_ep_len = self.env._max_episode_steps
self.start_time = utils.get_time()
self.hparams = {
"hparams/gamma": self.gamma,
"hparams/batch_size": self.batch_size,
"hparams/type": utils.get_pretty_type_name(self),
}
self.shortnames = config.SHORTNAMES
self.stats_logger = StatsLogger()
def train(self, iterations=None):
f""" Train RL model
Args:
iterations ([type], optional): Number of additional training iterations.
If None performs number of iterations defined in self.iterations.
Otherwise increase global counter by this value to run additional steps.
Defaults to { None }.
"""
self.run_tensorboard_if_needed()
if iterations:
self.iterations += iterations
while self.iteration < self.iterations:
buffer, time_diff = self.perform_iteration()
self.stats_logger.time_list.append(time_diff)
running_return = self.stats_logger.calc_running_return(buffer)
if self.return_done is not None and running_return >= self.return_done:
break
if self.iteration % self.stats_freq == 0:
self.logs_after_iteration(buffer)
if self.log_dir is not None:
self.stats_logger.dump_stats(self.log_path)
self.iteration += 1 # used also for logs
if (
self.max_frames is not None
and self.max_frames < self.stats_logger.frames
):
logger.info(f"Reached max_frames at {self.iteration} iteration") # INFO
break
self.logs_after_iteration(buffer, done=True)
if self.log_dir is not None:
self.save()
def test(self, episodes=None):
f"""Test policy
Args:
episodes (int): Number of episodes. Defaults to { None }.
Returns:
float: mean episode reward
"""
mean_reward = None
return mean_reward
@utils.measure_time
def perform_iteration(self):
raise NotImplementedError
def save_model(self):
raise NotImplementedError
def check_path(self, path):
if self.filename is None and path is None:
raise AttributeError
elif path is None:
path = str(self.log_path) + ".pkl"
return path
def collect_params_dict(self):
params_dict = {}
params_dict["actor"] = self.actor.state_dict()
params_dict["critic"] = self.critic.state_dict()
params_dict["obs_mean"] = self.obs_mean
params_dict["obs_std"] = self.obs_std
return params_dict
def apply_params_dict(self, params_dict):
self.actor.load_state_dict(params_dict["actor"])
self.critic.load_state_dict(params_dict["critic"])
self.obs_mean = params_dict["obs_mean"]
self.obs_std = params_dict["obs_std"]
def save(self, path: str = None):
f"""Save RL object
Args:
path (str): Path to save
"""
path = self.check_path(path)
with open(path, "wb") as f:
params_dict = self.collect_params_dict()
pkl.dump(params_dict, f)
def load(self, path: str):
"""Load RL object
Args:
path (str): Path to saved RL object
"""
path = self.check_path(path)
with open(path, "rb") as f:
params_dict = pkl.load(f)
self.apply_params_dict(params_dict)
@property
def log_iteration(self):
return self.iteration // self.stats_freq
@property
def filename(self):
suffix = self.get_tensorboard_hparams_suffix()
suffix += self.tensorboard_comment
filename = self.start_time + suffix
return filename
@property
def log_path(self):
log_path = Path(self.log_dir)
log_path = log_path / self.filename
return log_path
def logs_after_iteration(self, buffer: Memory, done: bool = False):
f"""Logs writer
Args:
buffer (Memory): Buffer used for tensorboard
done (bool, optional): Finalize tensorboard logging due to last iteration.
Defaults to { False }.
"""
if self.test_episodes is not None:
self.stats_logger.test_return = self.test()
running_return = self.stats_logger.running_return
if self.verbose:
if done:
self.stats_logger.task_done(self.iteration)
else:
self.stats_logger.log_stats(self.iteration)
self.stats_logger.stats.append([self.iteration, running_return])
self.stats_logger.reset_time_list()
if self.tensorboard_writer is not None:
self.add_tensorboard_logs(buffer, done)
def add_tensorboard_logs(self, buffer: Memory, done: bool):
self.tensorboard_writer.log_running_return(
self.iteration,
self.stats_logger.frames,
self.stats_logger.rollouts,
self.stats_logger.running_return,
)
if self.test_episodes:
self.tensorboard_writer.log_test_return(
self.iteration,
self.stats_logger.frames,
self.stats_logger.rollouts,
self.stats_logger.test_return,
)
if (self.log_iteration % 5) == 0 or done:
_, rendering_time = self.tensorboard_writer.record_episode(
self, self.iteration, done
)
self.tensorboard_writer.log_returns(self.iteration, buffer)
self.tensorboard_writer.log_actions(self.iteration, buffer)
self.tensorboard_writer.log_observations(self.iteration, buffer)
self.tensorboard_writer.log_loss(self.iteration, self.loss)
def get_tensorboard_hparams_suffix(self):
suffix = ""
for key, val in self.hparams.items():
if key in self.shortnames.keys():
key = self.shortnames[key]
else:
key = key.split("/")[1]
if isinstance(val, float):
val = f"{val:.2}"
else:
val = str(val)
suffix += f"-{key}{val}"
return suffix
def _get_initial_obs_mean_std(
self, obs_norm: Any
) -> Tuple[Optional[torch.tensor], Optional[torch.tensor]]:
f"""
Check if observations are normalized and if so return initial mean and std,
None otherwise.
Returns:
Tuple[Optional[torch.tensor], Optional[torch.tensor]]: obs mean and std
"""
if obs_norm:
obs_mean = torch.zeros(self.ob_dim, device=self.device)
obs_std = torch.ones(self.ob_dim, device=self.device)
else:
obs_mean = None
obs_std = None
return obs_mean, obs_std
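# Hedged illustration (not part of the original class): a minimal sketch of how
# the initial obs_mean/obs_std returned by _get_initial_obs_mean_std are typically
# applied to normalize observations. The epsilon and the helper name are
# assumptions; the agent's actual running-statistics update is not shown here.
def _normalize_obs_sketch():
    import torch
    ob_dim = 4
    obs_mean = torch.zeros(ob_dim)
    obs_std = torch.ones(ob_dim)
    obs = torch.randn(8, ob_dim)
    return (obs - obs_mean) / (obs_std + 1e-8)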
| [
"torch.zeros",
"torch.device",
"torch.ones",
"torch.cuda.is_available",
"torch.tensor"
] | 1.3.1 | raznem/sac_ppo | c18e9bd32a70fcc4bc413565c6b885d7560b8b5a |
1.8 | #Linear module to use with ZeRO Stage 3, allowing parameter memory to be released
#after the module executes during forward.
#Instead of saving variables with save_for_backward, we save variable ids,
#allowing us to retrieve the variables without creating pointers to them,
#which lets the underlying tensors be garbage collected
#when partitioned as needed by the ZeRO Stage 3 optimizer.
#TODO instead of patching the Linear module, we could patch ctx.save_for_backward and
#ctx.saved_tensors so that this approach works for all nn modules that are built upon
#torch.nn.functional. However, the issue is that many modules use C++ implementations
#which do not have a PyTorch implementation, e.g. torch.addmm, which acts as a functional
#when implemented outside of torch.autograd.Function.
import math
import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn.modules.module import Module
from deepspeed.runtime.utils import noop_decorator
tensor_map = {}
def print_rank_0(message, debug=False, force=False):
if torch.distributed.get_rank() == 0 and (debug or force):
print(message)
try:
autocast_custom_fwd = torch.cuda.amp.custom_fwd
autocast_custom_bwd = torch.cuda.amp.custom_bwd
except (ImportError, AttributeError) as exp:
autocast_custom_fwd = noop_decorator
autocast_custom_bwd = noop_decorator
class LinearFunctionForZeroStage3(torch.autograd.Function):
# Note that both forward and backward are @staticmethods
@staticmethod
@autocast_custom_fwd
# bias is an optional argument
def forward(ctx, input, weight, bias=None):
#print("In ZeRO Linear Function")
weight_id = id(weight)
bias_id = id(bias)
#ctx.save_for_backward(input, weight, bias)
ctx.save_for_backward(input, torch.tensor(weight_id), torch.tensor(bias_id))
tensor_map[weight_id] = weight
tensor_map[bias_id] = bias
if input.dim() == 2 and bias is not None:
# fused op is marginally faster
ret = torch.addmm(bias, input, weight.t())
else:
output = input.matmul(weight.t())
if bias is not None:
output += bias
ret = output
return ret
# This function has only a single output, so it gets only one gradient
@staticmethod
@autocast_custom_bwd
def backward(ctx, grad_output):
# This is a pattern that is very convenient - at the top of backward
# unpack saved_tensors and initialize all gradients w.r.t. inputs to
# None. Thanks to the fact that additional trailing Nones are
# ignored, the return statement is simple even when the function has
# optional inputs.
#input, weight, bias = ctx.saved_tensors
input, weight_id, bias_id = ctx.saved_tensors
weight = tensor_map[weight_id.item()]
bias = tensor_map[bias_id.item()]
grad_input = grad_weight = grad_bias = None
#print(f"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}")
# These needs_input_grad checks are optional and there only to
# improve efficiency. If you want to make your code simpler, you can
# skip them. Returning gradients for inputs that don't require it is
# not an error.
if ctx.needs_input_grad[0]:
#print(f"Computing grad input weight {weight.shape} grad_output {grad_output.shape}")
grad_input = grad_output.matmul(weight)
#print(f"Computed grad input {grad_input.shape}")
if ctx.needs_input_grad[1]:
#print("Computing grad weight")
dim = grad_output.dim()
if dim > 2:
grad_weight = grad_output.reshape(-1,
grad_output.shape[-1]).t().matmul(
input.reshape(-1,
input.shape[-1]))
else:
grad_weight = grad_output.t().matmul(input)
#print(f"Computed grad weight grad_weight {grad_weight.shape}")
if bias is not None and ctx.needs_input_grad[2]:
#print("Computing grad bias")
grad_bias = grad_output.sum(0)
#print("Done computing grad bias")
#print("needs bias")
#print(f"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}")
return grad_input, grad_weight, grad_bias
def zero3_linear_wrap(input, weight, bias=None):
if bias is None:
return LinearFunctionForZeroStage3.apply(input, weight)
else:
return LinearFunctionForZeroStage3.apply(input, weight, bias)
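# Hedged usage sketch (not part of the original file): exercising the id-based
# save_for_backward trick above with a plain CPU weight/bias pair. The shapes
# are illustrative assumptions only.
def _zero3_linear_function_example():
    inp = torch.randn(4, 8, requires_grad=True)
    weight = torch.randn(16, 8, requires_grad=True)
    bias = torch.randn(16, requires_grad=True)
    out = zero3_linear_wrap(inp, weight, bias)
    out.sum().backward()
    # weight/bias gradients were recovered through tensor_map, not saved pointers
    return inp.grad.shape, weight.grad.shape, bias.grad.shape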
class LinearModuleForZeroStage3(Module):
r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
The weights are pre-transposed and stored as A^T instead of transposing during each
forward. Memory savings proportional to the parameter size.
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \text{in\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
Attributes:
weight: the learnable weights of the module of shape
:math:`(\text{out\_features}, \text{in\_features})`. The values are
initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
:math:`k = \frac{1}{\text{in\_features}}`
bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
If :attr:`bias` is ``True``, the values are initialized from
:math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
:math:`k = \frac{1}{\text{in\_features}}`
Examples::
>>> m = nn.Linear(20, 30)
>>> input = torch.randn(128, 20)
>>> output = m(input)
>>> print(output.size())
torch.Size([128, 30])
"""
__constants__ = ['in_features', 'out_features']
in_features: int
out_features: int
weight: Tensor
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super(LinearModuleForZeroStage3, self).__init__()
print("Building ZeRO module")
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
def forward(self, input: Tensor) -> Tensor:
return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias)
def extra_repr(self) -> str:
return 'in_features={}, out_features={}, bias={}'.format(
self.in_features,
self.out_features,
self.bias is not None)
| [
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.tensor",
"torch.nn.init.uniform_",
"torch.distributed.get_rank",
"torch.Tensor"
] | 1.8 | manuelciosici/DeepSpeed | 3da841853ca07abf3a09e7bd325a576c4e642c11 |
1.8 | import copy
import torch
import deepspeed
import deepspeed.ops.transformer as transformer_inference
from .replace_policy import HFBertLayerPolicy, HFGPT2LayerPolicy, HFGPTJLayerPolicy
from .replace_policy import replace_policies
from ..constants import INFERENCE_GENERIC_MODE, INFERENCE_SPECIALIZED_MODE
from ..runtime.weight_quantizer import WeightQuantization
from torch import nn
class LinearAllreduce(nn.Module):
def __init__(self, weight, bias=None, mp_group=None):
super(LinearAllreduce, self).__init__()
self.weight = weight
self.bias = bias
self.mp_group = mp_group
def forward(self, input):
output = torch.matmul(input, self.weight)
if self.mp_group is not None:
torch.distributed.all_reduce(output, group=self.mp_group)
if self.bias is not None:
output += self.bias
return output
class LinearLayer(nn.Module):
def __init__(self, weight, bias=None):
super(LinearLayer, self).__init__()
self.weight = weight
self.bias = bias
def forward(self, input):
output = torch.matmul(input, self.weight)
if self.bias is not None:
output += self.bias
return output
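# Hedged usage sketch (not part of the original file): both wrappers above expect
# the weight already transposed to [in_features, out_features], so forward is a
# plain matmul. Shapes below are illustrative assumptions.
def _linear_layer_example():
    weight = torch.randn(8, 16)   # pre-transposed: [in_features, out_features]
    bias = torch.randn(16)
    layer = LinearLayer(weight, bias)
    out = layer(torch.randn(4, 8))
    return out.shape              # torch.Size([4, 16])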
class ReplaceWithTensorSlicing:
def __init__(self, mp_group=None):
if mp_group is not None:
self.gpu_index = torch.distributed.get_rank(group=mp_group)
else:
self.gpu_index = 0
def merge_assert(self, dim1, dim2):
assert dim1 > dim2, \
'Merging tensors is not allowed here! Please use deepspeed load_checkpoint\
for merging your checkpoints before replacing the transformer layer with\
inference-kernels'
def qkv_copy(self, dst, src):
if src is None:
return src
src_shape = src.shape
dst_shape = dst.shape
src_split = torch.split(src.data, src.shape[-1] // 3, dim=-1)
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[1] == dst_shape[1]:
return torch.nn.Parameter(src)
self.merge_assert(src_shape[1], dst_shape[1])
qkv_size = dst_shape[1] // 3
qkv_split = [torch.split(src_s, qkv_size, dim=1) for src_s in src_split]
weight_split = [
torch.cat([qkv_s[i] for qkv_s in qkv_split],
axis=1) for i in range(len(qkv_split[0]))
]
dst.data.copy_(weight_split[self.gpu_index].to(
torch.cuda.current_device()).contiguous())
else:
if src_shape[0] == dst_shape[0]:
return torch.nn.Parameter(src)
qkv_size = dst_shape[0] // 3
qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split]
bias_split = [
torch.cat([qkv_s[i] for qkv_s in qkv_split],
axis=0) for i in range(len(qkv_split[0]))
]
dst.data.copy_(bias_split[self.gpu_index].to(
torch.cuda.current_device()).contiguous())
return torch.nn.Parameter(dst)
def copy(self, dst, src):
if src is None:
return src
src_shape = src.shape
dst_shape = dst.shape
if (len(src_shape) == 2 and len(dst_shape) == 2):
if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]:
return torch.nn.Parameter(src)
if src_shape[0] != dst_shape[0]:
self.merge_assert(src_shape[0], dst_shape[0])
weight_split = torch.split(src, dst_shape[0])
else:
self.merge_assert(src_shape[1], dst_shape[1])
weight_split = torch.split(src.data, dst_shape[1], dim=1)
dst.data.copy_(weight_split[self.gpu_index].to(
torch.cuda.current_device()).contiguous())
else:
if src_shape[0] == dst_shape[0]:
return torch.nn.Parameter(src)
bias_split = torch.split(src.data, dst_shape[-1])
dst.data.copy_(bias_split[self.gpu_index].to(
torch.cuda.current_device()).contiguous())
return torch.nn.Parameter(dst)
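# Hedged usage sketch (not part of the original file): slicing a full weight into
# the shard owned by this rank. With mp_group=None the helper assumes rank 0, so
# the first split is copied into dst. Sizes are illustrative assumptions; a CUDA
# device is required because copy() moves data via torch.cuda.current_device().
def _tensor_slicing_example():
    mp_replace = ReplaceWithTensorSlicing(mp_group=None)
    src = torch.randn(8, 4)        # full weight
    dst = torch.empty(4, 4)        # this rank's shard (mp_size == 2)
    sharded = mp_replace.copy(dst, src)
    return sharded.shape           # torch.Size([4, 4])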
def replace_transformer_layer(orig_layer_impl,
model,
policy=None,
micro_batch_size=-1,
config=None,
seed=-1,
hidden_size=-1,
num_attention_heads=-1,
mp_size=1,
training_mp_size=1,
mp_group=None,
ep_group=None,
expert_mp_group=None,
preln=True,
fp16=True,
local_rank=-1,
stochastic_mode=True,
training=True,
quantize=False,
quantize_settings=None,
triangular_masking=False,
return_tuple=True,
replace_with_kernel_inject=False,
linear_layer_setting=None,
moe=False,
moe_experts=1,
moe_type='standard'):
""" Replace bert-style transformer layers with DeepSpeed's transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
policy: shows the policy for mapping from the orig_layer_impl to transformer parameters when
replace_with_kernel_inject is set, otherwise, it provides the names of two linear layers as
a tuple: (attention_output projection, transformer output projection)
micro_batch_size (int): micro batch size per gpu used during training/eval
config (dict): model config containing hidden size, attention heads, etc.
seed (int): random seed value
max_seq_length (int): max sequence length for training
hidden_size (int): hidden dimension
num_attention_heads (int): number of attention heads
mp_size (int): model_parallelism degree
mp_group : model_parallel group initialized on the modeling side
preln (bool): does the original layer implementation do pre or post layer norm?
fp16 (bool): fp16 or fp32
local_rank (int): GPU rank (optional),
stochastic_mode (bool): whether to use stochastic mode
training (bool): specifying whether kernel-injection is done for training/inference (set to false for inference-mode injection)
quantize_settings (tuple): this setting shows how we can quantize a model for running it through the inference kernels.
It includes (quantization_scales, merge_count, mlp_extra_grouping, quantize_groups).
return_tuple (bool): if set, transformer layer returns a tuple as the output.
Note: this flag needs to be set for huggingface models.
replace_with_kernel_inject (bool): injection mode; if true, kernels will be added along with configuring
Tensor-Parallelism
linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers
and embedding layers
attention_params: (list of strings) [Optional]: shows the parameters in the attention part that need to
be adjusted based on the model parallelism
Returns:
Updated nn.module with replaced transformer layers
"""
def replace_with_policy(child,
policy_cls,
triangular_masking,
inference=False,
preln=True,
layer_id=0):
preln = False if policy_cls is HFBertLayerPolicy else preln
if policy_cls is HFBertLayerPolicy:
policy = policy_cls(child, inference=inference, preln=preln)
else:
policy = policy_cls(child, inference=inference)
if inference:
hidden_size, num_attention_heads = policy.get_hidden_heads()
assert num_attention_heads % mp_size == 0,\
"To run the model parallel across the GPUs, the number of attention heads must be divisible by the world size! " +\
"This is because the attention computation is partitioned evenly among the parallel GPUs."
from deepspeed.moe.layer import MoE
moe = False
if hasattr(child, 'mlp') and isinstance(child.mlp, MoE):
num_experts = child.mlp.num_experts
moe = True
attn_linear_layer, qkvw, qkvb, dense_w, dense_b, scale_attention, megatron_v2 = policy.attention()
if not moe or moe_type == 'standard':
mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b = policy.mlp()
else:
mlp_linear_layer, _h4h_w, _h4h_b, _4hh_w, _4hh_b, \
_res_h4h_w, _res_h4h_b, _res_4hh_w, _res_4hh_b, _res_coef = policy.mlp(moe_type)
attn_nw, attn_nb, input_nw, input_nb = policy.layerNorm()
if quantize:
if policy_cls is not HFBertLayerPolicy:
qkvw = qkvw.to(torch.int8)
dense_w = dense_w.to(torch.int8)
_h4h_w = [moe_w1.to(torch.int8)
for moe_w1 in _h4h_w] if moe else _h4h_w.to(torch.int8)
_4hh_w = [moe_w1.to(torch.int8)
for moe_w1 in _4hh_w] if moe else _4hh_w.to(torch.int8)
elif fp16:
qkvw = qkvw.half()
dense_w = dense_w.half()
_h4h_w = [moe_w1.half() for moe_w1 in _h4h_w] if moe else _h4h_w.half()
_4hh_w = [moe_w1.half() for moe_w1 in _4hh_w] if moe else _4hh_w.half()
if quantize or fp16:
qkvb = qkvb if qkvb is None else qkvb.half()
dense_b = dense_b if dense_b is None else dense_b.half()
_h4h_b = [moe_b1.half() for moe_b1 in _h4h_b] if moe else _h4h_b.half()
_4hh_b = [moe_b1.half() for moe_b1 in _4hh_b] if moe else _4hh_b.half()
attn_nw = attn_nw if attn_nw is None else attn_nw.half()
attn_nb = attn_nb if attn_nb is None else attn_nb.half()
input_nw = input_nw.half()
input_nb = input_nb.half()
if moe and moe_type == 'residual' and fp16:
_res_h4h_b = _res_h4h_b.half()
_res_4hh_b = _res_4hh_b.half()
_res_h4h_w = _res_h4h_w.half()
_res_4hh_w = _res_4hh_w.half()
_res_coef = _res_coef.half()
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
#expert_mp_replace = ReplaceWithTensorSlicing(mp_group=expert_mp_group)
if inference:
if moe:
ep_world_size = torch.distributed.get_world_size()
local_ep_size = 1 if num_experts < ep_world_size else num_experts // ep_world_size
transformer_config = transformer_inference.DeepSpeedMoEInferenceConfig(
hidden_size=hidden_size,
heads=num_attention_heads,
layer_norm_eps=config.layer_norm_eps if hasattr(
config,
'layer_norm_eps') else 1e-12,
fp16=fp16,
pre_layer_norm=preln,
mp_size=mp_size,
q_int8=quantize,
moe_experts=local_ep_size,
global_experts=num_experts,
mlp_type=moe_type)
else:
rotary_dim = config.rotary_dim if hasattr(config, 'rotary_dim') else child.attention.rotary_ndims \
if hasattr(child, 'attention') and hasattr(child.attention,'rotary_ndims') else -1
transformer_config = transformer_inference.DeepSpeedInferenceConfig(
hidden_size=hidden_size,
heads=num_attention_heads,
layer_norm_eps=config.layer_norm_eps if hasattr(
config,
'layer_norm_eps') else
(config.layer_norm_epsilon
if hasattr(config,
'layer_norm_epsilon') else config.layernorm_epsilon
if hasattr(config,
'layernorm_epsilon') else 1.0e-12),
fp16=fp16,
pre_layer_norm=preln,
mp_size=mp_size,
q_int8=quantize,
return_tuple=(return_tuple or (policy_cls is HFBertLayerPolicy)),
triangular_masking=(policy_cls is not HFBertLayerPolicy),
local_attention=((config.attention_layers[layer_id] == "local")
if hasattr(config,
'attention_layers') else False),
window_size=(config.window_size if hasattr(config,
'window_size') else 1),
rotary_dim=rotary_dim,
mlp_after_attn=(rotary_dim is None or rotary_dim < 0),
training_mp_size=training_mp_size)
if quantize and quantize_settings is not None:
(quantization_scales,
merge_count,
mlp_extra_grouping,
quantize_groups) = quantize_settings
if moe:
new_module = transformer_inference.DeepSpeedMoEInference(
transformer_config,
mp_group=mp_group,
ep_group=None if ep_group is None else ep_group[num_experts],
expert_mp_group=None
if expert_mp_group is None else expert_mp_group[num_experts],
quantize_scales=quantization_scales[layer_id],
quantize_groups=quantize_groups,
merge_count=merge_count,
mlp_extra_grouping=mlp_extra_grouping,
qkv_merging=(policy_cls is HFBertLayerPolicy))
else:
new_module = transformer_inference.DeepSpeedTransformerInference(
transformer_config,
mp_group=mp_group,
quantize_scales=quantization_scales[layer_id],
quantize_groups=quantize_groups,
merge_count=merge_count,
mlp_extra_grouping=mlp_extra_grouping,
qkv_merging=(policy_cls is HFBertLayerPolicy))
if quantize and qkvw.dtype != torch.int8:
quantize_bits = 8
quantizer = WeightQuantization()
if policy_cls is HFBertLayerPolicy:
data_quantized, _ = quantizer.quantize_data(qkvw.data, quantize_bits, quantize_groups * 3)
else:
data_quantized, _ = quantizer.quantize_data(qkvw.data, quantize_bits, quantize_groups)
qkvw.data.copy_(data_quantized)
qkvw.data = qkvw.data.to(torch.int8)
else:
if moe:
new_module = transformer_inference.DeepSpeedMoEInference(
transformer_config,
mp_group=mp_group,
ep_group=None if ep_group is None else ep_group[num_experts],
expert_mp_group=None
if expert_mp_group is None else expert_mp_group[num_experts],
)
else:
new_module = transformer_inference.DeepSpeedTransformerInference(
transformer_config,
mp_group=mp_group,
)
new_module.config.scale_attention = scale_attention
# we want the weights in [input, output] shape
# linear layer is created with [input, output] shape
# transpose it here to reduce inference cost!
def transpose(data):
data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1))
data = data.reshape(data.shape[-1], data.shape[-2])
return data
if attn_linear_layer:
qkvw.data = transpose(qkvw.data)
dense_w.data = transpose(dense_w.data)
if megatron_v2:
new_module.config.rotate_half = True
new_module.config.rotate_every_two = False
def _transpose(x):
num_attention_heads_per_partition = transformer_config.heads // transformer_config.mp_size
attention_head_size = x.shape[-1] // num_attention_heads_per_partition
new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition,
attention_head_size)
x_1 = x.view(*new_x_shape)
(q,
k,
v) = torch.split(x_1,
(x_1.shape[-1] // 3),
dim=(x_1.dim() - 1))
if len(q.shape) > 2:
return torch.cat((q.reshape(q.shape[0],
-1),
k.reshape(q.shape[0],
-1),
v.reshape(q.shape[0],
-1)),
dim=-1).reshape(x.shape)
else:
return torch.cat((q.reshape(-1),
k.reshape(-1),
v.reshape(-1)),
dim=-1).reshape(x.shape)
qkvw = torch.nn.Parameter(_transpose(qkvw).contiguous())
qkvb = torch.nn.Parameter(_transpose(qkvb).contiguous())
dense_b = dense_b if dense_b is None else dense_b * (
transformer_config.training_mp_size / transformer_config.mp_size)
_4hh_b = _4hh_b * (transformer_config.training_mp_size /
transformer_config.mp_size)
if mlp_linear_layer:
_h4h_w = [transpose(moe_w1.data)
for moe_w1 in _h4h_w] if moe else transpose(_h4h_w.data)
_4hh_w = [transpose(moe_w1.data)
for moe_w1 in _4hh_w] if moe else transpose(_4hh_w.data)
if moe and moe_type == 'residual':
_res_h4h_w.data = transpose(_res_h4h_w.data)
_res_4hh_w.data = transpose(_res_4hh_w.data)
_res_coef.data = transpose(_res_coef.data)
attn_block = new_module.attention
attn_block.attn_qkvw = mp_replace.qkv_copy(attn_block.attn_qkvw, qkvw)
attn_block.attn_qkvb = mp_replace.qkv_copy(attn_block.attn_qkvb, qkvb)
attn_block.attn_ow = mp_replace.copy(attn_block.attn_ow, dense_w)
attn_block.attn_ob = mp_replace.copy(attn_block.attn_ob, dense_b)
mpl_block = new_module.mlp
if moe:
gpu_index = torch.distributed.get_rank()
gpu_index = 0
for ep_index in range(local_ep_size):
mpl_block[ep_index].inter_w.data = _h4h_w[
gpu_index * local_ep_size + ep_index].to(
torch.cuda.current_device())
mpl_block[ep_index].inter_b.data = _h4h_b[
gpu_index * local_ep_size + ep_index].to(
torch.cuda.current_device())
mpl_block[ep_index].output_w.data = _4hh_w[
gpu_index * local_ep_size + ep_index].to(
torch.cuda.current_device())
mpl_block[ep_index].output_b.data = _4hh_b[
gpu_index * local_ep_size + ep_index].to(
torch.cuda.current_device())
new_module.attn_nw.data = attn_nw.to(torch.cuda.current_device())
new_module.attn_nb.data = attn_nb.to(torch.cuda.current_device())
if moe_type == 'residual':
new_module.res_mlp.inter_w.data = _res_h4h_w.to(
torch.cuda.current_device())
new_module.res_mlp.inter_b.data = _res_h4h_b.to(
torch.cuda.current_device())
new_module.res_mlp.output_w.data = _res_4hh_w.to(
torch.cuda.current_device())
new_module.res_mlp.output_b.data = _res_4hh_b.to(
torch.cuda.current_device())
new_module.res_coef.data = _res_coef.to(torch.cuda.current_device())
else:
mpl_block.inter_w.data = mp_replace.copy(mpl_block.inter_w, _h4h_w)
mpl_block.inter_b.data = mp_replace.copy(mpl_block.inter_b, _h4h_b)
mpl_block.output_w.data = mp_replace.copy(mpl_block.output_w, _4hh_w)
mpl_block.output_b.data = mp_replace.copy(mpl_block.output_b, _4hh_b)
if attn_nw is None:
new_module.mlp.attn_nw = attn_nw
else:
new_module.mlp.attn_nw.data = attn_nw.to(torch.cuda.current_device())
if attn_nb is None:
new_module.mlp.attn_nb = attn_nb
else:
new_module.mlp.attn_nb.data = attn_nb.to(torch.cuda.current_device())
new_module.norm_w.data = input_nw.to(torch.cuda.current_device())
new_module.norm_b.data = input_nb.to(torch.cuda.current_device())
else:
transformer_config = deepspeed.DeepSpeedTransformerConfig(
batch_size=micro_batch_size,
hidden_size=config.hidden_size,
heads=config.num_attention_heads,
attn_dropout_ratio=config.attention_probs_dropout_prob,
hidden_dropout_ratio=config.hidden_dropout_prob,
num_hidden_layers=config.num_hidden_layers,
initializer_range=config.initializer_range,
layer_norm_eps=config.layer_norm_eps if hasattr(
config,
'layer_norm_eps') else 1e-12,
seed=seed,
fp16=fp16,
pre_layer_norm=(False if policy_cls is HFBertLayerPolicy else preln),
return_tuple=return_tuple,
local_rank=local_rank,
stochastic_mode=stochastic_mode,
normalize_invertible=True,
training=training)
new_module = deepspeed.DeepSpeedTransformerLayer(transformer_config)
new_module.attn_qkvw.data = qkvw
new_module.attn_qkvb.data = qkvb
new_module.attn_ow.data = dense_w
new_module.attn_ob.data = dense_b
new_module.attn_nw.data = attn_nw
new_module.attn_nb.data = attn_nb
new_module.norm_w.data = input_nw
new_module.norm_b.data = input_nb
new_module.inter_w.data = _h4h_w
new_module.inter_b.data = _h4h_b
new_module.output_w.data = _4hh_w
new_module.output_b.data = _4hh_b
return new_module
def replace_wo_policy(module, all_reduce_linears):
def _replace(child, name, conv_linear_layer):
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
if name in all_reduce_linears:
new_weight = torch.empty(
(child.weight.shape[0]
if conv_linear_layer else child.weight.shape[1] // mp_size,
child.weight.shape[1]
if conv_linear_layer else child.weight.shape[0]),
device=child.weight.device,
dtype=torch.half if fp16 else torch.float)
if not conv_linear_layer:
child.weight.data.view(-1).copy_(
child.weight.data.transpose(-1,
-2).contiguous().view(-1))
child.weight.data = child.weight.data.reshape(
child.weight.data.shape[-1],
child.weight.data.shape[-2])
data = mp_replace.copy(new_weight,
child.weight.data).to(torch.cuda.current_device())
return LinearAllreduce(data, child.bias if child.bias is None else \
child.bias.to(torch.cuda.current_device()), mp_group)
else:
new_weight = torch.empty(
(child.weight.shape[0] //
mp_size if conv_linear_layer else child.weight.shape[1],
child.weight.shape[1]
if conv_linear_layer else child.weight.shape[0] // mp_size),
device=child.weight.device,
dtype=torch.half if fp16 else torch.float)
if not conv_linear_layer:
child.weight.data.view(-1).copy_(
child.weight.data.transpose(-1,
-2).contiguous().view(-1))
child.weight.data = child.weight.data.reshape(
child.weight.data.shape[-1],
child.weight.data.shape[-2])
data = mp_replace.copy(new_weight, child.weight.data)
new_bias = torch.empty((child.weight.shape[1] // mp_size),
device=child.weight.device,
dtype=torch.half if fp16 else torch.float)
bias_data = None if child.bias is None else mp_replace.copy(
new_bias,
child.bias.data).to(torch.cuda.current_device())
return LinearLayer(data.to(torch.cuda.current_device()), bias_data)
def _slice_embedding(child, name, conv_linear_layer):
mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
new_weight = torch.empty((child.weight.shape[0],
child.weight.shape[1] // mp_size),
device=child.weight.device,
dtype=child.weight.dtype)
data = mp_replace.copy(new_weight, child.weight.data)
new_embedding = nn.Embedding(child.weight.shape[0],
child.weight.shape[1] // mp_size)
new_embedding.weight.data.copy_(data)
return new_embedding
def update_mp_params(child):
if hasattr(child, 'n_heads'):
child.n_heads = child.n_heads // mp_size
if hasattr(child, 'inner_dim'):
child.inner_dim = child.inner_dim // mp_size
if hasattr(child, 'num_heads'):
child.num_heads = child.num_heads // mp_size
if hasattr(child, 'num_attention_heads'):
child.num_attention_heads = child.num_attention_heads // mp_size
if hasattr(child, 'all_head_size'):
child.all_head_size = child.all_head_size // mp_size
if hasattr(child, 'embed_dim'):
child.embed_dim = child.embed_dim // mp_size
conv_linear_layer = False
if linear_layer_setting is not None:
linear_policies = {linear_layer_setting[0]: _replace}
if len(linear_layer_setting) == 2:
linear_policies.update({linear_layer_setting[1]: _slice_embedding})
else:
if orig_layer_impl is HFGPT2LayerPolicy._orig_layer_class:
try:
import transformers
conv_linear_layer = True
linear_policies = {transformers.model_utils.Conv1D: _replace}
except ImportError:
linear_policies = {nn.Linear: _replace}
else:
linear_policies = {nn.Linear: _replace, nn.Embedding: _slice_embedding}
def _replace_module(r_module, prev_name=''):
for name, child in r_module.named_children():
if child.__class__ in linear_policies:
setattr(
r_module,
name,
linear_policies[child.__class__](child,
prev_name + '.' + name,
conv_linear_layer))
else:
update_mp_params(child)
_replace_module(child, name)
return r_module
return _replace_module(module)
def replace_fn(child, _policy, layer_id=0):
if training:
# copy relevant state from child -> new module
new_module = replace_with_policy(child,
_policy,
triangular_masking,
preln=preln)
else:
# copy relevant state from child -> new module
if replace_with_kernel_inject:
new_module = replace_with_policy(child,
_policy,
triangular_masking,
inference=True,
preln=(_policy
is not HFBertLayerPolicy),
layer_id=layer_id)
else:
new_module = replace_wo_policy(child, _policy)
return new_module
return replace_module(model=model,
orig_class=orig_layer_impl,
replace_fn=replace_fn,
_replace_policy=policy)
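# Hedged usage sketch (not part of the original file): a typical inference-time
# call, kept as a comment because it needs a real HuggingFace model. The import
# path follows the docstring above; the model/config objects are assumptions.
#
#   from transformers.modeling_bert import BertLayer
#   model = replace_transformer_layer(orig_layer_impl=BertLayer,
#                                     model=model,
#                                     policy=HFBertLayerPolicy,
#                                     config=model.config,
#                                     fp16=True,
#                                     training=False,
#                                     replace_with_kernel_inject=True)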
def revert_transformer_layer(orig_layer_impl, model, config, preln=False):
""" Revert DeepSpeed's transformer layer back to original bert-style transformer layer
Arguments:
orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced,
e.g., transformers.modeling_bert.BertLayer.
model (torch.nn.Module): user's nn.module representing their model
config (dict): model config containing hidden size, attention heads, etc.
Returns:
Updated nn.module with original bert-style transformer layers
"""
def replace_fn(child, _replace_policy, layer_id):
#from turing.nvidia_modelingpreln import BertLayer
orig_module = orig_layer_impl(config)
# copy relevant state from child -> original module
qkvw = child.attn_qkvw.data
qkvb = child.attn_qkvb.data
qw, kw, vw = torch.chunk(qkvw, 3, axis=0)
qb, kb, vb = torch.chunk(qkvb, 3, axis=0)
orig_module.attention.self.query.weight.data = qw
orig_module.attention.self.query.bias.data = qb
orig_module.attention.self.key.weight.data = kw
orig_module.attention.self.key.bias.data = kb
orig_module.attention.self.value.weight.data = vw
orig_module.attention.self.value.bias.data = vb
orig_module.attention.output.dense.weight.data = child.attn_ow.data
orig_module.attention.output.dense.bias.data = child.attn_ob.data
attn_ln_w = child.attn_nw.data
attn_ln_b = child.attn_nb.data
if preln:
orig_module.PostAttentionLayerNorm.weight.data = attn_ln_w
orig_module.PostAttentionLayerNorm.bias.data = attn_ln_b
else:
orig_module.attention.output.LayerNorm.weight.data = attn_ln_w
orig_module.attention.output.LayerNorm.bias.data = attn_ln_b
inter_ff_w = child.inter_w.data
inter_ff_b = child.inter_b.data
if preln:
orig_module.intermediate.dense_act.weight.data = inter_ff_w
orig_module.intermediate.dense_act.bias.data = inter_ff_b
else:
orig_module.intermediate.dense.weight.data = inter_ff_w
orig_module.intermediate.dense.bias.data = inter_ff_b
orig_module.output.dense.weight.data = child.output_w.data
orig_module.output.dense.bias.data = child.output_b.data
transformer_ln_w = child.norm_w.data
transformer_ln_b = child.norm_b.data
if preln:
orig_module.PreAttentionLayerNorm.weight.data = transformer_ln_w
orig_module.PreAttentionLayerNorm.bias.data = transformer_ln_b
else:
orig_module.output.LayerNorm.weight.data = transformer_ln_w
orig_module.output.LayerNorm.bias.data = transformer_ln_b
return orig_module
return replace_module(model=model,
orig_class=deepspeed.DeepSpeedTransformerLayer,
replace_fn=replace_fn,
_replace_policy=None)
def replace_module(model, orig_class, replace_fn, _replace_policy):
""" Scan the model for instances of ``orig_clas:`` to replace using ``replace_fn``.
Arguments:
model (torch.nn.Module): the model to augment
orig_class (torch.nn.Module): the module to search for
replace_fn (method): a method to convert instances of ``orig_class`` to the
desired type and return a new instance.
Returns:
A modified ``model``.
"""
policy = {}
if orig_class is not None:
policy.update({orig_class: (replace_fn, _replace_policy)})
else:
for plcy in replace_policies:
# instantiate a throw-away policy in order to populate the _orig_layer_class
_ = plcy(None)
if isinstance(plcy._orig_layer_class, list):
for orig_layer_class in plcy._orig_layer_class:
policy.update({orig_layer_class: (replace_fn, plcy)})
elif plcy._orig_layer_class is not None:
policy.update({plcy._orig_layer_class: (replace_fn, plcy)})
assert len(policy.items()) > 0,\
"No default policy found! Please specify your policy injection_policy (like {BertLayer:HFBEertLayerPolicy})." +\
"You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py"
replaced_module, _ = _replace_module(model, policy)
return replaced_module
from ..pipe import PipelineModule
def _replace_module(model, policies, layer_id=0):
""" Traverse model's children recursively and apply any transformations in ``policies``.
Arguments:
model (torch.nn.Module): model to augment
policies (dict): Mapping of source class to replacement function.
Returns:
Modified ``model``.
"""
for name, child in model.named_children():
if child.__class__ in policies:
replaced_module = policies[child.__class__][0](child,
policies[child.__class__][-1],
layer_id)
setattr(model, name, replaced_module)
if isinstance(model, PipelineModule):
assert hasattr(model, 'forward_funcs'),\
"we require pipe-module to have the list of fwd_functions"
model.forward_funcs[model.fwd_map[name]] = replaced_module
layer_id += 1
else:
_, layer_id = _replace_module(child, policies, layer_id=layer_id)
return model, layer_id
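# Hedged usage sketch (not part of the original file): the same recursive pattern
# with a toy policy that swaps every nn.Linear for an identically shaped layer.
# The toy module and the swap function are assumptions for illustration only.
def _replace_module_example():
    toy = nn.Sequential(nn.Linear(4, 4), nn.ReLU())
    def swap(child, _policy, layer_id):
        return nn.Linear(child.in_features, child.out_features)
    replaced, _ = _replace_module(toy, {nn.Linear: (swap, None)})
    return replaced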
| [
"torch.distributed.get_world_size",
"torch.cat",
"torch.split",
"torch.nn.Parameter",
"torch.cuda.current_device",
"torch.distributed.all_reduce",
"torch.distributed.get_rank",
"torch.empty",
"torch.matmul",
"torch.nn.Embedding",
"torch.chunk"
] | 1.8 | manuelciosici/DeepSpeed | 3da841853ca07abf3a09e7bd325a576c4e642c11 |
1.1 | from __future__ import absolute_import, division, print_function
import torch
from pyro.distributions.torch import RelaxedOneHotCategorical, RelaxedBernoulli
from pyro.distributions.util import copy_docs_from
from torch.distributions.utils import clamp_probs
@copy_docs_from(RelaxedOneHotCategorical)
class RelaxedOneHotCategoricalStraightThrough(RelaxedOneHotCategorical):
"""
An implementation of
:class:`~torch.distributions.relaxed_categorical.RelaxedOneHotCategorical`
with a straight-through gradient estimator.
This distribution has the following properties:
- The samples returned by the :meth:`rsample` method are discrete/quantized.
- The :meth:`log_prob` method returns the log probability of the
relaxed/unquantized sample using the GumbelSoftmax distribution.
- In the backward pass the gradient of the sample with respect to the
parameters of the distribution uses the relaxed/unquantized sample.
References:
[1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables,
Chris J. Maddison, Andriy Mnih, Yee Whye Teh
[2] Categorical Reparameterization with Gumbel-Softmax,
Eric Jang, Shixiang Gu, Ben Poole
"""
def rsample(self, sample_shape=torch.Size()):
soft_sample = super(RelaxedOneHotCategoricalStraightThrough, self).rsample(sample_shape)
soft_sample = clamp_probs(soft_sample)
hard_sample = QuantizeCategorical.apply(soft_sample)
return hard_sample
def log_prob(self, value):
value = getattr(value, '_unquantize', value)
return super(RelaxedOneHotCategoricalStraightThrough, self).log_prob(value)
class QuantizeCategorical(torch.autograd.Function):
@staticmethod
def forward(ctx, soft_value):
argmax = soft_value.max(-1)[1]
hard_value = torch.zeros_like(soft_value)
hard_value._unquantize = soft_value
if argmax.dim() < hard_value.dim():
argmax = argmax.unsqueeze(-1)
return hard_value.scatter_(-1, argmax, 1)
@staticmethod
def backward(ctx, grad):
return grad
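# Hedged usage sketch (not part of the original file): the sample is a discrete
# one-hot vector while gradients flow through the relaxed probabilities saved in
# `_unquantize`. Temperature and logits below are illustrative assumptions.
def _straight_through_categorical_example():
    logits = torch.randn(3, requires_grad=True)
    dist = RelaxedOneHotCategoricalStraightThrough(
        temperature=torch.tensor(0.5), logits=logits)
    sample = dist.rsample()        # quantized one-hot sample
    log_p = dist.log_prob(sample)  # evaluated on the relaxed sample
    log_p.backward()               # gradients reach `logits`
    return sample, logits.grad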
@copy_docs_from(RelaxedBernoulli)
class RelaxedBernoulliStraightThrough(RelaxedBernoulli):
"""
An implementation of
:class:`~torch.distributions.relaxed_bernoulli.RelaxedBernoulli`
with a straight-through gradient estimator.
This distribution has the following properties:
- The samples returned by the :meth:`rsample` method are discrete/quantized.
- The :meth:`log_prob` method returns the log probability of the
relaxed/unquantized sample using the GumbelSoftmax distribution.
- In the backward pass the gradient of the sample with respect to the
parameters of the distribution uses the relaxed/unquantized sample.
References:
[1] The Concrete Distribution: A Continuous Relaxation of Discrete Random Variables,
Chris J. Maddison, Andriy Mnih, Yee Whye Teh
[2] Categorical Reparameterization with Gumbel-Softmax,
Eric Jang, Shixiang Gu, Ben Poole
"""
def rsample(self, sample_shape=torch.Size()):
soft_sample = super(RelaxedBernoulliStraightThrough, self).rsample(sample_shape)
soft_sample = clamp_probs(soft_sample)
hard_sample = QuantizeBernoulli.apply(soft_sample)
return hard_sample
def log_prob(self, value):
value = getattr(value, '_unquantize', value)
return super(RelaxedBernoulliStraightThrough, self).log_prob(value)
class QuantizeBernoulli(torch.autograd.Function):
@staticmethod
def forward(ctx, soft_value):
hard_value = soft_value.round()
hard_value._unquantize = soft_value
return hard_value
@staticmethod
def backward(ctx, grad):
return grad
| [
"torch.Size",
"torch.distributions.utils.clamp_probs",
"torch.zeros_like"
] | 1.1.0 | ruohoruotsi/pyro | b54a4b42b9474eb3ecee11505e45fde85b1cdc54 |
1.1 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from . import box_utils
from . import center_utils
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
class SigmoidFocalClassificationLoss(nn.Module):
"""
Sigmoid focal cross entropy loss.
"""
def __init__(self, gamma: float = 2.0, alpha: float = 0.25):
"""
Args:
gamma: Weighting parameter to balance loss for hard and easy examples.
alpha: Weighting parameter to balance loss for positive and negative examples.
"""
super(SigmoidFocalClassificationLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
@staticmethod
def sigmoid_cross_entropy_with_logits(input: torch.Tensor, target: torch.Tensor):
""" PyTorch Implementation for tf.nn.sigmoid_cross_entropy_with_logits:
max(x, 0) - x * z + log(1 + exp(-abs(x))) in
https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
Returns:
loss: (B, #anchors, #classes) float tensor.
Sigmoid cross entropy loss without reduction
"""
loss = torch.clamp(input, min=0) - input * target + \
torch.log1p(torch.exp(-torch.abs(input)))
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class
target: (B, #anchors, #classes) float tensor.
One-hot encoded classification targets
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
weighted_loss: (B, #anchors, #classes) float tensor after weighting.
"""
pred_sigmoid = torch.sigmoid(input)
alpha_weight = target * self.alpha + (1 - target) * (1 - self.alpha)
pt = target * (1.0 - pred_sigmoid) + (1.0 - target) * pred_sigmoid
focal_weight = alpha_weight * torch.pow(pt, self.gamma)
bce_loss = self.sigmoid_cross_entropy_with_logits(input, target)
loss = focal_weight * bce_loss
if weights.shape.__len__() == 2 or \
(weights.shape.__len__() == 1 and target.shape.__len__() == 2):
weights = weights.unsqueeze(-1)
assert weights.shape.__len__() == loss.shape.__len__()
return loss * weights
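# Hedged usage sketch (not part of the original file): shapes follow the
# docstrings above; gamma/alpha and the anchor weights are illustrative only.
def _sigmoid_focal_loss_example():
    loss_fn = SigmoidFocalClassificationLoss(gamma=2.0, alpha=0.25)
    logits = torch.randn(2, 10, 3)            # (B, #anchors, #classes)
    targets = torch.zeros(2, 10, 3)
    targets[..., 0] = 1.0                      # one-hot classification targets
    weights = torch.ones(2, 10)                # (B, #anchors) anchor-wise weights
    loss = loss_fn(logits, targets, weights)   # (B, #anchors, #classes)
    return loss.sum()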
class WeightedSmoothL1Loss(nn.Module):
"""
Code-wise Weighted Smooth L1 Loss modified based on fvcore.nn.smooth_l1_loss
https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/smooth_l1_loss.py
| 0.5 * x ** 2 / beta if abs(x) < beta
smoothl1(x) = |
| abs(x) - 0.5 * beta otherwise,
where x = input - target.
"""
def __init__(self, beta: float = 1.0 / 9.0, code_weights: list = None):
"""
Args:
beta: Scalar float.
L1 to L2 change point.
For beta values < 1e-5, L1 loss is computed.
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedSmoothL1Loss, self).__init__()
self.beta = beta
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
else:
self.code_weights = None  # avoid AttributeError in forward when no code weights are given
@staticmethod
def smooth_l1_loss(diff, beta):
if beta < 1e-5:
loss = torch.abs(diff)
else:
n = torch.abs(diff)
loss = torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)
return loss
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted smooth l1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = self.smooth_l1_loss(diff, self.beta)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedL1Loss(nn.Module):
def __init__(self, code_weights: list = None):
"""
Args:
code_weights: (#codes) float list if not None.
Code-wise weights.
"""
super(WeightedL1Loss, self).__init__()
if code_weights is not None:
self.code_weights = np.array(code_weights, dtype=np.float32)
self.code_weights = torch.from_numpy(self.code_weights).cuda()
else:
self.code_weights = None  # avoid AttributeError in forward when no code weights are given
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor = None):
"""
Args:
input: (B, #anchors, #codes) float tensor.
Encoded predicted locations of objects.
target: (B, #anchors, #codes) float tensor.
Regression targets.
weights: (B, #anchors) float tensor if not None.
Returns:
loss: (B, #anchors) float tensor.
Weighted L1 loss without reduction.
"""
target = torch.where(torch.isnan(target), input, target) # ignore nan targets
diff = input - target
# code-wise weighting
if self.code_weights is not None:
diff = diff * self.code_weights.view(1, 1, -1)
loss = torch.abs(diff)
# anchor-wise weighting
if weights is not None:
assert weights.shape[0] == loss.shape[0] and weights.shape[1] == loss.shape[1]
loss = loss * weights.unsqueeze(-1)
return loss
class WeightedCrossEntropyLoss(nn.Module):
"""
Transform input to fit the format of the official PyTorch cross entropy loss,
with anchor-wise weighting.
"""
def __init__(self):
super(WeightedCrossEntropyLoss, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor, weights: torch.Tensor):
"""
Args:
input: (B, #anchors, #classes) float tensor.
Predicted logits for each class.
target: (B, #anchors, #classes) float tensor.
One-hot classification targets.
weights: (B, #anchors) float tensor.
Anchor-wise weights.
Returns:
loss: (B, #anchors) float tensor.
Weighted cross entropy loss without reduction
"""
input = input.permute(0, 2, 1)
target = target.argmax(dim=-1)
loss = F.cross_entropy(input, target, reduction='none') * weights
return loss
def get_corner_loss_lidar(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (N) float Tensor.
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
# The flip here is presumably meant to ignore heading direction; in practice, would including the heading in the whole loss be better, or would it cause instability?
gt_bbox3d_flip = gt_bbox3d.clone()
gt_bbox3d_flip[:, 6] += np.pi
gt_box_corners_flip = box_utils.boxes_to_corners_3d(gt_bbox3d_flip)
# (N, 8)
corner_dist = torch.min(torch.norm(pred_box_corners - gt_box_corners, dim=2),
torch.norm(pred_box_corners - gt_box_corners_flip, dim=2))
# (N, 8)
corner_loss = WeightedSmoothL1Loss.smooth_l1_loss(corner_dist, beta=1.0)
return corner_loss.mean(dim=1)
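# Hedged usage sketch (not part of the original file, runnable inside this
# package): boxes are assumed to be (x, y, z, dx, dy, dz, heading) as consumed
# by box_utils.boxes_to_corners_3d; the values are illustrative only.
def _corner_loss_example():
    pred = torch.tensor([[0.0, 0.0, 0.0, 4.0, 2.0, 1.5, 0.0]])
    gt = torch.tensor([[0.2, 0.0, 0.0, 4.0, 2.0, 1.5, 0.1]])
    return get_corner_loss_lidar(pred, gt)     # (N,) per-box corner loss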
def get_corner_loss_mse(pred_bbox3d: torch.Tensor, gt_bbox3d: torch.Tensor):
"""
Args:
pred_bbox3d: (N, 7) float Tensor.
gt_bbox3d: (N, 7) float Tensor.
Returns:
corner_loss: (1,) float scaler
"""
assert pred_bbox3d.shape[0] == gt_bbox3d.shape[0]
# (N, 8, 3)
pred_box_corners = box_utils.boxes_to_corners_3d(pred_bbox3d)
gt_box_corners = box_utils.boxes_to_corners_3d(gt_bbox3d)
# print('==> pred_box_corners[0, :, :]')
# print(pred_box_corners[0,:,:])
# print('==> gt_box_corners[0, :, :]')
# print(gt_box_corners[0,:,:])
# print('==> pred_box_corners[10, :, :]')
# print(pred_box_corners[10,:,:])
# print('==> gt_box_corners[10, :, :]')
# print(gt_box_corners[10,:,:])
# print('==> pred_box_corners[100, :, :]')
# print(pred_box_corners[100,:,:])
# print('==> gt_box_corners[100, :, :]')
# print(gt_box_corners[100,:,:])
# for each box, mean by 8 corners.
corner_loss_x = F.mse_loss(input=pred_box_corners[:,:,0], target=gt_box_corners[:,:,0]) # (N, 8) -> (N)
corner_loss_y = F.mse_loss(input=pred_box_corners[:,:,1], target=gt_box_corners[:,:,1]) # (N, 8) -> (N)
corner_loss_z = F.mse_loss(input=pred_box_corners[:,:,2], target=gt_box_corners[:,:,2]) # (N, 8) -> (N)
# sum over x, y and z
corner_loss = corner_loss_x + corner_loss_y + corner_loss_z
return corner_loss
def get_iouscore_loss_bce(iou_preds, iou_gts, iou_fg_thresh=0.75, iou_bg_thresh=0.25):
"""
Args:
iou_preds: (N,)
iou_gts: (N, )
Returns:
loss_iouscore:
"""
# prepare the labels
# now only for car class, 08132020
# iou_preds = iou_preds.view(-1)
# iou_gts = iou_gts.view(-1)
# print('==> iou_preds.size()')
# print(iou_preds.size())
# print(torch.sigmoid(iou_preds))
# print('==> iou_gts.size()')
# print(iou_gts.size())
# print(iou_gts)
# CLS_FG_THRESH: 0.75
# CLS_BG_THRESH: 0.25
# iou_bg_thresh = self.roi_sampler_cfg.CLS_BG_THRESH
# iou_fg_thresh = self.roi_sampler_cfg.CLS_FG_THRESH
# iou_bg_thresh = 0.25
# iou_fg_thresh = 0.75
fg_mask = iou_gts > iou_fg_thresh
bg_mask = iou_gts < iou_bg_thresh
interval_mask = (fg_mask == 0) & (bg_mask == 0)
iou_cls_labels = (fg_mask > 0).float()
iou_cls_labels[interval_mask] = \
(iou_gts[interval_mask] - iou_bg_thresh) / (iou_fg_thresh - iou_bg_thresh)
# print('==> iou_cls_labels')
# print(iou_cls_labels.size())
# print(iou_cls_labels[:50])
# The BCE here is computed over the whole IoU range, but the final sum only covers the part with iou >= 0.
# The condition iou_cls_labels >= 0 selects the candidate boxes with iou >= 0.
loss_ioucls = F.binary_cross_entropy(torch.sigmoid(iou_preds), iou_cls_labels.float(), reduction='none')
cls_valid_mask = (iou_cls_labels >= 0).float()
loss_iouscore = (loss_ioucls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
return loss_iouscore
def get_rot_binres_loss(pred_reg, reg_label, num_head_bin, get_ry_fine=False):
"""
Bin-based 3D bounding boxes regression loss. See https://arxiv.org/abs/1812.04244 for more details.
:param pred_reg: (N, C)
:param reg_label: (N, 1), ry
:param num_head_bin: constant
:param get_ry_fine: False
:return:
"""
# print('==> pred_reg.size()')
# print(pred_reg.size()) # should be (N, 24)
reg_loss_dict = {}
# angle loss
start_offset = 0
ry_bin_l, ry_bin_r = start_offset, start_offset + num_head_bin
ry_res_l, ry_res_r = ry_bin_r, ry_bin_r + num_head_bin
start_offset = ry_res_r
ry_label = reg_label.squeeze(dim=-1)
# print('==> reg_label[] in encode')
# print(reg_label.size()) # should be (N, C)
# print(reg_label[100:150])
# print('==> ry_label[] in encode')
# print(ry_label.size()) # should be (N,)
# print(ry_label[100:150])
if get_ry_fine:
assert False, "one-stage should not get_ry_fine."
# divide pi/2 into several bins
angle_per_class = (np.pi / 2) / num_head_bin
ry_label = ry_label % (2 * np.pi) # 0 ~ 2pi
opposite_flag = (ry_label > np.pi * 0.5) & (ry_label < np.pi * 1.5)
ry_label[opposite_flag] = (ry_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi)
shift_angle = (ry_label + np.pi * 0.5) % (2 * np.pi) # (0 ~ pi)
shift_angle = torch.clamp(shift_angle - np.pi * 0.25, min=1e-3, max=np.pi * 0.5 - 1e-3) # (0, pi/2)
# bin center is (5, 10, 15, ..., 85)
ry_bin_label = (shift_angle / angle_per_class).floor().long()
ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)
ry_res_norm_label = ry_res_label / (angle_per_class / 2)
else:
# divide 2pi into several bins
angle_per_class = (2 * np.pi) / num_head_bin
heading_angle = ry_label % (2 * np.pi) # 0 ~ 2pi
# print('==> heading_angle[] in encode')
# print(heading_angle.size())
# print(heading_angle[100:150])
shift_angle = (heading_angle + angle_per_class / 2) % (2 * np.pi)
ry_bin_label = (shift_angle / angle_per_class).floor().long()
ry_res_label = shift_angle - (ry_bin_label.float() * angle_per_class + angle_per_class / 2)
ry_res_norm_label = ry_res_label / (angle_per_class / 2)
# print('==> ry_bin_label in encode')
# print(ry_bin_label.size())
# print(ry_bin_label[100:150])
ry_bin_onehot = torch.cuda.FloatTensor(ry_bin_label.size(0), num_head_bin).zero_()
ry_bin_onehot.scatter_(1, ry_bin_label.view(-1, 1).long(), 1)
loss_ry_bin = F.cross_entropy(pred_reg[:, ry_bin_l:ry_bin_r], ry_bin_label)
loss_ry_res = F.smooth_l1_loss((pred_reg[:, ry_res_l: ry_res_r] * ry_bin_onehot).sum(dim=1), ry_res_norm_label)
reg_loss_dict['loss_ry_bin'] = loss_ry_bin.item()
reg_loss_dict['loss_ry_res'] = loss_ry_res.item()
angle_loss = loss_ry_bin + loss_ry_res
# Total regression loss
reg_loss_dict['loss_angle'] = angle_loss
return angle_loss, reg_loss_dict
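# Hedged usage sketch (not part of the original file): with num_head_bin bins the
# first num_head_bin channels of pred_reg are bin logits and the next
# num_head_bin are normalized residuals. Requires CUDA because the one-hot
# buffer above uses torch.cuda.FloatTensor. Shapes are illustrative only.
def _rot_binres_loss_example(num_head_bin=12):
    pred_reg = torch.randn(5, 2 * num_head_bin).cuda()
    ry_label = (torch.rand(5, 1) * 2 * np.pi).cuda()
    angle_loss, loss_dict = get_rot_binres_loss(pred_reg, ry_label, num_head_bin)
    return angle_loss, loss_dict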
class CenterNetFocalLoss(nn.Module):
'''nn.Module wrapper for focal loss'''
def __init__(self, gamma=4, alpha=2):
super(CenterNetFocalLoss, self).__init__()
# self.neg_loss = _neg_loss
self.gamma = gamma
self.alpha = alpha
def _sigmoid(self, x):
# y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)
# don't use the in-place version!
y = torch.clamp(torch.sigmoid(x), min=1e-4, max=1 - 1e-4)
# a clamp range that is too small will cause NaN loss.
# y = torch.clamp(x.sigmoid_(), min=1e-12, max=1 - 1e-12)
return y
def _neg_loss(self, pred, gt):
''' Modified focal loss. Exactly the same as CornerNet.
Runs faster and costs a little bit more memory
Arguments:
pred: (batch x c x h x w), should already be clamped (see _sigmoid above).
gt: (batch x c x h x w)
'''
pos_inds = gt.eq(1).float()
neg_inds = gt.lt(1).float()
# neg_weights = torch.pow(1 - gt, 4)
neg_weights = torch.pow(1 - gt, self.gamma)
loss = 0
# pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
# neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
pos_loss = torch.log(pred) * torch.pow(1 - pred, self.alpha) * pos_inds
neg_loss = torch.log(1 - pred) * torch.pow(pred, self.alpha) * neg_weights * neg_inds
num_pos = pos_inds.float().sum()
pos_loss = pos_loss.sum()
neg_loss = neg_loss.sum()
if num_pos == 0:
loss = loss - neg_loss
else:
loss = loss - (pos_loss + neg_loss) / num_pos
return loss
def forward(self, out, target):
out_norm = self._sigmoid(out)
return self._neg_loss(out_norm, target)
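# Hedged usage sketch (not part of the original file): `out` holds raw heatmap
# logits and `target` a Gaussian-splatted ground-truth heatmap in [0, 1] whose
# peaks are exactly 1. The shapes are illustrative assumptions.
def _centernet_focal_example():
    focal = CenterNetFocalLoss()
    out = torch.randn(2, 3, 32, 32)            # (B, C, H, W) logits
    target = torch.rand(2, 3, 32, 32) * 0.9    # background responses below 1
    target[0, 0, 16, 16] = 1.0                 # one positive center
    return focal(out, target)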
class CenterNetResLoss(nn.Module):
def __init__(self, cfg):
super(CenterNetResLoss, self).__init__()
self.res_func_type = cfg['res_func']
def forward(self, output, mask, ind, target):
"""
Args:
output: torch.Size([B, C, 152, 152])
mask: torch.Size([B, max_objs])
ind: torch.Size([B, max_objs])
target: torch.Size([B, max_objs, C])
Returns:
reduced and weighted loss term.
"""
pred = center_utils._transpose_and_gather_feat(output, ind) # (B, max_objs, C)
# print('==> (ind != 0).float().sum(): ', (ind != 0).float().sum() )
# print('==> mask.sum(): ', mask.sum() )
if mask.sum():
# 1. flatten.
pred_flat = pred.view(-1, pred.shape[-1]) #(B*max_objs, C)
target_flat = target.view(-1, target.shape[-1]) #(B*max_objs, C)
mask_flat = mask.view(-1).bool() #(B*max_objs)
# 2. valid select
pred_valid = pred_flat[mask_flat] #(num_valid, C)
target_valid = target_flat[mask_flat] #(num_valid, C)
# 3. un-reduced loss term
if self.res_func_type == 'smooth-l1':
loss = F.smooth_l1_loss(pred_valid, target_valid, reduction='none')
elif self.res_func_type == 'l1':
loss = F.l1_loss(pred_valid, target_valid, reduction='none')
elif self.res_func_type == 'balanced_l1':
loss = get_balanced_l1_loss(pred_valid, target_valid)
else:
raise NotImplementedError
# mean for num_obj_dims, sum for channel_dims
# (num_valid, C) -> (C) -> ()
loss = loss.mean(dim=0).sum()
else:
loss = 0.
return loss
class CenterNetRotBinResLoss(nn.Module):
def __init__(self, cfg):
super(CenterNetRotBinResLoss, self).__init__()
self.num_head_bin = cfg['num_bins']
def forward(self, output, mask, ind, target):
"""
Args:
output: torch.Size([B, C, 152, 152])
mask: torch.Size([B, max_objs])
ind: torch.Size([B, max_objs])
target: torch.Size([B, max_objs, C])
Returns:
reduced and weighted loss term.
"""
pred = center_utils._transpose_and_gather_feat(output, ind) # torch.Size([1, 500, 2])
if mask.sum():
# 1. flatten
pred_flat = pred.view(-1, pred.shape[-1]) # (B*max_objs, C)
target_flat = target.view(-1, target.shape[-1]) # (B*max_objs, 1)
mask_flat = mask.view(-1).bool() # (B*max_objs)
# 2. valid select
pred_valid = pred_flat[mask_flat] # (num_valid, C)
target_valid = target_flat[mask_flat] # (num_valid, 1)
# 3. return the reduced rot loss term.
loss, _ = get_rot_binres_loss(pred_valid, target_valid, num_head_bin=self.num_head_bin)
else:
loss = 0.
# print('==> loss in rot')
# print(loss)
return loss
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
NOTE probas should be applied with softmax.
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
# print('==> lovasz_softmax, classes: ', classes)
# print('==> lovasz_softmax, per_image: ', per_image)
# print('==> lovasz_softmax, ignore: ', ignore)
if per_image:
loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
for prob, lab in zip(probas, labels))
else:
loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
return loss
def lovasz_softmax_flat(probas, labels, classes='present'):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if (classes == 'present' and fg.sum() == 0):
continue
if C == 1:
if len(classes) > 1:
raise ValueError('Sigmoid output possible only with 1 class')
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
return mean(losses)
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 2:
# do nothing, 3D segmentation for sparse tensor
pass
elif probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
C = 1  # sigmoid output has a single channel; define C before the flatten below
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
elif probas.dim() == 5:
# 3D segmentation for dense tensor
B, C, L, H, W = probas.size()
probas = probas.contiguous().view(B, C, L, H*W)
        probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)  # B * L * H * W, C = P, C
labels = labels.view(-1)
if ignore is not None:
valid = (labels != ignore)
# vprobas = probas[valid.nonzero().squeeze()]
# for newer pytorch
vprobas = probas[torch.nonzero(valid, as_tuple=False).squeeze()]
vlabels = labels[valid]
return vprobas, vlabels
else:
return probas, labels
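# Hedged shape sketch for flatten_probas (shapes are assumptions):
#   dense 3D input: probas (B, C, L, H, W), labels (B, L, H, W) -> (B*L*H*W, C), (B*L*H*W,)
#   with ignore set, positions whose label equals `ignore` are dropped from both outputs.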
# --------------------------- HELPER FUNCTIONS ---------------------------
def isnan(x):
return x != x
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n
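# Hedged usage sketch for mean() (assumes ifilterfalse is bound to itertools.filterfalse earlier in this file):
#   mean([1.0, 2.0, 3.0])                          -> 2.0
#   mean([], empty=0)                              -> 0 (empty input falls back to `empty`)
#   mean([float('nan'), 4.0], ignore_nan=True)     -> 4.0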
| [
"torch.sigmoid",
"torch.nonzero",
"torch.nn.functional.smooth_l1_loss",
"torch.isnan",
"torch.nn.functional.l1_loss",
"torch.norm",
"torch.autograd.Variable",
"torch.clamp",
"torch.from_numpy",
"torch.nn.functional.mse_loss",
"torch.abs",
"torch.nn.functional.cross_entropy",
"torch.log",
"torch.sort",
"torch.where",
"torch.pow"
] | 1.1 | jialeli1/From-Voxel-to-Point | b4dba9c4e9cd83e04199d9224f6ec7bf06b71f93 |
1.3 | """
# -*- coding: utf-8 -*-
-----------------------------------------------------------------------------------
# Author: Nguyen Mau Dung
# DoC: 2020.08.17
# email: [email protected]
-----------------------------------------------------------------------------------
# Description: The configurations of the project will be defined here
"""
import os
import argparse
import torch
from easydict import EasyDict as edict
import kitti_config as cnf
def parse_train_configs():
parser = argparse.ArgumentParser(description='The Implementation using PyTorch')
parser.add_argument('--seed', type=int, default=2020,
help='re-produce the results with seed random')
parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',
help='The name using for saving logs, models,...')
parser.add_argument('--root-dir', type=str, default='../', metavar='PATH',
help='The ROOT working directory')
####################################################################
############## Model configs ########################
####################################################################
parser.add_argument('--arch', type=str, default='fpn_resnet_18', metavar='ARCH',
help='The name of the model architecture')
parser.add_argument('--pretrained_path', type=str, default=None, metavar='PATH',
help='the path of the pretrained checkpoint')
####################################################################
############## Dataloader and Running configs #######
####################################################################
parser.add_argument('--hflip_prob', type=float, default=0.5,
help='The probability of horizontal flip')
parser.add_argument('--no-val', action='store_true',
help='If true, dont evaluate the model on the val set')
parser.add_argument('--num_samples', type=int, default=None,
help='Take a subset of the dataset to run and debug')
parser.add_argument('--num_workers', type=int, default=4,
help='Number of threads for loading data')
parser.add_argument('--batch_size', type=int, default=16,
help='mini-batch size (default: 16), this is the total'
'batch size of all GPUs on the current node when using'
'Data Parallel or Distributed Data Parallel')
parser.add_argument('--print_freq', type=int, default=50, metavar='N',
help='print frequency (default: 50)')
parser.add_argument('--tensorboard_freq', type=int, default=50, metavar='N',
help='frequency of saving tensorboard (default: 50)')
parser.add_argument('--checkpoint_freq', type=int, default=2, metavar='N',
                        help='frequency of saving checkpoints (default: 2)')
####################################################################
############## Training strategy ####################
####################################################################
parser.add_argument('--start_epoch', type=int, default=1, metavar='N',
help='the starting epoch')
parser.add_argument('--num_epochs', type=int, default=300, metavar='N',
help='number of total epochs to run')
parser.add_argument('--lr_type', type=str, default='cosin',
help='the type of learning rate scheduler (cosin or multi_step or one_cycle)')
parser.add_argument('--lr', type=float, default=0.003, metavar='LR',
help='initial learning rate')
parser.add_argument('--minimum_lr', type=float, default=1e-7, metavar='MIN_LR',
help='minimum learning rate during training')
parser.add_argument('--momentum', type=float, default=0.949, metavar='M',
help='momentum')
parser.add_argument('-wd', '--weight_decay', type=float, default=0., metavar='WD',
help='weight decay (default: 0.)')
parser.add_argument('--optimizer_type', type=str, default='adam', metavar='OPTIMIZER',
help='the type of optimizer, it can be sgd or adam')
parser.add_argument('--steps', nargs='*', default=[150, 180],
help='number of burn in step')
####################################################################
############## Loss weight ##########################
####################################################################
####################################################################
############## Distributed Data Parallel ############
####################################################################
parser.add_argument('--world-size', default=-1, type=int, metavar='N',
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int, metavar='N',
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://127.0.0.1:29500', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--gpu_idx', default=0, type=int,
help='GPU index to use.')
parser.add_argument('--no_cuda', action='store_true',
help='If true, cuda is not used.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
####################################################################
############## Evaluation configurations ###################
####################################################################
parser.add_argument('--evaluate', action='store_true',
help='only evaluate the model, not training')
parser.add_argument('--resume_path', type=str, default=None, metavar='PATH',
help='the path of the resumed checkpoint')
parser.add_argument('--K', type=int, default=50,
help='the number of top K')
configs = edict(vars(parser.parse_args()))
####################################################################
############## Hardware configurations #############################
####################################################################
configs.device = torch.device('cpu' if configs.no_cuda else 'cuda')
configs.ngpus_per_node = torch.cuda.device_count()
configs.pin_memory = True
configs.input_size = (cnf.BEV_WIDTH, cnf.BEV_HEIGHT)
configs.down_ratio = 2
configs.hm_size = (cnf.BEV_WIDTH/configs.down_ratio, cnf.BEV_HEIGHT/configs.down_ratio)
configs.max_objects = 50
configs.imagenet_pretrained = True
configs.head_conv = 256
configs.num_classes = 1
configs.num_center_offset = 2
configs.num_z = 1
configs.num_dim = 3
configs.num_direction = 2 # sin, cos 8 for bin cos sin
configs.voxel_size = [0.16, 0.16, 4]
configs.point_cloud_range =[0, -34.56, -2.73, 69.12, 34.56, 1.27]
configs.max_number_of_points_per_voxel = 100
configs.heads = {
'hm_cen': configs.num_classes,
'cen_offset': configs.num_center_offset,
'direction': configs.num_direction,
'z_coor': configs.num_z,
'dim': configs.num_dim
}
configs.num_input_features = 4
####################################################################
############## Dataset, logs, Checkpoints dir ######################
####################################################################
configs.dataset_dir = '/media/wx/File/data/kittidata'
configs.checkpoints_dir = os.path.join(configs.root_dir, 'checkpoints', configs.saved_fn)
configs.logs_dir = os.path.join(configs.root_dir, 'logs', configs.saved_fn)
if not os.path.isdir(configs.checkpoints_dir):
os.makedirs(configs.checkpoints_dir)
if not os.path.isdir(configs.logs_dir):
os.makedirs(configs.logs_dir)
return configs
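# Hedged usage sketch (the real entry point is the training script; `build_model` is a placeholder name):
#   configs = parse_train_configs()
#   print(configs.arch, configs.batch_size, configs.device)   # edict allows attribute access
#   model = build_model(configs)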
| [
"torch.device",
"torch.cuda.device_count"
] | 1.3.0 | wangx1996/CenterPillarNet | 4be3d53265b8ecb1f9572612fa87f7acd8c57669 |
1.3 | """
# -*- coding: utf-8 -*-
-----------------------------------------------------------------------------------
# Author: Nguyen Mau Dung
# DoC: 2020.08.17
# email: [email protected]
-----------------------------------------------------------------------------------
# Description: This script for training
# Modified: Wang Xu
# email: [email protected]
"""
import argparse
import os
import time
import numpy as np
import sys
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from config import kitti_config as cnf
import torch
import torch.utils.data.distributed
from tqdm import tqdm
from easydict import EasyDict as edict
import cv2
sys.path.append('./')
from data_process.kitti_dataloader import create_val_dataloader
from models.model_utils import create_model
from utils.misc import AverageMeter, ProgressMeter
from utils.misc import make_folder, time_synchronized
from utils.evaluation_utils import decode, post_processingv2, \
get_batch_statistics_rotated_bbox, ap_per_class, \
load_classes, convert_det_to_real_values_v2
from utils.visualization_utils import project_to_image, compute_box_3d, draw_box_3d
from data_process.transformation import lidar_to_camera_box
from spconv.utils import VoxelGeneratorV2
from utils.torch_utils import _sigmoid
from data_process.kitti_data_utils import Calibration
import mayavi.mlab
import config.kitti_config as cnf
from utils.evaluation_utils import decode, post_processing, draw_predictions, convert_det_to_real_values
def inverse_rigid_trans(Tr):
''' Inverse a rigid body transform matrix (3x4 as [R|t])
[R'|-R't; 0|1]
'''
inv_Tr = np.zeros_like(Tr) # 3x4
inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])
inv_Tr[0:3,3] = np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])
return inv_Tr
V2C= np.array([7.533745000000e-03, -9.999714000000e-01, -6.166020000000e-04,
-4.069766000000e-03, 1.480249000000e-02, 7.280733000000e-04,
-9.998902000000e-01, -7.631618000000e-02, 9.998621000000e-01,
7.523790000000e-03, 1.480755000000e-02, -2.717806000000e-01])
V2C = np.reshape(V2C, [3, 4])
C2V = inverse_rigid_trans(V2C)
R0 = np.array([9.999239000000e-01, 9.837760000000e-03, -7.445048000000e-03, -9.869795000000e-03,
9.999421000000e-01, -4.278459000000e-03, 7.402527000000e-03, 4.351614000000e-03,
9.999631000000e-01])
R0 = np.reshape(R0, [3, 3])
def cart2hom(pts_3d):
''' Input: nx3 points in Cartesian
        Output: nx4 points in Homogeneous coordinates by appending 1
'''
n = pts_3d.shape[0]
pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1))))
return pts_3d_hom
def project_ref_to_velo(pts_3d_ref):
pts_3d_ref = cart2hom(pts_3d_ref) # nx4
return np.dot(pts_3d_ref, np.transpose(C2V))
def project_rect_to_ref(pts_3d_rect):
''' Input and Output are nx3 points '''
return np.transpose(np.dot(np.linalg.inv(R0), np.transpose(pts_3d_rect)))
def project_rect_to_velo(pts_3d_rect):
''' Input: nx3 points in rect camera coord.
Output: nx3 points in velodyne coord.
'''
pts_3d_ref = project_rect_to_ref(pts_3d_rect)
return project_ref_to_velo(pts_3d_ref)
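# Hedged sketch of the coordinate chain above (KITTI convention; V2C/R0/C2V are the module-level matrices):
#   rect camera frame --inv(R0)--> reference camera frame --C2V--> velodyne frame
#   pts_velo = project_rect_to_velo(np.array([[x_rect, y_rect, z_rect]]))   # (1, 3) in, (1, 3) out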
def rotz(t):
    ''' Rotation about the z-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
def roty(t):
''' Rotation about the y-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def draw_gt_boxes3d(gt_boxes3d, score,fig, color=(1,1,1), line_width=1, draw_text=True, text_scale=(1,1,1), color_list=None, ):
''' Draw 3D bounding boxes
Args:
gt_boxes3d: numpy array (n,8,3) for XYZs of the box corners
fig: mayavi figure handler
color: RGB value tuple in range (0,1), box line color
line_width: box line width
draw_text: boolean, if true, write box indices beside boxes
text_scale: three number tuple
color_list: a list of RGB tuple, if not None, overwrite color.
Returns:
fig: updated fig
'''
num = len(gt_boxes3d)
for n in range(num):
b = gt_boxes3d[n]
if color_list is not None:
color = color_list[n]
#if draw_text: mayavi.mlab.text3d(b[4,0], b[4,1], b[4,2], 'car'+"{:.2f}".format(float(score)), scale=text_scale, color=(1,1,1), figure=fig)
for k in range(0,4):
#http://docs.enthought.com/mayavi/mayavi/auto/mlab_helper_functions.html
i,j=k,(k+1)%4
mayavi.mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
i,j=k+4,(k+1)%4 + 4
mayavi.mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
i,j=k,k+4
mayavi.mlab.plot3d([b[i,0], b[j,0]], [b[i,1], b[j,1]], [b[i,2], b[j,2]], color=color, tube_radius=None, line_width=line_width, figure=fig)
#mlab.show(1)
#mlab.view(azimuth=180, elevation=70, focalpoint=[ 12.0909996 , -1.04700089, -2.03249991], distance=62.0, figure=fig)
return fig
def show3dlidar(pointpaht, detections,V2C, R0, P2):
pointcloud = np.fromfile(pointpaht, dtype=np.float32).reshape(-1, 4)
x = pointcloud[:, 0] # x position of point
xmin = np.amin(x, axis=0)
xmax = np.amax(x, axis=0 )
y = pointcloud[:, 1] # y position of point
ymin = np.amin(y, axis=0)
ymax = np.amax(y, axis=0)
z = pointcloud[:, 2] # z position of point
zmin = np.amin(z, axis=0)
zmax = np.amax(z, axis=0)
d = np.sqrt(x ** 2 + y ** 2) # Map Distance from sensor
vals = 'height'
if vals == "height":
col = z
else:
col = d
fig = mayavi.mlab.figure(bgcolor=(0, 0, 0), size=(640, 500))
mayavi.mlab.points3d(x, y, z,
col, # Values used for Color
mode="point",
                         # pseudo-color mapping for the grayscale values
colormap='Blues', # 'bone', 'copper', 'gnuplot'
# color=(0, 1, 0), # Used a fixed (r,g,b) instead
figure=fig,
)
    # draw the origin
mayavi.mlab.points3d(0, 0, 0, color=(1, 1, 1), mode="sphere",scale_factor=0.2)
print(detections.shape)
detections[:, 1:8] = lidar_to_camera_box(detections[:, 1:8], V2C, R0, P2)
for i in range(detections.shape[0]):
h = float(detections[i][4])
w = float(detections[i][5])
l = float(detections[i][6])
x = float(detections[i][1])
y = float(detections[i][2])
z = float(detections[i][3])
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] ;
y_corners = [0, 0, 0, 0, -h, -h, -h, -h] ;
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2];
#print(x_corners)
#print(detections[i])
R = roty(float(detections[i][7]))
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
# print corners_3d.shape
#corners_3d = np.zeros((3,8))
corners_3d[0, :] = corners_3d[0, :] + x;
corners_3d[1, :] = corners_3d[1, :] + y;
corners_3d[2, :] = corners_3d[2, :] + z;
corners_3d = np.transpose(corners_3d)
box3d_pts_3d_velo = project_rect_to_velo(corners_3d)
#x1, y1, z1 = box3d_pts_3d_velo[0, :]
#x2, y2, z2 = box3d_pts_3d_velo[1, :]
if detections[i][0] == 1.0:
draw_gt_boxes3d([box3d_pts_3d_velo],1,color=(1,0,0), fig=fig)
else:
draw_gt_boxes3d([box3d_pts_3d_velo], 1, color=(0, 1, 0), fig=fig)
    # draw the coordinate axes
'''axes = np.array(
[[20.0, 0.0, 0.0, 0.0], [0.0, 20.0, 0.0, 0.0], [0.0, 0.0, 20.0, 0.0]],
dtype=np.float64,
)
    # x-axis
mayavi.mlab.plot3d(
[0, axes[0, 0]],
[0, axes[0, 1]],
[0, axes[0, 2]],
color=(1, 0, 0),
tube_radius=None,
figure=fig,
)
    # y-axis
mayavi.mlab.plot3d(
[0, axes[1, 0]],
[0, axes[1, 1]],
[0, axes[1, 2]],
color=(0, 1, 0),
tube_radius=None,
figure=fig,
)
    # z-axis
mayavi.mlab.plot3d(
[0, axes[2, 0]],
[0, axes[2, 1]],
[0, axes[2, 2]],
color=(0, 0, 1),
tube_radius=None,
figure=fig,
)'''
mayavi.mlab.show()
def evaluate_mAP(val_loader, model, configs, logger):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
progress = ProgressMeter(len(val_loader), [batch_time, data_time],
prefix="Evaluation phase...")
labels = []
sample_metrics = [] # List of tuples (TP, confs, pred)
# switch to evaluate mode
model.eval()
class_id = {0:'Car', 1:'Pedestrian', 2:'Cyclist'}
with torch.no_grad():
start_time = time.time()
for batch_idx, batch_data in enumerate(tqdm(val_loader)):
metadatas, targets= batch_data
batch_size = len(metadatas['img_path'])
voxelinput = metadatas['voxels']
coorinput = metadatas['coors']
numinput = metadatas['num_points']
dtype = torch.float32
voxelinputr = torch.tensor(
voxelinput, dtype=torch.float32, device=configs.device).to(dtype)
coorinputr = torch.tensor(
coorinput, dtype=torch.int32, device=configs.device)
numinputr = torch.tensor(
numinput, dtype=torch.int32, device=configs.device)
t1 = time_synchronized()
outputs = model(voxelinputr, coorinputr, numinputr)
outputs = outputs._asdict()
outputs['hm_cen'] = _sigmoid(outputs['hm_cen'])
outputs['cen_offset'] = _sigmoid(outputs['cen_offset'])
# detections size (batch_size, K, 10)
img_path = metadatas['img_path'][0]
#print(img_path)
calib = Calibration(img_path.replace(".png", ".txt").replace("image_2", "calib"))
detections = decode(outputs['hm_cen'], outputs['cen_offset'], outputs['direction'], outputs['z_coor'],
outputs['dim'], K=configs.K)
detections = detections.cpu().numpy().astype(np.float32)
detections = post_processing(detections, configs.num_classes, configs.down_ratio, configs.peak_thresh)
for i in range(configs.batch_size):
detections[i] = convert_det_to_real_values(detections[i])
img_path = metadatas['img_path'][i]
                # print(img_path)
datap = str.split(img_path,'/')
filename = str.split(datap[7],'.')
file_write_obj = open('../result/' + filename[0] + '.txt', 'w')
lidar_path = '/' + datap[1] + '/' + datap[2] + '/' + datap[3] + '/' + \
datap[4] + '/' + datap[5] + '/' + 'velodyne' + '/' + filename[0] + '.bin'
#print(lidar_path)
#show3dlidar(lidar_path, detections[i], calib.V2C, calib.R0, calib.P2)
dets = detections[i]
if len(dets) >0 :
dets[:, 1:] = lidar_to_camera_box(dets[:, 1:], calib.V2C, calib.R0, calib.P2)
for box_idx, label in enumerate(dets):
location, dim, ry = label[1:4], label[4:7], label[7]
if ry < -np.pi:
ry = 2*np.pi + ry
if ry > np.pi:
ry = -2*np.pi + ry
corners_3d = compute_box_3d(dim, location, ry)
corners_2d = project_to_image(corners_3d, calib.P2)
minxy = np.min(corners_2d, axis=0)
maxxy = np.max(corners_2d, axis=0)
bbox = np.concatenate([minxy, maxxy], axis=0)
if bbox[0] < 0 or bbox[2]<0:
continue
if bbox[1] > 1272 or bbox[3] > 375:
continue
oblist = ['Car',' ','0.0', ' ', '0', ' ', '-10', ' ','%.2f'%bbox[0], ' ', \
'%.2f' %bbox[1], ' ','%.2f'%bbox[2], ' ','%.2f'%bbox[3], ' ','%.2f'%dim[0], ' ','%.2f'%dim[1], ' ','%.2f'%dim[2], ' ', \
'%.2f' %location[0], ' ','%.2f'%location[1], ' ','%.2f'%location[2], ' ', '%.2f'%ry, '\n']
file_write_obj.writelines(oblist)
file_write_obj.close()
'''for sample_i in range(len(detections)):
# print(output.shape)
num = targets['count'][sample_i]
# print(targets['batch'][sample_i][:num].shape)
target = targets['batch'][sample_i][:num]
#print(target[:, 8].tolist())
labels += target[:, 8].tolist()
sample_metrics += get_batch_statistics_rotated_bbox(detections, targets, iou_threshold=configs.iou_thresh)
t2 = time_synchronized()
# measure elapsed time
# torch.cuda.synchronize()
batch_time.update(time.time() - start_time)
# Log message
if logger is not None:
if ((batch_idx + 1) % configs.print_freq) == 0:
logger.info(progress.get_message(batch_idx))
start_time = time.time()
# Concatenate sample statistics
true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_metrics))]
precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)'''
#return precision, recall, AP, f1, ap_class
def parse_eval_configs():
parser = argparse.ArgumentParser(description='Testing config for the Implementation')
parser.add_argument('--classnames-infor-path', type=str, default='/media/wx/File/kittidatabase/classes_names_pillar.txt',
metavar='PATH', help='The class names of objects in the task')
parser.add_argument('--saved_fn', type=str, default='fpn_resnet_18', metavar='FN',
help='The name using for saving logs, models,...')
parser.add_argument('-a', '--arch', type=str, default='fpn_resnet_18', metavar='ARCH',
help='The name of the model architecture')
parser.add_argument('--pretrained_path', type=str,
default='../checkpoints/fpn_resnet_18/fpn_resnet_18_epoch_300.pth', metavar='PATH',
help='the path of the pretrained checkpoint')
parser.add_argument('--K', type=int, default=50,
help='the number of top K')
parser.add_argument('--no_cuda', action='store_true',
help='If true, cuda is not used.')
parser.add_argument('--gpu_idx', default=0, type=int,
help='GPU index to use.')
parser.add_argument('--num_samples', type=int, default=None,
help='Take a subset of the dataset to run and debug')
parser.add_argument('--num_workers', type=int, default=1,
help='Number of threads for loading data')
parser.add_argument('--batch_size', type=int, default=4,
help='mini-batch size (default: 4)')
parser.add_argument('--peak_thresh', type=float, default=0.3)
parser.add_argument('--save_test_output', action='store_true',
help='If true, the output image of the testing phase will be saved')
parser.add_argument('--output_format', type=str, default='image', metavar='PATH',
help='the type of the test output (support image or video)')
parser.add_argument('--output_video_fn', type=str, default='out_fpn_resnet_18', metavar='PATH',
help='the video filename if the output format is video')
parser.add_argument('--output-width', type=int, default=608,
help='the width of showing output, the height maybe vary')
parser.add_argument('--conf_thresh', type=float, default=0.5,
help='for evaluation - the threshold for class conf')
parser.add_argument('--nms_thresh', type=float, default=0.5,
help='for evaluation - the threshold for nms')
parser.add_argument('--iou_thresh', type=float, default=0.5,
help='for evaluation - the threshold for IoU')
configs = edict(vars(parser.parse_args()))
configs.pin_memory = True
configs.distributed = False # For testing on 1 GPU only
configs.input_size = (432, 432)
configs.hm_size = (216, 216)
configs.down_ratio = 2
configs.max_objects = 50
configs.imagenet_pretrained = False
configs.head_conv = 256
configs.num_classes = 1
configs.num_center_offset = 2
configs.num_z = 1
configs.num_dim = 3
configs.num_direction = 2 # sin, cos
configs.voxel_size = [0.16, 0.16, 4]
configs.point_cloud_range = [0, -34.56, -2.73, 69.12, 34.56, 1.27]
configs.max_number_of_points_per_voxel = 100
configs.heads = {
'hm_cen': configs.num_classes,
'cen_offset': configs.num_center_offset,
'direction': configs.num_direction,
'z_coor': configs.num_z,
'dim': configs.num_dim
}
configs.num_input_features = 4
####################################################################
##############Dataset, Checkpoints, and results dir configs#########
####################################################################
configs.root_dir = '../'
configs.dataset_dir = '/media/wx/File/kittidatabase'
if configs.save_test_output:
configs.results_dir = os.path.join(configs.root_dir, 'results', configs.saved_fn)
make_folder(configs.results_dir)
return configs
if __name__ == '__main__':
configs = parse_eval_configs()
configs.distributed = False # For evaluation
class_names = load_classes(configs.classnames_infor_path)
print(configs.iou_thresh)
voxel_generator = VoxelGeneratorV2(
voxel_size=list(configs.voxel_size),
point_cloud_range = list(configs.point_cloud_range),
max_num_points= configs.max_number_of_points_per_voxel,
max_voxels=20000
)
model = create_model(configs, voxel_generator)
print('\n\n' + '-*=' * 30 + '\n\n')
assert os.path.isfile(configs.pretrained_path), "No file at {}".format(configs.pretrained_path)
model.load_state_dict(torch.load(configs.pretrained_path, map_location='cpu'))
print('Loaded weights from {}\n'.format(configs.pretrained_path))
configs.device = torch.device('cpu' if configs.no_cuda else 'cuda:{}'.format(configs.gpu_idx))
model = model.to(device=configs.device)
out_cap = None
model.eval()
print('Create the validation dataloader')
val_dataloader = create_val_dataloader(configs, voxel_generator)
print("\nStart computing mAP...\n")
evaluate_mAP(val_dataloader, model, configs, None)
'''print("\nDone computing mAP...\n")
for idx, cls in enumerate(ap_class):
print("\t>>>\t Class {} ({}): precision = {:.4f}, recall = {:.4f}, AP = {:.4f}, f1: {:.4f}".format(cls, \
class_names[cls][:3], precision[idx], recall[idx], AP[idx], f1[idx]))
print("\nmAP: {}\n".format(AP.mean()))'''
| [
"torch.load",
"torch.tensor",
"torch.no_grad"
] | 1.3.0 | wangx1996/CenterPillarNet | 4be3d53265b8ecb1f9572612fa87f7acd8c57669 |
1.6 | # Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import torch
import torchaudio
from argparse import ArgumentParser
from diffwave.params import AttrDict, params as base_params
from diffwave.model import DiffWave
models = {}
def load_model(model_dir, device):
global models
if os.path.exists(f"{model_dir}/weights.pt"):
checkpoint = torch.load(f"{model_dir}/weights.pt", map_location=device)
else:
checkpoint = torch.load(model_dir, map_location=device)
model = DiffWave(AttrDict(base_params)).to(device)
model.load_state_dict(checkpoint["model"])
model.eval()
models[model_dir] = model
def predict(spectrogram, model_dir=None, params=None, device=torch.device("cuda")):
global models
# Lazy load model.
if not model_dir in models:
load_model(model_dir, device)
model = models[model_dir]
model.params.override(params)
with torch.no_grad():
beta = np.array(model.params.noise_schedule)
alpha = 1 - beta
alpha_cum = np.cumprod(alpha)
# Expand rank 2 tensors by adding a batch dimension.
if len(spectrogram.shape) == 2:
spectrogram = spectrogram.unsqueeze(0)
spectrogram = spectrogram.to(device)
audio = torch.randn(
spectrogram.shape[0],
model.params.hop_samples * spectrogram.shape[-1],
device=device,
)
noise_scale = torch.from_numpy(alpha_cum ** 0.5).float().unsqueeze(1).to(device)
for n in range(len(alpha) - 1, -1, -1):
c1 = 1 / alpha[n] ** 0.5
c2 = beta[n] / (1 - alpha_cum[n]) ** 0.5
audio = c1 * (
audio
- c2
* model(
audio, spectrogram, torch.tensor([n], device=audio.device)
).squeeze(1)
)
if n > 0:
noise = torch.randn_like(audio)
sigma = (
(1.0 - alpha_cum[n - 1]) / (1.0 - alpha_cum[n]) * beta[n]
) ** 0.5
audio += sigma * noise
audio = torch.clamp(audio, -1.0, 1.0)
return audio, model.params.sample_rate
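# Hedged note on the sampling loop above: it is the standard DDPM ancestral update
#   x_{n-1} = (x_n - beta_n / sqrt(1 - alpha_cum_n) * eps_theta(x_n, mel, n)) / sqrt(alpha_n) + sigma_n * z
# applied from the last noise step down to step 0.
# Programmatic usage sketch (paths are placeholders):
#   spec = torch.from_numpy(np.load('spectrogram.npy'))
#   audio, sr = predict(spec, model_dir='path/to/weights.pt')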
def main(args):
spectrogram = torch.from_numpy(np.load(args.spectrogram_path))
audio, sr = predict(spectrogram, model_dir=args.model_dir)
torchaudio.save(args.output, audio.cpu(), sample_rate=sr)
if __name__ == "__main__":
parser = ArgumentParser(
description="runs inference on a spectrogram file generated by diffwave.preprocess"
)
parser.add_argument(
"model_dir",
help="directory containing a trained model (or full path to weights.pt file)",
)
parser.add_argument(
"spectrogram_path",
help="path to a spectrogram file generated by diffwave.preprocess",
)
parser.add_argument("--output", "-o", default="output.wav", help="output file name")
main(parser.parse_args())
| [
"torch.device",
"torch.no_grad",
"torch.clamp",
"torch.from_numpy",
"torch.randn_like",
"torch.tensor",
"torch.load",
"torch.randn"
] | 1.6.0 | egaebel/diffwave | c5d7d8d90b662f208ecdfba616782559146dc116 |
1.6 | import unittest
import os
import shutil
import random
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from aitoolbox import TrainLoop, TTModel
from tests_gpu.test_multi_gpu.ddp_prediction_saver import DDPPredictionSave
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class CNNNet(TTModel):
def __init__(self):
super(CNNNet, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_loss(self, batch_data, criterion, device):
data, target = batch_data
data, target = data.to(device), target.to(device)
output = self(data)
loss = criterion(output, target)
return loss
def get_predictions(self, batch_data, device):
data, y_test = batch_data
data = data.to(device)
output = self(data)
y_pred = output.argmax(dim=1, keepdim=False)
return y_pred.cpu(), y_test, {}
class TestMNISTCNN(unittest.TestCase):
def test_trainloop_core_pytorch_compare(self):
os.mkdir(f'{THIS_DIR}/ddp_cnn_save')
val_loss_tl, y_pred_tl, y_true_tl = self.train_eval_trainloop(num_epochs=5, use_real_train_data=True)
val_loss_pt, y_pred_pt, y_true_pt = self.train_eval_core_pytorch(num_epochs=5, use_real_train_data=True)
self.assertAlmostEqual(val_loss_tl, val_loss_pt, places=8)
self.assertEqual(y_pred_tl, y_pred_pt)
self.assertEqual(y_true_tl, y_true_pt)
project_path = os.path.join(THIS_DIR, 'ddp_cnn_save')
if os.path.exists(project_path):
shutil.rmtree(project_path)
project_path = os.path.join(THIS_DIR, 'data')
if os.path.exists(project_path):
shutil.rmtree(project_path)
def train_eval_trainloop(self, num_epochs, use_real_train_data=False):
self.set_seeds()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100, shuffle=True)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
model = CNNNet()
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion = nn.NLLLoss()
print('Starting train loop')
tl = TrainLoop(
model,
train_loader, val_loader, None,
optimizer, criterion,
gpu_mode='ddp'
)
self.assertEqual(tl.device.type, "cuda")
tl.fit(num_epochs=num_epochs,
callbacks=[DDPPredictionSave(dir_path=f'{THIS_DIR}/ddp_cnn_save',
file_name='tl_ddp_predictions.p')])
with open(f'{THIS_DIR}/ddp_cnn_save/tl_ddp_predictions.p', 'rb') as f:
val_loss, y_pred, y_true = pickle.load(f)
return val_loss, y_pred, y_true
def train_eval_core_pytorch(self, num_epochs, use_real_train_data=False):
self.set_seeds()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
model_pt = CNNNet()
optimizer_pt = optim.Adam(model_pt.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion_pt = nn.NLLLoss()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '8888'
print('Starting the manual DDP training')
mp.spawn(
self.manual_ddp_training,
args=(num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader),
nprocs=torch.cuda.device_count()
)
val_loss, y_pred, y_true = [], [], []
for idx in range(torch.cuda.device_count()):
with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{idx}.p', 'rb') as f:
val_loss_f, y_pred_f, y_true_f = pickle.load(f)
val_loss += val_loss_f
y_pred += y_pred_f
y_true += y_true_f
val_loss = np.mean(val_loss)
return val_loss, y_pred, y_true
@staticmethod
def manual_ddp_training(gpu, num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader):
rank = gpu
dist.init_process_group(backend='nccl', init_method='env://', world_size=torch.cuda.device_count(), rank=rank)
torch.manual_seed(0)
torch.cuda.set_device(gpu)
device = torch.device(f"cuda:{gpu}")
train_sampler = DistributedSampler(dataset=train_loader.dataset, shuffle=True,
num_replicas=torch.cuda.device_count(), rank=rank)
val_sampler = DistributedSampler(dataset=val_loader.dataset, shuffle=False,
num_replicas=torch.cuda.device_count(), rank=rank)
train_loader_ddp = DataLoader(train_loader.dataset, batch_size=100, sampler=train_sampler)
val_loader_ddp = DataLoader(val_loader.dataset, batch_size=100, sampler=val_sampler)
model_pt = model_pt.to(device)
criterion_pt = criterion_pt.to(device)
model_pt = DistributedDataParallel(model_pt, device_ids=[gpu])
model_pt.train()
for epoch in range(num_epochs):
print(f'Epoch: {epoch}')
train_sampler.set_epoch(epoch)
for i, (input_data, target) in enumerate(train_loader_ddp):
input_data = input_data.to(device)
target = target.to(device)
predicted = model_pt(input_data)
loss = criterion_pt(predicted, target)
loss.backward()
optimizer_pt.step()
optimizer_pt.zero_grad()
# Imitate what happens in auto_execute_end_of_epoch() in TrainLoop
for _ in train_loader:
pass
for _ in val_loader:
pass
print('Evaluating')
val_loss, val_pred, val_true = [], [], []
model_pt.eval()
with torch.no_grad():
for input_data, target in val_loader_ddp:
input_data = input_data.to(device)
target = target.to(device)
predicted = model_pt(input_data)
loss_batch = criterion_pt(predicted, target).cpu().item()
val_pred += predicted.argmax(dim=1, keepdim=False).cpu().tolist()
val_true += target.cpu().tolist()
val_loss.append(loss_batch)
with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{gpu}.p', 'wb') as f:
pickle.dump([val_loss, val_pred, val_true], f)
@staticmethod
def set_seeds():
manual_seed = 0
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
np.random.seed(manual_seed)
random.seed(manual_seed)
torch.manual_seed(manual_seed)
        # if you are using a GPU
torch.cuda.manual_seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
| [
"torch.nn.Linear",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.nn.functional.relu",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.nn.parallel.DistributedDataParallel",
"torch.nn.functional.log_softmax",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.nn.Conv2d",
"torch.nn.NLLLoss",
"torch.no_grad",
"torch.flatten",
"torch.nn.functional.max_pool2d",
"torch.nn.Dropout2d"
] | 1.6.0 | mv1388/AIToolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 |
1.2 | import torch
import torch.nn as nn
import torch.nn.functional as F
from api.models.utils.distribution import sample_from_discretized_mix_logistic
from api.models.utils.display import *
from api.models.utils.dsp import *
import os
import numpy as np
from pathlib import Path
from typing import Union
class ResBlock(nn.Module):
def __init__(self, dims):
super().__init__()
self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
self.batch_norm1 = nn.BatchNorm1d(dims)
self.batch_norm2 = nn.BatchNorm1d(dims)
def forward(self, x):
residual = x
x = self.conv1(x)
x = self.batch_norm1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.batch_norm2(x)
return x + residual
class MelResNet(nn.Module):
def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad):
super().__init__()
k_size = pad * 2 + 1
self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False)
self.batch_norm = nn.BatchNorm1d(compute_dims)
self.layers = nn.ModuleList()
for i in range(res_blocks):
self.layers.append(ResBlock(compute_dims))
self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1)
def forward(self, x):
x = self.conv_in(x)
x = self.batch_norm(x)
x = F.relu(x)
for f in self.layers: x = f(x)
x = self.conv_out(x)
return x
class Stretch2d(nn.Module):
def __init__(self, x_scale, y_scale):
super().__init__()
self.x_scale = x_scale
self.y_scale = y_scale
def forward(self, x):
b, c, h, w = x.size()
x = x.unsqueeze(-1).unsqueeze(3)
x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale)
return x.view(b, c, h * self.y_scale, w * self.x_scale)
class UpsampleNetwork(nn.Module):
def __init__(self, feat_dims, upsample_scales, compute_dims,
res_blocks, res_out_dims, pad):
super().__init__()
total_scale = np.cumproduct(upsample_scales)[-1]
self.indent = pad * total_scale
self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad)
self.resnet_stretch = Stretch2d(total_scale, 1)
self.up_layers = nn.ModuleList()
for scale in upsample_scales:
k_size = (1, scale * 2 + 1)
padding = (0, scale)
stretch = Stretch2d(scale, 1)
conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False)
conv.weight.data.fill_(1. / k_size[1])
self.up_layers.append(stretch)
self.up_layers.append(conv)
def forward(self, m):
aux = self.resnet(m).unsqueeze(1)
aux = self.resnet_stretch(aux)
aux = aux.squeeze(1)
m = m.unsqueeze(1)
for f in self.up_layers: m = f(m)
m = m.squeeze(1)[:, :, self.indent:-self.indent]
return m.transpose(1, 2), aux.transpose(1, 2)
class WaveRNN(nn.Module):
def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors,
feat_dims, compute_dims, res_out_dims, res_blocks,
hop_length, sample_rate, mode='RAW'):
super().__init__()
self.mode = mode
self.pad = pad
if self.mode == 'RAW':
self.n_classes = 2 ** bits
elif self.mode == 'MOL':
self.n_classes = 30
else:
            raise RuntimeError("Unknown model mode value - ", self.mode)
# List of rnns to call `flatten_parameters()` on
self._to_flatten = []
self.rnn_dims = rnn_dims
self.aux_dims = res_out_dims // 4
self.hop_length = hop_length
self.sample_rate = sample_rate
self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad)
self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims)
self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True)
self._to_flatten += [self.rnn1, self.rnn2]
self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims)
self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims)
self.fc3 = nn.Linear(fc_dims, self.n_classes)
self.register_buffer('step', torch.zeros(1, dtype=torch.long))
self.num_params()
# Avoid fragmentation of RNN parameters and associated warning
self._flatten_parameters()
def forward(self, x, mels):
device = next(self.parameters()).device # use same device as parameters
# Although we `_flatten_parameters()` on init, when using DataParallel
# the model gets replicated, making it no longer guaranteed that the
# weights are contiguous in GPU memory. Hence, we must call it again
self._flatten_parameters()
if self.training:
self.step += 1
bsize = x.size(0)
h1 = torch.zeros(1, bsize, self.rnn_dims, device=device)
h2 = torch.zeros(1, bsize, self.rnn_dims, device=device)
mels, aux = self.upsample(mels)
aux_idx = [self.aux_dims * i for i in range(5)]
a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2)
x = self.I(x)
res = x
x, _ = self.rnn1(x, h1)
x = x + res
res = x
x = torch.cat([x, a2], dim=2)
x, _ = self.rnn2(x, h2)
x = x + res
x = torch.cat([x, a3], dim=2)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4], dim=2)
x = F.relu(self.fc2(x))
return self.fc3(x)
def generate(self, mels, save_path: Union[str, Path, None], batched, target, overlap, mu_law, silent=False):
self.eval()
device = next(self.parameters()).device # use same device as parameters
mu_law = mu_law if self.mode == 'RAW' else False
output = []
start = time.time()
rnn1 = self.get_gru_cell(self.rnn1)
rnn2 = self.get_gru_cell(self.rnn2)
with torch.no_grad():
mels = torch.as_tensor(mels, device=device)
wave_len = (mels.size(-1) - 1) * self.hop_length
mels = self.pad_tensor(mels.transpose(1, 2), pad=self.pad, side='both')
mels, aux = self.upsample(mels.transpose(1, 2))
if batched:
mels = self.fold_with_overlap(mels, target, overlap)
aux = self.fold_with_overlap(aux, target, overlap)
b_size, seq_len, _ = mels.size()
h1 = torch.zeros(b_size, self.rnn_dims, device=device)
h2 = torch.zeros(b_size, self.rnn_dims, device=device)
x = torch.zeros(b_size, 1, device=device)
d = self.aux_dims
aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)]
for i in range(seq_len):
m_t = mels[:, i, :]
a1_t, a2_t, a3_t, a4_t = \
(a[:, i, :] for a in aux_split)
x = torch.cat([x, m_t, a1_t], dim=1)
x = self.I(x)
h1 = rnn1(x, h1)
x = x + h1
inp = torch.cat([x, a2_t], dim=1)
h2 = rnn2(inp, h2)
x = x + h2
x = torch.cat([x, a3_t], dim=1)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4_t], dim=1)
x = F.relu(self.fc2(x))
logits = self.fc3(x)
if self.mode == 'MOL':
sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2))
output.append(sample.view(-1))
# x = torch.FloatTensor([[sample]]).cuda()
x = sample.transpose(0, 1)
elif self.mode == 'RAW':
posterior = F.softmax(logits, dim=1)
distrib = torch.distributions.Categorical(posterior)
sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1.
output.append(sample)
x = sample.unsqueeze(-1)
else:
raise RuntimeError("Unknown model mode value - ", self.mode)
if not silent and i % 100 == 0:
self.gen_display(i, seq_len, b_size, start)
output = torch.stack(output).transpose(0, 1)
output = output.cpu().numpy()
output = output.astype(np.float64)
if mu_law:
output = decode_mu_law(output, self.n_classes, False)
if batched:
output = self.xfade_and_unfold(output, target, overlap)
else:
output = output[0]
# Fade-out at the end to avoid signal cutting out suddenly
fade_out = np.linspace(1, 0, 20 * self.hop_length)
output = output[:wave_len]
output[-20 * self.hop_length:] *= fade_out
if save_path is not None:
save_wav(output, save_path)
self.train()
return output
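    # Hedged usage sketch for generate() (target/overlap values are typical WaveRNN settings, not taken from this repo):
    #   wav = model.generate(mels, save_path='out.wav', batched=True,
    #                        target=11000, overlap=550, mu_law=True)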
def gen_display(self, i, seq_len, b_size, start):
gen_rate = (i + 1) / (time.time() - start) * b_size / 1000
pbar = progbar(i, seq_len)
msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | '
stream(msg)
def get_gru_cell(self, gru):
gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size)
gru_cell.weight_hh.data = gru.weight_hh_l0.data
gru_cell.weight_ih.data = gru.weight_ih_l0.data
gru_cell.bias_hh.data = gru.bias_hh_l0.data
gru_cell.bias_ih.data = gru.bias_ih_l0.data
return gru_cell
def pad_tensor(self, x, pad, side='both'):
# NB - this is just a quick method i need right now
# i.e., it won't generalise to other shapes/dims
b, t, c = x.size()
total = t + 2 * pad if side == 'both' else t + pad
padded = torch.zeros(b, total, c, device=x.device)
if side == 'before' or side == 'both':
padded[:, pad:pad + t, :] = x
elif side == 'after':
padded[:, :t, :] = x
return padded
def fold_with_overlap(self, x, target, overlap):
''' Fold the tensor with overlap for quick batched inference.
Overlap will be used for crossfading in xfade_and_unfold()
Args:
x (tensor) : Upsampled conditioning features.
shape=(1, timesteps, features)
target (int) : Target timesteps for each index of batch
overlap (int) : Timesteps for both xfade and rnn warmup
Return:
(tensor) : shape=(num_folds, target + 2 * overlap, features)
Details:
x = [[h1, h2, ... hn]]
Where each h is a vector of conditioning features
Eg: target=2, overlap=1 with x.size(1)=10
folded = [[h1, h2, h3, h4],
[h4, h5, h6, h7],
[h7, h8, h9, h10]]
'''
_, total_len, features = x.size()
# Calculate variables needed
num_folds = (total_len - overlap) // (target + overlap)
extended_len = num_folds * (overlap + target) + overlap
remaining = total_len - extended_len
# Pad if some time steps poking out
if remaining != 0:
num_folds += 1
padding = target + 2 * overlap - remaining
x = self.pad_tensor(x, padding, side='after')
folded = torch.zeros(num_folds, target + 2 * overlap, features, device=x.device)
# Get the values for the folded tensor
for i in range(num_folds):
start = i * (target + overlap)
end = start + target + 2 * overlap
folded[i] = x[:, start:end, :]
return folded
def xfade_and_unfold(self, y, target, overlap):
''' Applies a crossfade and unfolds into a 1d array.
Args:
            y (ndarray) : Batched sequences of audio samples
shape=(num_folds, target + 2 * overlap)
dtype=np.float64
overlap (int) : Timesteps for both xfade and rnn warmup
Return:
            (ndarray) : audio samples in a 1d array
shape=(total_len)
dtype=np.float64
Details:
y = [[seq1],
[seq2],
[seq3]]
Apply a gain envelope at both ends of the sequences
y = [[seq1_in, seq1_target, seq1_out],
[seq2_in, seq2_target, seq2_out],
[seq3_in, seq3_target, seq3_out]]
Stagger and add up the groups of samples:
[seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...]
'''
num_folds, length = y.shape
target = length - 2 * overlap
total_len = num_folds * (target + overlap) + overlap
# Need some silence for the rnn warmup
silence_len = overlap // 2
fade_len = overlap - silence_len
silence = np.zeros((silence_len), dtype=np.float64)
linear = np.ones((silence_len), dtype=np.float64)
# Equal power crossfade
t = np.linspace(-1, 1, fade_len, dtype=np.float64)
fade_in = np.sqrt(0.5 * (1 + t))
fade_out = np.sqrt(0.5 * (1 - t))
# Concat the silence to the fades
fade_in = np.concatenate([silence, fade_in])
fade_out = np.concatenate([linear, fade_out])
# Apply the gain to the overlap samples
y[:, :overlap] *= fade_in
y[:, -overlap:] *= fade_out
unfolded = np.zeros((total_len), dtype=np.float64)
# Loop to add up all the samples
for i in range(num_folds):
start = i * (target + overlap)
end = start + target + 2 * overlap
unfolded[start:end] += y[i]
return unfolded
def get_step(self):
return self.step.data.item()
def log(self, path, msg):
with open(path, 'a') as f:
print(msg, file=f)
def load(self, path: Union[str, Path]):
# Use device of model params as location for loaded state
device = next(self.parameters()).device
self.load_state_dict(torch.load(path, map_location=device), strict=False)
def save(self, path: Union[str, Path]):
# No optimizer argument because saving a model should not include data
# only relevant in the training process - it should only be properties
        # of the model itself. Let caller take care of saving optimizer state.
torch.save(self.state_dict(), path)
def num_params(self, print_out=False):
parameters = filter(lambda p: p.requires_grad, self.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
if print_out:
print('Trainable Parameters: %.3fM' % parameters)
return parameters
def _flatten_parameters(self):
"""Calls `flatten_parameters` on all the rnns used by the WaveRNN. Used
to improve efficiency and avoid PyTorch yelling at us."""
[m.flatten_parameters() for m in self._to_flatten]
| [
"torch.nn.Linear",
"torch.cat",
"torch.stack",
"torch.nn.GRU",
"torch.nn.ModuleList",
"torch.distributions.Categorical",
"torch.load",
"torch.nn.Conv1d",
"torch.as_tensor",
"torch.nn.functional.relu",
"torch.zeros",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.nn.GRUCell",
"torch.no_grad",
"torch.nn.BatchNorm1d"
] | 1.2.0 | elainevoice/backend | 9b5fef59001fd6c2040affc80cd5cb9690c73795 |
1.8 | import torch
import numpy as np
from tqdm import tqdm
from sklearn import cluster
# bol_norm=True -> divide each score by the norm of its feature
def same_score(v_ortho_dict, features, labels, bol_norm=False):
features = torch.from_numpy(features).cuda()
scores = torch.zeros(features.shape[0])
for indx, feat in enumerate(features):
tmp_scores = torch.dot(v_ortho_dict[labels[indx]][0], feat).abs()
scores[indx] = (tmp_scores / torch.norm(feat, p=2)) if bol_norm else tmp_scores
return scores
def same_topk(label_list, scores, p):
output = []
for idx in range(len(np.unique(label_list))):
num_inst = int(p * np.sum(label_list==idx))
indexs = torch.tensor(range(len(label_list)))[label_list==idx]
tmp_sort, tmp_idx = torch.sort(scores[label_list==idx], descending=False)
        # we need the samples that did NOT make the top-k cut, so keep the remaining indices
output += indexs[tmp_idx[num_inst:]].numpy().tolist()
return torch.tensor(output).long()
# Class-wise k-means
def same_kmeans(label_list, scores, p=None):
output = []
for idx in range(len(np.unique(label_list))):
indexs = torch.tensor(range(len(scores)))[label_list==idx]
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(scores[indexs].reshape(-1, 1))
if torch.mean(scores[indexs][kmeans.labels_==0]) < torch.mean(scores[indexs][kmeans.labels_==1]):
kmeans.labels_ = 1 - kmeans.labels_
output += indexs[kmeans.labels_ == 0].numpy().tolist()
return torch.tensor(output).long()
#Total Kmeans
def same_kmeans_total(scores, p=None):
output = []
indexs = torch.tensor(range(len(scores)))
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(scores.reshape(-1, 1))
if torch.mean(scores[kmeans.labels_==0]) < torch.mean(scores[kmeans.labels_==1]):
kmeans.labels_ = 1 - kmeans.labels_
for idx, value in enumerate(kmeans.labels_):
if value == 0:
output.append(idx)
return torch.tensor(output).long(), None
def same_topk_index(orig_label_list, orig_out_list, prev_label_list, prev_out_list, p=None):
singular_dict, v_ortho_dict = get_singular_value_vector(prev_label_list, prev_out_list)
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
scores = same_score(v_ortho_dict, orig_out_list, orig_label_list)
output = same_topk(orig_label_list, scores, p)
return output.numpy()
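# Hedged usage sketch of the selection pipeline above (argument pattern is an assumption; on the first
# round the "orig" and "prev" features can be the same arrays):
#   label_list, out_list = get_out_list(model, train_loader)
#   clean_idx = same_topk_index(label_list, out_list, label_list, out_list, p=0.5)
#   clean_subset = torch.utils.data.Subset(train_loader.dataset, clean_idx)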
def same_kmeans_index(orig_label_list, orig_out_list, prev_label_list, prev_out_list, p=None):
singular_dict, v_ortho_dict = get_singular_value_vector(prev_label_list, prev_out_list)
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
scores = same_score(v_ortho_dict, orig_out_list, orig_label_list)
output = same_kmeans(orig_label_list, scores, p)
return output.numpy()
def compute_noisy_ratio(data_loader):
isNoisy_list = np.empty((0,))
with tqdm(data_loader) as progress:
for _, (_, label, index, label_gt) in enumerate(progress):
isNoisy = label == label_gt
isNoisy_list = np.concatenate((isNoisy_list, isNoisy.cpu()))
print ('#############################')
print (isNoisy_list.sum(), isNoisy_list.shape)
print('purity in this dataset: {}'.format(isNoisy_list.sum() / isNoisy_list.shape))
def get_loss_list(model, data_loader):
loss_list = np.empty((0,))
with tqdm(data_loader) as progress:
for batch_idx, (data, label, index, label_gt) in enumerate(progress):
data = data.cuda()
label, label_gt = label.long().cuda(), label_gt.long().cuda()
_, prediction = model(data)
loss = torch.nn.CrossEntropyLoss(reduction='none')(prediction, label)
loss_list = np.concatenate((loss_list, loss.detach().cpu()))
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(loss_list.reshape(-1,1))
if np.mean(loss_list[kmeans.labels_==0]) > np.mean(loss_list[kmeans.labels_==1]):
clean_label = 1
else:
clean_label = 0
output=[]
for idx, value in enumerate(kmeans.labels_):
if value==clean_label:
output.append(idx)
return output
def iterative_eigen(number, label_list, out_list, teacher_idx=None):
sin_lbls = {}
for i in range(number):
tmp_lbl = torch.zeros(50000)
if teacher_idx !=None:
for num in (set(range(0,50000)) - set(teacher_idx)):
tmp_lbl[num] += 1
print(tmp_lbl.sum().item())
for k in range(i):
tmp_lbl += sin_lbls[k]
singular_dict, v_ortho_dict = get_singular_value_vector(label_list[tmp_lbl==0], out_list[tmp_lbl==0])
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
sing_lbl, sin_score_lbl = singular_label(v_ortho_dict, out_list, label_list)
sin_lbls[i]=sing_lbl
if i>0 and torch.all(torch.eq(sin_lbls[i], sin_lbls[i-1])):
print(i)
break
if number ==1:
output=[]
for idx, value in enumerate(sing_lbl):
if value==0:
output.append(idx)
else:
        kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(sin_score_lbl.reshape(-1, 1))
if np.mean(sin_score_lbl[kmeans.labels_==0]) > np.mean(sin_score_lbl[kmeans.labels_==1]):
clean_label = 0
else:
clean_label = 1
output=[]
for idx, value in enumerate(kmeans.labels_):
if value==clean_label:
output.append(idx)
return output
def get_out_list(model, data_loader):
label_list = np.empty((0,))
model.eval()
model.cuda()
with tqdm(data_loader) as progress:
for batch_idx, (data, label, index, _) in enumerate(progress):
data = data.cuda()
# label, label_gt = label.long().cuda(), label_gt.long().cuda()
label = label.long()
output, _ = model(data)
label_list = np.concatenate((label_list, label.cpu()))
if batch_idx == 0:
out_list = output.detach().cpu()
else:
out_list = np.concatenate((out_list, output.detach().cpu()), axis=0)
return label_list, out_list
def get_singular_value_vector(label_list, out_list):
singular_dict = {}
v_ortho_dict = {}
for index in np.unique(label_list):
u, s, v = np.linalg.svd(out_list[label_list==index])
singular_dict[index] = s[0] / s[1]
v_ortho_dict[index] = torch.from_numpy(v[:2])
return singular_dict, v_ortho_dict
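# Note on the function above: for each class it takes the SVD of that class' feature matrix;
# v[0] is the first right-singular vector (the class' principal direction) and s[0]/s[1] measures
# how dominant that direction is. The alignment score |<v[0], feature>| used elsewhere in this file
# is large for samples consistent with their labelled class and small for likely noisy labels.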
def singular_label(v_ortho_dict, model_represents, label):
model_represents = torch.from_numpy(model_represents).cuda()
sing_lbl = torch.zeros(model_represents.shape[0])
sin_score_lbl = torch.zeros(model_represents.shape[0])
for i, data in enumerate(model_represents):
sin_score_lbl[i] = torch.dot(v_ortho_dict[label[i]][0], data).abs() - torch.dot(v_ortho_dict[label[i]][1], data).abs()
if torch.dot(v_ortho_dict[label[i]][0], data).abs() < torch.dot(v_ortho_dict[label[i]][1], data).abs():
sing_lbl[i] = 1
return sing_lbl, sin_score_lbl
def kmean_singular_label(v_ortho_dict, model_represents, label):
model_represents = torch.from_numpy(model_represents).cuda()
sing_lbl = torch.zeros(model_represents.shape[0])
sin_score_lbl = torch.zeros(model_represents.shape[0])
for i, data in enumerate(model_represents):
sin_score_lbl[i] = torch.dot(v_ortho_dict[label[i]][0], data).abs() - torch.dot(v_ortho_dict[label[i]][1], data).abs()
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(sin_score_lbl.reshape(-1, 1))
if torch.mean(sin_score_lbl[kmeans.labels_==0]) < torch.mean(sin_score_lbl[kmeans.labels_==1]):
kmeans.labels_ = 1 - kmeans.labels_
output = []
for idx, value in enumerate(kmeans.labels_):
if value == 0:
output.append(idx)
return output
def kmean_singular_label2(v_ortho_dict, model_represents, label):
model_represents = torch.from_numpy(model_represents).cuda()
sing_lbl = torch.zeros(model_represents.shape[0])
sin_score_lbl = torch.zeros(model_represents.shape[0])
for i, data in enumerate(model_represents):
sin_score_lbl[i] = torch.dot(v_ortho_dict[label[i]][0], data).abs() / torch.norm(data, p=2)
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(sin_score_lbl.reshape(-1, 1))
if torch.mean(sin_score_lbl[kmeans.labels_==0]) < torch.mean(sin_score_lbl[kmeans.labels_==1]):
kmeans.labels_ = 1 - kmeans.labels_
output = []
for idx, value in enumerate(kmeans.labels_):
if value == 0:
output.append(idx)
return output
def kmean_eigen_out(label_list, out_list, teacher_idx=None):
singular_dict, v_ortho_dict = get_singular_value_vector(label_list, out_list)
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
output = kmean_singular_label(v_ortho_dict, out_list, label_list)
return output
def topk_eigen_kmean(label_list, out_list, teacher_idx=None):
singular_dict, v_ortho_dict = get_singular_value_vector(label_list, out_list)
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
output = kmean_singular_label2(v_ortho_dict, out_list, label_list)
return output
def get_anchor(label_list, out_list, teacher_idx=None):
label_list = torch.from_numpy(label_list).long().numpy()
singular_dict, v_ortho_dict = get_singular_value_vector(label_list, out_list)
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
model_represents = torch.from_numpy(out_list).cuda()
sin_score_lbl = [[] for _ in range(len(np.unique(label_list)))]
for i, data in enumerate(model_represents):
sin_score_lbl[label_list[i]].append(torch.dot(v_ortho_dict[label_list[i]][0], data).abs())
# classwise topk
v_ortho_dict_ = {}
for index in np.unique(label_list):
cls_score_lbl = sin_score_lbl[index]
topk_v, topk_i = torch.topk(torch.tensor(cls_score_lbl), k=50)
u, s, v = np.linalg.svd(model_represents[label_list==index][topk_i].cpu().numpy())
v_ortho_dict_[index] = torch.from_numpy(v[0]).unsqueeze(0).cuda()
output = kmean_singular_label2(v_ortho_dict_, model_represents.cpu().numpy(), label_list)
return output
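# Prints how many samples in the loader still match their ground-truth label; despite the name,
# isNoisy is True when label == label_gt, i.e. when the sample is clean.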
def isNoisy_ratio(data_loader):
isNoisy_list = np.empty((0,))
with tqdm(data_loader) as progress:
for _, (_, label, index, label_gt) in enumerate(progress):
isNoisy = label == label_gt
isNoisy_list = np.concatenate((isNoisy_list, isNoisy.cpu()))
print ('#############################')
print (isNoisy_list.sum(), isNoisy_list.shape)
print('purity in this dataset: {}'.format(isNoisy_list.sum() / isNoisy_list.shape[0]))
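# Builds the set of presumably-clean sample indices from a pre-trained teacher checkpoint,
# optionally intersecting the selections of a second and third checkpoint. When parse.reinit is
# False it also loads each checkpoint into `model`, which is assumed to be a student network
# defined at module level in the original file (it is not passed in here).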
def extract_teacherIdx(teacher, data_loader, parse):
teacher.load_state_dict(torch.load('./checkpoint/' + parse.load_name)['state_dict'])
teacher = teacher.cuda()
if not parse.reinit:
model.load_state_dict(torch.load('./checkpoint/' + parse.load_name)['state_dict'])
for params in teacher.parameters():
params.requires_grad = False
if parse.distill_mode == 'eigen':
tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
teacher_idx = iterative_eigen(1,tea_label_list,tea_out_list)
elif parse.distill_mode == 'fulleigen':
tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
teacher_idx = iterative_eigen(100,tea_label_list,tea_out_list)
elif parse.distill_mode == 'kmean_eigen':
tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
teacher_idx = kmean_eigen_out(tea_label_list, tea_out_list)
elif parse.distill_mode == 'topk_eigen_kmean':
tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
teacher_idx = topk_eigen_kmean(tea_label_list, tea_out_list)
else:
teacher_idx = get_loss_list(teacher, data_loader)
print('||||||original||||||')
isNoisy_ratio(data_loader)
if parse.second_load_name is not None:
teacher.load_state_dict(torch.load('./checkpoint/' + parse.second_load_name)['state_dict'])
teacher = teacher.cuda()
if not parse.reinit:
model.load_state_dict(torch.load('./checkpoint/' + parse.second_load_name)['state_dict'])
for params in teacher.parameters():
params.requires_grad = False
if parse.distill_mode == 'eigen':
tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
teacher_idx2 = iterative_eigen(1,tea_label_list,tea_out_list,teacher_idx)
elif parse.distill_mode == 'fulleigen':
tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
teacher_idx2 = iterative_eigen(100,tea_label_list,tea_out_list)
else:
teacher_idx2 = get_loss_list(teacher, data_loader)
teacher_idx = list(set(teacher_idx) & set(teacher_idx2))
print('second_distillation')
if parse.third_load_name is not None:
teacher.load_state_dict(torch.load('./checkpoint/' + parse.third_load_name)['state_dict'])
teacher = teacher.cuda()
if not parse.reinit:
model.load_state_dict(torch.load('./checkpoint/' + parse.third_load_name)['state_dict'])
for params in teacher.parameters():
params.requires_grad = False
if parse.distill_mode == 'eigen':
tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
teacher_idx3 = iterative_eigen(1,tea_label_list,tea_out_list, teacher_idx)
elif parse.distill_mode == 'fulleigen':
tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
teacher_idx3 = iterative_eigen(100,tea_label_list,tea_out_list)
else:
teacher_idx3 = get_loss_list(teacher, data_loader)
teacher_idx = list(set(teacher_idx) & set(teacher_idx3))
print('third distillation')
return teacher_idx
# def get_loss_list_2d(model, data_loader, n_clusters=2, c_clusters=1):
# loss_list = np.empty((0, 2))
# model.cuda()
# with tqdm(data_loader) as progress:
# for batch_idx, (data, label, index, label_gt) in enumerate(progress):
# data = data.cuda()
# label, label_gt = label.long().cuda(), label_gt.long().cuda()
# _, pred = model(data)
# loss = torch.nn.CrossEntropyLoss(reduction='none')(pred, label)
# prob = torch.softmax(pred, dim=-1)
# top2_log_pred, top2_ind = torch.topk(torch.log(prob), k=n_clusters, dim=-1)
# is_pred_wrong = (top2_ind[:, 0] != label).bool()
# is_pred_correct = (top2_ind[:, 0] == label).bool()
# label_top1 = torch.stack([loss, -top2_log_pred[:, 0]], dim=1) # for pred wrong
# top2_log_pred = -top2_log_pred
# top2_log_pred[is_pred_wrong] = label_top1[is_pred_wrong]
# loss_list = np.concatenate((loss_list, top2_log_pred.detach().cpu().numpy()), axis=0)
# kmeans = cluster.KMeans(n_clusters=n_clusters, random_state=0).fit(loss_list.reshape(50000,2))
# mean_losses = []
# for itr in range(n_clusters):
# mean_losses.append(np.mean(loss_list[kmeans.labels_==itr][:, 0]))
# _, clean_labels = torch.topk(-torch.tensor(mean_losses), k=c_clusters)
# output=[]
# for idx, value in enumerate(kmeans.labels_):
# if value in clean_labels:
# output.append(idx)
# return output
| [
"torch.zeros",
"torch.dot",
"torch.eq",
"torch.norm",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"torch.sort",
"torch.tensor",
"torch.load",
"torch.mean"
] | 1.8.0 | Kthyeon/FINE | ae8a24a4a2514feafd9a9ed394af87f397708ccf |
1.8 | # https://github.com/AlanChou/Truncated-Loss/blob/master/TruncatedLoss.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
__all__=['GCELoss', 'GCE_GTLoss']
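# Generalized Cross Entropy loss (Zhang & Sabuncu, 2018): L_q = (1 - p_y^q) / q, which
# interpolates between cross-entropy (q -> 0) and MAE (q = 1). With truncated=True the loss is
# additionally gated per sample by self.weight, which update_weight() sets against the
# threshold k.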
class GCELoss(nn.Module):
def __init__(self, q=0.7, k=0.5, trainset_size=50000, truncated=False):
super().__init__()
self.q = q
self.k = k
self.truncated = truncated
self.weight = torch.nn.Parameter(data=torch.ones(trainset_size, 1), requires_grad=False)
def forward(self, logits, targets, indexes, mode=None):
p = F.softmax(logits, dim=1)
Yg = torch.gather(p, 1, torch.unsqueeze(targets, 1))
if self.truncated:
if mode == 'ce':
ce = nn.CrossEntropyLoss(reduction='none')
loss = ce(logits, targets)
loss = torch.mean(loss)
else:
loss = ((1-(Yg**self.q))/self.q)*self.weight[indexes] - ((1-(self.k**self.q))/self.q)*self.weight[indexes]
loss = torch.mean(loss)
else:
if mode == 'ce':
ce = nn.CrossEntropyLoss(reduction='none')
loss = ce(logits, targets)
loss = torch.mean(loss)
else:
loss = (1-(Yg**self.q))/self.q
loss = torch.mean(loss)
return loss
def update_weight(self, logits, targets, indexes):
p = F.softmax(logits, dim=1)
Yg = torch.gather(p, 1, torch.unsqueeze(targets, 1))
Lq = ((1-(Yg**self.q))/self.q)
Lqk = np.repeat(((1-(self.k**self.q))/self.q), targets.size(0))
Lqk = torch.from_numpy(Lqk).type(torch.cuda.FloatTensor)
Lqk = torch.unsqueeze(Lqk, 1)
condition = torch.gt(Lqk, Lq)
self.weight[indexes] = condition.type(torch.cuda.FloatTensor)
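# GCE evaluated only on the samples flagged as clean: the per-sample losses are summed over
# clean_indexs and divided by the number of selected samples (falling back to the batch size
# when none are selected).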
class GCE_GTLoss(GCELoss):
def __init__(self, q=0.7, k=0.5, trainset_size=50000, truncated=False):
super().__init__(q, k, trainset_size, truncated)
def forward(self, logits, targets, clean_indexs, index=None):
# index : redundant variable. This is only used in ELR.
p = F.softmax(logits, dim=1)
Yg = torch.gather(p, 1, torch.unsqueeze(targets, 1))
size = logits.shape[0] if torch.sum(clean_indexs) == 0 else torch.sum(clean_indexs)
# print (torch.mean(((1-(Yg**self.q))/self.q)))
loss = (1-(Yg**self.q))/self.q
loss = torch.sum(loss[clean_indexs]) / size
return loss | [
"torch.gt",
"torch.unsqueeze",
"torch.ones",
"torch.from_numpy",
"torch.nn.functional.softmax",
"torch.mean",
"torch.nn.CrossEntropyLoss",
"torch.sum"
] | 1.8.0 | Kthyeon/FINE | ae8a24a4a2514feafd9a9ed394af87f397708ccf |
1.1 | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from math import sqrt
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
import sys
from os.path import abspath, dirname
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/../'))
from common.layers import ConvNorm, LinearNorm
from common.utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim,
attention_dim, attention_location_n_filters,
attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
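# Tacotron 2 prenet: a stack of linear + ReLU layers with dropout kept active even at
# inference time; the inference branch draws a single Bernoulli(0.5) mask (shaped from the
# first item) and scales by 2 so the expected activation matches the dropout-trained one.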
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x, inference=False):
if inference:
for linear in self.layers:
x = F.relu(linear(x))
x0 = x[0].unsqueeze(0)
mask = Variable(torch.bernoulli(x0.data.new(x0.data.size()).fill_(0.5)))
mask = mask.expand(x.size(0), x.size(1))
x = x*mask*2
else:
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
class Postnet(nn.Module):
"""Postnet
- Five 1-d convolution with 512 channels and kernel size 5
"""
def __init__(self, n_mel_channels, postnet_embedding_dim,
postnet_kernel_size, postnet_n_convolutions):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(n_mel_channels, postnet_embedding_dim,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(postnet_embedding_dim))
)
for i in range(1, postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(postnet_embedding_dim,
postnet_embedding_dim,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(postnet_embedding_dim, n_mel_channels,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(n_mel_channels))
)
def forward(self, x):
for i in range(len(self.convolutions) - 1):
x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training)
x = F.dropout(self.convolutions[-1](x), 0.5, self.training)
return x
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, encoder_n_convolutions,
encoder_embedding_dim, encoder_kernel_size):
super(Encoder, self).__init__()
convolutions = []
for _ in range(encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size, stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(encoder_embedding_dim,
int(encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
def forward(self, x, input_lengths):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
# PyTorch tensors are not reversible, hence the conversion
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
def infer(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
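# Autoregressive attention decoder. Note that this variant splits the teacher-forced mel input
# into its n_frames_per_step frames along the channel axis and runs one prenet + decode() call
# per frame within each decoder step.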
class Decoder(nn.Module):
def __init__(self, n_mel_channels, n_frames_per_step,
encoder_embedding_dim, attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
attention_rnn_dim, decoder_rnn_dim,
prenet_dim, max_decoder_steps, gate_threshold,
p_attention_dropout, p_decoder_dropout,
early_stopping):
super(Decoder, self).__init__()
self.n_mel_channels = n_mel_channels
self.n_frames_per_step = n_frames_per_step
self.encoder_embedding_dim = encoder_embedding_dim
self.attention_rnn_dim = attention_rnn_dim
self.decoder_rnn_dim = decoder_rnn_dim
self.prenet_dim = prenet_dim
self.max_decoder_steps = max_decoder_steps
self.gate_threshold = gate_threshold
self.p_attention_dropout = p_attention_dropout
self.p_decoder_dropout = p_decoder_dropout
self.early_stopping = early_stopping
self.prenet = Prenet(
n_mel_channels,
[prenet_dim, prenet_dim])
self.attention_rnn = nn.LSTMCell(
prenet_dim + encoder_embedding_dim,
attention_rnn_dim)
self.attention_layer = Attention(
attention_rnn_dim, encoder_embedding_dim,
attention_dim, attention_location_n_filters,
attention_location_kernel_size)
self.decoder_rnn = nn.LSTMCell(
attention_rnn_dim + encoder_embedding_dim,
decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
decoder_rnn_dim + encoder_embedding_dim,
n_mel_channels * n_frames_per_step)
self.gate_layer = LinearNorm(
decoder_rnn_dim + encoder_embedding_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: encoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
decoder_input = Variable(memory.data.new(
B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
def initialize_decoder_states(self, memory, mask):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
self.attention_hidden = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.attention_cell = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.decoder_hidden = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.attention_weights = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_weights_cum = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(
B, self.encoder_embedding_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2).contiguous()
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B) -> (B, T_out)
gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
gate_outputs = gate_outputs.contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(
mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell))
self.attention_hidden = F.dropout(
self.attention_hidden, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1),
self.attention_weights_cum.unsqueeze(1)), dim=1)
self.attention_context, self.attention_weights = self.attention_layer(
self.attention_hidden, self.memory, self.processed_memory,
attention_weights_cat, self.mask)
self.attention_weights_cum += self.attention_weights
decoder_input = torch.cat(
(self.attention_hidden, self.attention_context), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
decoder_input, (self.decoder_hidden, self.decoder_cell))
self.decoder_hidden = F.dropout(
self.decoder_hidden, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(self.decoder_hidden, self.attention_context), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return decoder_output, gate_prediction, self.attention_weights
def forward(self, memory, decoder_inputs, memory_lengths):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_input_frames = []
z = int(decoder_inputs.size(2) / self.n_frames_per_step)
for i in range(self.n_frames_per_step):
decoder_input_frames.append(self.prenet(decoder_inputs[:, :, i*z:(i+1)*z]))
self.initialize_decoder_states(
memory, mask=~get_mask_from_lengths(memory_lengths))
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_input_frames[0].size(0) - 1:
for input_frame in decoder_input_frames:
decoder_input = input_frame[len(mel_outputs)]
mel_output, gate_output, attention_weights = self.decode(
decoder_input)
gate_outputs += [gate_output.squeeze() if memory.shape[0] > 1 else gate_output]
alignments += [attention_weights]
mel_outputs += [mel_output.squeeze(1)]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def infer(self, memory):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32).cuda()
not_finished = torch.ones([memory.size(0)], dtype=torch.int32).cuda()
mel_outputs, gate_outputs, alignments = [], [], []
z = int(decoder_input.size(1) / self.n_frames_per_step)
while True:
decoder_input_frames = []
for i in range(self.n_frames_per_step):
decoder_input_frames.append(decoder_input[:, i * z:(i + 1) * z])
for input_frame in decoder_input_frames:
mel_output, gate_output, alignment = self.decode(self.prenet(input_frame))
gate_outputs += [gate_output]
alignments += [alignment]
mel_outputs += [mel_output.squeeze(1)]
dec = torch.le(torch.sigmoid(gate_output.data),
self.gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if self.early_stopping and torch.sum(not_finished) == 0:
break
if len(mel_outputs) == self.max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
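# Full Tacotron 2 model with multi-speaker (and optional emotion) conditioning: speaker and
# emotion embeddings are expanded along the time axis and concatenated to the text-encoder
# outputs before they are fed to the decoder as attention memory.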
class Tacotron2(nn.Module):
def __init__(self, mask_padding, n_mel_channels,
n_symbols, symbols_embedding_dim, n_speakers, speakers_embedding_dim,
use_emotions, n_emotions, emotions_embedding_dim,
encoder_kernel_size, encoder_n_convolutions, encoder_embedding_dim,
attention_rnn_dim, attention_dim, attention_location_n_filters,
attention_location_kernel_size, n_frames_per_step,
decoder_rnn_dim, prenet_dim, max_decoder_steps, gate_threshold,
p_attention_dropout, p_decoder_dropout,
postnet_embedding_dim, postnet_kernel_size,
postnet_n_convolutions, decoder_no_early_stopping, **kwargs):
super(Tacotron2, self).__init__()
self.mask_padding = mask_padding
self.n_mel_channels = n_mel_channels
self.n_frames_per_step = n_frames_per_step
self.symbols_embedding = nn.Embedding(
n_symbols, symbols_embedding_dim)
std = sqrt(2.0 / (n_symbols + symbols_embedding_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.symbols_embedding.weight.data.uniform_(-val, val)
self.speakers_embedding = nn.Embedding(n_speakers, speakers_embedding_dim)
torch.nn.init.xavier_uniform_(self.speakers_embedding.weight)
self.encoder = Encoder(encoder_n_convolutions,
encoder_embedding_dim,
encoder_kernel_size)
encoder_out_embedding_dim = encoder_embedding_dim + speakers_embedding_dim
self.use_emotions = use_emotions
if self.use_emotions:
self.emotions_embedding = nn.Embedding(n_emotions, emotions_embedding_dim)
torch.nn.init.xavier_uniform_(self.emotions_embedding.weight)
encoder_out_embedding_dim += emotions_embedding_dim
self.decoder = Decoder(n_mel_channels, n_frames_per_step,
encoder_out_embedding_dim, attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
attention_rnn_dim, decoder_rnn_dim,
prenet_dim, max_decoder_steps,
gate_threshold, p_attention_dropout,
p_decoder_dropout,
not decoder_no_early_stopping)
self.postnet = Postnet(n_mel_channels, postnet_embedding_dim,
postnet_kernel_size,
postnet_n_convolutions)
def parse_batch(self, batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths, speaker_ids, emotion_ids = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
speaker_ids = to_gpu(speaker_ids).long()
emotion_ids = to_gpu(emotion_ids).long()
return ((text_padded, input_lengths, mel_padded, max_len, output_lengths, speaker_ids, emotion_ids),
(mel_padded, gate_padded))
def parse_output(self, outputs, output_lengths=None):
if self.mask_padding and output_lengths is not None:
mask = ~get_mask_from_lengths(output_lengths)
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].data.masked_fill_(mask, 0.0)
outputs[1].data.masked_fill_(mask, 0.0)
outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies
return outputs
def forward(self, inputs):
# Parse inputs
inputs, input_lengths, targets, max_len, output_lengths, speaker_ids, emotion_ids = inputs
input_lengths, output_lengths = input_lengths.data, output_lengths.data
# Outputs
outputs = []
# Get symbols encoder outputs
embedded_inputs = self.symbols_embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, input_lengths)
outputs.append(encoder_outputs)
# Extract speaker embeddings
speaker_ids = speaker_ids.unsqueeze(1)
embedded_speakers = self.speakers_embedding(speaker_ids)
embedded_speakers = embedded_speakers.expand(-1, max_len, -1)
outputs.append(embedded_speakers)
# Extract emotion embeddings
if self.use_emotions:
emotion_ids = emotion_ids.unsqueeze(1)
embedded_emotions = self.emotions_embedding(emotion_ids)
embedded_emotions = embedded_emotions.expand(-1, max_len, -1)
outputs.append(embedded_emotions)
# Combine all embeddings
merged_outputs = torch.cat(outputs, -1)
mel_outputs, gate_outputs, alignments = self.decoder(
merged_outputs, targets, memory_lengths=input_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
output_lengths)
def infer(self, input, speaker_id, emotion_id=None):
# Outputs
outputs = []
# Get symbols encoder output
embedded_input = self.symbols_embedding(input).transpose(1, 2)
encoder_output = self.encoder.infer(embedded_input)
outputs.append(encoder_output)
# Get speaker embedding
speaker_id = speaker_id.unsqueeze(1)
embedded_speaker = self.speakers_embedding(speaker_id)
embedded_speaker = embedded_speaker.expand(-1, encoder_output.shape[1], -1)
outputs.append(embedded_speaker)
# Extract emotion embeddings
if self.use_emotions:
emotion_id = emotion_id.unsqueeze(1)
embedded_emotion = self.emotions_embedding(emotion_id)
embedded_emotion = embedded_emotion.expand(-1, encoder_output.shape[1], -1)
outputs.append(embedded_emotion)
# Merge embeddings
merged_outputs = torch.cat(outputs, -1)
# Decode
mel_outputs, gate_outputs, alignments = self.decoder.infer(
merged_outputs)
# Post
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
# Parse
outputs = self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
return outputs
| [
"torch.sigmoid",
"torch.cat",
"torch.nn.LSTMCell",
"torch.stack",
"torch.nn.ModuleList",
"torch.max",
"torch.nn.functional.dropout",
"torch.nn.init.xavier_uniform_",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.BatchNorm1d",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.functional.softmax",
"torch.tanh",
"torch.nn.Embedding",
"torch.sum"
] | 1.1.0 | HudsonHuang/tacotron2 | fa55a0b633abe358e1258e1dc3b40d85e17b3450 |