version (stringclasses, 25 values) | code (stringlengths 75-178k) | apis (sequence) | full_version (stringlengths 1-6) | repo_name (stringlengths 9-78) | hexsha (stringlengths 40) |
---|---|---|---|---|---|
1.0 | ###############################################################################
# BSD 3-Clause License
#
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Author & Contact: Guilin Liu ([email protected])
###############################################################################
import torch
import torch.nn.functional as F
from torch import nn, cuda
from torch.autograd import Variable
class PartialConv2d(nn.Conv2d):
def __init__(self, *args, **kwargs):
# whether the mask is multi-channel or not
if "multi_channel" in kwargs:
self.multi_channel = kwargs["multi_channel"]
kwargs.pop("multi_channel")
else:
self.multi_channel = False
if "return_mask" in kwargs:
self.return_mask = kwargs["return_mask"]
kwargs.pop("return_mask")
else:
self.return_mask = False
super(PartialConv2d, self).__init__(*args, **kwargs)
if self.multi_channel:
self.weight_maskUpdater = torch.ones(
self.out_channels,
self.in_channels,
self.kernel_size[0],
self.kernel_size[1],
)
else:
self.weight_maskUpdater = torch.ones(
1, 1, self.kernel_size[0], self.kernel_size[1]
)
self.slide_winsize = (
self.weight_maskUpdater.shape[1]
* self.weight_maskUpdater.shape[2]
* self.weight_maskUpdater.shape[3]
)
self.last_size = (None, None, None, None)
self.update_mask = None
self.mask_ratio = None
def forward(self, input, mask_in=None):
assert len(input.shape) == 4
if mask_in is not None or self.last_size != tuple(input.shape):
self.last_size = tuple(input.shape)
with torch.no_grad():
if self.weight_maskUpdater.type() != input.type():
self.weight_maskUpdater = self.weight_maskUpdater.to(input)
if mask_in is None:
# if mask is not provided, create a mask
if self.multi_channel:
mask = torch.ones(
input.data.shape[0],
input.data.shape[1],
input.data.shape[2],
input.data.shape[3],
).to(input)
else:
mask = torch.ones(
1, 1, input.data.shape[2], input.data.shape[3]
).to(input)
else:
mask = mask_in
self.update_mask = F.conv2d(
mask,
self.weight_maskUpdater,
bias=None,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=1,
)
# for mixed precision training, change 1e-8 to 1e-6
self.mask_ratio = self.slide_winsize / (self.update_mask + 1e-8)
# self.mask_ratio = torch.max(self.update_mask)/(self.update_mask + 1e-8)
self.update_mask = torch.clamp(self.update_mask, 0, 1)
self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask)
raw_out = super(PartialConv2d, self).forward(
torch.mul(input, mask) if mask_in is not None else input
)
if self.bias is not None:
bias_view = self.bias.view(1, self.out_channels, 1, 1)
output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view
output = torch.mul(output, self.update_mask)
else:
output = torch.mul(raw_out, self.mask_ratio)
if self.return_mask:
return output, self.update_mask
else:
return output
| [
"torch.mul",
"torch.no_grad",
"torch.clamp",
"torch.ones",
"torch.nn.functional.conv2d"
] | 1.0.0 | dukebw/MichiGAN | 3048e259dd2d368bb7a790a034e54d46f3da2a20 |
1.2 | import torch
from overrides import overrides
from allennlp.modules.span_extractors.span_extractor import SpanExtractor
from allennlp.modules.time_distributed import TimeDistributed
from allennlp.nn import util
@SpanExtractor.register("self_attentive")
class SelfAttentiveSpanExtractor(SpanExtractor):
"""
Computes span representations by generating an unnormalized attention score for each
word in the document. Span representations are computed with respect to these
scores by normalising the attention scores for words inside the span.
Given these attention distributions over every span, this module weights the
corresponding vector representations of the words in the span by this distribution,
returning a weighted representation of each span.
Parameters
----------
input_dim : ``int``, required.
The final dimension of the ``sequence_tensor``.
Returns
-------
attended_text_embeddings : ``torch.FloatTensor``.
A tensor of shape (batch_size, num_spans, input_dim), in which each span representation
is formed by locally normalising a global attention over the sequence. The only way
in which the attention distribution differs over different spans is in the set of words
over which they are normalized.
"""
def __init__(self,
input_dim: int) -> None:
super().__init__()
self._input_dim = input_dim
self._global_attention = TimeDistributed(torch.nn.Linear(input_dim, 1))
def get_input_dim(self) -> int:
return self._input_dim
def get_output_dim(self) -> int:
return self._input_dim
@overrides
def forward(self,
sequence_tensor: torch.FloatTensor,
span_indices: torch.LongTensor,
sequence_mask: torch.LongTensor = None,
span_indices_mask: torch.LongTensor = None) -> torch.FloatTensor:
dtype = sequence_tensor.dtype
# both of shape (batch_size, num_spans, 1)
span_starts, span_ends = span_indices.split(1, dim=-1)
# shape (batch_size, num_spans, 1)
# These span widths are off by 1, because the span ends are `inclusive`.
span_widths = span_ends - span_starts
# We need to know the maximum span width so we can
# generate indices to extract the spans from the sequence tensor.
# These indices will then get masked below, such that if the length
# of a given span is smaller than the max, the rest of the values
# are masked.
max_batch_span_width = span_widths.max().item() + 1
# shape (batch_size, sequence_length, 1)
global_attention_logits = self._global_attention(sequence_tensor)
# Shape: (1, 1, max_batch_span_width)
max_span_range_indices = util.get_range_vector(max_batch_span_width,
util.get_device_of(sequence_tensor)).view(1, 1, -1)
# Shape: (batch_size, num_spans, max_batch_span_width)
# This is a broadcasted comparison - for each span we are considering,
# we are creating a range vector of size max_span_width, but masking values
# which are greater than the actual length of the span.
#
# We're using <= here (and for the mask below) because the span ends are
# inclusive, so we want to include indices which are equal to span_widths rather
# than using it as a non-inclusive upper bound.
span_mask = (max_span_range_indices <= span_widths).to(dtype)
raw_span_indices = span_ends - max_span_range_indices
# We also don't want to include span indices which are less than zero,
# which happens because some spans near the beginning of the sequence
# have an end index < max_batch_span_width, so we add this to the mask here.
span_mask = span_mask * (raw_span_indices >= 0).to(dtype)
span_indices = torch.nn.functional.relu(raw_span_indices.to(dtype)).long()
# Shape: (batch_size * num_spans * max_batch_span_width)
flat_span_indices = util.flatten_and_batch_shift_indices(span_indices, sequence_tensor.size(1))
# Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
span_embeddings = util.batched_index_select(sequence_tensor, span_indices, flat_span_indices)
# Shape: (batch_size, num_spans, max_batch_span_width)
span_attention_logits = util.batched_index_select(global_attention_logits,
span_indices,
flat_span_indices).squeeze(-1)
# Shape: (batch_size, num_spans, max_batch_span_width)
span_attention_weights = util.masked_softmax(span_attention_logits, span_mask,
memory_efficient=True,
mask_fill_value=-1000)
# Do a weighted sum of the embedded spans with
# respect to the normalised attention distributions.
# Shape: (batch_size, num_spans, embedding_dim)
attended_text_embeddings = util.weighted_sum(span_embeddings, span_attention_weights)
if span_indices_mask is not None:
# Above we were masking the widths of spans with respect to the max
# span width in the batch. Here we are masking the spans which were
# originally passed in as padding.
return attended_text_embeddings * span_indices_mask.unsqueeze(-1).to(dtype)
return attended_text_embeddings
| [
"torch.nn.Linear"
] | 1.2.0 | tkim135/allennlp | 397f46bd83e24ad8c40a9febd2b5be49583012a6 |
0.3 | import json
import os
import pickle
import re
import torch
from tqdm import tqdm
classes = {
'number':['0','1','2','3','4','5','6','7','8','9','10'],
'material':['rubber','metal'],
'color':['cyan','blue','yellow','purple','red','green','gray','brown'],
'shape':['sphere','cube','cylinder'],
'size':['large','small'],
'exist':['yes','no']
}
def build_dictionaries(clevr_dir):
def compute_class(answer):
for name,values in classes.items():
if answer in values:
return name
raise ValueError('Answer {} does not belong to a known class'.format(answer))
cached_dictionaries = os.path.join(clevr_dir, 'questions', 'CLEVR_built_dictionaries.pkl')
if os.path.exists(cached_dictionaries):
print('==> using cached dictionaries: {}'.format(cached_dictionaries))
with open(cached_dictionaries, 'rb') as f:
return pickle.load(f)
quest_to_ix = {}
answ_to_ix = {}
answ_ix_to_class = {}
json_train_filename = os.path.join(clevr_dir, 'questions', 'CLEVR_train_questions.json')
#load all words from all training data
with open(json_train_filename, "r") as f:
questions = json.load(f)['questions']
for q in tqdm(questions):
question = tokenize(q['question'])
answer = q['answer']
#pdb.set_trace()
for word in question:
if word not in quest_to_ix:
quest_to_ix[word] = len(quest_to_ix)+1 #one based indexing; zero is reserved for padding
a = answer.lower()
if a not in answ_to_ix:
ix = len(answ_to_ix)+1
answ_to_ix[a] = ix
answ_ix_to_class[ix] = compute_class(a)
ret = (quest_to_ix, answ_to_ix, answ_ix_to_class)
with open(cached_dictionaries, 'wb') as f:
pickle.dump(ret, f)
return ret
def to_dictionary_indexes(dictionary, sentence):
"""
Outputs indexes of the dictionary corresponding to the words in the sequence.
Case insensitive.
"""
split = tokenize(sentence)
idxs = torch.LongTensor([dictionary[w] for w in split])
return idxs
def collate_samples_from_pixels(batch):
return collate_samples(batch, False, False)
def collate_samples_state_description(batch):
return collate_samples(batch, True, False)
def collate_samples_images_state_description(batch):
return collate_samples(batch, True, True)
def collate_samples(batch, state_description, only_images):
"""
Used by DatasetLoader to merge together multiple samples into one mini-batch.
"""
batch_size = len(batch)
if only_images:
images = batch
else:
images = [d['image'] for d in batch]
answers = [d['answer'] for d in batch]
questions = [d['question'] for d in batch]
# questions are not fixed length: they must be padded to the maximum length
# in this batch, in order to be inserted in a tensor
max_len = max(map(len, questions))
padded_questions = torch.LongTensor(batch_size, max_len).zero_()
for i, q in enumerate(questions):
padded_questions[i, :len(q)] = q
if state_description:
max_len = 12
#even object matrices should be padded (they are variable length)
padded_objects = torch.FloatTensor(batch_size, max_len, images[0].size()[1]).zero_()
for i, o in enumerate(images):
padded_objects[i, :o.size()[0], :] = o
images = padded_objects
if only_images:
collated_batch = torch.stack(images)
else:
collated_batch = dict(
image=torch.stack(images),
answer=torch.stack(answers),
question=torch.stack(padded_questions)
)
return collated_batch
def tokenize(sentence):
# punctuation should be separated from the words
s = re.sub('([.,;:!?()])', r' \1 ', sentence)
s = re.sub(r'\s{2,}', ' ', s)
# tokenize
split = s.split()
# normalize all words to lowercase
lower = [w.lower() for w in split]
return lower
def load_tensor_data(data_batch, cuda, invert_questions, volatile=False):
# prepare input
var_kwargs = dict(volatile=True) if volatile else dict(requires_grad=False)
qst = data_batch['question']
if invert_questions:
# invert question indexes in this batch
qst_len = qst.size()[1]
qst = qst.index_select(1, torch.arange(qst_len - 1, -1, -1).long())
img = torch.autograd.Variable(data_batch['image'], **var_kwargs)
qst = torch.autograd.Variable(qst, **var_kwargs)
label = torch.autograd.Variable(data_batch['answer'], **var_kwargs)
if cuda:
img, qst, label = img.cuda(), qst.cuda(), label.cuda()
label = (label - 1).squeeze(1)
return img, qst, label
| [
"torch.autograd.Variable",
"torch.LongTensor",
"torch.stack",
"torch.arange"
] | 0.3.1 | mesnico/RelationNetworks-CLEVR | b8e0e7af12408877c8a18d8f2802d88138605983 |
1.3 | # Copyright 2018 Dong-Hyun Lee, Kakao Brain.
# (Strongly inspired by original Google BERT code and Hugging Face's code)
""" Fine-tuning on A Classification Task with pretrained Transformer """
import itertools
import csv
import os
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch import optim
import tokenization
import models
# import optim
import train
import argparse
from utils import set_seeds, get_device, truncate_tokens_pair
class CsvDataset(Dataset):
""" Dataset Class for CSV file """
labels = None
def __init__(self, file, pipeline=[]): # csv file and pipeline object
Dataset.__init__(self)
data = []
with open(file, "r") as f:
# list of split lines; each line is also a list
lines = csv.reader(f, delimiter='\t', quotechar=None)
for instance in self.get_instances(lines): # instance : tuple of fields
for proc in pipeline: # a bunch of pre-processing
instance = proc(instance)
data.append(instance)
# To Tensors
self.tensors = [torch.tensor(x, dtype=torch.long) for x in zip(*data)]
def __len__(self):
return self.tensors[0].size(0)
def __getitem__(self, index):
return tuple(tensor[index] for tensor in self.tensors)
def get_instances(self, lines):
""" get instance array from (csv-separated) line list """
raise NotImplementedError
class MRPC(CsvDataset):
""" Dataset class for MRPC """
labels = ("0", "1") # label names
def __init__(self, file, pipeline=[]):
super().__init__(file, pipeline)
def get_instances(self, lines):
for line in itertools.islice(lines, 1, None): # skip header
yield line[0], line[3], line[4] # label, text_a, text_b
class MNLI(CsvDataset):
""" Dataset class for MNLI """
labels = ("contradiction", "entailment", "neutral") # label names
def __init__(self, file, pipeline=[]):
super().__init__(file, pipeline)
def get_instances(self, lines):
for line in itertools.islice(lines, 1, None): # skip header
yield line[-1], line[8], line[9] # label, text_a, text_b
class SST(CsvDataset):
""" Dataset class for SST """
labels = ("0", "1") # label names
def __init__(self, file='data/', pipeline=[]):
super().__init__(file, pipeline)
def get_instances(self, lines):
for line in itertools.islice(lines, 1, None): # skip header
yield line[-1], line[0], None # label, text_a, text_b
def dataset_class(task):
""" Mapping from task string to Dataset Class """
table = {'mrpc': MRPC, 'sst': SST}
return table[task]
class Pipeline():
""" Preprocess Pipeline Class : callable """
def __init__(self):
super().__init__()
def __call__(self, instance):
raise NotImplementedError
class Tokenizing(Pipeline):
""" Tokenizing sentence pair """
def __init__(self, preprocessor, tokenize):
super().__init__()
self.preprocessor = preprocessor # e.g. text normalization
self.tokenize = tokenize # tokenize function
def __call__(self, instance):
label, text_a, text_b = instance
label = self.preprocessor(label)
tokens_a = self.tokenize(self.preprocessor(text_a))
tokens_b = self.tokenize(self.preprocessor(text_b)) \
if text_b else []
return (label, tokens_a, tokens_b)
class AddSpecialTokensWithTruncation(Pipeline):
""" Add special tokens [CLS], [SEP] with truncation """
def __init__(self, max_len=512):
super().__init__()
self.max_len = max_len
def __call__(self, instance):
label, tokens_a, tokens_b = instance
# -3 special tokens for [CLS] text_a [SEP] text_b [SEP]
# -2 special tokens for [CLS] text_a [SEP]
_max_len = self.max_len - 3 if tokens_b else self.max_len - 2
truncate_tokens_pair(tokens_a, tokens_b, _max_len)
# Add Special Tokens
tokens_a = ['[CLS]'] + tokens_a + ['[SEP]']
tokens_b = tokens_b + ['[SEP]'] if tokens_b else []
return (label, tokens_a, tokens_b)
class TokenIndexing(Pipeline):
""" Convert tokens into token indexes and do zero-padding """
def __init__(self, indexer, labels, max_len=512):
super().__init__()
self.indexer = indexer # function : tokens to indexes
# map from a label name to a label index
self.label_map = {name: i for i, name in enumerate(labels)}
self.max_len = max_len
def __call__(self, instance):
label, tokens_a, tokens_b = instance
input_ids = self.indexer(tokens_a + tokens_b)
segment_ids = [0]*len(tokens_a) + [1]*len(tokens_b) # token type ids
input_mask = [1]*(len(tokens_a) + len(tokens_b))
label_id = self.label_map[label]
# zero padding
n_pad = self.max_len - len(input_ids)
input_ids.extend([0]*n_pad)
segment_ids.extend([0]*n_pad)
input_mask.extend([0]*n_pad)
return (input_ids, segment_ids, input_mask, label_id)
class Classifier(nn.Module):
""" Classifier with Transformer """
def __init__(self, cfg, n_labels):
super().__init__()
self.transformer = models.Transformer(cfg)
self.fc = nn.Linear(cfg.hidden, cfg.hidden)
self.activ = nn.ReLU()
self.drop = nn.Dropout(cfg.p_drop_hidden)
self.pool = nn.AdaptiveMaxPool1d(1)
self.classifier = nn.Linear(cfg.hidden, n_labels)
def forward(self, input_ids, segment_ids, input_mask):
h = self.transformer(input_ids, segment_ids, input_mask)
# only use the first h in the sequence
h = self.pool(h.transpose(1, 2)).squeeze(-1)
pooled_h = self.activ(self.fc(h))
logits = self.classifier(self.drop(pooled_h))
return logits
#pretrain_file='../uncased_L-12_H-768_A-12/bert_model.ckpt',
#pretrain_file='../exp/bert/pretrain_100k/model_epoch_3_steps_9732.pt',
def main(task='mrpc',
train_cfg='config/train_mrpc.json',
model_cfg='config/albert_base.json',
data_file='./data/MRPC/train.tsv',
model_file=None,
pretrain_file=None,
data_parallel=True,
vocab='./data/vocab.txt',
save_dir='./saved/mrpc',
max_len=128,
mode='train'):
cfg = train.Config.from_json(train_cfg)
model_cfg = models.Config.from_json(model_cfg)
set_seeds(cfg.seed)
tokenizer = tokenization.FullTokenizer(vocab_file=vocab, do_lower_case=True)
TaskDataset = dataset_class(task) # task dataset class according to the task
pipeline = [Tokenizing(tokenizer.convert_to_unicode, tokenizer.tokenize),
AddSpecialTokensWithTruncation(max_len),
TokenIndexing(tokenizer.convert_tokens_to_ids,
TaskDataset.labels, max_len)]
dataset = TaskDataset(data_file, pipeline)
data_iter = DataLoader(dataset, batch_size=cfg.batch_size, shuffle=True)
model = Classifier(model_cfg, len(TaskDataset.labels))
model.train()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=cfg.lr, betas=(0.9, 0.999), eps=1e-06)
trainer = train.Trainer(cfg,
model,
data_iter,
optimizer,
save_dir, get_device())
if mode == 'train':
def get_loss(model, batch, global_step): # make sure loss is a scalar tensor
input_ids, segment_ids, input_mask, label_id = batch
logits = model(input_ids, segment_ids, input_mask)
loss = criterion(logits, label_id)
return loss
trainer.train(get_loss, model_file, pretrain_file, data_parallel)
elif mode == 'eval':
def evaluate(model, batch):
input_ids, segment_ids, input_mask, label_id = batch
logits = model(input_ids, segment_ids, input_mask)
_, label_pred = logits.max(1)
result = (label_pred == label_id).float() #.cpu().numpy()
accuracy = result.mean()
return accuracy, result
results = trainer.eval(evaluate, model_file, data_parallel)
total_accuracy = torch.cat(results).mean().item()
print('\n-----------------------------')
print('[{}] Accuracy: {:.3f}'.format(task, total_accuracy))
print('-----------------------------\n')
if __name__ == '__main__':
'''
mrpc : 76.6/83.0
random weight init
Accuracy: 0.701
electra : 150000
Accuracy: 0.701
masked : 200000
Accuracy: 0.699
'''
parser = argparse.ArgumentParser(description='GLUE Score')
parser.add_argument('--pretrain', type=str, default='./saved/d_model_steps_150000.pt')
parser.add_argument('--task', type=str, default='mrpc', choices=['mrpc', 'sst'])
parser.add_argument('--cfg', type=str, default='config/train_mrpc.json')
parser.add_argument('--train', type=str, default='./data/MRPC/train.tsv')
parser.add_argument('--eval', type=str, default='./data/MRPC/dev.tsv')
args = parser.parse_args()
cfg = train.Config.from_json(args.cfg)
os.makedirs(os.path.join('saved', args.task), exist_ok=True)
main(mode='train',
task=args.task,
# pretrain_file=args.pretrain,
data_file=args.train,
train_cfg=args.cfg,
)
main(mode='eval',
task=args.task,
data_file=args.eval,
model_file='saved/{}/model_steps_{}.pt'.format(args.task, cfg.total_steps))
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.utils.data.Dataset.__init__",
"torch.nn.ReLU",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss",
"torch.nn.AdaptiveMaxPool1d"
] | 1.3.1 | theblackcat102/ALBERT-Pytorch | eebf3465cbc82c643dbe561b480bed0116f34d21 |
1.1 | import torch.optim as optim
from models import build_dual_model
from dataset import MetricLearningDataset
from torch.utils.data import DataLoader
from augmentation import transform_train, transform_test
from torch.autograd import Variable
import math
import torch
import numpy as np
from trainer.trainer import compute_knn
from loss import CenterBatchCriterion, ClusterCriterion, ReconstructCriterion
import matplotlib.pyplot as plt
# deterministic behaviour
torch.manual_seed(1024)
torch.cuda.manual_seed(1024)
torch.backends.cudnn.benchmark = True
np.random.seed(1024)
device = 'cuda:0'
train_set = MetricLearningDataset('data', train=True, transform=transform_train)
trn_loader = DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4, drop_last=True)
model = build_dual_model('default', pretrained=True, low_dim=128, n_cluster=75).to(device)
# state = torch.load('checkpoint/test_val_loss/Sep-27-16_59_12/model_best.15.pth')
state = torch.load('new_checkpoint/test/Sep-28-22_58_44/model_best.12.pth')
model.load_state_dict(state['model'])
optimizer = optim.SGD(model.parameters(), lr=1e-1)
optimizer.load_state_dict(state['optimizer'])
print(optimizer.param_groups[0]['lr'])
ml_criterion = CenterBatchCriterion(1, 0.1, 64, 1)
ml_criterion.to(device)
rim_criterion = ClusterCriterion(1)
rim_criterion.to(device)
recon_criterion = ReconstructCriterion()
recon_criterion.to(device)
def find_lr(init_value = 1e-8, final_value=1e-1, beta = 0.98):
num = len(trn_loader)-1
mult = (final_value / init_value) ** (1/num)
lr = init_value
optimizer.param_groups[0]['lr'] = lr
avg_loss = 0.
best_loss = 0.
batch_num = 0
losses = []
log_lrs = []
# generate positive index
train_set = trn_loader.dataset
n_data = len(train_set)
temp_loader = torch.utils.data.DataLoader(train_set, batch_size=100, shuffle=False, num_workers=4)
train_set.transform = transform_test
labels = np.zeros(n_data)
model.mode = 'pool'
out_index = 1
feat_dim = 1024
features = torch.zeros(n_data, feat_dim)
labels = torch.Tensor(labels)
ptr = 0
with torch.no_grad():
for batch_idx, (inputs, _, _) in enumerate(temp_loader):
batch_size = inputs.size(0)
real_size = min(batch_size, 100)
inputs = inputs.to('cuda:0')
batch_feat = model(inputs)[out_index]
features[ptr:ptr + real_size, :] = batch_feat.cpu()
ptr += 100
train_set.transform = transform_train
model.mode = 'normal'
# select nn Index
dist_feat = np.array(torch.mm(features, features.t()))
nn_index = compute_knn(dist_feat, labels, knn=1, epoch=1)
train_set.nnIndex = nn_index
for inputs1, inputs2, targets in trn_loader:
batch_num += 1
#As before, get the loss for this mini-batch of inputs/outputs
inputs1, inputs2, targets = inputs1.to(device), inputs2.to(device), targets.to(device)
targets = targets.repeat(2)
inputs = torch.cat((inputs1, inputs2), 0)
optimizer.zero_grad()
repr, cluster, emb = model(inputs)
# Total loss
rim_loss = rim_criterion(cluster)
pred_cluster = torch.argmax(torch.softmax(cluster, dim=1), dim=1)
unique_cluster = torch.unique(pred_cluster)
centroid_embedding = torch.zeros(len(unique_cluster), 1024, 7, 7).to(device)
index = pred_cluster == unique_cluster.view(-1, 1)
for i in range(len(index)):
centroid_embedding[i] = torch.mean(emb[index[i]], dim=0)
emb_index = torch.argmax(unique_cluster == pred_cluster.view(-1, 1), dim=1)
model.feat_ext.eval()
x = model.flatten(centroid_embedding.detach().to(device))
x = model.feat_ext(x)
centroid_repr = model.l2norm(x)
model.feat_ext.train()
ml_loss = ml_criterion(repr, centroid_repr, pred_cluster)
# metric_loss = self.ml_criterion(repr)
loss = ml_loss + 0.1 * rim_loss
# loss = recon_loss + ml_loss + rim_loss
#Compute the smoothed loss
avg_loss = beta * avg_loss + (1-beta) * loss.item()
smoothed_loss = avg_loss / (1 - beta**batch_num)
#Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > 4 * best_loss:
return log_lrs, losses
#Record the best loss
if smoothed_loss < best_loss or batch_num==1:
best_loss = smoothed_loss
#Store the values
losses.append(smoothed_loss)
log_lrs.append(math.log10(lr))
#Do the SGD step
loss.backward()
optimizer.step()
#Update the lr for the next step
lr *= mult
optimizer.param_groups[0]['lr'] = lr
return log_lrs, losses
logs, losses = find_lr()
plt.plot(logs[10:-5],losses[10:-5])
plt.show()
print(losses) | [
"torch.zeros",
"torch.cat",
"torch.cuda.manual_seed",
"torch.unique",
"torch.no_grad",
"torch.softmax",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.load",
"torch.Tensor",
"torch.mean"
] | 1.1.0 | aioz-ai/BMVC20_CBSwR | fd24336c3cba0b85c0fa2482bf82409457534266 |
1.4 | from collections import namedtuple
import os
from ding.torch_utils.data_helper import to_device, to_dtype, to_tensor
import torch
from torchvision import transforms
import numpy as np
from typing import Dict, List, Any, Optional
from .base_carla_policy import BaseCarlaPolicy
from core.models import PIDController, CustomController
from core.models.lbc_model import LBCBirdviewModel, LBCImageModel
from core.utils.model_utils import common
from ding.utils.data import default_collate, default_decollate
STEPS = 5
SPEED_STEPS = 3
COMMANDS = 4
class LBCBirdviewPolicy(BaseCarlaPolicy):
"""
LBC driving policy with Bird-eye View inputs. It has an LBC NN model that can handle
observations from several environments by collating data into a batch. Each environment
has an associated PID controller that produces the final control signals. On each update,
all envs should use the correct env id so that their PID controllers work correctly, and the
controller should be reset when starting a new episode.
:Arguments:
- cfg (Dict): Config Dict.
:Interfaces:
reset, forward
"""
config = dict(
model=dict(cuda=True, backbone='resnet18', input_channel=7, all_branch=False),
steer_points=None,
pid=None,
gap=5,
dt=0.1,
crop_size=192,
pixels_per_meter=5,
)
def __init__(self, cfg: dict) -> None:
super().__init__(cfg, enable_field=set(['eval', 'learn']))
self._controller_dict = dict()
if self._cfg.model.cuda:
if not torch.cuda.is_available():
print('[POLICY] No cuda device found! Use cpu by default')
self._device = torch.device('cpu')
else:
self._device = torch.device('cuda')
else:
self._device = torch.device('cpu')
self._one_hot = torch.FloatTensor(torch.eye(4))
self._transform = transforms.ToTensor()
self._gap = self._cfg.gap
self._dt = self._cfg.dt
self._crop_size = self._cfg.crop_size
self._pixels_per_meter = self._cfg.pixels_per_meter
self._steer_points = self._cfg.steer_points
self._pid = self._cfg.pid
if self._steer_points is None:
self._steer_points = {"1": 3, "2": 2, "3": 2, "4": 2}
if self._pid is None:
self._pid = {
"1": {
"Kp": 1.0,
"Ki": 0.1,
"Kd": 0
}, # Left
"2": {
"Kp": 1.0,
"Ki": 0.1,
"Kd": 0
}, # Right
"3": {
"Kp": 0.8,
"Ki": 0.1,
"Kd": 0
}, # Straight
"4": {
"Kp": 0.8,
"Ki": 0.1,
"Kd": 0
}, # Follow
}
self._speed_control_func = lambda: PIDController(K_P=1.0, K_I=0.1, K_D=2.5)
self._turn_control_func = lambda: CustomController(self._pid)
self._model = LBCBirdviewModel(
self._cfg.model.backbone, self._cfg.model.input_channel, self._cfg.model.all_branch
)
self._model.to(self._device)
def _postprocess(self, steer, throttle, brake):
control = {}
control.update(
{
'steer': np.clip(steer, -1.0, 1.0),
'throttle': np.clip(throttle, 0.0, 1.0),
'brake': np.clip(brake, 0.0, 1.0),
}
)
return control
def _reset_single(self, data_id):
if data_id in self._controller_dict:
self._controller_dict.pop(data_id)
self._controller_dict[data_id] = (self._speed_control_func(), self._turn_control_func())
def _reset(self, data_ids: Optional[List[int]] = None) -> None:
if data_ids is not None:
for id in data_ids:
self._reset_single(id)
else:
for id in self._controller_dict:
self._reset_single(id)
def _forward_eval(self, data: Dict) -> Dict[str, Any]:
"""
Run forward to get control signals in `eval` mode.
:Arguments:
- data (Dict): Input dict, with env ids as keys and related observations as values.
:Returns:
Dict: Control and waypoints dict stored in values for each provided env id.
"""
data_ids = list(data.keys())
data = default_collate(list(data.values()))
birdview = to_dtype(data['birdview'], dtype=torch.float32).permute(0, 3, 1, 2)
speed = data['speed']
command_index = [i.item() - 1 for i in data['command']]
command = self._one_hot[command_index]
if command.ndim == 1:
command = command.unsqueeze(0)
with torch.no_grad():
_birdview = birdview.to(self._device)
_speed = speed.to(self._device)
_command = command.to(self._device)
if self._model._all_branch:
_locations, _ = self._model(_birdview, _speed, _command)
else:
_locations = self._model(_birdview, _speed, _command)
_locations = _locations.detach().cpu().numpy()
map_locations = _locations
actions = {}
for index, data_id in enumerate(data_ids):
# Pixel coordinates.
map_location = map_locations[index, ...]
map_location = (map_location + 1) / 2 * self._crop_size
targets = list()
for i in range(STEPS):
pixel_dx, pixel_dy = map_location[i]
pixel_dx = pixel_dx - self._crop_size / 2
pixel_dy = self._crop_size - pixel_dy
angle = np.arctan2(pixel_dx, pixel_dy)
dist = np.linalg.norm([pixel_dx, pixel_dy]) / self._pixels_per_meter
targets.append([dist * np.cos(angle), dist * np.sin(angle)])
target_speed = 0.0
for i in range(1, SPEED_STEPS):
pixel_dx, pixel_dy = map_location[i]
prev_dx, prev_dy = map_location[i - 1]
dx = pixel_dx - prev_dx
dy = pixel_dy - prev_dy
delta = np.linalg.norm([dx, dy])
target_speed += delta / (self._pixels_per_meter * self._gap * self._dt) / (SPEED_STEPS - 1)
_cmd = data['command'][index].item()
_sp = data['speed'][index].item()
n = self._steer_points.get(str(_cmd), 1)
targets = np.concatenate([[[0, 0]], targets], 0)
c, r = ls_circle(targets)
closest = common.project_point_to_circle(targets[n], c, r)
v = [1.0, 0.0, 0.0]
w = [closest[0], closest[1], 0.0]
alpha = common.signed_angle(v, w)
steer = self._controller_dict[data_id][1].run_step(alpha, _cmd)
throttle = self._controller_dict[data_id][0].step(target_speed - _sp)
brake = 0.0
if target_speed < 1.0:
steer = 0.0
throttle = 0.0
brake = 1.0
control = self._postprocess(steer, throttle, brake)
control.update({'map_locations': map_location})
actions[data_id] = {'action': control}
return actions
def _reset_eval(self, data_ids: Optional[List[int]] = None) -> None:
"""
Reset the policy in `eval` mode. It will switch the NN model into 'eval' mode and reset
the controllers for the provided env ids.
:Arguments:
- data_id (List[int], optional): List of env id to reset. Defaults to None.
"""
self._model.eval()
self._reset(data_ids)
class LBCImagePolicy(BaseCarlaPolicy):
"""
LBC driving policy with RGB image inputs. It has an LBC NN model that can handle
observations from several environments by collating data into a batch. Each environment
has an associated PID controller that produces the final control signals. On each update,
all envs should use the correct env id so that their PID controllers work correctly, and the
controller should be reset when starting a new episode.
:Arguments:
- cfg (Dict): Config Dict.
:Interfaces:
reset, forward
"""
config = dict(
model=dict(cuda=True, backbone='resnet34', all_branch=False),
camera_args=dict(
fixed_offset=4.0,
fov=90,
h=160,
w=384,
world_y=1.4,
),
steer_points=None,
pid=None,
gap=5,
dt=0.1,
)
def __init__(self, cfg: Dict) -> None:
super().__init__(cfg, enable_field=set(['eval', 'learn']))
self._controller_dict = dict()
if self._cfg.model.cuda:
if not torch.cuda.is_available():
print('[POLICY] No cuda device found! Use cpu by default')
self._device = torch.device('cpu')
else:
self._device = torch.device('cuda')
else:
self._device = torch.device('cpu')
self._one_hot = torch.FloatTensor(torch.eye(4))
self._transform = transforms.ToTensor()
self._camera_args = self._cfg.camera_args
self._fixed_offset = self._camera_args.fixed_offset
w = float(self._camera_args.w)
h = float(self._camera_args.h)
self._img_size = np.array([w, h])
self._gap = self._cfg.gap
self._dt = self._cfg.dt
self._steer_points = self._cfg.steer_points
self._pid = self._cfg.pid
if self._steer_points is None:
self._steer_points = {"1": 4, "2": 3, "3": 2, "4": 2}
if self._pid is None:
self._pid = {
"1": {
"Kp": 0.5,
"Ki": 0.20,
"Kd": 0.0
},
"2": {
"Kp": 0.7,
"Ki": 0.10,
"Kd": 0.0
},
"3": {
"Kp": 1.0,
"Ki": 0.10,
"Kd": 0.0
},
"4": {
"Kp": 1.0,
"Ki": 0.50,
"Kd": 0.0
}
}
self._speed_control_func = lambda: PIDController(K_P=.8, K_I=.08, K_D=0.)
self._turn_control_func = lambda: CustomController(self._pid)
self._engine_brake_threshold = 2.0
self._brake_threshold = 2.0
self._model = LBCImageModel(self._cfg.model.backbone, False, all_branch=self._cfg.model.all_branch)
self._model = self._model.to(self._device)
def _reset_single(self, data_id):
if data_id in self._controller_dict:
self._controller_dict.pop(data_id)
self._controller_dict[data_id] = (self._speed_control_func(), self._turn_control_func())
def _reset(self, data_ids: Optional[List[int]] = None) -> None:
if data_ids is not None:
for id in data_ids:
self._reset_single(id)
else:
for id in self._controller_dict:
self._reset_single(id)
def _postprocess(self, steer, throttle, brake):
control = {}
control.update(
{
'steer': np.clip(steer, -1.0, 1.0),
'throttle': np.clip(throttle, 0.0, 1.0),
'brake': np.clip(brake, 0.0, 1.0),
}
)
return control
def _unproject(self, output, world_y=1.4, fov=90):
cx, cy = self._img_size / 2
w, h = self._img_size
f = w / (2 * np.tan(fov * np.pi / 360))
xt = (output[..., 0:1] - cx) / f
yt = (output[..., 1:2] - cy) / f
world_z = world_y / yt
world_x = world_z * xt
world_output = np.stack([world_x, world_z], axis=-1)
if self._fixed_offset:
world_output[..., 1] -= self._fixed_offset
world_output = world_output.squeeze()
return world_output
def _forward_eval(self, data: Dict) -> Dict:
"""
Run forward to get control signals in `eval` mode.
:Arguments:
- data (Dict): Input dict, with env ids as keys and related observations as values.
:Returns:
Dict: Control and waypoints dict stored in values for each provided env id.
"""
data_ids = list(data.keys())
data = default_collate(list(data.values()))
rgb = to_dtype(data['rgb'], dtype=torch.float32).permute(0, 3, 1, 2)
speed = data['speed']
command_index = [i.item() - 1 for i in data['command']]
command = self._one_hot[command_index]
if command.ndim == 1:
command = command.unsqueeze(0)
with torch.no_grad():
_rgb = rgb.to(self._device)
_speed = speed.to(self._device)
_command = command.to(self._device)
if self._model.all_branch:
model_pred, _ = self._model(_rgb, _speed, _command)
else:
model_pred = self._model(_rgb, _speed, _command)
model_pred = model_pred.detach().cpu().numpy()
pixels_pred = model_pred
actions = {}
for index, data_id in enumerate(data_ids):
# Project back to world coordinate
pixel_pred = pixels_pred[index, ...]
pixel_pred = (pixel_pred + 1) * self._img_size / 2
world_pred = self._unproject(pixel_pred, self._camera_args.world_y, self._camera_args.fov)
targets = [(0, 0)]
for i in range(STEPS):
pixel_dx, pixel_dy = world_pred[i]
angle = np.arctan2(pixel_dx, pixel_dy)
dist = np.linalg.norm([pixel_dx, pixel_dy])
targets.append([dist * np.cos(angle), dist * np.sin(angle)])
targets = np.array(targets)
target_speed = np.linalg.norm(targets[:-1] - targets[1:], axis=1).mean() / (self._gap * self._dt)
_cmd = data['command'][index].item()
_sp = data['speed'][index].item()
c, r = ls_circle(targets)
n = self._steer_points.get(str(_cmd), 1)
closest = common.project_point_to_circle(targets[n], c, r)
v = [1.0, 0.0, 0.0]
w = [closest[0], closest[1], 0.0]
alpha = common.signed_angle(v, w)
steer = self._controller_dict[data_id][1].run_step(alpha, _cmd)
throttle = self._controller_dict[data_id][0].step(target_speed - _sp)
brake = 0.0
# Slow or stop.
if target_speed <= self._engine_brake_threshold:
steer = 0.0
throttle = 0.0
if target_speed <= self._brake_threshold:
brake = 1.0
control = self._postprocess(steer, throttle, brake)
control.update({'map_locations': pixels_pred})
actions[data_id] = {'action': control}
return actions
def _reset_eval(self, data_ids: Optional[List[int]]) -> None:
"""
Reset the policy in `eval` mode. It will switch the NN model into 'eval' mode and reset
the controllers for the provided env ids.
:Arguments:
- data_id (List[int], optional): List of env id to reset. Defaults to None.
"""
self._model.eval()
self._reset(data_ids)
def ls_circle(points):
'''
Input: Nx2 points
Output: cx, cy, r
'''
xs = points[:, 0]
ys = points[:, 1]
us = xs - np.mean(xs)
vs = ys - np.mean(ys)
Suu = np.sum(us ** 2)
Suv = np.sum(us * vs)
Svv = np.sum(vs ** 2)
Suuu = np.sum(us ** 3)
Suvv = np.sum(us * vs * vs)
Svvv = np.sum(vs ** 3)
Svuu = np.sum(vs * us * us)
A = np.array([[Suu, Suv], [Suv, Svv]])
b = np.array([1 / 2. * Suuu + 1 / 2. * Suvv, 1 / 2. * Svvv + 1 / 2. * Svuu])
cx, cy = np.linalg.solve(A, b)
r = np.sqrt(cx * cx + cy * cy + (Suu + Svv) / len(xs))
cx += np.mean(xs)
cy += np.mean(ys)
return np.array([cx, cy]), r
| [
"torch.device",
"torch.no_grad",
"torch.eye",
"torch.cuda.is_available"
] | 1.4 | timothijoe/DI-drive | 3cddefc85bbbca6bcdd8a4d796decacaf8d81778 |
1.4 | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, input_channel=7, num_classes=1000, zero_init_residual=False, bias_first=True):
super(ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(input_channel, 64, kernel_size=7, stride=2, padding=3, bias=bias_first)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves
# like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class ResNetv2(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNetv2, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(2)
# TODO: This is heavily hard-coded, in order to fit my image size on ResNet
if block.__name__ == 'Bottleneck':
self.fc = nn.Linear(6144, num_classes)
else:
self.fc = nn.Linear(1536, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x0 = self.maxpool(x)
x1 = self.layer1(x0)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
x = self.avgpool(x4)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x, [x0, x1, x2, x3, x4] # output, intermediate
def get_layers_features(self, x):
# Just get the intermediate layers directly.
x = self.conv1(x)
x = self.bn1(x)
x0 = self.relu(x)
x = self.maxpool(x0)
x1 = self.layer1(x)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
x5 = self.avgpool(x4)
x = x5.view(x.size(0), -1)
x = self.fc(x)
all_layers = [x0, x1, x2, x3, x4, x5, x]
return all_layers
model_funcs = {
'resnet18': (BasicBlock, [2, 2, 2, 2], -1),
'resnet34': (BasicBlock, [3, 4, 6, 3], 512),
'resnet50': (Bottleneck, [3, 4, 6, 3], -1),
'resnet101': (Bottleneck, [3, 4, 23, 3], -1),
'resnet152': (Bottleneck, [3, 8, 36, 3], -1),
}
def get_resnet(model_name='resnet18', pretrained=False, **kwargs):
block, layers, c_out = model_funcs[model_name]
model = ResNet(block, layers, **kwargs)
if pretrained and kwargs.get('input_channel', 3) == 3:
url = model_urls[model_name]
print("Loading ResNet weights from : %s" % url)
model.load_state_dict(model_zoo.load_url(url))
return model, c_out
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNetv2(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model_dict = model_zoo.load_url(model_urls['resnet18'])
# remove the fc layers
del model_dict['fc.weight']
del model_dict['fc.bias']
state = model.state_dict()
state.update(model_dict)
model.load_state_dict(state)
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNetv2(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model_dict = model_zoo.load_url(model_urls['resnet34'])
# remove the fc layers
del model_dict['fc.weight']
del model_dict['fc.bias']
state = model.state_dict()
state.update(model_dict)
model.load_state_dict(state)
return model
def resnet50(pretrained=False, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNetv2(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
model_dict = model_zoo.load_url(model_urls['resnet50'])
# remove the fc layers
del model_dict['fc.weight']
del model_dict['fc.bias']
state = model.state_dict()
state.update(model_dict)
model.load_state_dict(state)
return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNetv2(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNetv2(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
| [
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.utils.model_zoo.load_url",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.4 | timothijoe/DI-drive | 3cddefc85bbbca6bcdd8a4d796decacaf8d81778 |
1.9 | from conf import *
import torch
import random
import numpy as np
import os
from typing import Dict, Tuple, Any
from sklearn.metrics import roc_auc_score
from scipy.special import expit, softmax
from sklearn.metrics import precision_score
def set_seed(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def global_average_precision_score(y_true, y_pred, ignore_non_landmarks=False):
indexes = np.argsort(y_pred[1])[::-1]
queries_with_target = (y_true < args.n_classes).sum()
correct_predictions = 0
total_score = 0.
i = 1
for k in indexes:
if ignore_non_landmarks and y_true[k] == args.n_classes:
continue
if y_pred[0][k] == args.n_classes:
continue
relevance_of_prediction_i = 0
if y_true[k] == y_pred[0][k]:
correct_predictions += 1
relevance_of_prediction_i = 1
precision_at_rank_i = correct_predictions / i
total_score += precision_at_rank_i * relevance_of_prediction_i
i += 1
return 1 / queries_with_target * total_score
def comp_metric(y_true, logits, ignore_non_landmarks=False):
score = global_average_precision_score(y_true, logits, ignore_non_landmarks=ignore_non_landmarks)
return score
def cos_similarity_matrix(a, b, eps=1e-8):
a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None]
a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n))
b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n))
sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1))
return sim_mt
def get_topk_cossim(test_emb, tr_emb, batchsize = 64, k=10, device='cuda:0',verbose=True):
tr_emb = torch.tensor(tr_emb, dtype = torch.float32, device=torch.device(device))
test_emb = torch.tensor(test_emb, dtype = torch.float32, device=torch.device(device))
vals = []
inds = []
for test_batch in test_emb.split(batchsize):
sim_mat = cos_similarity_matrix(test_batch, tr_emb)
vals_batch, inds_batch = torch.topk(sim_mat, k=k, dim=1)
vals += [vals_batch.detach().cpu()]
inds += [inds_batch.detach().cpu()]
vals = torch.cat(vals)
inds = torch.cat(inds)
return vals, inds | [
"torch.device",
"torch.cat",
"torch.cuda.manual_seed",
"torch.manual_seed",
"torch.ones_like",
"torch.topk"
] | 1.9.0 | iamkaiwei/kaggle-landmark-recognition-2020-1st-place | 97df71ecfd37122730b7f0b29fde09ac36358609 |
0.4 | import numpy as np
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import copy
import time
import logging
from torch.autograd import Variable
import pdb
from src.components.utils import *
from src.components.encoder import *
from src.components.decoder import *
from src.components.self_attention import *
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Directional Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask),src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def make_model(args, src_vocab, tgt_vocab, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
no_position = NoPositionalEncoding(d_model, dropout)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
Decoder(DecoderLayer(args, d_model, c(attn), c(attn), c(ff), dropout), N),
nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
Generator(d_model, tgt_vocab))
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def run_epoch(data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = 0
tokens = 0
for i, batch in enumerate(data_iter):
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
ntoks = batch.ntokens.float()
loss = loss_compute(out, batch.trg_y, ntoks)
total_loss += loss
total_tokens += ntoks
tokens += ntoks
if i % 200 == 1:
elapsed = time.time() - start + 1e-8
print("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss / ntoks, tokens/elapsed))
start = time.time()
tokens = 0
return total_loss / total_tokens
| [
"torch.nn.Linear",
"torch.nn.init.xavier_uniform_"
] | 0.4.0 | arkilpatel/Transformer-Computation-Analysis | 82341f5f2f9cd0831e390f44b338165e45cd6413 |
1.1 | import torch
from tqdm import tqdm
from ...utils.learning import adjust_learning_rate
from ...utils.log import logger
from ...base.module import Module
from .config import DEVICE, DEFAULT_CONFIG
from .model import Config, BiLstmCrf
from .tool import cws_tool
from .utils.convert import bis_cws
seed = 2019
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
class CWS(Module):
"""
"""
def __init__(self):
self._model = None
self._word_vocab = None
self._tag_vocab = None
def train(self, train_path, save_path=DEFAULT_CONFIG['save_path'], dev_path=None, vectors_path=None, **kwargs):
train_dataset = cws_tool.get_dataset(train_path)
if dev_path:
dev_dataset = cws_tool.get_dataset(dev_path)
word_vocab, tag_vocab = cws_tool.get_vocab(train_dataset, dev_dataset)
else:
word_vocab, tag_vocab = cws_tool.get_vocab(train_dataset)
self._word_vocab = word_vocab
self._tag_vocab = tag_vocab
train_iter = cws_tool.get_iterator(train_dataset, batch_size=DEFAULT_CONFIG['batch_size'])
config = Config(word_vocab, tag_vocab, save_path=save_path, vector_path=vectors_path, **kwargs)
bilstmcrf = BiLstmCrf(config)
self._model = bilstmcrf
optim = torch.optim.Adam(bilstmcrf.parameters(), lr=config.lr)
for epoch in range(config.epoch):
bilstmcrf.train()
acc_loss = 0
for item in tqdm(train_iter):
print(item.text[0], "========")
bilstmcrf.zero_grad()
item_text_sentences = item.text[0]
item_text_lengths = item.text[1]
item_loss = (-bilstmcrf.loss(item_text_sentences, item_text_lengths, item.tag)) / item.tag.size(1)
acc_loss += item_loss.view(-1).cpu().data.tolist()[0]
item_loss.backward()
optim.step()
logger.info('epoch: {}, acc_loss: {}'.format(epoch, acc_loss))
logger.info("我在这里有些改进:进度条哈哈")
if dev_path:
dev_score = self._validate(dev_dataset)
logger.info('dev score:{}'.format(dev_score))
adjust_learning_rate(optim, config.lr / (1 + (epoch + 1) * config.lr_decay))
config.save()
bilstmcrf.save()
def predict(self, text):
self._model.eval()
vec_text = torch.tensor([self._word_vocab.stoi[x] for x in text])
len_text = torch.tensor([len(vec_text)]).to(DEVICE)
vec_predict = self._model(vec_text.view(-1, 1).to(DEVICE), len_text)[0]
tag_predict = [self._tag_vocab.itos[i] for i in vec_predict]
return bis_cws([x for x in text], tag_predict)
def load(self, save_path=DEFAULT_CONFIG['save_path']):
config = Config.load(save_path)
bilstmcrf = BiLstmCrf(config)
bilstmcrf.load()
self._model = bilstmcrf
self._word_vocab = config.word_vocab
self._tag_vocab = config.tag_vocab
def test(self, test_path):
test_dataset = cws_tool.get_dataset(test_path)
test_score = self._validate(test_dataset)
logger.info('test score:{}'.format(test_score))
def _validate(self, dev_dataset):
self._model.eval()
dev_score_list = []
for dev_item in tqdm(dev_dataset):
item_score = cws_tool.get_score(self._model, dev_item.text, dev_item.tag, self._word_vocab, self._tag_vocab)
dev_score_list.append(item_score)
return sum(dev_score_list) / len(dev_score_list)
| [
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.tensor"
] | 1.1.0 | CNLPT/lightNLP | c7f128422ba5b16f514bb294145cb3b562e95829 |
1.0 | #
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import logging
import re
import warnings
from collections import namedtuple, OrderedDict, defaultdict
from typing import Callable, Optional
import torch
import torch.nn as nn
import distiller
msglogger = logging.getLogger()
QBits = namedtuple('QBits', ['acts', 'wts', 'bias']) # quantization bitwidth
FP_BKP_PREFIX = 'float_' # float-point backup prefix
def has_bias(module):
return hasattr(module, 'bias') and module.bias is not None
def hack_float_backup_parameter(module, name, num_bits):
"""
https://nervanasystems.github.io/distiller/design.html#quantization-aware-training
The existing ``torch.nn.Parameter``, e.g. ``weights``, is replaced by a ``torch.nn.Parameter`` named ``float_weight``.
To maintain the existing functionality of the module, we then register a ``buffer`` in the module with the original name - ``weights``.
During training, ``float_weight`` will be passed to ``param_quantization_fn`` and the result will be stored in ``weight``.
"""
try:
data = dict(module.named_parameters())[name].data
except KeyError:
raise ValueError('Module has no Parameter named ' + name)
module.register_parameter(FP_BKP_PREFIX + name, nn.Parameter(data)) # create a new parameter with prefix
delattr(module, name) # remove original parameter
module.register_buffer(name, torch.zeros_like(data)) # store ``param_quantization_fn(fp32_weight)``
""" Add extra representation """
first = False
if not hasattr(module, 'repr_mod'):
setattr(module, 'repr_mod', ', \nDistiller_QuantAwareTrain: ')
first = True
module.original_extra_repr = module.extra_repr
module.extra_repr = lambda: module.original_extra_repr() + module.repr_mod
if not first:
module.repr_mod += ' ; '
module.repr_mod += '{0} --> {1} bits'.format(name, num_bits)
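# Illustrative sketch (not part of Distiller): a tiny, self-contained check of what
# hack_float_backup_parameter() does to a layer. The helper name `_demo_float_backup`
# and the Linear layer below are assumptions made purely for demonstration.
def _demo_float_backup():
    fc = nn.Linear(4, 2)
    hack_float_backup_parameter(fc, 'weight', num_bits=8)
    # 'float_weight' is now the trainable nn.Parameter that back-prop updates ...
    assert isinstance(fc.float_weight, nn.Parameter)
    # ... while 'weight' has become a plain buffer that will hold the quantized copy
    assert 'weight' in dict(fc.named_buffers())
    return fc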
class _ParamToQuant(object):
""" Represent a module parameter to be quantized """
def __init__(self, module, module_name, fp_attr_name, q_attr_name, num_bits):
self.module = module
self.module_name = module_name
self.fp_attr_name = fp_attr_name
self.q_attr_name = q_attr_name
self.num_bits = num_bits
def __repr__(self):
return "ParamToQuant(module_name=%s,num_bits=%s)" % (self.module_name, self.num_bits)
class Quantizer(object):
"""
Base class for quantizers.
Args:
model (``torch.nn.Module``): The model to be quantized
optimizer (``torch.optim.Optimizer``): An optimizer instance, required in cases where the quantizer is going
to perform changes to existing model parameters and/or add new ones.
Specifically, when ``train_with_fp_copy`` is ``True``, this cannot be ``None``.
bits_activations/weights/bias (``int``): Default number of bits to use when quantizing each tensor type.
Value of ``None`` means do not quantize.
overrides (OrderedDict): Dictionary mapping *regular expressions* of layer name patterns to dictionary with
overrides of default values.
The keys in the overrides dictionary should be parameter names that the Quantizer accepts default values
for in its init function.
The parameters 'bits_activations', 'bits_weights', and 'bits_bias' which are accepted by the base Quantizer
are supported by default.
Other than those, each sub-class of Quantizer defines the set of parameter for which it supports
over-riding.
OrderedDict is used to enable handling of overlapping name patterns. So, for example, one could define
certain override parameters for a group of layers, e.g. 'conv*', but also define different parameters for
specific layers in that group, e.g. 'conv1'.
The patterns are evaluated eagerly - the first match wins. Therefore, the more specific patterns must
come before the broad patterns.
train_with_fp_copy (``bool``): If ``true``, will modify layers with weights to keep both a quantized and
floating-point copy, such that the following flow occurs in each training iteration:
1. ``q_weights = quantize(fp_weights)``
2. Forward through network using ``q_weights``
3. In back-prop:
3.1 Gradients calculated with respect to ``q_weights``
3.2 We also back-prop through the 'quantize' operation (STE) from step 1
4. Update ``fp_weights`` with gradients calculated in step 3.2
"""
def __init__(self, model, optimizer=None,
bits_activations=None, bits_weights=None, bits_bias=None,
overrides=None, train_with_fp_copy=False):
if overrides is None:
overrides = OrderedDict()
if not isinstance(overrides, OrderedDict):
raise TypeError('overrides must be an instance of collections.OrderedDict or None')
if train_with_fp_copy and optimizer is None:
raise ValueError('optimizer cannot be None when train_with_fp_copy is True')
self.default_qbits = QBits(acts=bits_activations, wts=bits_weights, bias=bits_bias)
self.overrides = overrides
self.model = model
self.optimizer = optimizer
# Stash some quantizer data in the model so we can re-apply the quantizer on a resuming model
self.model.quantizer_metadata = {'type': type(self), # Quantizer type
'params': {'bits_activations': bits_activations,
'bits_weights': bits_weights,
'bits_bias': bits_bias,
'overrides': copy.deepcopy(overrides)}}
# Re-assemble overriding bitwidths as a QBits object
for k, v in self.overrides.items(): # regexp of module name -> override dict
if any(old_bits_key in v.keys() for old_bits_key in ['acts', 'wts', 'bias']):
raise ValueError("Using 'acts' / 'wts' / 'bias' to specify bit-width overrides is deprecated.\n"
"Please use the full parameter names: "
"'bits_activations' / 'bits_weights' / 'bits_bias'")
qbits = QBits(acts=v.pop('bits_activations', self.default_qbits.acts),
wts=v.pop('bits_weights', self.default_qbits.wts),
bias=v.pop('bits_bias', self.default_qbits.bias))
v['bits'] = qbits
# Prepare explicit mapping from each layer to QBits based on default + overrides
patterns = []
regex_overrides = None
if overrides:
patterns = list(overrides.keys())
regex_overrides_str = '|'.join(['(^{0}$)'.format(pattern) for pattern in patterns]) # the first match wins
regex_overrides = re.compile(regex_overrides_str)
self.module_qbits_map = {}
self.module_overrides_map = {}
for module_full_name, module in model.named_modules():
# Need to account for scenario where model is parallelized with DataParallel, which wraps the original
# module with a wrapper module called 'module' :)
name_to_match = module_full_name.replace('module.', '', 1)
qbits = self.default_qbits
# perfect match (overrides: OrderedDict)
override_entry = self.overrides.get(name_to_match, OrderedDict())
# match by regexp
if regex_overrides: # if override dict specified
m_overrides = regex_overrides.match(name_to_match)
if m_overrides: # matching success
group_idx = 0 # idx of matching pattern
groups = m_overrides.groups()
while groups[group_idx] is None:
group_idx += 1
override_entry = copy.deepcopy(override_entry or self.overrides[patterns[group_idx]])
qbits = override_entry.pop('bits', self.default_qbits) # override_entry -> others + qbits
self._add_qbits_entry(module_full_name, type(module), qbits) # update self.module_qbits_map
self._add_override_entry(module_full_name, override_entry) # update module_overrides_map
# Mapping from module type to function generating a replacement module suited for quantization
# To be populated by child classes
# Unspecified layer types return None by default.
self.replacement_factory = defaultdict(lambda: None) # module_type -> wrapper_factory
# Pointer to parameters quantization function, triggered during training process
# To be populated by child classes
self.param_quantization_fn = None
self.train_with_fp_copy = train_with_fp_copy
self.params_to_quantize = []
# A dictionary of replaced modules and their respective names.
self.modules_processed = OrderedDict()
def _add_qbits_entry(self, module_name, module_type, qbits):
if module_type not in [nn.Conv2d, nn.Linear, nn.Embedding]:
# For now we support weights quantization only for Conv, FC and Embedding layers (so, for example, we don't
# support quantization of batch norm scale parameters)
qbits = QBits(acts=qbits.acts, wts=None, bias=None)
self.module_qbits_map[module_name] = qbits
def _add_override_entry(self, module_name, entry):
self.module_overrides_map[module_name] = entry
def prepare_model(self):
"""
Traverses the model and replaces sub-modules with quantized counterparts according to the bit-width
and overrides configuration provided to __init__(), and according to the replacement_factory as
defined by the Quantizer sub-class being used.
Note:
If multiple sub-modules within the model actually reference the same module, then that module
is replaced only once, according to the configuration (bit-width and/or overrides) of the
first encountered reference.
            Toy Example - say a module is constructed using this bit of code:
              shared_relu = nn.ReLU()
              self.relu1 = shared_relu
              self.relu2 = shared_relu
            When traversing the model, a replacement will be generated when 'self.relu1' is encountered.
            Let's call it 'new_relu1'. When 'self.relu2' is encountered, it will simply be replaced
            with a reference to 'new_relu1'. Any override configuration made specifically for 'self.relu2'
            will be ignored. A warning message will be shown.
"""
self._prepare_model_impl()
msglogger.info('Quantized model:\n\n{0}\n'.format(self.model))
def _prepare_model_impl(self):
"""
Iterates over the model and replaces modules with their quantized counterparts as defined by
self.replacement_factory
"""
msglogger.info('Preparing model for quantization using {0}'.format(self.__class__.__name__))
self._pre_process_container(self.model) # replace modules
# hack float backup parameters, populate ``self.params_to_quantize``
for module_name, module in self.model.named_modules():
qbits = self.module_qbits_map[module_name]
curr_parameters = dict(module.named_parameters())
for param_name, param in curr_parameters.items():
n_bits = qbits.bias if param_name.endswith('bias') else qbits.wts
if n_bits is None: continue
fp_attr_name = param_name
if self.train_with_fp_copy:
hack_float_backup_parameter(module, param_name, n_bits)
fp_attr_name = FP_BKP_PREFIX + param_name
self.params_to_quantize.append(_ParamToQuant(module, module_name, fp_attr_name, param_name, n_bits))
param_full_name = '.'.join([module_name, param_name])
msglogger.info("Parameter '{0}' will be quantized to {1} bits".format(param_full_name, n_bits))
# If an optimizer was passed, assume we need to update it
if self.optimizer:
optimizer_type = type(self.optimizer)
new_optimizer = optimizer_type(self._get_updated_optimizer_params_groups(), **self.optimizer.defaults)
self.optimizer.__setstate__({'param_groups': new_optimizer.param_groups})
def _pre_process_container(self, container, prefix=''):
# Iterate through model, insert quantization functions as appropriate
for name, module in container.named_children():
full_name = prefix + name
if module in self.modules_processed: # referencing to a module already replaced
previous_name, previous_wrapper = self.modules_processed[module]
warnings.warn("Module '{0}' references to same module as '{1}'."
' Replacing with reference the same wrapper.'.format(full_name, previous_name),
UserWarning)
if previous_wrapper:
msglogger.debug('Module {0}: Replacing \n{1} with \n{2}'. format(full_name, module, previous_wrapper))
setattr(container, name, previous_wrapper) # point to previous wrapper
else:
msglogger.debug('Module {0}: Skipping \n{1}.'.format(full_name, module))
continue
current_qbits = self.module_qbits_map[full_name]
if current_qbits.acts is None and current_qbits.wts is None:
if self.module_overrides_map[full_name]:
raise ValueError("Adding overrides while not quantizing is not allowed.")
# We indicate this module wasn't replaced by a wrapper
msglogger.debug('Module {0}: Skipping \n{1}.'.format(full_name, module))
self.modules_processed[module] = full_name, None
continue
# We use a type hint comment to let IDEs know replace_fn is a function
# replacement factory: module type -> func
replace_fn = self.replacement_factory[type(module)] # type: Optional[Callable]
# If the replacement function wasn't specified - continue without replacing this module.
if replace_fn is not None:
valid_kwargs, invalid_kwargs = distiller.filter_kwargs(self.module_overrides_map[full_name], replace_fn)
if invalid_kwargs:
raise TypeError("""Quantizer of type %s doesn't accept \"%s\"
as override arguments for %s. Allowed kwargs: %s"""
% (type(self), list(invalid_kwargs), type(module), list(valid_kwargs)))
new_module = replace_fn(module, full_name, self.module_qbits_map, **valid_kwargs)
msglogger.debug('Module {0}: Replacing \n{1} with \n{2}'.format(full_name, module, new_module))
# Add to history of prepared submodules
self.modules_processed[module] = full_name, new_module
setattr(container, name, new_module)
# If a "leaf" module was replaced by a container, add the new layers to the QBits mapping
if not distiller.has_children(module) and distiller.has_children(new_module):
for sub_module_name, sub_module in new_module.named_modules():
self._add_qbits_entry(full_name + '.' + sub_module_name, type(sub_module), current_qbits)
self.module_qbits_map[full_name] = QBits(acts=current_qbits.acts, wts=None, bias=None)
if distiller.has_children(module):
# For container we call recursively
self._pre_process_container(module, full_name + '.')
def _get_updated_optimizer_params_groups(self):
"""
Returns a list of model parameter groups and optimizer hyper-parameter overrides,
as expected by the __init__ function of torch.optim.Optimizer.
This is called after all model changes were made in prepare_model, in case an Optimizer instance was
passed to __init__.
Subclasses which add parameters to the model should override as needed.
:return: List of parameter groups
"""
# Default implementation - just return all model parameters as one group
return [{'params': self.model.parameters()}]
def quantize_params(self):
"""
Quantize all parameters using self.param_quantization_fn (with the defined number of bits for each parameter)
"""
for ptq in self.params_to_quantize:
q_param = self.param_quantization_fn(getattr(ptq.module, ptq.fp_attr_name), ptq)
if self.train_with_fp_copy:
setattr(ptq.module, ptq.q_attr_name, q_param)
else:
getattr(ptq.module, ptq.q_attr_name).data = q_param.data
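# Minimal sketch (an illustrative assumption, not a quantizer shipped with Distiller):
# a child class showing where sub-classes are expected to populate `replacement_factory`
# and `param_quantization_fn` before prepare_model() is called.
class _PassThroughQuantizer(Quantizer):
    def __init__(self, model, optimizer=None, bits_activations=8, bits_weights=8):
        super(_PassThroughQuantizer, self).__init__(model, optimizer=optimizer,
                                                    bits_activations=bits_activations,
                                                    bits_weights=bits_weights,
                                                    train_with_fp_copy=False)
        # A real sub-class would register wrapper factories here, e.g.
        # self.replacement_factory[nn.ReLU] = some_wrapper_factory
        # This stand-in leaves parameters untouched when quantize_params() runs.
        self.param_quantization_fn = lambda param, param_meta: param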
| [
"torch.zeros_like",
"torch.nn.Parameter"
] | 1.0.1 | HatsuneMiku4/distiller | 8fbacb01ebcb7d70c5d3ecb6a88093e6c4d42137 |
1.4 | """Functions for runtime type checking. More strict but slower than availabe
static type checking. Off by default.
"""
import os
from typing import Any, Optional, Tuple
import torch
def assert_joint_probability(
x: torch.Tensor, shape: Tuple[int, ...], allow_improper: bool = False
) -> None:
"""Assert `x` is joint probability distribution over two variables
Args:
x: Possible joint probability distribution
shape: Required shape
allow_improper: Whether improper distribution (all zeros) is permitted
"""
if os.getenv("strict_type_check") == "1":
norm = x.sum(dim=(-1, -2))
if allow_improper:
norm[norm == 0] = 1
assert torch.isclose(norm, torch.Tensor([1.0]).to(norm.device)).all()
assert x.shape[-1] == x.shape[-2]
assert x.shape == shape
def assert_prescription(
x: torch.Tensor,
shape: Tuple[int, ...],
pure: bool = True,
allow_improper: bool = False,
) -> None:
"""Assert `x` is valid prescription
Args:
x: Possible prescription
shape: Required shape
pure: Whether prescription is required to be deterministic
allow_improper: Whether improper distribution (all zeros) is permitted
"""
if os.getenv("strict_type_check") == "1":
norm = x.sum(dim=-1)
if allow_improper:
norm[norm == 0] = 1
assert torch.isclose(norm, torch.Tensor([1.0]).to(x.device)).all()
assert (x >= 0).all()
assert x.shape == shape
if pure:
max_vals = x.max(dim=-1).values
if allow_improper:
max_vals[max_vals == 0] = 1
assert (max_vals == 1).all()
def assert_label_prescription(
x: torch.Tensor, num_actions: int, shape: Tuple[int, ...]
) -> None:
"""Assert `x` is valid label prescription
Args:
x: Possible prescription
num_actions: Number of action labels
shape: Required shape
"""
if os.getenv("strict_type_check") == "1":
assert x.dtype == torch.int64
assert (x >= 0).all()
assert (x < num_actions).all()
assert x.shape == shape
def assert_shape(
x: torch.Tensor, shape: Tuple[int, ...], dim: Optional[int] = None
) -> None:
"""Assert `x` has shape `shape`
Args:
x: Tensor
shape: Required shape
dim: If specified, enforce shape requirement only for axis `dim`
"""
if os.getenv("strict_type_check") == "1":
        if dim is not None:
assert (x.shape[dim],) == shape
else:
assert x.shape == shape
def assert_num_dims(x: torch.Tensor, num_dims: int) -> None:
"""Assert `x` has `num_dims` dimensions
Args:
x: Tensor
num_dims: Required number of dimensions
"""
if os.getenv("strict_type_check") == "1":
assert len(x.shape) == num_dims
def assert_element(x: Any, collection: Tuple[Any, ...]) -> None:
"""Assert `x` in `collection`
Args:
x: Anything
collection: Tuple
"""
if os.getenv("strict_type_check") == "1":
assert x in collection
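# Usage sketch (illustrative assumption): the checks above only run when the
# `strict_type_check` environment variable is "1", so a caller opts in as below.
# The helper name and example tensors are demonstration-only.
def _demo_strict_checks():
    os.environ["strict_type_check"] = "1"
    joint = torch.full((4, 3, 3), 1.0 / 9.0)  # uniform joint distribution over two 3-valued variables
    assert_joint_probability(joint, shape=(4, 3, 3))
    assert_shape(joint, (4, 3, 3))
    assert_element("a", ("a", "b", "c"))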
| [
"torch.Tensor"
] | 1.4.0 | tcfuji/capi | 4c0f648216ae22d29c537318fb9a646d430cf310 |
0.6 | import math
import numpy as np
from typing import Optional
import torch
import torch.nn.functional as F
__all__ = [
"focal_loss_with_logits",
"softmax_focal_loss_with_logits",
"soft_jaccard_score",
"soft_dice_score",
"wing_loss",
]
def to_tensor(x, dtype=None) -> torch.Tensor:
if isinstance(x, torch.Tensor):
if dtype is not None:
x = x.type(dtype)
return x
if isinstance(x, np.ndarray):
x = torch.from_numpy(x)
if dtype is not None:
x = x.type(dtype)
return x
if isinstance(x, (list, tuple)):
x = np.array(x)
x = torch.from_numpy(x)
if dtype is not None:
x = x.type(dtype)
return x
def focal_loss_with_logits(
output: torch.Tensor,
target: torch.Tensor,
gamma: float = 2.0,
alpha: Optional[float] = 0.25,
reduction: str = "mean",
normalized: bool = False,
reduced_threshold: Optional[float] = None,
eps: float = 1e-6,
) -> torch.Tensor:
"""Compute binary focal loss between target and output logits.
See :class:`~pytorch_toolbelt.losses.FocalLoss` for details.
Args:
output: Tensor of arbitrary shape (predictions of the model)
target: Tensor of the same shape as input
gamma: Focal loss power factor
        alpha: Weight factor to balance positive and negative samples. Alpha must be in the [0...1] range;
            high values give more weight to the positive class.
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum' | 'batchwise_mean'. 'none': no reduction will be applied,
'mean': the sum of the output will be divided by the number of
elements in the output, 'sum': the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`.
'batchwise_mean' computes mean loss per sample in batch. Default: 'mean'
normalized (bool): Compute normalized focal loss (https://arxiv.org/pdf/1909.07829.pdf).
reduced_threshold (float, optional): Compute reduced focal loss (https://arxiv.org/abs/1903.01347).
References:
https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/loss/losses.py
"""
target = target.type(output.type())
logpt = F.binary_cross_entropy_with_logits(output, target, reduction="none")
pt = torch.exp(-logpt)
# compute the loss
if reduced_threshold is None:
focal_term = (1.0 - pt).pow(gamma)
else:
focal_term = ((1.0 - pt) / reduced_threshold).pow(gamma)
focal_term[pt < reduced_threshold] = 1
loss = focal_term * logpt
if alpha is not None:
loss *= alpha * target + (1 - alpha) * (1 - target)
if normalized:
norm_factor = focal_term.sum().clamp_min(eps)
loss /= norm_factor
if reduction == "mean":
loss = loss.mean()
if reduction == "sum":
loss = loss.sum()
if reduction == "batchwise_mean":
loss = loss.sum(0)
return loss
def softmax_focal_loss_with_logits(
output: torch.Tensor,
target: torch.Tensor,
gamma: float = 2.0,
reduction="mean",
normalized=False,
reduced_threshold: Optional[float] = None,
eps: float = 1e-6,
) -> torch.Tensor:
"""Softmax version of focal loss between target and output logits.
See :class:`~pytorch_toolbelt.losses.FocalLoss` for details.
Args:
output: Tensor of shape [B, C, *] (Similar to nn.CrossEntropyLoss)
target: Tensor of shape [B, *] (Similar to nn.CrossEntropyLoss)
reduction (string, optional): Specifies the reduction to apply to the output:
'none' | 'mean' | 'sum' | 'batchwise_mean'. 'none': no reduction will be applied,
'mean': the sum of the output will be divided by the number of
elements in the output, 'sum': the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`.
'batchwise_mean' computes mean loss per sample in batch. Default: 'mean'
normalized (bool): Compute normalized focal loss (https://arxiv.org/pdf/1909.07829.pdf).
reduced_threshold (float, optional): Compute reduced focal loss (https://arxiv.org/abs/1903.01347).
"""
log_softmax = F.log_softmax(output, dim=1)
loss = F.nll_loss(log_softmax, target, reduction="none")
pt = torch.exp(-loss)
# compute the loss
if reduced_threshold is None:
focal_term = (1.0 - pt).pow(gamma)
else:
focal_term = ((1.0 - pt) / reduced_threshold).pow(gamma)
focal_term[pt < reduced_threshold] = 1
loss = focal_term * loss
if normalized:
norm_factor = focal_term.sum().clamp_min(eps)
loss = loss / norm_factor
if reduction == "mean":
loss = loss.mean()
if reduction == "sum":
loss = loss.sum()
if reduction == "batchwise_mean":
loss = loss.sum(0)
return loss
def soft_jaccard_score(
output: torch.Tensor,
target: torch.Tensor,
smooth: float = 0.0,
eps: float = 1e-7,
dims=None,
) -> torch.Tensor:
assert output.size() == target.size()
if dims is not None:
intersection = torch.sum(output * target, dim=dims)
cardinality = torch.sum(output + target, dim=dims)
else:
intersection = torch.sum(output * target)
cardinality = torch.sum(output + target)
union = cardinality - intersection
jaccard_score = (intersection + smooth) / (union + smooth).clamp_min(eps)
return jaccard_score
def soft_dice_score(
output: torch.Tensor,
target: torch.Tensor,
smooth: float = 0.0,
eps: float = 1e-7,
dims=None,
) -> torch.Tensor:
assert output.size() == target.size()
if dims is not None:
intersection = torch.sum(output * target, dim=dims)
cardinality = torch.sum(output + target, dim=dims)
else:
intersection = torch.sum(output * target)
cardinality = torch.sum(output + target)
dice_score = (2.0 * intersection + smooth) / (cardinality + smooth).clamp_min(eps)
return dice_score
def soft_tversky_score(
output: torch.Tensor,
target: torch.Tensor,
alpha: float,
beta: float,
smooth: float = 0.0,
eps: float = 1e-7,
dims=None,
) -> torch.Tensor:
assert output.size() == target.size()
if dims is not None:
intersection = torch.sum(output * target, dim=dims) # TP
fp = torch.sum(output * (1.0 - target), dim=dims)
fn = torch.sum((1 - output) * target, dim=dims)
else:
intersection = torch.sum(output * target) # TP
fp = torch.sum(output * (1.0 - target))
fn = torch.sum((1 - output) * target)
tversky_score = (intersection + smooth) / (intersection + alpha * fp + beta * fn + smooth).clamp_min(eps)
return tversky_score
def wing_loss(output: torch.Tensor, target: torch.Tensor, width=5, curvature=0.5, reduction="mean"):
"""Wing loss
References:
https://arxiv.org/pdf/1711.06753.pdf
"""
diff_abs = (target - output).abs()
loss = diff_abs.clone()
idx_smaller = diff_abs < width
idx_bigger = diff_abs >= width
loss[idx_smaller] = width * torch.log(1 + diff_abs[idx_smaller] / curvature)
C = width - width * math.log(1 + width / curvature)
loss[idx_bigger] = loss[idx_bigger] - C
if reduction == "sum":
loss = loss.sum()
if reduction == "mean":
loss = loss.mean()
return loss
def label_smoothed_nll_loss(
lprobs: torch.Tensor,
target: torch.Tensor,
epsilon: float,
ignore_index=None,
reduction="mean",
dim=-1,
) -> torch.Tensor:
"""NLL loss with label smoothing
References:
https://github.com/pytorch/fairseq/blob/master/fairseq/criterions/label_smoothed_cross_entropy.py
Args:
lprobs (torch.Tensor): Log-probabilities of predictions (e.g after log_softmax)
"""
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(dim)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
target = target.masked_fill(pad_mask, 0)
nll_loss = -lprobs.gather(dim=dim, index=target)
smooth_loss = -lprobs.sum(dim=dim, keepdim=True)
# nll_loss.masked_fill_(pad_mask, 0.0)
# smooth_loss.masked_fill_(pad_mask, 0.0)
nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
else:
nll_loss = -lprobs.gather(dim=dim, index=target)
smooth_loss = -lprobs.sum(dim=dim, keepdim=True)
nll_loss = nll_loss.squeeze(dim)
smooth_loss = smooth_loss.squeeze(dim)
if reduction == "sum":
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
if reduction == "mean":
nll_loss = nll_loss.mean()
smooth_loss = smooth_loss.mean()
eps_i = epsilon / lprobs.size(dim)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss
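# Usage sketch (illustrative assumption, not part of the library's public examples):
# binary focal loss and a soft Dice score for a batch of mask logits.
def _demo_losses():
    logits = torch.randn(8, 1, 32, 32)                  # raw model outputs
    targets = (torch.rand(8, 1, 32, 32) > 0.5).float()  # binary ground-truth masks
    focal = focal_loss_with_logits(logits, targets, gamma=2.0, alpha=0.25)
    dice = soft_dice_score(logits.sigmoid(), targets, smooth=0.0, eps=1e-7)
    return focal, dice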
| [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.from_numpy",
"torch.nn.functional.log_softmax",
"torch.nn.functional.nll_loss",
"torch.log",
"torch.exp",
"torch.sum"
] | 0.6.3 | vietnhatthai/segmentation_models.pytorch | 9052aa2a4f09a600f687120e69ad2b57c04cc0dd |
1.4 | # -*- coding: utf-8 -*-
from collections.abc import Sequence
import io
import math
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
import torchaudio
__all__ = [
    "spectrogram",
    "griffinlim",
    "amplitude_to_DB",
    "DB_to_amplitude",
    "compute_deltas",
    "compute_kaldi_pitch",
    "create_fb_matrix",
    "linear_fbanks",
    "create_dct",
    "detect_pitch_frequency",
    "mu_law_encoding",
    "mu_law_decoding",
    "complex_norm",
    "angle",
    "magphase",
    "phase_vocoder",
    "mask_along_axis",
    "mask_along_axis_iid",
    "sliding_window_cmn",
    "spectral_centroid",
    "apply_codec",
    "resample",
    "edit_distance",
    "pitch_shift",
]
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
return_complex: bool = True,
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
return_complex (bool, optional):
Indicates whether the resulting complex-valued Tensor should be represented with
native complex dtype, such as `torch.cfloat` and `torch.cdouble`, or real dtype
mimicking complex value with an extra dimension for real and imaginary parts.
(See also ``torch.view_as_real``.)
This argument is only effective when ``power=None``. It is ignored for
cases where ``power`` is a number as in those cases, the returned tensor is
power spectrogram, which is a real-valued tensor.
Returns:
Tensor: Dimension (..., freq, time), freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
if power is None and not return_complex:
warnings.warn(
"The use of pseudo complex type in spectrogram is now deprecated."
"Please migrate to native complex type by providing `return_complex=True`. "
"Please refer to https://github.com/pytorch/audio/issues/1337 "
"for more details about torchaudio's plan to migrate to native complex type."
)
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
if not return_complex:
return torch.view_as_real(spec_f)
return spec_f
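# Usage sketch (illustrative assumption): a power spectrogram of one second of audio
# with a Hann window; the random waveform and parameter values are demonstration-only.
def _demo_spectrogram():
    n_fft, hop_length = 400, 200
    waveform = torch.randn(1, 16000)
    window = torch.hann_window(n_fft)
    spec = spectrogram(waveform, pad=0, window=window, n_fft=n_fft,
                       hop_length=hop_length, win_length=n_fft,
                       power=2.0, normalized=False)
    return spec  # shape (1, n_fft // 2 + 1, num_frames)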
def _get_complex_dtype(real_dtype: torch.dtype):
if real_dtype == torch.double:
return torch.cdouble
if real_dtype == torch.float:
return torch.cfloat
if real_dtype == torch.half:
return torch.complex32
raise ValueError(f'Unexpected dtype {real_dtype}')
def griffinlim(
specgram: Tensor,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: float,
n_iter: int,
momentum: float,
length: Optional[int],
rand_init: bool
) -> Tensor:
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from
*librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`]
and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`].
Args:
specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins
        hop_length (int): Length of hop between STFT windows. (Default: ``win_length // 2``)
        win_length (int): Window size. (Default: ``n_fft``)
power (float): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
n_iter (int): Number of iteration for phase recovery process.
momentum (float): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge.
length (int or None): Array length of the expected output.
rand_init (bool): Initializes phase randomly if True, to zero otherwise.
Returns:
torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
"""
assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
specgram = specgram.pow(1 / power)
# initialize the phase
if rand_init:
angles = torch.rand(
specgram.size(),
dtype=_get_complex_dtype(specgram.dtype), device=specgram.device)
else:
angles = torch.full(
specgram.size(), 1,
dtype=_get_complex_dtype(specgram.dtype), device=specgram.device)
# And initialize the previous iterate to 0
tprev = torch.tensor(0., dtype=specgram.dtype, device=specgram.device)
for _ in range(n_iter):
# Invert with our current estimate of the phases
inverse = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# Rebuild the spectrogram
rebuilt = torch.stft(
input=inverse,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True,
)
# Update our phase estimates
angles = rebuilt
if momentum:
angles = angles - tprev.mul_(momentum / (1 + momentum))
angles = angles.div(angles.abs().add(1e-16))
# Store the previous iterate
tprev = rebuilt
# Return the final phase estimates
waveform = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# unpack batch
waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
return waveform
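# Round-trip sketch (illustrative assumption): rebuild a waveform from a magnitude
# spectrogram with a few Griffin-Lim iterations; all values are demonstration-only.
def _demo_griffinlim():
    n_fft, hop_length, num_samples = 400, 200, 16000
    window = torch.hann_window(n_fft)
    waveform = torch.randn(1, num_samples)
    magnitude = spectrogram(waveform, pad=0, window=window, n_fft=n_fft,
                            hop_length=hop_length, win_length=n_fft,
                            power=1.0, normalized=False)
    rebuilt = griffinlim(magnitude, window, n_fft, hop_length, n_fft,
                         power=1.0, n_iter=8, momentum=0.99,
                         length=num_samples, rand_init=True)
    return rebuilt  # shape (1, num_samples)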
def amplitude_to_DB(
x: Tensor,
multiplier: float,
amin: float,
db_multiplier: float,
top_db: Optional[float] = None
) -> Tensor:
r"""Turn a spectrogram from the power/amplitude scale to the decibel scale.
The output of each tensor in a batch depends on the maximum value of that tensor,
and so may return different values for an audio clip split into snippets vs. a full clip.
Args:
x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take
the form `(..., freq, time)`. Batched inputs should include a channel dimension and
have the form `(batch, channel, freq, time)`.
multiplier (float): Use 10. for power and 20. for amplitude
amin (float): Number to clamp ``x``
db_multiplier (float): Log10(max(reference value and amin))
top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
Returns:
Tensor: Output tensor in decibel scale
"""
x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
x_db -= multiplier * db_multiplier
if top_db is not None:
# Expand batch
shape = x_db.size()
packed_channels = shape[-3] if x_db.dim() > 2 else 1
x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])
x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))
# Repack batch
x_db = x_db.reshape(shape)
return x_db
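# Usage sketch (illustrative assumption): converting a power spectrogram to decibels.
# With a reference value of 1.0, db_multiplier = log10(max(1.0, amin)) = 0.0.
def _demo_amplitude_to_db():
    power_spec = torch.rand(1, 201, 100) + 1e-10
    spec_db = amplitude_to_DB(power_spec, multiplier=10.0, amin=1e-10,
                              db_multiplier=0.0, top_db=80.0)
    return spec_db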
def DB_to_amplitude(
x: Tensor,
ref: float,
power: float
) -> Tensor:
r"""Turn a tensor from the decibel scale to the power/amplitude scale.
Args:
x (Tensor): Input tensor before being converted to power/amplitude scale.
ref (float): Reference which the output will be scaled by.
power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.
Returns:
Tensor: Output tensor in power/amplitude scale.
"""
return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float:
r"""Convert Hz to Mels.
Args:
        freq (float): Frequency in Hz
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
mels (float): Frequency in Mels
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 2595.0 * math.log10(1.0 + (freq / 700.0))
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (freq - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
if freq >= min_log_hz:
mels = min_log_mel + math.log(freq / min_log_hz) / logstep
return mels
def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor:
"""Convert mel bin numbers to frequencies.
Args:
mels (Tensor): Mel frequencies
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
freqs (Tensor): Mels converted in Hz
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
def _create_triangular_filterbank(
all_freqs: Tensor,
f_pts: Tensor,
) -> Tensor:
"""Create a triangular filter bank.
Args:
all_freqs (Tensor): STFT freq points of size (`n_freqs`).
f_pts (Tensor): Filter mid points of size (`n_filter`).
Returns:
fb (Tensor): The filter bank of size (`n_freqs`, `n_filter`).
"""
    # Adapted from Librosa
# calculate the difference between each filter mid point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_filter + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_filter + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_filter)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_filter)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
return fb
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None,
mel_scale: str = "htk",
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
m_min = _hz_to_mel(f_min, mel_scale=mel_scale)
m_max = _hz_to_mel(f_max, mel_scale=mel_scale)
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)
# create filterbank
fb = _create_triangular_filterbank(all_freqs, f_pts)
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values. "
f"The value for `n_mels` ({n_mels}) may be set too high. "
f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
)
return fb
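# Usage sketch (illustrative assumption): a small mel filterbank for 16 kHz audio and
# n_fft = 400 (n_freqs = 400 // 2 + 1 = 201), applied to a power spectrogram.
def _demo_fb_matrix():
    fb = create_fb_matrix(n_freqs=201, f_min=0.0, f_max=8000.0,
                          n_mels=23, sample_rate=16000)
    power_spec = torch.rand(1, 201, 100)
    mel_spec = torch.matmul(power_spec.transpose(-1, -2), fb).transpose(-1, -2)
    return mel_spec  # shape (1, 23, 100)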
def linear_fbanks(
n_freqs: int,
f_min: float,
f_max: float,
n_filter: int,
sample_rate: int,
) -> Tensor:
r"""Creates a linear triangular filterbank.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_filter (int): Number of (linear) triangular filter
sample_rate (int): Sample rate of the audio waveform
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_filter``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * linear_fbanks(A.size(-1), ...)``.
"""
# freq bins
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# filter mid-points
f_pts = torch.linspace(f_min, f_max, n_filter + 2)
# create filterbank
fb = _create_triangular_filterbank(all_freqs, f_pts)
return fb
def create_dct(
n_mfcc: int,
n_mels: int,
norm: Optional[str]
) -> Tensor:
r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
normalized depending on norm.
Args:
n_mfcc (int): Number of mfc coefficients to retain
n_mels (int): Number of mel filterbanks
norm (str or None): Norm to use (either 'ortho' or None)
Returns:
Tensor: The transformation matrix, to be right-multiplied to
row-wise data of size (``n_mels``, ``n_mfcc``).
"""
# http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
n = torch.arange(float(n_mels))
k = torch.arange(float(n_mfcc)).unsqueeze(1)
dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)
if norm is None:
dct *= 2.0
else:
assert norm == "ortho"
dct[0] *= 1.0 / math.sqrt(2.0)
dct *= math.sqrt(2.0 / float(n_mels))
return dct.t()
def mu_law_encoding(
x: Tensor,
quantization_channels: int
) -> Tensor:
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1.
Args:
x (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law encoding
"""
mu = quantization_channels - 1.0
if not x.is_floating_point():
x = x.to(torch.float)
mu = torch.tensor(mu, dtype=x.dtype)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
return x_mu
def mu_law_decoding(
x_mu: Tensor,
quantization_channels: int
) -> Tensor:
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
x_mu (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law decoding
"""
mu = quantization_channels - 1.0
if not x_mu.is_floating_point():
x_mu = x_mu.to(torch.float)
mu = torch.tensor(mu, dtype=x_mu.dtype)
x = ((x_mu) / mu) * 2 - 1.0
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
return x
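# Round-trip sketch (illustrative assumption): mu-law companding with 256 channels.
def _demo_mu_law():
    waveform = torch.linspace(-1.0, 1.0, steps=101)
    encoded = mu_law_encoding(waveform, quantization_channels=256)  # int64 values in [0, 255]
    decoded = mu_law_decoding(encoded, quantization_channels=256)   # floats back in [-1.0, 1.0]
    return encoded, decoded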
@_mod_utils.deprecated(
"Please convert the input Tensor to complex type with `torch.view_as_complex` then "
"use `torch.abs`. "
"Please refer to https://github.com/pytorch/audio/issues/1337 "
"for more details about torchaudio's plan to migrate to native complex type.",
version="0.11",
)
def complex_norm(
complex_tensor: Tensor,
power: float = 1.0
) -> Tensor:
r"""Compute the norm of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`).
Returns:
Tensor: Power of the normed input tensor. Shape of `(..., )`
"""
# Replace by torch.norm once issue is fixed
# https://github.com/pytorch/pytorch/issues/34279
return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)
@_mod_utils.deprecated(
"Please convert the input Tensor to complex type with `torch.view_as_complex` then "
"use `torch.angle`. "
"Please refer to https://github.com/pytorch/audio/issues/1337 "
"for more details about torchaudio's plan to migrate to native complex type.",
version="0.11",
)
def angle(
complex_tensor: Tensor
) -> Tensor:
r"""Compute the angle of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
Return:
Tensor: Angle of a complex tensor. Shape of `(..., )`
"""
return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])
@_mod_utils.deprecated(
"Please convert the input Tensor to complex type with `torch.view_as_complex` then "
"use `torch.abs` and `torch.angle`. "
"Please refer to https://github.com/pytorch/audio/issues/1337 "
"for more details about torchaudio's plan to migrate to native complex type.",
version="0.11",
)
def magphase(
complex_tensor: Tensor,
power: float = 1.0
) -> Tuple[Tensor, Tensor]:
r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`)
Returns:
(Tensor, Tensor): The magnitude and phase of the complex tensor
"""
mag = complex_norm(complex_tensor, power)
phase = angle(complex_tensor)
return mag, phase
def phase_vocoder(
complex_specgrams: Tensor,
rate: float,
phase_advance: Tensor
) -> Tensor:
r"""Given a STFT tensor, speed up in time without modifying pitch by a
factor of ``rate``.
Args:
complex_specgrams (Tensor):
Either a real tensor of dimension of ``(..., freq, num_frame, complex=2)``
or a tensor of dimension ``(..., freq, num_frame)`` with complex dtype.
rate (float): Speed-up factor
phase_advance (Tensor): Expected phase advance in each bin. Dimension of (freq, 1)
Returns:
Tensor:
Stretched spectrogram. The resulting tensor is of the same dtype as the input
spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``.
Example - With Tensor of complex dtype
>>> freq, hop_length = 1025, 512
>>> # (channel, freq, time)
>>> complex_specgrams = torch.randn(2, freq, 300, dtype=torch.cfloat)
>>> rate = 1.3 # Speed up by 30%
>>> phase_advance = torch.linspace(
>>> 0, math.pi * hop_length, freq)[..., None]
>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
>>> x.shape # with 231 == ceil(300 / 1.3)
torch.Size([2, 1025, 231])
Example - With Tensor of real dtype and extra dimension for complex field
>>> freq, hop_length = 1025, 512
>>> # (channel, freq, time, complex=2)
>>> complex_specgrams = torch.randn(2, freq, 300, 2)
>>> rate = 1.3 # Speed up by 30%
>>> phase_advance = torch.linspace(
>>> 0, math.pi * hop_length, freq)[..., None]
>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
>>> x.shape # with 231 == ceil(300 / 1.3)
torch.Size([2, 1025, 231, 2])
"""
if rate == 1.0:
return complex_specgrams
if not complex_specgrams.is_complex():
warnings.warn(
"The support for pseudo complex type in `torchaudio.functional.phase_vocoder` and "
"`torchaudio.transforms.TimeStretch` is now deprecated and will be removed "
"from 0.11 release."
"Please migrate to native complex type by converting the input tensor with "
"`torch.view_as_complex`. "
"Please refer to https://github.com/pytorch/audio/issues/1337 "
"for more details about torchaudio's plan to migrate to native complex type."
)
if complex_specgrams.size(-1) != 2:
raise ValueError(
"complex_specgrams must be either native complex tensors or "
"real valued tensors with shape (..., 2)")
is_complex = complex_specgrams.is_complex()
if not is_complex:
complex_specgrams = torch.view_as_complex(complex_specgrams)
# pack batch
shape = complex_specgrams.size()
complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-2:]))
# Figures out the corresponding real dtype, i.e. complex128 -> float64, complex64 -> float32
# Note torch.real is a view so it does not incur any memory copy.
real_dtype = torch.real(complex_specgrams).dtype
time_steps = torch.arange(
0,
complex_specgrams.size(-1),
rate,
device=complex_specgrams.device,
dtype=real_dtype)
alphas = time_steps % 1.0
phase_0 = complex_specgrams[..., :1].angle()
# Time Padding
complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 2])
# (new_bins, freq, 2)
complex_specgrams_0 = complex_specgrams.index_select(-1, time_steps.long())
complex_specgrams_1 = complex_specgrams.index_select(-1, (time_steps + 1).long())
angle_0 = complex_specgrams_0.angle()
angle_1 = complex_specgrams_1.angle()
norm_0 = complex_specgrams_0.abs()
norm_1 = complex_specgrams_1.abs()
phase = angle_1 - angle_0 - phase_advance
phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))
# Compute Phase Accum
phase = phase + phase_advance
phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
phase_acc = torch.cumsum(phase, -1)
mag = alphas * norm_1 + (1 - alphas) * norm_0
complex_specgrams_stretch = torch.polar(mag, phase_acc)
# unpack batch
complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-2] + complex_specgrams_stretch.shape[1:])
if not is_complex:
return torch.view_as_real(complex_specgrams_stretch)
return complex_specgrams_stretch
def mask_along_axis_iid(
specgrams: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
Returns:
Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
"""
if axis not in [2, 3]:
raise ValueError('Only Frequency and Time masking are supported')
device = specgrams.device
dtype = specgrams.dtype
value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param
min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = min_value[..., None, None]
mask_end = (min_value + value)[..., None, None]
mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams = specgrams.masked_fill((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
def mask_along_axis(
specgram: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgram (Tensor): Real spectrogram (channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)
Returns:
Tensor: Masked spectrogram of dimensions (channel, freq, time)
"""
if axis not in [1, 2]:
raise ValueError('Only Frequency and Time masking are supported')
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
value = torch.rand(1) * mask_param
min_value = torch.rand(1) * (specgram.size(axis) - value)
mask_start = (min_value.long()).squeeze()
mask_end = (min_value.long() + value.long()).squeeze()
mask = torch.arange(0, specgram.shape[axis], device=specgram.device, dtype=specgram.dtype)
mask = (mask >= mask_start) & (mask < mask_end)
if axis == 1:
mask = mask.unsqueeze(-1)
assert mask_end - mask_start < mask_param
specgram = specgram.masked_fill(mask, mask_value)
# unpack batch
specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])
return specgram
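# SpecAugment-style sketch (illustrative assumption): masking a batch of spectrograms
# along time and then frequency with the per-example (iid) variant defined above.
def _demo_masking():
    specgrams = torch.rand(3, 2, 201, 400)  # (batch, channel, freq, time)
    masked = mask_along_axis_iid(specgrams, mask_param=80, mask_value=0.0, axis=3)
    masked = mask_along_axis_iid(masked, mask_param=27, mask_value=0.0, axis=2)
    return masked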
def compute_deltas(
specgram: Tensor,
win_length: int = 5,
mode: str = "replicate"
) -> Tensor:
r"""Compute delta coefficients of a tensor, usually a spectrogram:
.. math::
d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}
where :math:`d_t` is the deltas at time :math:`t`,
:math:`c_t` is the spectrogram coeffcients at time :math:`t`,
:math:`N` is ``(win_length-1)//2``.
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time)
win_length (int, optional): The window length used for computing delta (Default: ``5``)
mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time)
Example
>>> specgram = torch.randn(1, 40, 1000)
>>> delta = compute_deltas(specgram)
>>> delta2 = compute_deltas(delta)
"""
device = specgram.device
dtype = specgram.dtype
# pack batch
shape = specgram.size()
specgram = specgram.reshape(1, -1, shape[-1])
assert win_length >= 3
n = (win_length - 1) // 2
# twice sum of integer squared
denom = n * (n + 1) * (2 * n + 1) / 3
specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)
kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)
output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom
# unpack batch
output = output.reshape(shape)
return output
def _compute_nccf(
waveform: Tensor,
sample_rate: int,
frame_time: float,
freq_low: int
) -> Tensor:
r"""
Compute Normalized Cross-Correlation Function (NCCF).
.. math::
\phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},
where
:math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
:math:`w` is the waveform,
:math:`N` is the length of a frame,
:math:`b_i` is the beginning of frame :math:`i`,
:math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
"""
EPSILON = 10 ** (-9)
# Number of lags to check
lags = int(math.ceil(sample_rate / freq_low))
frame_size = int(math.ceil(sample_rate * frame_time))
waveform_length = waveform.size()[-1]
num_of_frames = int(math.ceil(waveform_length / frame_size))
p = lags + num_of_frames * frame_size - waveform_length
waveform = torch.nn.functional.pad(waveform, (0, p))
# Compute lags
output_lag = []
for lag in range(1, lags + 1):
s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
output_frames = (
(s1 * s2).sum(-1)
/ (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)
/ (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)
)
output_lag.append(output_frames.unsqueeze(-1))
nccf = torch.cat(output_lag, -1)
return nccf
def _combine_max(
a: Tuple[Tensor, Tensor],
b: Tuple[Tensor, Tensor],
thresh: float = 0.99
) -> Tuple[Tensor, Tensor]:
"""
Take value from first if bigger than a multiplicative factor of the second, elementwise.
"""
mask = (a[0] > thresh * b[0])
values = mask * a[0] + ~mask * b[0]
indices = mask * a[1] + ~mask * b[1]
return values, indices
def _find_max_per_frame(
nccf: Tensor,
sample_rate: int,
freq_high: int
) -> Tensor:
r"""
For each frame, take the highest value of NCCF,
apply centered median smoothing, and convert to frequency.
    Note: If the maximum over the first half of the lags comes very close to the overall
    maximum (within the ``thresh`` factor of ``_combine_max``), the earlier peak is preferred.
"""
lag_min = int(math.ceil(sample_rate / freq_high))
# Find near enough max that is smallest
best = torch.max(nccf[..., lag_min:], -1)
half_size = nccf.shape[-1] // 2
half = torch.max(nccf[..., lag_min:half_size], -1)
best = _combine_max(half, best)
indices = best[1]
# Add back minimal lag
indices += lag_min
# Add 1 empirical calibration offset
indices += 1
return indices
def _median_smoothing(
indices: Tensor,
win_length: int
) -> Tensor:
r"""
Apply median smoothing to the 1D tensor over the given window.
"""
# Centered windowed
pad_length = (win_length - 1) // 2
# "replicate" padding in any dimension
indices = torch.nn.functional.pad(
indices, (pad_length, 0), mode="constant", value=0.
)
indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
roll = indices.unfold(-1, win_length, 1)
values, _ = torch.median(roll, -1)
return values
def detect_pitch_frequency(
waveform: Tensor,
sample_rate: int,
frame_time: float = 10 ** (-2),
win_length: int = 30,
freq_low: int = 85,
freq_high: int = 3400,
) -> Tensor:
r"""Detect pitch frequency.
It is implemented using normalized cross-correlation function and median smoothing.
Args:
waveform (Tensor): Tensor of audio of dimension (..., freq, time)
sample_rate (int): The sample rate of the waveform (Hz)
frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).
win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).
Returns:
Tensor: Tensor of freq of dimension (..., frame)
"""
# pack batch
shape = list(waveform.size())
waveform = waveform.reshape([-1] + shape[-1:])
nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
indices = _find_max_per_frame(nccf, sample_rate, freq_high)
indices = _median_smoothing(indices, win_length)
# Convert indices to frequency
EPSILON = 10 ** (-9)
freq = sample_rate / (EPSILON + indices.to(torch.float))
# unpack batch
freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))
return freq
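# Usage sketch (illustrative assumption): estimating the pitch of a synthetic 440 Hz tone.
def _demo_pitch_detection():
    sample_rate = 16000
    t = torch.arange(0, sample_rate, dtype=torch.float) / sample_rate
    tone = torch.sin(2 * math.pi * 440.0 * t)
    pitch = detect_pitch_frequency(tone, sample_rate)  # (..., frame); values near 440 Hz
    return pitch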
def sliding_window_cmn(
specgram: Tensor,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False,
) -> Tensor:
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
specgram (Tensor): Tensor of audio of dimension (..., time, freq)
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
Returns:
        Tensor: Tensor matching input shape (..., time, freq)
"""
input_shape = specgram.shape
num_frames, num_feats = input_shape[-2:]
specgram = specgram.view(-1, num_frames, num_feats)
num_channels = specgram.shape[0]
dtype = specgram.dtype
device = specgram.device
last_window_start = last_window_end = -1
cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cmn_specgram = torch.zeros(
num_channels, num_frames, num_feats, dtype=dtype, device=device)
for t in range(num_frames):
window_start = 0
window_end = 0
if center:
window_start = t - cmn_window // 2
window_end = window_start + cmn_window
else:
window_start = t - cmn_window
window_end = t + 1
if window_start < 0:
window_end -= window_start
window_start = 0
if not center:
if window_end > t:
window_end = max(t + 1, min_cmn_window)
if window_end > num_frames:
window_start -= (window_end - num_frames)
window_end = num_frames
if window_start < 0:
window_start = 0
if last_window_start == -1:
input_part = specgram[:, window_start: window_end - window_start, :]
cur_sum += torch.sum(input_part, 1)
if norm_vars:
cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]
else:
if window_start > last_window_start:
frame_to_remove = specgram[:, last_window_start, :]
cur_sum -= frame_to_remove
if norm_vars:
cur_sumsq -= (frame_to_remove ** 2)
if window_end > last_window_end:
frame_to_add = specgram[:, last_window_end, :]
cur_sum += frame_to_add
if norm_vars:
cur_sumsq += (frame_to_add ** 2)
window_frames = window_end - window_start
last_window_start = window_start
last_window_end = window_end
cmn_specgram[:, t, :] = specgram[:, t, :] - cur_sum / window_frames
if norm_vars:
if window_frames == 1:
cmn_specgram[:, t, :] = torch.zeros(
num_channels, num_feats, dtype=dtype, device=device)
else:
variance = cur_sumsq
variance = variance / window_frames
variance -= ((cur_sum ** 2) / (window_frames ** 2))
variance = torch.pow(variance, -0.5)
cmn_specgram[:, t, :] *= variance
cmn_specgram = cmn_specgram.view(input_shape[:-2] + (num_frames, num_feats))
if len(input_shape) == 2:
cmn_specgram = cmn_specgram.squeeze(0)
return cmn_specgram
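# Editor's note: a small, hedged sketch of sliding_window_cmn on a random spectrogram
# (not part of the original module). The layout follows the Args section above,
# (..., time, freq); the values are synthetic and only illustrate the call.
def _example_sliding_window_cmn():
    specgram = torch.rand(1, 100, 40)  # (channel, time, freq)
    normalized = sliding_window_cmn(specgram, cmn_window=600, center=True)
    print(normalized.shape)  # torch.Size([1, 100, 40])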
def spectral_centroid(
waveform: Tensor,
sample_rate: int,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
) -> Tensor:
r"""
Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): Sample rate of the audio waveform
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
Returns:
Tensor: Dimension (..., time)
"""
specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, power=1., normalized=False)
freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,
device=specgram.device).reshape((-1, 1))
freq_dim = -2
return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
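# Editor's note: hedged usage sketch for spectral_centroid (not part of the original
# module). The parameter values below are illustrative assumptions, not defaults.
def _example_spectral_centroid():
    sample_rate, n_fft = 16000, 400
    waveform = torch.rand(1, sample_rate)  # one second of noise
    window = torch.hann_window(n_fft)
    centroid = spectral_centroid(waveform, sample_rate, pad=0, window=window,
                                 n_fft=n_fft, hop_length=200, win_length=n_fft)
    print(centroid.shape)  # (1, frames)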
@_mod_utils.requires_sox()
def apply_codec(
waveform: Tensor,
sample_rate: int,
format: str,
channels_first: bool = True,
compression: Optional[float] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
) -> Tensor:
r"""
Apply codecs as a form of augmentation.
Args:
waveform (Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```.
sample_rate (int): Sample rate of the audio waveform.
format (str): File format.
channels_first (bool):
When True, both the input and output Tensor have dimension ``[channel, time]``.
Otherwise, they have dimension ``[time, channel]``.
compression (float): Used for formats other than WAV.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
encoding (str, optional): Changes the encoding for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
bits_per_sample (int, optional): Changes the bit depth for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
Returns:
torch.Tensor: Resulting Tensor.
If ``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``.
"""
bytes = io.BytesIO()
torchaudio.backend.sox_io_backend.save(bytes,
waveform,
sample_rate,
channels_first,
compression,
format,
encoding,
bits_per_sample
)
bytes.seek(0)
augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file(
bytes, effects=[["rate", f"{sample_rate}"]], channels_first=channels_first, format=format)
return augmented
@_mod_utils.requires_kaldi()
def compute_kaldi_pitch(
waveform: torch.Tensor,
sample_rate: float,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_f0: float = 50,
max_f0: float = 400,
soft_min_f0: float = 10.0,
penalty_factor: float = 0.1,
lowpass_cutoff: float = 1000,
resample_frequency: float = 4000,
delta_pitch: float = 0.005,
nccf_ballast: float = 7000,
lowpass_filter_width: int = 1,
upsample_filter_width: int = 5,
max_frames_latency: int = 0,
frames_per_chunk: int = 0,
simulate_first_pass_online: bool = False,
recompute_frame: int = 500,
snip_edges: bool = True,
) -> torch.Tensor:
"""Extract pitch based on method described in *A pitch extraction algorithm tuned
for automatic speech recognition* [:footcite:`6854049`].
This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi.
Args:
waveform (Tensor):
The input waveform of shape `(..., time)`.
sample_rate (float):
Sample rate of `waveform`.
frame_length (float, optional):
Frame length in milliseconds. (default: 25.0)
frame_shift (float, optional):
Frame shift in milliseconds. (default: 10.0)
min_f0 (float, optional):
Minimum F0 to search for (Hz) (default: 50.0)
max_f0 (float, optional):
Maximum F0 to search for (Hz) (default: 400.0)
soft_min_f0 (float, optional):
Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0)
penalty_factor (float, optional):
Cost factor for F0 change. (default: 0.1)
lowpass_cutoff (float, optional):
Cutoff frequency for LowPass filter (Hz) (default: 1000)
resample_frequency (float, optional):
Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff.
(default: 4000)
delta_pitch( float, optional):
Smallest relative change in pitch that our algorithm measures. (default: 0.005)
nccf_ballast (float, optional):
Increasing this factor reduces NCCF for quiet frames (default: 7000)
lowpass_filter_width (int, optional):
Integer that determines filter width of lowpass filter, more gives sharper filter.
(default: 1)
upsample_filter_width (int, optional):
Integer that determines filter width when upsampling NCCF. (default: 5)
max_frames_latency (int, optional):
Maximum number of frames of latency that we allow pitch tracking to introduce into
the feature processing (affects output only if ``frames_per_chunk > 0`` and
``simulate_first_pass_online=True``) (default: 0)
frames_per_chunk (int, optional):
The number of frames used for energy normalization. (default: 0)
simulate_first_pass_online (bool, optional):
If true, the function will output features that correspond to what an online decoder
would see in the first pass of decoding -- not the final version of the features,
which is the default. (default: False)
Relevant if ``frames_per_chunk > 0``.
recompute_frame (int, optional):
Only relevant for compatibility with online pitch extraction.
A non-critical parameter; the frame at which we recompute some of the forward pointers,
after revising our estimate of the signal energy.
Relevant if ``frames_per_chunk > 0``. (default: 500)
snip_edges (bool, optional):
If this is set to false, the incomplete frames near the ending edge won't be snipped,
so that the number of frames is the file size divided by the frame-shift.
This makes different types of features give the same number of frames. (default: True)
Returns:
Tensor: Pitch feature. Shape: ``(..., frames, 2)`` where the last dimension
corresponds to pitch and NCCF.
"""
shape = waveform.shape
waveform = waveform.reshape(-1, shape[-1])
result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch(
waveform, sample_rate, frame_length, frame_shift,
min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff,
resample_frequency, delta_pitch, nccf_ballast,
lowpass_filter_width, upsample_filter_width, max_frames_latency,
frames_per_chunk, simulate_first_pass_online, recompute_frame,
snip_edges,
)
result = result.reshape(shape[:-1] + result.shape[-2:])
return result
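# Editor's note: hedged usage sketch for compute_kaldi_pitch (not part of the original
# module). Calling it only works when torchaudio was built with the Kaldi extension,
# as the requires_kaldi decorator above indicates; the input here is random noise.
def _example_compute_kaldi_pitch():
    waveform = torch.rand(1, 16000)
    pitch_feature = compute_kaldi_pitch(waveform, sample_rate=16000)
    print(pitch_feature.shape)  # (1, frames, 2)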
def _get_sinc_resample_kernel(
orig_freq: float,
new_freq: float,
gcd: int,
lowpass_filter_width: int,
rolloff: float,
resampling_method: str,
beta: Optional[float],
device: torch.device = torch.device("cpu"),
dtype: Optional[torch.dtype] = None):
if not (int(orig_freq) == orig_freq and int(new_freq) == new_freq):
warnings.warn(
"Non-integer frequencies are being cast to ints and may result in poor resampling quality "
"because the underlying algorithm requires an integer ratio between `orig_freq` and `new_freq`. "
"Using non-integer valued frequencies will throw an error in release 0.10. "
"To work around this issue, manually convert both frequencies to integer values "
"that maintain their resampling rate ratio before passing them into the function "
"Example: To downsample a 44100 hz waveform by a factor of 8, use "
"`orig_freq=8` and `new_freq=1` instead of `orig_freq=44100` and `new_freq=5512.5` "
"For more information or to leave feedback about this change, please refer to "
"https://github.com/pytorch/audio/issues/1487."
)
if resampling_method not in ['sinc_interpolation', 'kaiser_window']:
raise ValueError('Invalid resampling method: {}'.format(resampling_method))
orig_freq = int(orig_freq) // gcd
new_freq = int(new_freq) // gcd
assert lowpass_filter_width > 0
kernels = []
base_freq = min(orig_freq, new_freq)
# This will perform antialiasing filtering by removing the highest frequencies.
# At first I thought I only needed this when downsampling, but when upsampling
# you will get edge artifacts without this, as the edge is equivalent to zero padding,
# which will add high freq artifacts.
base_freq *= rolloff
# The key idea of the algorithm is that x(t) can be exactly reconstructed from x[i] (tensor)
# using the sinc interpolation formula:
# x(t) = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - t))
# We can then sample the function x(t) with a different sample rate:
# y[j] = x(j / new_freq)
# or,
# y[j] = sum_i x[i] sinc(pi * orig_freq * (i / orig_freq - j / new_freq))
# We see here that y[j] is the convolution of x[i] with a specific filter, for which
# we take an FIR approximation, stopping when we see at least `lowpass_filter_width` zeros crossing.
# But y[j+1] is going to have a different set of weights and so on, until y[j + new_freq].
# Indeed:
# y[j + new_freq] = sum_i x[i] sinc(pi * orig_freq * ((i / orig_freq - (j + new_freq) / new_freq))
# = sum_i x[i] sinc(pi * orig_freq * ((i - orig_freq) / orig_freq - j / new_freq))
# = sum_i x[i + orig_freq] sinc(pi * orig_freq * (i / orig_freq - j / new_freq))
# so y[j+new_freq] uses the same filter as y[j], but on a shifted version of x by `orig_freq`.
# This will explain the F.conv1d after, with a stride of orig_freq.
width = math.ceil(lowpass_filter_width * orig_freq / base_freq)
# If orig_freq is still big after GCD reduction, most filters will be very unbalanced, i.e.,
# they will have a lot of almost zero values to the left or to the right...
# There is probably a way to evaluate those filters more efficiently, but this is kept for
# future work.
idx_dtype = dtype if dtype is not None else torch.float64
idx = torch.arange(-width, width + orig_freq, device=device, dtype=idx_dtype)
for i in range(new_freq):
t = (-i / new_freq + idx / orig_freq) * base_freq
t = t.clamp_(-lowpass_filter_width, lowpass_filter_width)
# we do not use built in torch windows here as we need to evaluate the window
# at specific positions, not over a regular grid.
if resampling_method == "sinc_interpolation":
window = torch.cos(t * math.pi / lowpass_filter_width / 2)**2
else:
# kaiser_window
if beta is None:
beta = 14.769656459379492
beta_tensor = torch.tensor(float(beta))
window = torch.i0(beta_tensor * torch.sqrt(1 - (t / lowpass_filter_width) ** 2)) / torch.i0(beta_tensor)
t *= math.pi
kernel = torch.where(t == 0, torch.tensor(1.).to(t), torch.sin(t) / t)
kernel.mul_(window)
kernels.append(kernel)
scale = base_freq / orig_freq
kernels = torch.stack(kernels).view(new_freq, 1, -1).mul_(scale)
if dtype is None:
kernels = kernels.to(dtype=torch.float32)
return kernels, width
def _apply_sinc_resample_kernel(
waveform: Tensor,
orig_freq: float,
new_freq: float,
gcd: int,
kernel: Tensor,
width: int,
):
orig_freq = int(orig_freq) // gcd
new_freq = int(new_freq) // gcd
# pack batch
shape = waveform.size()
waveform = waveform.view(-1, shape[-1])
num_wavs, length = waveform.shape
waveform = torch.nn.functional.pad(waveform, (width, width + orig_freq))
resampled = torch.nn.functional.conv1d(waveform[:, None], kernel, stride=orig_freq)
resampled = resampled.transpose(1, 2).reshape(num_wavs, -1)
target_length = int(math.ceil(new_freq * length / orig_freq))
resampled = resampled[..., :target_length]
# unpack batch
resampled = resampled.view(shape[:-1] + resampled.shape[-1:])
return resampled
def resample(
waveform: Tensor,
orig_freq: float,
new_freq: float,
lowpass_filter_width: int = 6,
rolloff: float = 0.99,
resampling_method: str = "sinc_interpolation",
beta: Optional[float] = None,
) -> Tensor:
r"""Resamples the waveform at the new frequency using bandlimited interpolation.
https://ccrma.stanford.edu/~jos/resample/Theory_Ideal_Bandlimited_Interpolation.html
Note:
``transforms.Resample`` precomputes and reuses the resampling kernel, so using it will result in
more efficient computation if resampling multiple waveforms with the same resampling parameters.
Args:
waveform (Tensor): The input signal of dimension (..., time)
orig_freq (float): The original frequency of the signal
new_freq (float): The desired frequency
lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper
but less efficient. (Default: ``6``)
rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist.
Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``)
resampling_method (str, optional): The resampling method to use.
Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``)
beta (float or None): The shape parameter used for kaiser window.
Returns:
Tensor: The waveform at the new frequency of dimension (..., time).
"""
assert orig_freq > 0.0 and new_freq > 0.0
if orig_freq == new_freq:
return waveform
gcd = math.gcd(int(orig_freq), int(new_freq))
kernel, width = _get_sinc_resample_kernel(orig_freq, new_freq, gcd, lowpass_filter_width, rolloff,
resampling_method, beta, waveform.device, waveform.dtype)
resampled = _apply_sinc_resample_kernel(waveform, orig_freq, new_freq, gcd, kernel, width)
return resampled
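# Editor's note: hedged sketch of resample, downsampling 44.1 kHz noise to 16 kHz
# (not part of the original module). The output length follows
# ceil(new_freq * length / orig_freq) = ceil(16000 * 44100 / 44100) = 16000.
def _example_resample():
    waveform = torch.rand(1, 44100)
    resampled = resample(waveform, orig_freq=44100, new_freq=16000)
    print(resampled.shape)  # torch.Size([1, 16000])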
@torch.jit.unused
def edit_distance(seq1: Sequence, seq2: Sequence) -> int:
"""
Calculate the word level edit (Levenshtein) distance between two sequences.
The function computes an edit distance allowing deletion, insertion and
substitution. The result is an integer.
For most applications, the two input sequences should be the same type. If
two strings are given, the output is the edit distance between the two
strings (character edit distance). If two lists of strings are given, the
output is the edit distance between sentences (word edit distance). Users
may want to normalize the output by the length of the reference sequence.
TorchScript is not supported for this function.
Args:
seq1 (Sequence): the first sequence to compare.
seq2 (Sequence): the second sequence to compare.
Returns:
int: The distance between the first and second sequences.
"""
len_sent2 = len(seq2)
dold = list(range(len_sent2 + 1))
dnew = [0 for _ in range(len_sent2 + 1)]
for i in range(1, len(seq1) + 1):
dnew[0] = i
for j in range(1, len_sent2 + 1):
if seq1[i - 1] == seq2[j - 1]:
dnew[j] = dold[j - 1]
else:
substitution = dold[j - 1] + 1
insertion = dnew[j - 1] + 1
deletion = dold[j] + 1
dnew[j] = min(substitution, insertion, deletion)
dnew, dold = dold, dnew
return int(dold[-1])
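# Editor's note: two hedged examples of edit_distance (not part of the original module).
# The first is the classic character-level case ("kitten" -> "sitting" needs three
# edits: two substitutions and one insertion); the second is word-level on token lists.
def _example_edit_distance():
    print(edit_distance("kitten", "sitting"))  # 3
    print(edit_distance("abc def".split(), "abc xyz def".split()))  # 1 (one insertion)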
def pitch_shift(
waveform: Tensor,
sample_rate: int,
n_steps: int,
bins_per_octave: int = 12,
n_fft: int = 512,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window: Optional[Tensor] = None,
) -> Tensor:
"""
Shift the pitch of a waveform by ``n_steps`` steps.
Args:
waveform (Tensor): The input waveform of shape `(..., time)`.
sample_rate (int): Sample rate of `waveform`.
n_steps (int): The (fractional) steps to shift `waveform`.
bins_per_octave (int, optional): The number of steps per octave (Default: ``12``).
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``).
win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``).
hop_length (int or None, optional): Length of hop between STFT windows. If None, then
``win_length // 4`` is used (Default: ``None``).
window (Tensor or None, optional): Window tensor that is applied/multiplied to each frame/window.
If None, then ``torch.hann_window(win_length)`` is used (Default: ``None``).
Returns:
Tensor: The pitch-shifted audio waveform of shape `(..., time)`.
"""
if hop_length is None:
hop_length = n_fft // 4
if win_length is None:
win_length = n_fft
if window is None:
window = torch.hann_window(window_length=win_length, device=waveform.device)
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
ori_len = shape[-1]
rate = 2.0 ** (-float(n_steps) / bins_per_octave)
spec_f = torch.stft(input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True)
phase_advance = torch.linspace(0, math.pi * hop_length, spec_f.shape[-2], device=spec_f.device)[..., None]
spec_stretch = phase_vocoder(spec_f, rate, phase_advance)
len_stretch = int(round(ori_len / rate))
waveform_stretch = torch.istft(spec_stretch,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=len_stretch)
waveform_shift = resample(waveform_stretch, sample_rate / rate, float(sample_rate))
shift_len = waveform_shift.size()[-1]
if shift_len > ori_len:
waveform_shift = waveform_shift[..., :ori_len]
else:
waveform_shift = torch.nn.functional.pad(waveform_shift, [0, ori_len - shift_len])
# unpack batch
waveform_shift = waveform_shift.view(shape[:-1] + waveform_shift.shape[-1:])
return waveform_shift
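# Editor's note: hedged usage sketch for pitch_shift, raising a synthetic signal by two
# semitones (not part of the original module). The result is cropped or padded back to
# the input length, so the shapes match.
def _example_pitch_shift():
    sample_rate = 16000
    waveform = torch.rand(1, sample_rate)
    shifted = pitch_shift(waveform, sample_rate, n_steps=2)
    print(shifted.shape)  # torch.Size([1, 16000])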
| [
"torch.round",
"torch.cat",
"torch.view_as_real",
"torch.stack",
"torch.istft",
"torch.nn.functional.pad",
"torch.exp",
"torch.stft",
"torch.sum",
"torch.sqrt",
"torch.log1p",
"torch.norm",
"torch.view_as_complex",
"torch.i0",
"torch.abs",
"torch.tensor",
"torch.polar",
"torch.zeros",
"torch.device",
"torch.real",
"torch.cos",
"torch.min",
"torch.max",
"torch.hann_window",
"torch.linspace",
"torch.clamp",
"torch.nn.functional.conv1d",
"torch.cumsum",
"torch.pow",
"torch.rand",
"torch.arange",
"torch.sin",
"torch.ops.torchaudio.kaldi_ComputeKaldiPitch",
"torch.sign",
"torch.atan2",
"torch.median"
] | 1.4.0 | jaeyeun97/audio | 8a347b62cf5c907d2676bdc983354834e500a282 |
1.2 | """
Fixtures for unit tests.
"""
import pytest
import numpy as np
import torch
from lettuce import (
stencils, Stencil, get_subclasses, Transform, Lattice, moments
)
STENCILS = list(get_subclasses(Stencil, stencils))
TRANSFORMS = list(get_subclasses(Transform, moments))
@pytest.fixture(
params=["cpu", pytest.param(
"cuda:0", marks=pytest.mark.skipif(
not torch.cuda.is_available(), reason="CUDA not available.")
)])
def device(request):
"""Run a test case for all available devices."""
return request.param
@pytest.fixture(params=[torch.float32, torch.float64])
# not testing torch.float16 (half precision is not precise enough)
def dtype_device(request, device):
"""Run a test case for all available devices and data types available on the device."""
if device == "cpu" and request.param == torch.float16:
pytest.skip("Half precision is only available on GPU.")
return request.param, device
@pytest.fixture(params=STENCILS)
def stencil(request):
"""Run a test for all stencils."""
return request.param
@pytest.fixture(params=STENCILS)
def lattice(request, dtype_device):
"""Run a test for all lattices (all stencils, devices and data types available on the device.)"""
dtype, device = dtype_device
return Lattice(request.param, device=device, dtype=dtype)
@pytest.fixture()
def f_lattice(lattice):
"""Run a test for all lattices; return a grid with 3^D sample distribution functions alongside the lattice."""
np.random.seed(1) # arbitrary, but deterministic
return lattice.convert_to_tensor(np.random.random([lattice.Q] + [3] * lattice.D)), lattice
@pytest.fixture(params=[Lattice])
def f_all_lattices(request, lattice):
"""Run a test for all lattices and lattices-of-vector;
return a grid with 3^D sample distribution functions alongside the lattice.
"""
np.random.seed(1)
f = np.random.random([lattice.Q] + [3] * lattice.D)
Ltc = request.param
ltc = Ltc(lattice.stencil, lattice.device, lattice.dtype)
return ltc.convert_to_tensor(f), ltc
@pytest.fixture(params=TRANSFORMS)
def f_transform(request, f_all_lattices):
Transform = request.param
f, lattice = f_all_lattices
if lattice.stencil in Transform.supported_stencils:
return f, Transform(lattice)
else:
pytest.skip("Stencil not supported for this transform.")
| [
"torch.cuda.is_available"
] | 1.2 | je-santos/lettuce | 9455449b997eb245cd714c5759d7a7cd4c33b1dc |
1.2 | """
Collision models
"""
import torch
from lettuce.equilibrium import QuadraticEquilibrium
from lettuce.util import LettuceException
__all__ = [
"BGKCollision", "KBCCollision2D", "KBCCollision3D", "MRTCollision", "RegularizedCollision",
"SmagorinskyCollision", "TRTCollision", "BGKInitialization"
]
class BGKCollision:
def __init__(self, lattice, tau, force=None):
self.force = force
self.lattice = lattice
self.tau = tau
def __call__(self, f):
rho = self.lattice.rho(f)
u_eq = 0 if self.force is None else self.force.u_eq(f)
u = self.lattice.u(f) + u_eq
feq = self.lattice.equilibrium(rho, u)
Si = 0 if self.force is None else self.force.source_term(u)
return f - 1.0 / self.tau * (f - feq) + Si
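# Editor's note: a hedged usage sketch of the BGK update f <- f - (f - feq)/tau + Si
# (not part of the original module). It assumes the D2Q9 stencil from lettuce.stencils
# and an arbitrary tau of 0.6; the populations are random and purely illustrative.
def _example_bgk_collision():
    import numpy as np
    from lettuce import Lattice
    from lettuce.stencils import D2Q9
    lattice = Lattice(D2Q9, device="cpu")
    collision = BGKCollision(lattice, tau=0.6)
    f = lattice.convert_to_tensor(np.random.random([lattice.Q, 16, 16]))
    return collision(f)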
class MRTCollision:
"""Multiple relaxation time collision operator
This is an MRT operator in the most general sense of the word.
The transform does not have to be linear and can, e.g., be any moment or cumulant transform.
"""
def __init__(self, lattice, transform, relaxation_parameters):
self.lattice = lattice
self.transform = transform
self.relaxation_parameters = lattice.convert_to_tensor(relaxation_parameters)
def __call__(self, f):
m = self.transform.transform(f)
meq = self.transform.equilibrium(m)
m = m - self.lattice.einsum("q,q->q", [1 / self.relaxation_parameters, m - meq])
f = self.transform.inverse_transform(m)
return f
class TRTCollision:
"""Two relaxation time collision model - standard implementation (cf. Krüger 2017)
"""
def __init__(self, lattice, tau, tau_minus=1.0):
self.lattice = lattice
self.tau_plus = tau
self.tau_minus = tau_minus
def __call__(self, f):
rho = self.lattice.rho(f)
u = self.lattice.u(f)
feq = self.lattice.equilibrium(rho, u)
f_diff_neq = ((f + f[self.lattice.stencil.opposite]) - (feq + feq[self.lattice.stencil.opposite])) / (
2.0 * self.tau_plus)
f_diff_neq += ((f - f[self.lattice.stencil.opposite]) - (feq - feq[self.lattice.stencil.opposite])) / (
2.0 * self.tau_minus)
f = f - f_diff_neq
return f
class RegularizedCollision:
"""Regularized LBM according to Jonas Latt and Bastien Chopard (2006)"""
def __init__(self, lattice, tau):
self.lattice = lattice
self.tau = tau
self.Q_matrix = torch.zeros([lattice.Q, lattice.D, lattice.D], device=lattice.device, dtype=lattice.dtype)
for a in range(lattice.Q):
for b in range(lattice.D):
for c in range(lattice.D):
self.Q_matrix[a, b, c] = lattice.e[a, b] * lattice.e[a, c]
if b == c:
self.Q_matrix[a, b, c] -= lattice.cs * lattice.cs
def __call__(self, f):
rho = self.lattice.rho(f)
u = self.lattice.u(f)
feq = self.lattice.equilibrium(rho, u)
pi_neq = self.lattice.shear_tensor(f - feq)
cs4 = self.lattice.cs ** 4
pi_neq = self.lattice.einsum("qab,ab->q", [self.Q_matrix, pi_neq])
pi_neq = self.lattice.einsum("q,q->q", [self.lattice.w, pi_neq])
fi1 = pi_neq / (2 * cs4)
f = feq + (1. - 1. / self.tau) * fi1
return f
class KBCCollision2D:
"""Entropic multi-relaxation time model according to Karlin et al. in two dimensions"""
def __init__(self, lattice, tau):
self.lattice = lattice
assert lattice.Q == 9, LettuceException("KBC2D only realized for D2Q9")
self.tau = tau
self.beta = 1. / (2 * tau)
# Build a matrix that contains the indices
self.M = torch.zeros([3, 3, 9], device=lattice.device, dtype=lattice.dtype)
for i in range(3):
for j in range(3):
self.M[i, j] = lattice.e[:, 0] ** i * lattice.e[:, 1] ** j
def kbc_moment_transform(self, f):
"""Transforms the f into the KBC moment representation"""
m = torch.einsum('abq,qmn', self.M, f)
rho = m[0, 0]
m = m / rho
m[0, 0] = rho
return m
def compute_s_seq_from_m(self, f, m):
s = torch.zeros_like(f)
T = m[2, 0] + m[0, 2]
N = m[2, 0] - m[0, 2]
Pi_xy = m[1, 1]
s[0] = m[0, 0] * -T
s[1] = 1. / 2. * m[0, 0] * (0.5 * (T + N))
s[2] = 1. / 2. * m[0, 0] * (0.5 * (T - N))
s[3] = 1. / 2. * m[0, 0] * (0.5 * (T + N))
s[4] = 1. / 2. * m[0, 0] * (0.5 * (T - N))
s[5] = 1. / 4. * m[0, 0] * (Pi_xy)
s[6] = -s[5]
s[7] = 1. / 4 * m[0, 0] * Pi_xy
s[8] = -s[7]
return s
def __call__(self, f):
# the deletes are not part of the algorithm, they just keep the memory usage lower
feq = self.lattice.equilibrium(self.lattice.rho(f), self.lattice.u(f))
# k = torch.zeros_like(f)
m = self.kbc_moment_transform(f)
delta_s = self.compute_s_seq_from_m(f, m)
# k[0] = m[0, 0]
# k[1] = m[0, 0] / 2. * m[1, 0]
# k[2] = m[0, 0] / 2. * m[0, 1]
# k[3] = -m[0, 0] / 2. * m[1, 0]
# k[4] = -m[0, 0] / 2. * m[0, 1]
# k[5] = 0
# k[6] = 0
# k[7] = 0
# k[8] = 0
m = self.kbc_moment_transform(feq)
delta_s -= self.compute_s_seq_from_m(f, m)
del m
delta_h = f - feq - delta_s
sum_s = self.lattice.rho(delta_s * delta_h / feq)
sum_h = self.lattice.rho(delta_h * delta_h / feq)
del feq
gamma_stab = 1. / self.beta - (2 - 1. / self.beta) * sum_s / sum_h
gamma_stab[gamma_stab < 1E-15] = 2.0
gamma_stab[torch.isnan(gamma_stab)] = 2.0
f = f - self.beta * (2 * delta_s + gamma_stab * delta_h)
return f
class KBCCollision3D:
"""Entropic multi-relaxation time-relaxation time model according to Karlin et al. in three dimensions"""
def __init__(self, lattice, tau):
self.lattice = lattice
assert lattice.Q == 27, LettuceException("KBC only realized for D3Q27")
self.tau = tau
self.beta = 1. / (2 * tau)
# Build a matrix that contains the indices
self.M = torch.zeros([3, 3, 3, 27], device=lattice.device, dtype=lattice.dtype)
for i in range(3):
for j in range(3):
for k in range(3):
self.M[i, j, k] = lattice.e[:, 0] ** i * lattice.e[:, 1] ** j * lattice.e[:, 2] ** k
def kbc_moment_transform(self, f):
"""Transforms the f into the KBC moment representation"""
m = torch.einsum('abcq,qmno', self.M, f)
rho = m[0, 0, 0]
m = m / rho
m[0, 0, 0] = rho
return m
def compute_s_seq_from_m(self, f, m):
s = torch.zeros_like(f)
T = m[2, 0, 0] + m[0, 2, 0] + m[0, 0, 2]
N_xz = m[2, 0, 0] - m[0, 0, 2]
N_yz = m[0, 2, 0] - m[0, 0, 2]
Pi_xy = m[1, 1, 0]
Pi_xz = m[1, 0, 1]
Pi_yz = m[0, 1, 1]
s[0] = m[0, 0, 0] * -T
s[1] = 1. / 6. * m[0, 0, 0] * (2 * N_xz - N_yz + T)
s[2] = s[1]
s[3] = 1. / 6. * m[0, 0, 0] * (2 * N_yz - N_xz + T)
s[4] = s[3]
s[5] = 1. / 6. * m[0, 0, 0] * (-N_xz - N_yz + T)
s[6] = s[5]
s[7] = 1. / 4 * m[0, 0, 0] * Pi_yz
s[8] = s[7]
s[9] = - 1. / 4 * m[0, 0, 0] * Pi_yz
s[10] = s[9]
s[11] = 1. / 4 * m[0, 0, 0] * Pi_xz
s[12] = s[11]
s[13] = -1. / 4 * m[0, 0, 0] * Pi_xz
s[14] = s[13]
s[15] = 1. / 4 * m[0, 0, 0] * Pi_xy
s[16] = s[15]
s[17] = -1. / 4 * m[0, 0, 0] * Pi_xy
s[18] = s[17]
return s
def __call__(self, f):
# the deletes are not part of the algorithm, they just keep the memory usage lower
feq = self.lattice.equilibrium(self.lattice.rho(f), self.lattice.u(f))
# k = torch.zeros_like(f)
m = self.kbc_moment_transform(f)
delta_s = self.compute_s_seq_from_m(f, m)
# k[1] = m[0, 0, 0] / 6. * (3. * m[1, 0, 0])
# k[0] = m[0, 0, 0]
# k[2] = -k[1]
# k[3] = m[0, 0, 0] / 6. * (3. * m[0, 1, 0])
# k[4] = -k[3]
# k[5] = m[0, 0, 0] / 6. * (3. * m[0, 0, 1])
# k[6] = -k[5]
m = self.kbc_moment_transform(feq)
delta_s -= self.compute_s_seq_from_m(f, m)
del m
delta_h = f - feq - delta_s
sum_s = self.lattice.rho(delta_s * delta_h / feq)
sum_h = self.lattice.rho(delta_h * delta_h / feq)
del feq
gamma_stab = 1. / self.beta - (2 - 1. / self.beta) * sum_s / sum_h
gamma_stab[gamma_stab < 1E-15] = 2.0
# Detect NaN
gamma_stab[torch.isnan(gamma_stab)] = 2.0
f = f - self.beta * (2 * delta_s + gamma_stab * delta_h)
return f
class SmagorinskyCollision:
"""Smagorinsky large eddy simulation (LES) collision model with BGK operator."""
def __init__(self, lattice, tau, smagorinsky_constant=0.17, force=None):
self.force = force
self.lattice = lattice
self.tau = tau
self.iterations = 2
self.tau_eff = tau
self.constant = smagorinsky_constant
def __call__(self, f):
rho = self.lattice.rho(f)
u_eq = 0 if self.force is None else self.force.u_eq(f)
u = self.lattice.u(f) + u_eq
feq = self.lattice.equilibrium(rho, u)
S_shear = self.lattice.shear_tensor(f - feq)
S_shear /= (2.0 * rho * self.lattice.cs ** 2)
self.tau_eff = self.tau
nu = (self.tau - 0.5) / 3.0
for i in range(self.iterations):
S = S_shear / self.tau_eff
S = self.lattice.einsum('ab,ab->', [S, S])
nu_t = self.constant ** 2 * S
nu_eff = nu + nu_t
self.tau_eff = nu_eff * 3.0 + 0.5
Si = 0 if self.force is None else self.force.source_term(u)
return f - 1.0 / self.tau_eff * (f - feq) + Si
class BGKInitialization:
"""Keep velocity constant."""
def __init__(self, lattice, flow, moment_transformation):
self.lattice = lattice
self.tau = flow.units.relaxation_parameter_lu
self.moment_transformation = moment_transformation
p, u = flow.initial_solution(flow.grid)
self.u = flow.units.convert_velocity_to_lu(lattice.convert_to_tensor(u))
self.rho0 = flow.units.characteristic_density_lu
self.equilibrium = QuadraticEquilibrium(self.lattice)
momentum_names = tuple([f"j{x}" for x in "xyz"[:self.lattice.D]])
self.momentum_indices = moment_transformation[momentum_names]
def __call__(self, f):
rho = self.lattice.rho(f)
feq = self.equilibrium(rho, self.u)
m = self.moment_transformation.transform(f)
meq = self.moment_transformation.transform(feq)
mnew = m - 1.0 / self.tau * (m - meq)
mnew[0] = m[0] - 1.0 / (self.tau + 1) * (m[0] - meq[0])
mnew[self.momentum_indices] = rho * self.u
f = self.moment_transformation.inverse_transform(mnew)
return f
| [
"torch.zeros",
"torch.zeros_like",
"torch.isnan",
"torch.einsum"
] | 1.2 | je-santos/lettuce | 9455449b997eb245cd714c5759d7a7cd4c33b1dc |
0.4 | #!/usr/bin/env python3
import gym
from collections import namedtuple
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.nn as nn
import torch.optim as optim
HIDDEN_SIZE = 128
BATCH_SIZE = 16
PERCENTILE = 70
class Net(nn.Module):
def __init__(self, obs_size, hidden_size, n_actions):
super(Net, self).__init__()
self.net = nn.Sequential(
nn.Linear(obs_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, n_actions)
)
def forward(self, x):
return self.net(x)
Episode = namedtuple('Episode', field_names=['reward', 'steps'])
EpisodeStep = namedtuple('EpisodeStep', field_names=['observation', 'action'])
def iterate_batches(env, net, batch_size):
batch = []
episode_reward = 0.0
episode_steps = []
obs = env.reset()
sm = nn.Softmax(dim=1)
while True:
obs_v = torch.FloatTensor([obs])
act_probs_v = sm(net(obs_v))
act_probs = act_probs_v.data.numpy()[0]
action = np.random.choice(len(act_probs), p=act_probs)
next_obs, reward, is_done, _ = env.step(action)
episode_reward += reward
episode_steps.append(EpisodeStep(observation=obs, action=action))
if is_done:
batch.append(Episode(reward=episode_reward, steps=episode_steps))
episode_reward = 0.0
episode_steps = []
next_obs = env.reset()
if len(batch) == batch_size:
yield batch
batch = []
obs = next_obs
def filter_batch(batch, percentile):
rewards = list(map(lambda s: s.reward, batch))
reward_bound = np.percentile(rewards, percentile)
reward_mean = float(np.mean(rewards))
train_obs = []
train_act = []
for example in batch:
if example.reward < reward_bound:
continue
train_obs.extend(map(lambda step: step.observation, example.steps))
train_act.extend(map(lambda step: step.action, example.steps))
train_obs_v = torch.FloatTensor(train_obs)
train_act_v = torch.LongTensor(train_act)
return train_obs_v, train_act_v, reward_bound, reward_mean
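# Editor's note: a hedged worked example of the percentile filter above (not part of
# the original script). With episode rewards [10, 20, 30, 40] and PERCENTILE = 70,
# np.percentile returns 31.0 (linear interpolation between 30 and 40), so only the
# steps of the episode with reward 40 are kept as training data.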
if __name__ == "__main__":
env = gym.make("CartPole-v0")
env = gym.wrappers.Monitor(env, directory="mon", force=True)
obs_size = env.observation_space.shape[0]
n_actions = env.action_space.n
net = Net(obs_size, HIDDEN_SIZE, n_actions)
objective = nn.CrossEntropyLoss()
optimizer = optim.Adam(params=net.parameters(), lr=0.01)
writer = SummaryWriter(comment="-cartpole")
for iter_no, batch in enumerate(iterate_batches(env, net, BATCH_SIZE)):
obs_v, acts_v, reward_b, reward_m = filter_batch(batch, PERCENTILE)
optimizer.zero_grad()
action_scores_v = net(obs_v)
loss_v = objective(action_scores_v, acts_v)
loss_v.backward()
optimizer.step()
print("%d: loss=%.3f, reward_mean=%.1f, reward_bound=%.1f" % (
iter_no, loss_v.item(), reward_m, reward_b))
writer.add_scalar("loss", loss_v.item(), iter_no)
writer.add_scalar("reward_bound", reward_b, iter_no)
writer.add_scalar("reward_mean", reward_m, iter_no)
if reward_m > 199:
print("Solved!")
break
writer.close()
| [
"torch.nn.Linear",
"torch.nn.Softmax",
"torch.FloatTensor",
"torch.nn.ReLU",
"torch.LongTensor",
"torch.nn.CrossEntropyLoss"
] | 0.4.1 | castorfou/drl_handson | 4f5a07611c483ad20022afd37961559131c5bf31 |
1.7 | """
(c) 2020 Spencer Rose, MIT Licence
Python Landscape Classification Tool (PyLC)
Reference: An evaluation of deep learning semantic segmentation
for land cover classification of oblique ground-based photography,
MSc. Thesis 2020.
<http://hdl.handle.net/1828/12156>
Spencer Rose <[email protected]>, June 2020
University of Victoria
Module: Evaluator Class
File: evaluate.py
"""
import json
import os
import torch
import utils.tex as tex
import numpy as np
import cv2
import utils.tools as utils
from config import defaults, Parameters
from utils.metrics import Metrics
class Evaluator:
"""
Handles model test/evaluation functionality.
Parameters
------
params: Parameters
Updated parameters.
"""
def __init__(self, params=None):
# initialize parameters, metrics
self.meta = Parameters(params) if params is not None else defaults
self.metrics = Metrics()
# Model results
self.fid = None
self.logits = None
self.mask_pred = None
self.results = []
# data buffers
self.y_true = None
self.y_pred = None
self.labels = []
# multi-image data buffers for aggregate evaluation
self.aggregate = False
self.y_true_aggregate = []
self.y_pred_aggregate = []
# Make output and mask directories for results
self.model_path = None
self.output_dir = os.path.join(defaults.output_dir, self.meta.id)
self.masks_dir = utils.mk_path(os.path.join(self.output_dir, 'masks'))
self.logits_dir = utils.mk_path(os.path.join(self.output_dir, 'logits'))
self.metrics_dir = utils.mk_path(os.path.join(self.output_dir, 'metrics'))
def load(self, mask_pred, meta, mask_true_path=None, scale=None):
"""
Initialize predicted/ground truth image masks for
evaluation metrics.
Parameters:
-----------
mask_pred: np.array
Reconstructed predicted segmentation mask [HWC]
meta: dict
Reconstruction metadata.
mask_true_path: str
File path to ground-truth mask [CHW]
"""
# store metadata
self.meta = meta
# file identifier (include current scale)
self.fid = self.meta.extract['fid']
# reconstruct unnormalized model outputs into mask data array
self.mask_pred = mask_pred
if mask_true_path:
# load ground-truth data
mask_true, w, h, w_scaled, h_scaled = utils.get_image(
mask_true_path,
ch=3,
scale=scale,
interpolate=cv2.INTER_NEAREST
)
# check dimensions of ground truth mask and predicted mask
if not (w_scaled == self.meta.extract['w_scaled'] and h_scaled == self.meta.extract['h_scaled']):
print("Ground truth mask dims ({}px X {}px) do not match predicted mask dims ({}px X {}px).".format(
w_scaled, h_scaled, self.meta.extract['w_scaled'], self.meta.extract['h_scaled']
))
exit(1)
self.y_true = torch.as_tensor(torch.tensor(mask_true), dtype=torch.uint8).permute(2, 0, 1).unsqueeze(0)
self.y_pred = torch.as_tensor(self.mask_pred, dtype=torch.uint8).permute(2, 0, 1).unsqueeze(0)
# Class encode input predicted data
self.y_pred = utils.class_encode(self.y_pred, self.meta.palette_rgb)
self.y_true = utils.class_encode(self.y_true, self.meta.palette_rgb)
# Verify same size of target == input
assert self.y_pred.shape == self.y_true.shape, "Input dimensions {} not same as target {}.".format(
self.y_pred.shape, self.y_true.shape)
self.y_pred = self.y_pred.flatten()
self.y_true = self.y_true.flatten()
# load input data into metrics
self.y_true_aggregate += [self.y_true]
self.y_pred_aggregate += [self.y_pred]
return self
def update(self, meta):
"""
Update local metadata
"""
self.meta = meta
return self
def evaluate(self, aggregate=False):
"""
Compute evaluation metrics
Parameters
----------
aggregate: bool
Compute aggregate metrics for multiple data loads.
"""
self.aggregate = aggregate
self.validate()
self.metrics.f1_score(self.y_true, self.y_pred)
self.metrics.jaccard(self.y_true, self.y_pred)
self.metrics.mcc(self.y_true, self.y_pred)
self.metrics.confusion_matrix(self.y_true, self.y_pred, labels=self.labels)
self.metrics.report(self.y_true, self.y_pred, labels=self.labels)
return self
def validate(self):
"""
Validates mask data for computations.
- Ensures all classes are represented in ground truth mask.
"""
self.labels = defaults.class_codes
# aggregated metrics
if self.aggregate:
self.fid = 'aggregate_metrics'
assert self.y_true_aggregate and self.y_pred_aggregate, \
"Aggregate evaluation failed. Data buffer is empty."
print("\nReporting aggregate metrics ... ")
print("\t - Total generated masks: {}".format(len(self.y_pred_aggregate)))
print()
# Concatenate aggregated data
self.y_true = np.concatenate((self.y_true_aggregate))
self.y_pred = np.concatenate((self.y_pred_aggregate))
# ensure class coverage
for idx in range(len(self.labels)):
self.y_true[idx] = idx
self.y_pred[idx] = idx
return self
def reset(self):
"""
Resets evaluator buffers.
"""
self.logits = None
self.mask_pred = None
self.results = []
self.meta = {}
self.y_true = None
self.y_pred = None
def save_logits(self, logits):
"""
Save unnormalized model outputs (logits) to file.
Parameters
----------
logits: list
Unnormalized model outputs.
Returns
-------
logits_file: str
Output path to model outputs file.
"""
# save unnormalized model outputs
logits_file = os.path.join(self.logits_dir, self.fid + '_output.pth')
if utils.confirm_write_file(logits_file):
torch.save({"results": logits, "meta": self.meta}, logits_file)
print("Model output data saved to \n\t{}.".format(logits_file))
return logits_file
return
def save_metrics(self):
"""
Save prediction evaluation results to files.
Returns
-------
metrics_file: str
Output path to metrics data file.
metrics_file: str
Output path to confusion matrix PDF file.
metrics_file: str
Output path to confusion matrix data file.
"""
# Build output file paths
metrics_file = os.path.join(self.metrics_dir, self.fid + '_eval.json')
cmap_img_file = os.path.join(self.metrics_dir, self.fid + '_cmap.pdf')
cmap_data_file = os.path.join(self.metrics_dir, self.fid + '_cmap.npy')
# save evaluation metrics results as JSON file
if utils.confirm_write_file(metrics_file):
with open(metrics_file, 'w') as fp:
json.dump(self.metrics.results, fp, indent=4)
# save confusion matrix as PDF and data file
if utils.confirm_write_file(cmap_img_file):
self.metrics.cmap.get_figure().savefig(cmap_img_file, format='pdf', dpi=400)
np.save(cmap_data_file, self.metrics.cmatrix)
# clear metrics plot
self.metrics.plt.clf()
return metrics_file, cmap_img_file, cmap_data_file
def save_tex(self):
"""
Save prediction evaluation results as LaTeX table to file.
Returns
-------
tex_file: str
Output path to TeX data file.
"""
tex_file = os.path.join(self.metrics_dir, self.fid + '_metrics.tex')
if utils.confirm_write_file(tex_file):
with open(tex_file, 'w') as fp:
fp.write(tex.convert_md_to_tex(self.meta))
return tex_file
return
def save_image(self):
"""
Reconstructs segmentation prediction as mask image.
Output mask image saved to file (RGB -> BGR conversion)
Note that the default color format in OpenCV is often
referred to as RGB but it is actually BGR (the bytes are
reversed).
Returns
-------
mask_data: np.array
Output mask data.
"""
# Build mask file path
mask_file = os.path.join(self.masks_dir, self.fid + '.png')
if self.mask_pred is None:
print("Mask has not been reconstructed. Image save cancelled.")
return
if utils.confirm_write_file(mask_file):
# Reconstruct seg-mask from predicted tiles and write to file
cv2.imwrite(mask_file, cv2.cvtColor(self.mask_pred, cv2.COLOR_RGB2BGR))
print("Output mask saved to: \n\t{}.".format(mask_file))
return mask_file
return
| [
"torch.save",
"torch.tensor",
"torch.as_tensor"
] | 1.7.0 | scrose/pylc | 9c4c4e84a14cb3adc0b4226199e4cd5841384b0b |
1.7 | import torch
import torch.utils.data as td
from typing import Optional, Dict, Union
from transformers import BatchEncoding
from argparse import Namespace
import numpy as np
import pandas as pd
from pytorch_quik import io
Tensor_Target = Union[str, np.ndarray]
Tensor_Data = Union[pd.DataFrame, torch.Tensor, BatchEncoding]
def make_TensorDataset(
tens: Tensor_Data,
labels: Tensor_Target,
ttype: Optional[str] = "train",
data_types: Optional[Dict[int, type]] = None,
args: Optional[Namespace] = None,
) -> td.TensorDataset:
"""Will turn a set of data into tensors in a torch TensorDataset for use
in a Neural Network. Also provides the option of saving the TensorDataset
Args:
tens (Union[torch.Tensor, BatchEncoding]): Either a torch.Tensor,
or if from transformers, a BatchEncoding.
labels (Union[str, np.ndarray]): Can either be a string of the label
name found in tens, or the actual labels as an np.ndarray
args (Namespace, optional): The argparse arguments for the job.
Defaults to None.
ttype (str, optional): The type of dataset (train, valid, test).
Defaults to "train".
data_types (Dict[int, type], optional): If the tensor data types
need to be changed for space considerations. Defaults to None.
Returns:
td.TensorDataset: The final TensorDataset
"""
if data_types is not None:
for i, col in enumerate(tens):
tens[col] = tens[col].astype(data_types[i])
if isinstance(labels, str):
ttar = tens.pop(labels)
if isinstance(tens, BatchEncoding):
tds = td.TensorDataset(
tens["input_ids"], tens["attention_mask"], torch.LongTensor(labels)
)
else:
tens = torch.tensor(tens.values)
tens = tens.transpose(0, 1)
ttar = torch.tensor(ttar.values)
tds = td.TensorDataset(*tens, ttar)
if args is not None:
tds_id = io.id_str(ttype, args)
torch.save(tds, tds_id)
return tds
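# Editor's note: a hedged sketch of make_TensorDataset with a tiny pandas DataFrame
# (not part of the original module). Column names and values are illustrative
# assumptions; with args=None nothing is written to disk.
def _example_make_tensordataset():
    df = pd.DataFrame({"x1": [0.1, 0.2], "x2": [1.0, 2.0], "label": [0, 1]})
    tds = make_TensorDataset(df, labels="label")
    print(len(tds), [t.shape for t in tds.tensors])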
def transform_TensorDataset(
tds: td.TensorDataset,
pop: Optional[int] = None,
torch_types: Optional[Dict[int, torch.dtype]] = None,
) -> td.TensorDataset:
"""Transforms a torch.utils.data.TensorDataset by either popping out
a tensor, changing the data types, or both.
Args:
tds (td.TensorDataset): The original TensorDataset
pop (int, optional): The ordinal (index) of the tensor to pop. Defaults to
None.
torch_types (Dict[int, torch.dtype], optional): The ordinal and
new data type of each tensor. Defaults to None.
Returns:
td.TensorDataset: The final transformed TensorDataset
"""
tl = list(tds.tensors)
if pop is not None:
del tl[pop]
if torch_types is not None:
tl = [tens.type(torch_types[i]) for i, tens in enumerate(tl)]
return td.TensorDataset(*tl)
| [
"torch.LongTensor",
"torch.save",
"torch.tensor",
"torch.utils.data.TensorDataset"
] | 1.7.0 | donchesworth/pytorch-quik | e59ea3393bf017a17ab92991f14fe3bd6c5b2d0c |
1.5 | import torch
import torch.nn as nn
class HEDLN(nn.Module):
def __init__(self, input_size, hidden_size, num_layers,
dropout, bidirectional, num_classes1, num_classes2):
super(HEDLN, self).__init__()
self.num_directions = 2 if bidirectional else 1
self.num_classes1 = num_classes1
self.num_classes2 = num_classes2
self.rnn1 = nn.LSTM(input_size, hidden_size, num_layers,
dropout=dropout, bidirectional=bidirectional)
self.fc1 = nn.Linear(self.num_directions * hidden_size, num_classes1)
self.act1 = nn.Sigmoid() # sigmoid+bce
self.rnn2 = nn.LSTM(input_size + self.num_directions * hidden_size,
hidden_size, num_layers,
dropout=dropout, bidirectional=bidirectional)
self.fc2 = nn.Linear(self.num_directions * hidden_size, num_classes2)
self.act2 = nn.Sigmoid() # sigmoid+bce
def forward(self, x):
""" batch-first
x: (seq_len, bz, input_size)
rnn1_out: (seq_len, bz, num_directions * hidden_size)
rnn2_out: (seq_len, bz, num_directions * hidden_size)
logits1: (seq_len*bz, 2)
logits2: (seq_len*bz, 3)
"""
T, B, S1 = x.shape
rnn1_out, _ = self.rnn1(x)
S2 = rnn1_out.shape[-1]
fc1_in = rnn1_out.reshape(-1, S2)
logits1 = self.fc1(fc1_in)
logits1 = self.act1(logits1) # sigmoid+bce
logits1 = logits1.reshape(T, B, self.num_classes1) # sigmoid+bce
rnn2_in = torch.cat((x, rnn1_out), 2)
rnn2_out, _ = self.rnn2(rnn2_in)
fc2_in = rnn2_out.reshape(-1, S2)
logits2 = self.fc2(fc2_in)
logits2 = self.act2(logits2) # sigmoid+bce
logits2 = logits2.reshape(T, B, self.num_classes2) # sigmoid+bce
return logits1, logits2
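# Editor's note: a hedged shape check for HEDLN (not part of the original module).
# All sizes are arbitrary; the model is sequence-first, so the input is
# (seq_len, batch, input_size).
def _example_hedln_shapes():
    model = HEDLN(input_size=10, hidden_size=16, num_layers=2, dropout=0.1,
                  bidirectional=True, num_classes1=2, num_classes2=3)
    x = torch.rand(50, 4, 10)
    logits1, logits2 = model(x)
    print(logits1.shape, logits2.shape)  # (50, 4, 2) and (50, 4, 3)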
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.Sigmoid"
] | 1.5.0 | jiabijue/md_mrle | 21830842ca4e663153b9abb94ca2db604059a91f |
1.0 | import numpy as np
import torch
import matplotlib.pyplot as plt
import seaborn as sns
# two_stage_baseline_data = [torch.load(f"sparse_dr_{i}M_eval.pt") for i in range(1, 5)]
# curl_data = torch.load(f"curl_eval.pt")
# dense_dr_data = torch.load(f"dense_dr_eval.pt")
clrs = [
'#1f77b4', # muted blue
'#ff7f0e', # safety orange
'#2ca02c', # cooked asparagus green
'#d62728', # brick red
'#9467bd', # muted purple
'#8c564b', # chestnut brown
'#e377c2', # raspberry yogurt pink
'#7f7f7f', # middle gray
'#bcbd22', # curry yellow-green
'#17becf' # blue-teal
]
data = {
'1cm': [[], [], []],
'2cm': [[], [], []],
'4cm': [[], [], []],
'8cm': [[], [], []],
'16cm': [[], [], []],
}
for i in range(50, 101, 10):
for key in data:
if key != '1cm' and (i == 90 or i == 100):
continue
try:
results = torch.load(f'train_lengths/{i}p_{key}_rack_train_lengths.pt')
except FileNotFoundError:
continue
episodes = [x / 64 for x in results[0]]
mean = np.mean(episodes)
std = np.std(episodes)
data[key][0] += [i]
data[key][1] += [mean]
data[key][2] += [std]
fig, ax = plt.subplots()
plt.xlabel('Target success rate (%)')
plt.ylabel('Number of episodes')
with sns.axes_style("darkgrid"):
for i, key in enumerate(data):
x, mean, error = data[key]
str_mean = [f"${mu}$" for mu in mean]
print(" & ".join(str_mean))
mean = np.array(mean)
error = np.array(error)
ax.plot(x, mean, label=f"{key} pipeline", linewidth=0.9, color=clrs[i])
ax.fill_between(x, mean - error, mean + error, alpha=0.3, facecolor=clrs[i])
ax.legend(loc=2)
# plt.xlim((0, 4000000))
plt.xticks([50, 60, 70, 80, 90, 100])
# plt.title("Success rate on Dish Rack over training time")
plt.draw()
plt.show()
# plt.close(fig)
| [
"torch.load"
] | 1.0.1 | harry-uglow/Curriculum-Reinforcement-Learning | cb050556e1fdc7b7de8d63ad932fc712a35ac144 |
1.11 | #!/bin/python3
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
""" Advanced server neuron.
Example:
$ python miners/text/advanced_server/main.py
"""
from time import time
import bittensor
import torch
import wandb
import pandas
import datetime
import traceback
import sys
import os
from loguru import logger; logger = logger.opt(colors=True)
from torch.nn.utils import clip_grad_norm_
from datetime import datetime,timedelta
from threading import Lock
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
def serve(
config,
gp_server= None,
subtensor = None,
wallet = None,
metagraph = None,
axon = None
):
config.to_defaults()
# Create Subtensor connection
subtensor = bittensor.subtensor(config = config) if subtensor == None else subtensor
# Load/Create our bittensor wallet.
if wallet == None:
wallet = bittensor.wallet( config = config ).create().register(subtensor=subtensor)
else:
wallet.register(subtensor=subtensor)
# Load/Sync/Save our metagraph.
if metagraph == None:
metagraph = bittensor.metagraph (
subtensor = subtensor
).load().sync().save()
else:
metagraph.load().sync().save()
# Instantiate the model we are going to serve on the network.
# Creating a threading lock for updates to the model
mutex = Lock()
gp_server = gp_server.to(gp_server.device)
# Create our optimizer.
optimizer = torch.optim.SGD(
[ {"params": gp_server.parameters()} ],
lr = config.neuron.learning_rate,
momentum = config.neuron.momentum,
)
bittensor.tokenizer()
timecheck = {}
n_topk_peer_weights = subtensor.min_allowed_weights
# Define our forward function.
def forward_text ( inputs_x ):
r""" Forward function that is called when the axon recieves a forward request from other peers
Args:
inputs_x ( :obj:`torch.Tensor`, `required`):
torch inputs to be forward processed.
Returns:
outputs (:obj:`torch.FloatTensor`):
The nucleus's outputs as a torch tensor of shape [batch_size, sequence_len, __network_dim__]
"""
return gp_server.encode_forward( inputs_x.to(gp_server.device) )
# Define our backward function.
def backward_text (inputs_x, grads_dy ):
r"""Backwards function that is called when the axon recieves a backwards request from other peers.
Updates the server parameters with gradients through the chain.
Args:
inputs_x ( :obj:`torch.Tensor`, `required`):
torch inputs from previous forward call.
grads_dy ( :obj:`torch.Tensor`, `required`):
torch grads of forward output.
"""
# -- normalized grads --
grads_dy = grads_dy/(grads_dy.sum() + 0.00001)
with mutex:
outputs_y = gp_server.encode_forward( inputs_x.to(gp_server.device) )
with torch.autograd.set_detect_anomaly(True):
torch.autograd.backward (
tensors = [ outputs_y ],
grad_tensors = [ grads_dy.to(gp_server.device) ],
retain_graph=True
)
logger.info('Backwards axon gradient applied')
gp_server.backward_gradients += inputs_x.size(0)
def priority(pubkey:str, request_type:bittensor.proto.RequestType, inputs_x) -> float:
r"""Calculates the priority on requests based on stake and size of input
Args:
pubkey ( str, `required`):
The public key of the caller.
inputs_x ( :obj:`torch.Tensor`, `required`):
torch inputs to be forward processed.
request_type ( bittensor.proto.RequestType, `required`):
the request type ('FORWARD' or 'BACKWARD').
"""
try:
uid = metagraph.hotkeys.index(pubkey)
priority = metagraph.S[uid].item()/ sys.getsizeof(inputs_x)
except:
# zero priority for those who are not registered.
priority = 0
return priority
def blacklist(pubkey:str, request_type:bittensor.proto.RequestType) -> bool:
r"""Axon security blacklisting, used to blacklist message from low stake members
Args:
pubkey ( str, `required`):
The public key of the caller.
request_type ( bittensor.proto.RequestType, `required`):
the request type ('FORWARD' or 'BACKWARD').
"""
def registration_check():
# If we allow non-registered requests return False = not blacklisted.
is_registered = pubkey in metagraph.hotkeys
if not is_registered:
if config.neuron.blacklist_allow_non_registered:
return False
raise Exception('Registration blacklist')
# Check for stake
def stake_check() -> bool:
# Check stake.
uid = metagraph.hotkeys.index(pubkey)
if request_type == bittensor.proto.RequestType.FORWARD:
if metagraph.S[uid].item() < config.neuron.blacklist.stake.forward:
raise Exception('Stake blacklist')
return False
elif request_type == bittensor.proto.RequestType.BACKWARD:
if metagraph.S[uid].item() < config.neuron.blacklist.stake.backward:
raise Exception('Stake blacklist')
return False
def validator_check():
uid = metagraph.hotkeys.index(pubkey)
if (metagraph.W[uid] >0).sum() >= n_topk_peer_weights:
return False
raise Exception('Validator blacklist')
# Check for time
def time_check():
current_time = datetime.now()
if pubkey in timecheck.keys():
prev_time = timecheck[pubkey]
if current_time - prev_time >= timedelta(seconds=config.neuron.blacklist.time):
timecheck[pubkey] = current_time
return False
else:
timecheck[pubkey] = current_time
raise Exception('Time blacklist')
else:
timecheck[pubkey] = current_time
return False
# Blacklist checks
try:
registration_check()
stake_check()
time_check()
validator_check()
return False
#blacklisted
except Exception as e:
return True
if axon == None:
# Create our axon server
axon = bittensor.axon (
config = config,
wallet = wallet,
forward_text = forward_text,
backward_text = backward_text,
blacklist = blacklist,
priority = priority
)
# Training Data
dataset = bittensor.dataset(config=config)
# load our old model
if not config.neuron.restart :
gp_server.load(config.neuron.full_path)
if config.wandb.api_key != 'default':
# --- Init Wandb.
bittensor.wandb(
config = config,
cold_pubkey = wallet.coldkeypub.ss58_address,
hot_pubkey = wallet.hotkey.ss58_address,
root_dir = config.neuron.full_path
)
nn = subtensor.neuron_for_pubkey(wallet.hotkey.ss58_address)
# --- last sync block
last_sync_block = subtensor.get_current_block()
last_set_block = last_sync_block
# -- Main Training loop --
try:
# -- download files from the mountain
data = next(dataset)
# --- creating our chain weights
# --- query the chain for the most current number of peers on the network
chain_weights = torch.zeros(subtensor.n)
uid = nn.uid
chain_weights[uid] = 1
# -- serve axon to the network.
axon.start().serve(subtensor = subtensor)
while True:
# --- Check registration and optionally re-register
nn = subtensor.neuron_for_pubkey(wallet.hotkey.ss58_address)
if not wallet.is_registered( subtensor = subtensor ):
wallet.register( subtensor = subtensor )
axon.serve( subtensor = subtensor ) # Re-serve the erased axon data.
nn = subtensor.neuron_for_pubkey(wallet.hotkey.ss58_address)
# --- Run
current_block = subtensor.get_current_block()
end_block = current_block + config.neuron.blocks_per_epoch
interation = 0
# --- Training step.
while end_block >= current_block:
if current_block != subtensor.get_current_block():
loss, _ = gp_server( next( dataset ).to(gp_server.device) )
if interation > 0 :
losses += loss
else:
losses = loss
interation += 1
current_block = subtensor.get_current_block()
#Custom learning rate
if gp_server.backward_gradients > 0:
optimizer.param_groups[0]['lr'] = 1/(gp_server.backward_gradients)
else:
optimizer.param_groups[0]['lr'] = 0.1
# --- Update parameters
if interation != 0 or gp_server.backward_gradients != 0:
with mutex:
logger.info('Backpropagation Started')
if interation != 0:
losses.backward()
clip_grad_norm_(gp_server.parameters(), 1.0)
optimizer.step()
optimizer.zero_grad()
logger.info('Backpropagation Successful: Model updated')
nn = subtensor.neuron_for_pubkey(wallet.hotkey.ss58_address)
gp_server.backward_gradients = 0
# --- logging data
wandb_data = {
'block': end_block,
'loss': losses.cpu().item()/interation,
'stake': nn.stake,
'rank': nn.rank,
'incentive': nn.incentive,
'trust': nn.trust,
'consensus': nn.consensus,
'dividends': nn.dividends,
'emission': nn.emission,
}
bittensor.__console__.print('[green]Current Status:[/green]', wandb_data)
# Add additional wandb data for axon, metagraph etc.
if config.wandb.api_key != 'default':
if uid in metagraph.W:
df = pandas.concat( [
bittensor.utils.indexed_values_to_dataframe( prefix = 'w_i_{}'.format(nn.uid), index = metagraph.uids, values = metagraph.W[:, uid] ),
bittensor.utils.indexed_values_to_dataframe( prefix = 's_i'.format(nn.uid), index = metagraph.uids, values = metagraph.S ),
axon.to_dataframe( metagraph = metagraph ),
], axis = 1)
df['uid'] = df.index
stats_data_table = wandb.Table( dataframe = df )
wandb_info_axon = axon.to_wandb()
wandb.log( { **wandb_data, **wandb_info_axon }, step = current_block )
wandb.log( { 'stats': stats_data_table }, step = current_block )
wandb.log( { 'axon_query_times': wandb.plot.scatter( stats_data_table, "uid", "axon_query_time", title="Axon Query time by UID") } )
wandb.log( { 'in_weights': wandb.plot.scatter( stats_data_table, "uid", 'w_i_{}'.format(nn.uid), title="Inward weights by UID") } )
wandb.log( { 'stake': wandb.plot.scatter( stats_data_table, "uid", 's_i', title="Stake by UID") } )
# Save the model
gp_server.save(config.neuron.full_path)
if current_block - last_set_block > config.neuron.blocks_per_set_weights:
# --- Setting weights
try:
last_set_block = current_block
# Set self weights to maintain activity.
did_set = subtensor.set_weights(
uids=torch.arange(0,subtensor.n),
weights = chain_weights,
wait_for_inclusion = False,
wallet = wallet,
)
if did_set:
logger.success('Successfully set weights on the chain')
else:
logger.error('Failed to set weights on chain. (Timeout)')
except Exception as e:
logger.error('Failure setting weights on chain with error: {}', e)
if current_block - last_sync_block > config.neuron.metagraph_sync:
metagraph.sync()
last_sync_block = current_block
except KeyboardInterrupt:
# --- User ended session ----
axon.stop()
dataset.close()
except Exception as e:
# --- Unknown error ----
logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())
| [
"torch.zeros",
"torch.autograd.set_detect_anomaly",
"torch.arange"
] | 1.11 | opentensor/BitTensor | 59de2d0fe48f3bd02ba5bff6159e6625bd6cb945 |
1.11 | import binascii
import multiprocessing
import ctypes
import struct
import hashlib
from Crypto.Hash import keccak
import math
import bittensor
import random
import rich
import time
import torch
import numbers
import pandas
import requests
from substrateinterface.utils import ss58
from substrateinterface import Keypair, KeypairType
from typing import Any, Tuple, List, Union, Optional
def indexed_values_to_dataframe (
prefix: Union[str, int],
index: Union[list, torch.LongTensor],
values: Union[list, torch.Tensor],
filter_zeros: bool = False
) -> 'pandas.DataFrame':
# Type checking.
if not isinstance(prefix, str) and not isinstance(prefix, numbers.Number):
raise ValueError('Passed prefix must have type str or Number')
if isinstance(prefix, numbers.Number):
prefix = str(prefix)
if not isinstance(index, list) and not isinstance(index, torch.Tensor):
raise ValueError('Passed uids must have type list or torch.Tensor')
if not isinstance(values, list) and not isinstance(values, torch.Tensor):
raise ValueError('Passed values must have type list or torch.Tensor')
if not isinstance(index, list):
index = index.tolist()
if not isinstance(values, list):
values = values.tolist()
index = [ idx_i for idx_i in index if idx_i < len(values) and idx_i >= 0 ]
dataframe = pandas.DataFrame(columns=[prefix], index = index )
for idx_i in index:
value_i = values[ idx_i ]
if value_i > 0 or not filter_zeros:
dataframe.loc[idx_i] = pandas.Series( { str(prefix): value_i } )
return dataframe
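# Usage sketch (illustrative only; the uids and values below are made up):
#   df = indexed_values_to_dataframe(prefix='s_i', index=torch.tensor([0, 1, 2]), values=torch.tensor([0.0, 0.5, 1.0]), filter_zeros=True)
# Builds a single-column DataFrame named 's_i' indexed by uid; with filter_zeros=True, rows whose value is 0 are left unfilled.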
def unbiased_topk( values, k, dim=0, sorted = True, largest = True):
r""" Selects topk as in torch.topk but does not bias lower indices when values are equal.
Args:
values: (torch.Tensor)
Values to index into.
k: (int):
Number to take.
Return:
topk: (torch.Tensor):
topk k values.
indices: (torch.LongTensor)
indices of the topk values.
"""
permutation = torch.randperm(values.shape[ dim ])
permuted_values = values[ permutation ]
topk, indices = torch.topk( permuted_values, k, dim = dim, sorted=sorted, largest=largest )
return topk, permutation[ indices ]
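# Usage sketch (illustrative only): with tied values, plain torch.topk always returns the lowest
# indices first, while the random permutation above breaks ties uniformly at random.
#   values = torch.tensor([1., 1., 1., 1.])
#   topk, idx = unbiased_topk(values, k=2)  # idx is a random pair of positions, not necessarily (0, 1)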
def hex_bytes_to_u8_list( hex_bytes: bytes ):
hex_chunks = [int(hex_bytes[i:i+2], 16) for i in range(0, len(hex_bytes), 2)]
return hex_chunks
def u8_list_to_hex( values: list ):
total = 0
for val in reversed(values):
total = (total << 8) + val
return total
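# Note (illustrative only): despite its name, u8_list_to_hex returns an int. It reassembles a
# little-endian byte list into a single integer, e.g. u8_list_to_hex([0x34, 0x12]) == 0x1234.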
def create_seal_hash( block_hash:bytes, nonce:int ) -> bytes:
nonce_bytes = binascii.hexlify(nonce.to_bytes(8, 'little'))
block_bytes = block_hash.encode('utf-8')[2:]
pre_seal = nonce_bytes + block_bytes
seal = hashlib.sha256( bytearray(hex_bytes_to_u8_list(pre_seal)) ).digest()
return seal
def seal_meets_difficulty( seal:bytes, difficulty:int ):
seal_number = int.from_bytes(seal, "big")
product = seal_number * difficulty
limit = int(math.pow(2,256))- 1
if product > limit:
return False
else:
return True
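# Worked example (illustrative only): a seal passes difficulty d exactly when
# int.from_bytes(seal, "big") * d <= 2**256 - 1, i.e. roughly when the seal is below 2**256 / d.
#   seal_meets_difficulty((0).to_bytes(32, "big"), 10**6)    # True: 0 * d is within the limit
#   seal_meets_difficulty((2**255).to_bytes(32, "big"), 4)   # False: the product exceeds 2**256 - 1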
def solve_for_difficulty( block_hash, difficulty ):
meets = False
nonce = -1
while not meets:
nonce += 1
seal = create_seal_hash( block_hash, nonce )
meets = seal_meets_difficulty( seal, difficulty )
if nonce > 1:
break
return nonce, seal
def solve_for_difficulty_fast( subtensor, wallet, num_processes: int = None, update_interval: int = 500000 ) -> Tuple[int, int, Any, int, Any]:
"""
Solves the POW for registration using multiprocessing.
Args:
subtensor
Subtensor to connect to for block information and to submit.
wallet:
Wallet to use for registration.
num_processes: int
Number of processes to use.
update_interval: int
Number of nonces to solve before updating block information.
Note:
- We should modify the number of processes based on user input.
- We can also modify the update interval to do smaller blocks of work,
while still updating the block information after a different number of nonces,
to increase the transparency of the process while still keeping the speed.
"""
    if num_processes is None:
num_processes = multiprocessing.cpu_count()
block_number = subtensor.get_current_block()
difficulty = subtensor.difficulty
block_hash = subtensor.substrate.get_block_hash( block_number )
    while block_hash is None:
block_hash = subtensor.substrate.get_block_hash( block_number )
block_bytes = block_hash.encode('utf-8')[2:]
limit = int(math.pow(2,256)) - 1
nonce_limit = int(math.pow(2,64)) - 1
nonce = random.randint( 0, nonce_limit )
start_time = time.time()
console = bittensor.__console__
status = console.status("Solving")
#found_solution = multiprocessing.Value('q', -1, lock=False) # int
found_solution = multiprocessing.Array('Q', [0, 0, 0], lock=True) # [valid, nonce_high, nonce_low]
best_raw = struct.pack("d", float('inf'))
best = multiprocessing.Array(ctypes.c_char, best_raw, lock=True) # byte array to get around int size of ctypes
best_seal = multiprocessing.Array('h', 32, lock=True) # short array should hold bytes (0, 256)
with multiprocessing.Pool(processes=num_processes, initializer=initProcess_, initargs=(solve_, found_solution, best, best_seal)) as pool:
status.start()
while found_solution[0] == 0 and not wallet.is_registered(subtensor):
iterable = [( nonce_start,
nonce_start + update_interval ,
block_bytes,
difficulty,
block_hash,
block_number,
limit) for nonce_start in list(range(nonce, nonce + update_interval*num_processes, update_interval))]
result = pool.starmap(solve_, iterable=iterable)
old_nonce = nonce
nonce += update_interval*num_processes
nonce = nonce % nonce_limit
itrs_per_sec = update_interval*num_processes / (time.time() - start_time)
start_time = time.time()
difficulty = subtensor.difficulty
block_number = subtensor.get_current_block()
block_hash = subtensor.substrate.get_block_hash( block_number)
            while block_hash is None:
block_hash = subtensor.substrate.get_block_hash( block_number)
block_bytes = block_hash.encode('utf-8')[2:]
with best_seal.get_lock():
message = f"""Solving
time spent: {time.time() - start_time}
Nonce: [bold white]{nonce}[/bold white]
Difficulty: [bold white]{difficulty}[/bold white]
Iters: [bold white]{int(itrs_per_sec)}/s[/bold white]
Block: [bold white]{block_number}[/bold white]
Block_hash: [bold white]{block_hash.encode('utf-8')}[/bold white]
Best: [bold white]{binascii.hexlify(bytes(best_seal) or bytes(0))}[/bold white]"""
status.update(message.replace(" ", ""))
# exited while, found_solution contains the nonce or wallet is registered
if found_solution[0] == 0: # didn't find solution
status.stop()
return None, None, None, None, None
found_unpacked: int = found_solution[1] << 32 | found_solution[2]
nonce, block_number, block_hash, difficulty, seal = result[ math.floor( (found_unpacked-old_nonce) / update_interval) ]
status.stop()
return nonce, block_number, block_hash, difficulty, seal
def initProcess_(f, found_solution, best, best_seal):
f.found = found_solution
f.best = best
f.best_seal = best_seal
def solve_(nonce_start, nonce_end, block_bytes, difficulty, block_hash, block_number, limit):
best_local = float('inf')
best_seal_local = [0]*32
start = time.time()
for nonce in range(nonce_start, nonce_end):
# Create seal.
nonce_bytes = binascii.hexlify(nonce.to_bytes(8, 'little'))
pre_seal = nonce_bytes + block_bytes
seal_sh256 = hashlib.sha256( bytearray(hex_bytes_to_u8_list(pre_seal)) ).digest()
kec = keccak.new(digest_bits=256)
seal = kec.update( seal_sh256 ).digest()
seal_number = int.from_bytes(seal, "big")
product = seal_number * difficulty
if product < limit:
with solve_.found.get_lock():
                solve_.found[0] = 1
solve_.found[1] = nonce >> 32
solve_.found[2] = nonce & 0xFFFFFFFF # low 32 bits
return (nonce, block_number, block_hash, difficulty, seal)
if (product - limit) < best_local:
best_local = product - limit
best_seal_local = seal
with solve_.best.get_lock():
best_value_as_d = struct.unpack('d', solve_.best.raw)[0]
if best_local < best_value_as_d:
with solve_.best_seal.get_lock():
solve_.best.raw = struct.pack('d', best_local)
for i in range(32):
solve_.best_seal[i] = best_seal_local[i]
return None
def create_pow( subtensor, wallet ):
nonce, block_number, block_hash, difficulty, seal = solve_for_difficulty_fast( subtensor, wallet )
return None if nonce is None else {
'nonce': nonce,
'difficulty': difficulty,
'block_number': block_number,
'block_hash': block_hash,
'work': binascii.hexlify(seal)
}
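# Usage sketch (illustrative only; assumes an already-connected subtensor and an unlocked wallet):
#   pow_result = create_pow(subtensor, wallet)
#   if pow_result is not None:
#       print(pow_result['nonce'], pow_result['difficulty'], pow_result['work'])
# create_pow returns None when no valid nonce is found, e.g. when the wallet becomes registered mid-search.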
def version_checking():
response = requests.get(bittensor.__pipaddress__)
latest_version = response.json()['info']['version']
version_split = latest_version.split(".")
latest_version_as_int = (100 * int(version_split[0])) + (10 * int(version_split[1])) + (1 * int(version_split[2]))
if latest_version_as_int > bittensor.__version_as_int__:
print('\u001b[31m Current Bittensor Version: {}, Latest Bittensor Version {} \n Please update to the latest version'.format(bittensor.__version__,latest_version))
def is_valid_ss58_address( address: str ) -> bool:
"""
Checks if the given address is a valid ss58 address.
Args:
address(str): The address to check.
Returns:
True if the address is a valid ss58 address for Bittensor, False otherwise.
"""
try:
return ss58.is_valid_ss58_address( address, valid_ss58_format=bittensor.__ss58_format__ )
except (IndexError):
return False
def is_valid_ed25519_pubkey( public_key: Union[str, bytes] ) -> bool:
"""
Checks if the given public_key is a valid ed25519 key.
Args:
public_key(Union[str, bytes]): The public_key to check.
Returns:
True if the public_key is a valid ed25519 key, False otherwise.
"""
try:
if isinstance( public_key, str ):
if len(public_key) != 64 and len(public_key) != 66:
raise ValueError( "a public_key should be 64 or 66 characters" )
elif isinstance( public_key, bytes ):
if len(public_key) != 32:
raise ValueError( "a public_key should be 32 bytes" )
else:
raise ValueError( "public_key must be a string or bytes" )
keypair = Keypair(
public_key=public_key,
ss58_format=bittensor.__ss58_format__
)
ss58_addr = keypair.ss58_address
return ss58_addr is not None
except (ValueError, IndexError):
return False
def is_valid_destination_address( address: Union[str, bytes] ) -> bool:
"""
Checks if the given address is a valid destination address.
Args:
address(Union[str, bytes]): The address to check.
Returns:
True if the address is a valid destination address, False otherwise.
"""
if isinstance( address, str ):
# Check if ed25519
if address.startswith('0x'):
if not is_valid_ed25519_pubkey( address ):
bittensor.__console__.print(":cross_mark: [red]Invalid Destination Public Key[/red]: {}".format( address ))
return False
# Assume ss58 address
else:
if not is_valid_ss58_address( address ):
bittensor.__console__.print(":cross_mark: [red]Invalid Destination Address[/red]: {}".format( address ))
return False
elif isinstance( address, bytes ):
# Check if ed25519
if not is_valid_ed25519_pubkey( address ):
bittensor.__console__.print(":cross_mark: [red]Invalid Destination Public Key[/red]: {}".format( address ))
return False
else:
bittensor.__console__.print(":cross_mark: [red]Invalid Destination[/red]: {}".format( address ))
return False
return True
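# Usage sketch (illustrative only; the addresses below are made-up placeholders, not real keys):
#   is_valid_destination_address('0x' + 'ab' * 32)  # dispatched to the ed25519 public-key check
#   is_valid_destination_address('5F...')            # dispatched to the ss58 address check
#   is_valid_destination_address(1234)               # False: neither str nor bytes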
| [
"torch.randperm",
"torch.topk"
] | 1.11 | opentensor/BitTensor | 59de2d0fe48f3bd02ba5bff6159e6625bd6cb945 |
1.1 | ##################################################################################
# Fast-SCNN: Fast Semantic Segmentation Network
# Paper-Link: https://arxiv.org/pdf/1902.04502.pdf
##################################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
__all__ = ["FastSCNNX5"]
class _ConvBNReLU(nn.Module):
"""Conv-BN-ReLU"""
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, **kwargs):
super(_ConvBNReLU, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _DSConv(nn.Module):
"""Depthwise Separable Convolutions"""
def __init__(self, dw_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(_DSConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, dw_channels, kernel_size, stride, padding, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
nn.ReLU(True),
nn.Conv2d(dw_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _DWConv(nn.Module):
"""Depthwise Convolutions"""
def __init__(self, dw_channels, out_channels, kernel_size=3, stride=1, padding=1):
super(_DWConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, out_channels, kernel_size, stride, padding, groups=dw_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class LinearBottleneck(nn.Module):
"""LinearBottleneck used in MobileNetV2"""
def __init__(self, in_channels, out_channels, t=6, kernel_size=3, stride=1, padding=1):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and in_channels == out_channels
self.block = nn.Sequential(
# pw
_ConvBNReLU(in_channels, in_channels * t, 1),
# dw
_DWConv(in_channels * t, in_channels * t, kernel_size, stride, padding),
# pw-linear
nn.Conv2d(in_channels * t, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
out = self.block(x)
if self.use_shortcut:
out = x + out
return out
class PyramidPooling(nn.Module):
"""Pyramid pooling module"""
def __init__(self, in_channels, out_channels, **kwargs):
super(PyramidPooling, self).__init__()
inter_channels = int(in_channels / 4)
self.conv1 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv2 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv3 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.conv4 = _ConvBNReLU(in_channels, inter_channels, 1, **kwargs)
self.out = _ConvBNReLU(in_channels * 2, out_channels, 1)
def pool(self, x, size):
avgpool = nn.AdaptiveAvgPool2d(size)
return avgpool(x)
def upsample(self, x, size):
return F.interpolate(x, size, mode='bilinear', align_corners=True)
def forward(self, x):
size = x.size()[2:]
feat1 = self.upsample(self.conv1(self.pool(x, 1)), size)
feat2 = self.upsample(self.conv2(self.pool(x, 2)), size)
feat3 = self.upsample(self.conv3(self.pool(x, 3)), size)
feat4 = self.upsample(self.conv4(self.pool(x, 6)), size)
x = torch.cat([x, feat1, feat2, feat3, feat4], dim=1)
x = self.out(x)
return x
class LearningToDownsample(nn.Module):
"""Learning to downsample module"""
def __init__(self, dw_channels1=32, dw_channels2=48, out_channels=64, **kwargs):
super(LearningToDownsample, self).__init__()
self.conv = _ConvBNReLU(3, dw_channels1, 2, 2, 0)
self.conv1 = _ConvBNReLU(dw_channels1, dw_channels1, 3, 1, 1)
self.dsconv1 = _DSConv(dw_channels1, dw_channels2, 3, 2, 1)
self.dsconv2 = _DSConv(dw_channels2, out_channels, 3, 2, 1)
def forward(self, x):
x = self.conv(x)
x = self.conv1(x)
x = self.dsconv1(x)
x = self.dsconv2(x)
return x
class GlobalFeatureExtractor(nn.Module):
"""Global feature extractor module"""
def __init__(self, in_channels=64, block_channels=(64, 96, 128),
out_channels=128, t=6, num_blocks=(3, 3, 3), **kwargs):
super(GlobalFeatureExtractor, self).__init__()
self.bottleneck1 = self._make_layer(LinearBottleneck, in_channels, block_channels[0], num_blocks[0], t, 3, 2, 1)
self.bottleneck2 = self._make_layer(LinearBottleneck, block_channels[0], block_channels[1], num_blocks[1], t, 3,
2, 1)
self.bottleneck3 = self._make_layer(LinearBottleneck, block_channels[1], block_channels[2], num_blocks[2], t, 3,
1, 1)
self.ppm = PyramidPooling(block_channels[2], out_channels)
def _make_layer(self, block, inplanes, planes, blocks, t=6, kernel_size=3, stride=1, padding=1):
layers = []
layers.append(block(inplanes, planes, t, kernel_size, stride, padding))
for i in range(1, blocks):
layers.append(block(planes, planes, t, 3, 1, 1))
return nn.Sequential(*layers)
def forward(self, x):
x = self.bottleneck1(x)
x = self.bottleneck2(x)
x = self.bottleneck3(x)
x = self.ppm(x)
return x
class FeatureFusionModule(nn.Module):
"""Feature fusion module"""
def __init__(self, highter_in_channels, lower_in_channels, out_channels, scale_factor=4, **kwargs):
super(FeatureFusionModule, self).__init__()
self.scale_factor = scale_factor
self.dwconv = _DWConv(lower_in_channels, out_channels)
self.conv_lower_res = nn.Sequential(
nn.Conv2d(out_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
self.conv_higher_res = nn.Sequential(
nn.Conv2d(highter_in_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
self.relu = nn.ReLU(True)
def forward(self, higher_res_feature, lower_res_feature):
_, _, h, w = higher_res_feature.size()
lower_res_feature = F.interpolate(lower_res_feature, size=(h, w), mode='bilinear', align_corners=True)
lower_res_feature = self.dwconv(lower_res_feature)
lower_res_feature = self.conv_lower_res(lower_res_feature)
higher_res_feature = self.conv_higher_res(higher_res_feature)
out = higher_res_feature + lower_res_feature
return self.relu(out)
class Classifer(nn.Module):
"""Classifer"""
def __init__(self, dw_channels, num_classes):
super(Classifer, self).__init__()
self.dsconv1 = _DSConv(dw_channels, dw_channels)
self.dsconv2 = _DSConv(dw_channels, dw_channels)
self.conv = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(dw_channels, num_classes, 1)
)
def forward(self, x):
x = self.dsconv1(x)
x = self.dsconv2(x)
x = self.conv(x)
return x
# This network is basically the same as ContextNet; the difference is that the leading shallow net becomes a shared stem, and a PPM module is added to the deep net.
class FastSCNNX5(nn.Module):
def __init__(self, classes, aux=False, **kwargs):
super(FastSCNNX5, self).__init__()
self.aux = aux
        self.learning_to_downsample = LearningToDownsample(32, 48, 64) # similar to ContextNet's Shallow_net
        self.global_feature_extractor = GlobalFeatureExtractor(64, [64, 96, 128], 128, 6,
                                                               [3, 3, 3]) # similar to ContextNet's Deep_net, plus a PPM
        self.feature_fusion = FeatureFusionModule(64, 128, 128) # same as ContextNet
        self.classifier = Classifer(128, classes) # same as ContextNet
if self.aux:
self.auxlayer = nn.Sequential(
nn.Conv2d(64, 32, 3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Dropout(0.1),
nn.Conv2d(32, classes, 1)
)
def forward(self, x):
size = x.size()[2:]
higher_res_features = self.learning_to_downsample(x)
x = self.global_feature_extractor(higher_res_features)
x = self.feature_fusion(higher_res_features, x)
x = self.classifier(x)
outputs = []
x = F.interpolate(x, size, mode='bilinear', align_corners=True)
outputs.append(x)
if self.aux:
auxout = self.auxlayer(higher_res_features)
auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
outputs.append(auxout)
return x
# return tuple(outputs)
"""print layers and params of network"""
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = FastSCNNX5(classes=19).to(device)
summary(model, (3, 512, 1024))
from fvcore.nn.flop_count import flop_count # https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile # https://github.com/Lyken17/pytorch-OpCounter
x = torch.randn(2, 3, 512, 1024).to(device)
from fvcore.nn.jit_handles import batchnorm_flop_jit
from fvcore.nn.jit_handles import generic_activation_jit
supported_ops = {
"aten::batch_norm": batchnorm_flop_jit,
}
flop_dict, _ = flop_count(model, (x,), supported_ops)
flops_count, params_count = get_model_complexity_info(model, (3, 512, 1024),
as_strings=False,
print_per_layer_stat=True)
input = x
macs, params = profile(model, inputs=(input,))
print(flop_dict)
print(flops_count, params_count)
print(macs, params)
'''
/home/ethan/anaconda3/envs/py36_cuda101/bin/python /home/ethan/codes/Efficient-Segmentation-Networks/model/FastSCNNX5.py
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 32, 256, 512] 384
BatchNorm2d-2 [-1, 32, 256, 512] 64
ReLU-3 [-1, 32, 256, 512] 0
_ConvBNReLU-4 [-1, 32, 256, 512] 0
Conv2d-5 [-1, 32, 256, 512] 9,216
BatchNorm2d-6 [-1, 32, 256, 512] 64
ReLU-7 [-1, 32, 256, 512] 0
_ConvBNReLU-8 [-1, 32, 256, 512] 0
Conv2d-9 [-1, 32, 128, 256] 288
BatchNorm2d-10 [-1, 32, 128, 256] 64
ReLU-11 [-1, 32, 128, 256] 0
Conv2d-12 [-1, 48, 128, 256] 1,536
BatchNorm2d-13 [-1, 48, 128, 256] 96
ReLU-14 [-1, 48, 128, 256] 0
_DSConv-15 [-1, 48, 128, 256] 0
Conv2d-16 [-1, 48, 64, 128] 432
BatchNorm2d-17 [-1, 48, 64, 128] 96
ReLU-18 [-1, 48, 64, 128] 0
Conv2d-19 [-1, 64, 64, 128] 3,072
BatchNorm2d-20 [-1, 64, 64, 128] 128
ReLU-21 [-1, 64, 64, 128] 0
_DSConv-22 [-1, 64, 64, 128] 0
LearningToDownsample-23 [-1, 64, 64, 128] 0
Conv2d-24 [-1, 384, 64, 128] 24,576
BatchNorm2d-25 [-1, 384, 64, 128] 768
ReLU-26 [-1, 384, 64, 128] 0
_ConvBNReLU-27 [-1, 384, 64, 128] 0
Conv2d-28 [-1, 384, 32, 64] 3,456
BatchNorm2d-29 [-1, 384, 32, 64] 768
ReLU-30 [-1, 384, 32, 64] 0
_DWConv-31 [-1, 384, 32, 64] 0
Conv2d-32 [-1, 64, 32, 64] 24,576
BatchNorm2d-33 [-1, 64, 32, 64] 128
LinearBottleneck-34 [-1, 64, 32, 64] 0
Conv2d-35 [-1, 384, 32, 64] 24,576
BatchNorm2d-36 [-1, 384, 32, 64] 768
ReLU-37 [-1, 384, 32, 64] 0
_ConvBNReLU-38 [-1, 384, 32, 64] 0
Conv2d-39 [-1, 384, 32, 64] 3,456
BatchNorm2d-40 [-1, 384, 32, 64] 768
ReLU-41 [-1, 384, 32, 64] 0
_DWConv-42 [-1, 384, 32, 64] 0
Conv2d-43 [-1, 64, 32, 64] 24,576
BatchNorm2d-44 [-1, 64, 32, 64] 128
LinearBottleneck-45 [-1, 64, 32, 64] 0
Conv2d-46 [-1, 384, 32, 64] 24,576
BatchNorm2d-47 [-1, 384, 32, 64] 768
ReLU-48 [-1, 384, 32, 64] 0
_ConvBNReLU-49 [-1, 384, 32, 64] 0
Conv2d-50 [-1, 384, 32, 64] 3,456
BatchNorm2d-51 [-1, 384, 32, 64] 768
ReLU-52 [-1, 384, 32, 64] 0
_DWConv-53 [-1, 384, 32, 64] 0
Conv2d-54 [-1, 64, 32, 64] 24,576
BatchNorm2d-55 [-1, 64, 32, 64] 128
LinearBottleneck-56 [-1, 64, 32, 64] 0
Conv2d-57 [-1, 384, 32, 64] 24,576
BatchNorm2d-58 [-1, 384, 32, 64] 768
ReLU-59 [-1, 384, 32, 64] 0
_ConvBNReLU-60 [-1, 384, 32, 64] 0
Conv2d-61 [-1, 384, 16, 32] 3,456
BatchNorm2d-62 [-1, 384, 16, 32] 768
ReLU-63 [-1, 384, 16, 32] 0
_DWConv-64 [-1, 384, 16, 32] 0
Conv2d-65 [-1, 96, 16, 32] 36,864
BatchNorm2d-66 [-1, 96, 16, 32] 192
LinearBottleneck-67 [-1, 96, 16, 32] 0
Conv2d-68 [-1, 576, 16, 32] 55,296
BatchNorm2d-69 [-1, 576, 16, 32] 1,152
ReLU-70 [-1, 576, 16, 32] 0
_ConvBNReLU-71 [-1, 576, 16, 32] 0
Conv2d-72 [-1, 576, 16, 32] 5,184
BatchNorm2d-73 [-1, 576, 16, 32] 1,152
ReLU-74 [-1, 576, 16, 32] 0
_DWConv-75 [-1, 576, 16, 32] 0
Conv2d-76 [-1, 96, 16, 32] 55,296
BatchNorm2d-77 [-1, 96, 16, 32] 192
LinearBottleneck-78 [-1, 96, 16, 32] 0
Conv2d-79 [-1, 576, 16, 32] 55,296
BatchNorm2d-80 [-1, 576, 16, 32] 1,152
ReLU-81 [-1, 576, 16, 32] 0
_ConvBNReLU-82 [-1, 576, 16, 32] 0
Conv2d-83 [-1, 576, 16, 32] 5,184
BatchNorm2d-84 [-1, 576, 16, 32] 1,152
ReLU-85 [-1, 576, 16, 32] 0
_DWConv-86 [-1, 576, 16, 32] 0
Conv2d-87 [-1, 96, 16, 32] 55,296
BatchNorm2d-88 [-1, 96, 16, 32] 192
LinearBottleneck-89 [-1, 96, 16, 32] 0
Conv2d-90 [-1, 576, 16, 32] 55,296
BatchNorm2d-91 [-1, 576, 16, 32] 1,152
ReLU-92 [-1, 576, 16, 32] 0
_ConvBNReLU-93 [-1, 576, 16, 32] 0
Conv2d-94 [-1, 576, 16, 32] 5,184
BatchNorm2d-95 [-1, 576, 16, 32] 1,152
ReLU-96 [-1, 576, 16, 32] 0
_DWConv-97 [-1, 576, 16, 32] 0
Conv2d-98 [-1, 128, 16, 32] 73,728
BatchNorm2d-99 [-1, 128, 16, 32] 256
LinearBottleneck-100 [-1, 128, 16, 32] 0
Conv2d-101 [-1, 768, 16, 32] 98,304
BatchNorm2d-102 [-1, 768, 16, 32] 1,536
ReLU-103 [-1, 768, 16, 32] 0
_ConvBNReLU-104 [-1, 768, 16, 32] 0
Conv2d-105 [-1, 768, 16, 32] 6,912
BatchNorm2d-106 [-1, 768, 16, 32] 1,536
ReLU-107 [-1, 768, 16, 32] 0
_DWConv-108 [-1, 768, 16, 32] 0
Conv2d-109 [-1, 128, 16, 32] 98,304
BatchNorm2d-110 [-1, 128, 16, 32] 256
LinearBottleneck-111 [-1, 128, 16, 32] 0
Conv2d-112 [-1, 768, 16, 32] 98,304
BatchNorm2d-113 [-1, 768, 16, 32] 1,536
ReLU-114 [-1, 768, 16, 32] 0
_ConvBNReLU-115 [-1, 768, 16, 32] 0
Conv2d-116 [-1, 768, 16, 32] 6,912
BatchNorm2d-117 [-1, 768, 16, 32] 1,536
ReLU-118 [-1, 768, 16, 32] 0
_DWConv-119 [-1, 768, 16, 32] 0
Conv2d-120 [-1, 128, 16, 32] 98,304
BatchNorm2d-121 [-1, 128, 16, 32] 256
LinearBottleneck-122 [-1, 128, 16, 32] 0
Conv2d-123 [-1, 32, 1, 1] 4,096
BatchNorm2d-124 [-1, 32, 1, 1] 64
ReLU-125 [-1, 32, 1, 1] 0
_ConvBNReLU-126 [-1, 32, 1, 1] 0
Conv2d-127 [-1, 32, 2, 2] 4,096
BatchNorm2d-128 [-1, 32, 2, 2] 64
ReLU-129 [-1, 32, 2, 2] 0
_ConvBNReLU-130 [-1, 32, 2, 2] 0
Conv2d-131 [-1, 32, 3, 3] 4,096
BatchNorm2d-132 [-1, 32, 3, 3] 64
ReLU-133 [-1, 32, 3, 3] 0
_ConvBNReLU-134 [-1, 32, 3, 3] 0
Conv2d-135 [-1, 32, 6, 6] 4,096
BatchNorm2d-136 [-1, 32, 6, 6] 64
ReLU-137 [-1, 32, 6, 6] 0
_ConvBNReLU-138 [-1, 32, 6, 6] 0
Conv2d-139 [-1, 128, 16, 32] 32,768
BatchNorm2d-140 [-1, 128, 16, 32] 256
ReLU-141 [-1, 128, 16, 32] 0
_ConvBNReLU-142 [-1, 128, 16, 32] 0
PyramidPooling-143 [-1, 128, 16, 32] 0
GlobalFeatureExtractor-144 [-1, 128, 16, 32] 0
Conv2d-145 [-1, 128, 64, 128] 1,152
BatchNorm2d-146 [-1, 128, 64, 128] 256
ReLU-147 [-1, 128, 64, 128] 0
_DWConv-148 [-1, 128, 64, 128] 0
Conv2d-149 [-1, 128, 64, 128] 16,512
BatchNorm2d-150 [-1, 128, 64, 128] 256
Conv2d-151 [-1, 128, 64, 128] 8,320
BatchNorm2d-152 [-1, 128, 64, 128] 256
ReLU-153 [-1, 128, 64, 128] 0
FeatureFusionModule-154 [-1, 128, 64, 128] 0
Conv2d-155 [-1, 128, 64, 128] 1,152
BatchNorm2d-156 [-1, 128, 64, 128] 256
ReLU-157 [-1, 128, 64, 128] 0
Conv2d-158 [-1, 128, 64, 128] 16,384
BatchNorm2d-159 [-1, 128, 64, 128] 256
ReLU-160 [-1, 128, 64, 128] 0
_DSConv-161 [-1, 128, 64, 128] 0
Conv2d-162 [-1, 128, 64, 128] 1,152
BatchNorm2d-163 [-1, 128, 64, 128] 256
ReLU-164 [-1, 128, 64, 128] 0
Conv2d-165 [-1, 128, 64, 128] 16,384
BatchNorm2d-166 [-1, 128, 64, 128] 256
ReLU-167 [-1, 128, 64, 128] 0
_DSConv-168 [-1, 128, 64, 128] 0
Dropout-169 [-1, 128, 64, 128] 0
Conv2d-170 [-1, 19, 64, 128] 2,451
Classifer-171 [-1, 19, 64, 128] 0
================================================================
Total params: 1,146,851
Trainable params: 1,146,851
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 6.00
Forward/backward pass size (MB): 927.30
Params size (MB): 4.37
Estimated Total Size (MB): 937.67
----------------------------------------------------------------
Skipped operation aten::relu_ 35 time(s)
Skipped operation aten::add 7 time(s)
Skipped operation aten::adaptive_avg_pool2d 4 time(s)
Skipped operation aten::upsample_bilinear2d 6 time(s)
Skipped operation aten::dropout 1 time(s)
FastSCNNX5(
2.918 GMac, 100.000% MACs,
(learning_to_downsample): LearningToDownsample(
1.383 GMac, 47.388% MACs,
(conv): _ConvBNReLU(
0.063 GMac, 2.156% MACs,
(conv): Sequential(
0.063 GMac, 2.156% MACs,
(0): Conv2d(0.05 GMac, 1.725% MACs, 3, 32, kernel_size=(2, 2), stride=(2, 2), bias=False)
(1): BatchNorm2d(0.008 GMac, 0.288% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.004 GMac, 0.144% MACs, inplace=True)
)
)
(conv1): _ConvBNReLU(
1.221 GMac, 41.835% MACs,
(conv): Sequential(
1.221 GMac, 41.835% MACs,
(0): Conv2d(1.208 GMac, 41.404% MACs, 32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(0.008 GMac, 0.288% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.004 GMac, 0.144% MACs, inplace=True)
)
)
(dsconv1): _DSConv(
0.068 GMac, 2.318% MACs,
(conv): Sequential(
0.068 GMac, 2.318% MACs,
(0): Conv2d(0.009 GMac, 0.323% MACs, 32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.072% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.036% MACs, inplace=True)
(3): Conv2d(0.05 GMac, 1.725% MACs, 32, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.003 GMac, 0.108% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.002 GMac, 0.054% MACs, inplace=True)
)
)
(dsconv2): _DSConv(
0.031 GMac, 1.078% MACs,
(conv): Sequential(
0.031 GMac, 1.078% MACs,
(0): Conv2d(0.004 GMac, 0.121% MACs, 48, 48, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=48, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.027% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.013% MACs, inplace=True)
(3): Conv2d(0.025 GMac, 0.863% MACs, 48, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.001 GMac, 0.036% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.018% MACs, inplace=True)
)
)
)
(global_feature_extractor): GlobalFeatureExtractor(
0.994 GMac, 34.061% MACs,
(bottleneck1): Sequential(
0.496 GMac, 17.009% MACs,
(0): LinearBottleneck(
0.271 GMac, 9.282% MACs,
(block): Sequential(
0.271 GMac, 9.282% MACs,
(0): _ConvBNReLU(
0.211 GMac, 7.224% MACs,
(conv): Sequential(
0.211 GMac, 7.224% MACs,
(0): Conv2d(0.201 GMac, 6.901% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.006 GMac, 0.216% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.003 GMac, 0.108% MACs, inplace=True)
)
)
(1): _DWConv(
0.009 GMac, 0.323% MACs,
(conv): Sequential(
0.009 GMac, 0.323% MACs,
(0): Conv2d(0.007 GMac, 0.243% MACs, 384, 384, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.054% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.027% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 1.725% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.009% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.113 GMac, 3.864% MACs,
(block): Sequential(
0.113 GMac, 3.864% MACs,
(0): _ConvBNReLU(
0.053 GMac, 1.806% MACs,
(conv): Sequential(
0.053 GMac, 1.806% MACs,
(0): Conv2d(0.05 GMac, 1.725% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.054% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.027% MACs, inplace=True)
)
)
(1): _DWConv(
0.009 GMac, 0.323% MACs,
(conv): Sequential(
0.009 GMac, 0.323% MACs,
(0): Conv2d(0.007 GMac, 0.243% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.054% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.027% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 1.725% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.009% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.113 GMac, 3.864% MACs,
(block): Sequential(
0.113 GMac, 3.864% MACs,
(0): _ConvBNReLU(
0.053 GMac, 1.806% MACs,
(conv): Sequential(
0.053 GMac, 1.806% MACs,
(0): Conv2d(0.05 GMac, 1.725% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.054% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.027% MACs, inplace=True)
)
)
(1): _DWConv(
0.009 GMac, 0.323% MACs,
(conv): Sequential(
0.009 GMac, 0.323% MACs,
(0): Conv2d(0.007 GMac, 0.243% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.054% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.027% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 1.725% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.009% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck2): Sequential(
0.196 GMac, 6.729% MACs,
(0): LinearBottleneck(
0.074 GMac, 2.537% MACs,
(block): Sequential(
0.074 GMac, 2.537% MACs,
(0): _ConvBNReLU(
0.053 GMac, 1.806% MACs,
(conv): Sequential(
0.053 GMac, 1.806% MACs,
(0): Conv2d(0.05 GMac, 1.725% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.002 GMac, 0.054% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.027% MACs, inplace=True)
)
)
(1): _DWConv(
0.002 GMac, 0.081% MACs,
(conv): Sequential(
0.002 GMac, 0.081% MACs,
(0): Conv2d(0.002 GMac, 0.061% MACs, 384, 384, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.013% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.007% MACs, inplace=True)
)
)
(2): Conv2d(0.019 GMac, 0.647% MACs, 384, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.003% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.061 GMac, 2.096% MACs,
(block): Sequential(
0.061 GMac, 2.096% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.001% MACs,
(conv): Sequential(
0.029 GMac, 1.001% MACs,
(0): Conv2d(0.028 GMac, 0.970% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.020% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.010% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.121% MACs,
(conv): Sequential(
0.004 GMac, 0.121% MACs,
(0): Conv2d(0.003 GMac, 0.091% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.020% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.010% MACs, inplace=True)
)
)
(2): Conv2d(0.028 GMac, 0.970% MACs, 576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.003% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.061 GMac, 2.096% MACs,
(block): Sequential(
0.061 GMac, 2.096% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.001% MACs,
(conv): Sequential(
0.029 GMac, 1.001% MACs,
(0): Conv2d(0.028 GMac, 0.970% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.020% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.010% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.121% MACs,
(conv): Sequential(
0.004 GMac, 0.121% MACs,
(0): Conv2d(0.003 GMac, 0.091% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.020% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.010% MACs, inplace=True)
)
)
(2): Conv2d(0.028 GMac, 0.970% MACs, 576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.003% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck3): Sequential(
0.284 GMac, 9.734% MACs,
(0): LinearBottleneck(
0.071 GMac, 2.420% MACs,
(block): Sequential(
0.071 GMac, 2.420% MACs,
(0): _ConvBNReLU(
0.029 GMac, 1.001% MACs,
(conv): Sequential(
0.029 GMac, 1.001% MACs,
(0): Conv2d(0.028 GMac, 0.970% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.020% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.010% MACs, inplace=True)
)
)
(1): _DWConv(
0.004 GMac, 0.121% MACs,
(conv): Sequential(
0.004 GMac, 0.121% MACs,
(0): Conv2d(0.003 GMac, 0.091% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.020% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.010% MACs, inplace=True)
)
)
(2): Conv2d(0.038 GMac, 1.294% MACs, 576, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.107 GMac, 3.657% MACs,
(block): Sequential(
0.107 GMac, 3.657% MACs,
(0): _ConvBNReLU(
0.052 GMac, 1.766% MACs,
(conv): Sequential(
0.052 GMac, 1.766% MACs,
(0): Conv2d(0.05 GMac, 1.725% MACs, 128, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.027% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.013% MACs, inplace=True)
)
)
(1): _DWConv(
0.005 GMac, 0.162% MACs,
(conv): Sequential(
0.005 GMac, 0.162% MACs,
(0): Conv2d(0.004 GMac, 0.121% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.027% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.013% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 1.725% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.107 GMac, 3.657% MACs,
(block): Sequential(
0.107 GMac, 3.657% MACs,
(0): _ConvBNReLU(
0.052 GMac, 1.766% MACs,
(conv): Sequential(
0.052 GMac, 1.766% MACs,
(0): Conv2d(0.05 GMac, 1.725% MACs, 128, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.027% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.013% MACs, inplace=True)
)
)
(1): _DWConv(
0.005 GMac, 0.162% MACs,
(conv): Sequential(
0.005 GMac, 0.162% MACs,
(0): Conv2d(0.004 GMac, 0.121% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.027% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.013% MACs, inplace=True)
)
)
(2): Conv2d(0.05 GMac, 1.725% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(ppm): PyramidPooling(
0.017 GMac, 0.589% MACs,
(conv1): _ConvBNReLU(
0.0 GMac, 0.000% MACs,
(conv): Sequential(
0.0 GMac, 0.000% MACs,
(0): Conv2d(0.0 GMac, 0.000% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv2): _ConvBNReLU(
0.0 GMac, 0.001% MACs,
(conv): Sequential(
0.0 GMac, 0.001% MACs,
(0): Conv2d(0.0 GMac, 0.001% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv3): _ConvBNReLU(
0.0 GMac, 0.001% MACs,
(conv): Sequential(
0.0 GMac, 0.001% MACs,
(0): Conv2d(0.0 GMac, 0.001% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(conv4): _ConvBNReLU(
0.0 GMac, 0.005% MACs,
(conv): Sequential(
0.0 GMac, 0.005% MACs,
(0): Conv2d(0.0 GMac, 0.005% MACs, 128, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(out): _ConvBNReLU(
0.017 GMac, 0.582% MACs,
(conv): Sequential(
0.017 GMac, 0.582% MACs,
(0): Conv2d(0.017 GMac, 0.575% MACs, 256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.004% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
)
)
)
)
(feature_fusion): FeatureFusionModule(
0.221 GMac, 7.584% MACs,
(dwconv): _DWConv(
0.013 GMac, 0.431% MACs,
(conv): Sequential(
0.013 GMac, 0.431% MACs,
(0): Conv2d(0.009 GMac, 0.323% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.072% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.036% MACs, inplace=True)
)
)
(conv_lower_res): Sequential(
0.137 GMac, 4.708% MACs,
(0): Conv2d(0.135 GMac, 4.636% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1))
(1): BatchNorm2d(0.002 GMac, 0.072% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(conv_higher_res): Sequential(
0.07 GMac, 2.408% MACs,
(0): Conv2d(0.068 GMac, 2.336% MACs, 64, 128, kernel_size=(1, 1), stride=(1, 1))
(1): BatchNorm2d(0.002 GMac, 0.072% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(relu): ReLU(0.001 GMac, 0.036% MACs, inplace=True)
)
(classifier): Classifer(
0.32 GMac, 10.967% MACs,
(dsconv1): _DSConv(
0.15 GMac, 5.140% MACs,
(conv): Sequential(
0.15 GMac, 5.140% MACs,
(0): Conv2d(0.009 GMac, 0.323% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.072% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.036% MACs, inplace=True)
(3): Conv2d(0.134 GMac, 4.600% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.002 GMac, 0.072% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.036% MACs, inplace=True)
)
)
(dsconv2): _DSConv(
0.15 GMac, 5.140% MACs,
(conv): Sequential(
0.15 GMac, 5.140% MACs,
(0): Conv2d(0.009 GMac, 0.323% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.002 GMac, 0.072% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.036% MACs, inplace=True)
(3): Conv2d(0.134 GMac, 4.600% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.002 GMac, 0.072% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.036% MACs, inplace=True)
)
)
(conv): Sequential(
0.02 GMac, 0.688% MACs,
(0): Dropout(0.0 GMac, 0.000% MACs, p=0.1, inplace=False)
(1): Conv2d(0.02 GMac, 0.688% MACs, 128, 19, kernel_size=(1, 1), stride=(1, 1))
)
)
)
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[INFO] Register zero_ops() for <class 'torch.nn.modules.activation.ReLU'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.Sequential'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._ConvBNReLU'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._DSConv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.LearningToDownsample'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__._DWConv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.LinearBottleneck'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.PyramidPooling'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.GlobalFeatureExtractor'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.FeatureFusionModule'>. Treat it as zero Macs and zero Params.
[INFO] Register zero_ops() for <class 'torch.nn.modules.dropout.Dropout'>.
[WARN] Cannot find rule for <class '__main__.Classifer'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.FastSCNNX5'>. Treat it as zero Macs and zero Params.
defaultdict(<class 'float'>, {'conv': 5.645221888, 'batchnorm': 0.251802112})
2917503680.0 1146851
5775628544.0 1146851.0
Process finished with exit code 0
'''
| [
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.AdaptiveAvgPool2d",
"torch.randn"
] | 1.1.0 | Ethan-ye/Efficient-Segmentation-Networks | 27272e43126a507a6d93b21cd2372f5432f61237 |
1.1 | ##################################################################################
#ContextNetX10: Exploring Context and Detail for Semantic Segmentation in Real-time
#Paper-Link: https://arxiv.org/abs/1805.04554
##################################################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from fvcore.nn.flop_count import flop_count # https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile # https://github.com/Lyken17/pytorch-OpCounter
__all__ = ["ContextNetX10"]
class Custom_Conv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=0, **kwargs):
super(Custom_Conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class DepthSepConv(nn.Module):
'''
    We omit the nonlinearity between depth-wise and point-wise convolutions in our full resolution branch
'''
def __init__(self, dw_channels, out_channels, kernel_size=3, stride=1, padding=1, **kwargs):
super(DepthSepConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, dw_channels, kernel_size, stride, padding, groups=dw_channels, bias=False),
nn.BatchNorm2d(dw_channels),
            nn.ReLU(True), # differs from the original paper here
nn.Conv2d(dw_channels, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class DepthConv(nn.Module):
def __init__(self, dw_channels, out_channels, kernel_size=3, stride=1, padding=1, **kwargs):
super(DepthConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(dw_channels, out_channels, kernel_size, stride, padding, groups=dw_channels, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class LinearBottleneck(nn.Module):
def __init__(self, in_channels, out_channels, t=6, stride=2, **kwargs):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and in_channels == out_channels
self.block = nn.Sequential(
Custom_Conv(in_channels, in_channels * t, 1,1,0),
DepthConv(in_channels * t, in_channels * t, 3,stride,1),
nn.Conv2d(in_channels * t, out_channels, 1, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
out = self.block(x)
if self.use_shortcut:
            out = x + out # the output has no ReLU activation
return out
class Shallow_net(nn.Module):
'''
The first layer uses standard convolution while all other layers
use depth-wise separable convolutions with kernel size 3 × 3. The stride is 2 for
all but the last layer, where it is 1.
'''
def __init__(self, dw_channels1=32, dw_channels2=64, out_channels=128, **kwargs):
super(Shallow_net, self).__init__()
self.conv0 = Custom_Conv(3, 3, 2, 1, 1)
self.conv = Custom_Conv(3, dw_channels1, 3, 2, 0)
self.dsconv1 = DepthSepConv(dw_channels1, dw_channels2, 3,2,1)
self.dsconv2 = DepthSepConv(dw_channels2, out_channels, 3,2,1)
self.dsconv3 = DepthSepConv(out_channels, out_channels, 3,1,1)
def forward(self, x):
x = self.conv0(x)
x = self.conv(x)
x = self.dsconv1(x)
x = self.dsconv2(x)
x = self.dsconv3(x)
return x
class Deep_net(nn.Module):
def __init__(self, in_channels, block_channels,
t, num_blocks, **kwargs):
super(Deep_net, self).__init__()
self.block_channels = block_channels
self.t = t
self.num_blocks = num_blocks
self.conv_0 = Custom_Conv(3, 3, 2, 1, 1)
self.conv_ = Custom_Conv(3, in_channels, 3, 2, 0)
self.bottleneck1 = self._layer(LinearBottleneck, in_channels, block_channels[0], num_blocks[0], t[0], 1)
self.bottleneck2 = self._layer(LinearBottleneck, block_channels[0], block_channels[1], num_blocks[1], t[1], 1)
self.bottleneck3 = self._layer(LinearBottleneck, block_channels[1], block_channels[2], num_blocks[2], t[2], 2)
self.bottleneck4 = self._layer(LinearBottleneck, block_channels[2], block_channels[3], num_blocks[3], t[3], 2)
self.bottleneck5 = self._layer(LinearBottleneck, block_channels[3], block_channels[4], num_blocks[4], t[4], 1)
self.bottleneck6 = self._layer(LinearBottleneck, block_channels[4], block_channels[5], num_blocks[5], t[5], 1)
        # compared with the paper, the final conv2d at the tail is missing here
def _layer(self, block, in_channels, out_channels, blocks, t, stride):
layers = []
layers.append(block(in_channels, out_channels, t, stride))
for i in range(1, blocks):
layers.append(block(out_channels, out_channels, t, 1))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv_0(x)
x = self.conv_(x)
x = self.bottleneck1(x)
x = self.bottleneck2(x)
x = self.bottleneck3(x)
x = self.bottleneck4(x)
x = self.bottleneck5(x)
x = self.bottleneck6(x)
return x
class FeatureFusionModule(nn.Module):
def __init__(self, highter_in_channels, lower_in_channels, out_channels, scale_factor=4, **kwargs):
super(FeatureFusionModule, self).__init__()
self.scale_factor = scale_factor
        self.dwconv = DepthConv(lower_in_channels, out_channels, 3,1,1) # the original paper uses DWConv (dilation 4) 3/1, f
self.conv_lower_res = nn.Sequential(
nn.Conv2d(out_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
self.conv_higher_res = nn.Sequential(
nn.Conv2d(highter_in_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
self.relu = nn.ReLU(True)
def forward(self, higher_res_feature, lower_res_feature):
_, _, h, w = higher_res_feature.size()
lower_res_feature = F.interpolate(lower_res_feature, size=(h,w), mode='bilinear', align_corners=True)
lower_res_feature = self.dwconv(lower_res_feature)
lower_res_feature = self.conv_lower_res(lower_res_feature)
higher_res_feature = self.conv_higher_res(higher_res_feature)
out = higher_res_feature + lower_res_feature
return self.relu(out)
class Classifer(nn.Module):
def __init__(self, dw_channels, num_classes, stride=1, **kwargs):
super(Classifer, self).__init__()
self.dsconv1 = DepthSepConv(dw_channels, dw_channels, 3,stride,1)
self.dsconv2 = DepthSepConv(dw_channels, dw_channels, 3,stride,1)
self.conv = nn.Sequential(
nn.Dropout(0.1),
nn.Conv2d(dw_channels, num_classes, 1)
)
def forward(self, x):
x = self.dsconv1(x)
x = self.dsconv2(x)
x = self.conv(x)
return x
class ContextNetX10(nn.Module):
def __init__(self, classes, aux=False, **kwargs):
super(ContextNetX10, self).__init__()
self.aux = aux
self.spatial_detail = Shallow_net(32, 64, 128)
self.context_feature_extractor = Deep_net(32, [32, 32, 48, 64, 96, 128], [1, 6, 6, 6, 6, 6], [1, 1, 3, 3, 2, 2])
self.feature_fusion = FeatureFusionModule(128, 128, 128)
self.classifier = Classifer(128, classes)
if self.aux:
self.auxlayer = nn.Sequential(
nn.Conv2d(128, 32, 3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Dropout(0.1),
nn.Conv2d(32, classes, 1)
)
def forward(self, x):
size = x.size()[2:]
higher_res_features = self.spatial_detail(x)
x_low = F.interpolate(x, scale_factor = 0.25, mode='bilinear', align_corners=True)
x = self.context_feature_extractor(x_low)
x = self.feature_fusion(higher_res_features, x)
x = self.classifier(x)
outputs = []
x = F.interpolate(x, size, mode='bilinear', align_corners=True)
outputs.append(x)
if self.aux:
auxout = self.auxlayer(higher_res_features)
auxout = F.interpolate(auxout, size, mode='bilinear', align_corners=True)
outputs.append(auxout)
return x
# return tuple(outputs)
"""print layers and params of network"""
if __name__ == '__main__':
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = ContextNetX10(classes=11).to(device)
summary(model, (3, 352, 480))
flops_count, params_count = get_model_complexity_info(model, (3, 352, 480),
as_strings=False,
print_per_layer_stat=True)
print(flops_count/1000000000,'GMac', params_count/1000000, params_count/1024/1024*4,'MB')
x = torch.randn(2, 3, 352, 480).to(device)
input = x
macs, params = profile(model, inputs=(input,))
print(macs/2000000000,'GMac', params/1000000, params/1024/1024*4,'MB')
'''
/home/ethan/anaconda3/envs/py36_cuda101/bin/python /home/ethan/codes/Efficient-Segmentation-Networks/model/ContextNetX10.py
/home/ethan/anaconda3/envs/py36_cuda101/lib/python3.6/site-packages/torch/nn/functional.py:3000: UserWarning: The default behavior for interpolate/upsample with float scale_factor changed in 1.6.0 to align with other frameworks/libraries, and uses scale_factor directly, instead of relying on the computed output size. If you wish to keep the old behavior, please set recompute_scale_factor=True. See the documentation of nn.Upsample for details.
warnings.warn("The default behavior for interpolate/upsample with float scale_factor changed "
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 3, 353, 481] 36
BatchNorm2d-2 [-1, 3, 353, 481] 6
ReLU-3 [-1, 3, 353, 481] 0
Custom_Conv-4 [-1, 3, 353, 481] 0
Conv2d-5 [-1, 32, 176, 240] 864
BatchNorm2d-6 [-1, 32, 176, 240] 64
ReLU-7 [-1, 32, 176, 240] 0
Custom_Conv-8 [-1, 32, 176, 240] 0
Conv2d-9 [-1, 32, 88, 120] 288
BatchNorm2d-10 [-1, 32, 88, 120] 64
ReLU-11 [-1, 32, 88, 120] 0
Conv2d-12 [-1, 64, 88, 120] 2,048
BatchNorm2d-13 [-1, 64, 88, 120] 128
ReLU-14 [-1, 64, 88, 120] 0
DepthSepConv-15 [-1, 64, 88, 120] 0
Conv2d-16 [-1, 64, 44, 60] 576
BatchNorm2d-17 [-1, 64, 44, 60] 128
ReLU-18 [-1, 64, 44, 60] 0
Conv2d-19 [-1, 128, 44, 60] 8,192
BatchNorm2d-20 [-1, 128, 44, 60] 256
ReLU-21 [-1, 128, 44, 60] 0
DepthSepConv-22 [-1, 128, 44, 60] 0
Conv2d-23 [-1, 128, 44, 60] 1,152
BatchNorm2d-24 [-1, 128, 44, 60] 256
ReLU-25 [-1, 128, 44, 60] 0
Conv2d-26 [-1, 128, 44, 60] 16,384
BatchNorm2d-27 [-1, 128, 44, 60] 256
ReLU-28 [-1, 128, 44, 60] 0
DepthSepConv-29 [-1, 128, 44, 60] 0
Shallow_net-30 [-1, 128, 44, 60] 0
Conv2d-31 [-1, 3, 89, 121] 36
BatchNorm2d-32 [-1, 3, 89, 121] 6
ReLU-33 [-1, 3, 89, 121] 0
Custom_Conv-34 [-1, 3, 89, 121] 0
Conv2d-35 [-1, 32, 44, 60] 864
BatchNorm2d-36 [-1, 32, 44, 60] 64
ReLU-37 [-1, 32, 44, 60] 0
Custom_Conv-38 [-1, 32, 44, 60] 0
Conv2d-39 [-1, 32, 44, 60] 1,024
BatchNorm2d-40 [-1, 32, 44, 60] 64
ReLU-41 [-1, 32, 44, 60] 0
Custom_Conv-42 [-1, 32, 44, 60] 0
Conv2d-43 [-1, 32, 44, 60] 288
BatchNorm2d-44 [-1, 32, 44, 60] 64
ReLU-45 [-1, 32, 44, 60] 0
DepthConv-46 [-1, 32, 44, 60] 0
Conv2d-47 [-1, 32, 44, 60] 1,024
BatchNorm2d-48 [-1, 32, 44, 60] 64
LinearBottleneck-49 [-1, 32, 44, 60] 0
Conv2d-50 [-1, 192, 44, 60] 6,144
BatchNorm2d-51 [-1, 192, 44, 60] 384
ReLU-52 [-1, 192, 44, 60] 0
Custom_Conv-53 [-1, 192, 44, 60] 0
Conv2d-54 [-1, 192, 44, 60] 1,728
BatchNorm2d-55 [-1, 192, 44, 60] 384
ReLU-56 [-1, 192, 44, 60] 0
DepthConv-57 [-1, 192, 44, 60] 0
Conv2d-58 [-1, 32, 44, 60] 6,144
BatchNorm2d-59 [-1, 32, 44, 60] 64
LinearBottleneck-60 [-1, 32, 44, 60] 0
Conv2d-61 [-1, 192, 44, 60] 6,144
BatchNorm2d-62 [-1, 192, 44, 60] 384
ReLU-63 [-1, 192, 44, 60] 0
Custom_Conv-64 [-1, 192, 44, 60] 0
Conv2d-65 [-1, 192, 22, 30] 1,728
BatchNorm2d-66 [-1, 192, 22, 30] 384
ReLU-67 [-1, 192, 22, 30] 0
DepthConv-68 [-1, 192, 22, 30] 0
Conv2d-69 [-1, 48, 22, 30] 9,216
BatchNorm2d-70 [-1, 48, 22, 30] 96
LinearBottleneck-71 [-1, 48, 22, 30] 0
Conv2d-72 [-1, 288, 22, 30] 13,824
BatchNorm2d-73 [-1, 288, 22, 30] 576
ReLU-74 [-1, 288, 22, 30] 0
Custom_Conv-75 [-1, 288, 22, 30] 0
Conv2d-76 [-1, 288, 22, 30] 2,592
BatchNorm2d-77 [-1, 288, 22, 30] 576
ReLU-78 [-1, 288, 22, 30] 0
DepthConv-79 [-1, 288, 22, 30] 0
Conv2d-80 [-1, 48, 22, 30] 13,824
BatchNorm2d-81 [-1, 48, 22, 30] 96
LinearBottleneck-82 [-1, 48, 22, 30] 0
Conv2d-83 [-1, 288, 22, 30] 13,824
BatchNorm2d-84 [-1, 288, 22, 30] 576
ReLU-85 [-1, 288, 22, 30] 0
Custom_Conv-86 [-1, 288, 22, 30] 0
Conv2d-87 [-1, 288, 22, 30] 2,592
BatchNorm2d-88 [-1, 288, 22, 30] 576
ReLU-89 [-1, 288, 22, 30] 0
DepthConv-90 [-1, 288, 22, 30] 0
Conv2d-91 [-1, 48, 22, 30] 13,824
BatchNorm2d-92 [-1, 48, 22, 30] 96
LinearBottleneck-93 [-1, 48, 22, 30] 0
Conv2d-94 [-1, 288, 22, 30] 13,824
BatchNorm2d-95 [-1, 288, 22, 30] 576
ReLU-96 [-1, 288, 22, 30] 0
Custom_Conv-97 [-1, 288, 22, 30] 0
Conv2d-98 [-1, 288, 11, 15] 2,592
BatchNorm2d-99 [-1, 288, 11, 15] 576
ReLU-100 [-1, 288, 11, 15] 0
DepthConv-101 [-1, 288, 11, 15] 0
Conv2d-102 [-1, 64, 11, 15] 18,432
BatchNorm2d-103 [-1, 64, 11, 15] 128
LinearBottleneck-104 [-1, 64, 11, 15] 0
Conv2d-105 [-1, 384, 11, 15] 24,576
BatchNorm2d-106 [-1, 384, 11, 15] 768
ReLU-107 [-1, 384, 11, 15] 0
Custom_Conv-108 [-1, 384, 11, 15] 0
Conv2d-109 [-1, 384, 11, 15] 3,456
BatchNorm2d-110 [-1, 384, 11, 15] 768
ReLU-111 [-1, 384, 11, 15] 0
DepthConv-112 [-1, 384, 11, 15] 0
Conv2d-113 [-1, 64, 11, 15] 24,576
BatchNorm2d-114 [-1, 64, 11, 15] 128
LinearBottleneck-115 [-1, 64, 11, 15] 0
Conv2d-116 [-1, 384, 11, 15] 24,576
BatchNorm2d-117 [-1, 384, 11, 15] 768
ReLU-118 [-1, 384, 11, 15] 0
Custom_Conv-119 [-1, 384, 11, 15] 0
Conv2d-120 [-1, 384, 11, 15] 3,456
BatchNorm2d-121 [-1, 384, 11, 15] 768
ReLU-122 [-1, 384, 11, 15] 0
DepthConv-123 [-1, 384, 11, 15] 0
Conv2d-124 [-1, 64, 11, 15] 24,576
BatchNorm2d-125 [-1, 64, 11, 15] 128
LinearBottleneck-126 [-1, 64, 11, 15] 0
Conv2d-127 [-1, 384, 11, 15] 24,576
BatchNorm2d-128 [-1, 384, 11, 15] 768
ReLU-129 [-1, 384, 11, 15] 0
Custom_Conv-130 [-1, 384, 11, 15] 0
Conv2d-131 [-1, 384, 11, 15] 3,456
BatchNorm2d-132 [-1, 384, 11, 15] 768
ReLU-133 [-1, 384, 11, 15] 0
DepthConv-134 [-1, 384, 11, 15] 0
Conv2d-135 [-1, 96, 11, 15] 36,864
BatchNorm2d-136 [-1, 96, 11, 15] 192
LinearBottleneck-137 [-1, 96, 11, 15] 0
Conv2d-138 [-1, 576, 11, 15] 55,296
BatchNorm2d-139 [-1, 576, 11, 15] 1,152
ReLU-140 [-1, 576, 11, 15] 0
Custom_Conv-141 [-1, 576, 11, 15] 0
Conv2d-142 [-1, 576, 11, 15] 5,184
BatchNorm2d-143 [-1, 576, 11, 15] 1,152
ReLU-144 [-1, 576, 11, 15] 0
DepthConv-145 [-1, 576, 11, 15] 0
Conv2d-146 [-1, 96, 11, 15] 55,296
BatchNorm2d-147 [-1, 96, 11, 15] 192
LinearBottleneck-148 [-1, 96, 11, 15] 0
Conv2d-149 [-1, 576, 11, 15] 55,296
BatchNorm2d-150 [-1, 576, 11, 15] 1,152
ReLU-151 [-1, 576, 11, 15] 0
Custom_Conv-152 [-1, 576, 11, 15] 0
Conv2d-153 [-1, 576, 11, 15] 5,184
BatchNorm2d-154 [-1, 576, 11, 15] 1,152
ReLU-155 [-1, 576, 11, 15] 0
DepthConv-156 [-1, 576, 11, 15] 0
Conv2d-157 [-1, 128, 11, 15] 73,728
BatchNorm2d-158 [-1, 128, 11, 15] 256
LinearBottleneck-159 [-1, 128, 11, 15] 0
Conv2d-160 [-1, 768, 11, 15] 98,304
BatchNorm2d-161 [-1, 768, 11, 15] 1,536
ReLU-162 [-1, 768, 11, 15] 0
Custom_Conv-163 [-1, 768, 11, 15] 0
Conv2d-164 [-1, 768, 11, 15] 6,912
BatchNorm2d-165 [-1, 768, 11, 15] 1,536
ReLU-166 [-1, 768, 11, 15] 0
DepthConv-167 [-1, 768, 11, 15] 0
Conv2d-168 [-1, 128, 11, 15] 98,304
BatchNorm2d-169 [-1, 128, 11, 15] 256
LinearBottleneck-170 [-1, 128, 11, 15] 0
Deep_net-171 [-1, 128, 11, 15] 0
Conv2d-172 [-1, 128, 44, 60] 1,152
BatchNorm2d-173 [-1, 128, 44, 60] 256
ReLU-174 [-1, 128, 44, 60] 0
DepthConv-175 [-1, 128, 44, 60] 0
Conv2d-176 [-1, 128, 44, 60] 16,512
BatchNorm2d-177 [-1, 128, 44, 60] 256
Conv2d-178 [-1, 128, 44, 60] 16,512
BatchNorm2d-179 [-1, 128, 44, 60] 256
ReLU-180 [-1, 128, 44, 60] 0
FeatureFusionModule-181 [-1, 128, 44, 60] 0
Conv2d-182 [-1, 128, 44, 60] 1,152
BatchNorm2d-183 [-1, 128, 44, 60] 256
ReLU-184 [-1, 128, 44, 60] 0
Conv2d-185 [-1, 128, 44, 60] 16,384
BatchNorm2d-186 [-1, 128, 44, 60] 256
ReLU-187 [-1, 128, 44, 60] 0
DepthSepConv-188 [-1, 128, 44, 60] 0
Conv2d-189 [-1, 128, 44, 60] 1,152
BatchNorm2d-190 [-1, 128, 44, 60] 256
ReLU-191 [-1, 128, 44, 60] 0
Conv2d-192 [-1, 128, 44, 60] 16,384
BatchNorm2d-193 [-1, 128, 44, 60] 256
ReLU-194 [-1, 128, 44, 60] 0
DepthSepConv-195 [-1, 128, 44, 60] 0
Dropout-196 [-1, 128, 44, 60] 0
Conv2d-197 [-1, 11, 44, 60] 1,419
Classifer-198 [-1, 11, 44, 60] 0
================================================================
Total params: 875,615
Trainable params: 875,615
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 1.93
Forward/backward pass size (MB): 313.86
Params size (MB): 3.34
Estimated Total Size (MB): 319.14
----------------------------------------------------------------
ContextNetX10(
0.579 GMac, 100.000% MACs,
(spatial_detail): Shallow_net(
0.149 GMac, 25.694% MACs,
(conv0): Custom_Conv(
0.008 GMac, 1.319% MACs,
(conv): Sequential(
0.008 GMac, 1.319% MACs,
(0): Conv2d(0.006 GMac, 1.055% MACs, 3, 3, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.176% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.088% MACs, inplace=True)
)
)
(conv): Custom_Conv(
0.041 GMac, 6.998% MACs,
(conv): Sequential(
0.041 GMac, 6.998% MACs,
(0): Conv2d(0.036 GMac, 6.298% MACs, 3, 32, kernel_size=(3, 3), stride=(2, 2), bias=False)
(1): BatchNorm2d(0.003 GMac, 0.467% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.233% MACs, inplace=True)
)
)
(dsconv1): DepthSepConv(
0.028 GMac, 4.782% MACs,
(conv): Sequential(
0.028 GMac, 4.782% MACs,
(0): Conv2d(0.003 GMac, 0.525% MACs, 32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.117% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
(3): Conv2d(0.022 GMac, 3.732% MACs, 32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.001 GMac, 0.233% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.001 GMac, 0.117% MACs, inplace=True)
)
)
(dsconv2): DepthSepConv(
0.025 GMac, 4.257% MACs,
(conv): Sequential(
0.025 GMac, 4.257% MACs,
(0): Conv2d(0.002 GMac, 0.262% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=64, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.058% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.029% MACs, inplace=True)
(3): Conv2d(0.022 GMac, 3.732% MACs, 64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
)
)
(dsconv3): DepthSepConv(
0.048 GMac, 8.339% MACs,
(conv): Sequential(
0.048 GMac, 8.339% MACs,
(0): Conv2d(0.003 GMac, 0.525% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
(3): Conv2d(0.043 GMac, 7.464% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
)
)
)
(context_feature_extractor): Deep_net(
0.237 GMac, 40.944% MACs,
(conv_0): Custom_Conv(
0.0 GMac, 0.084% MACs,
(conv): Sequential(
0.0 GMac, 0.084% MACs,
(0): Conv2d(0.0 GMac, 0.067% MACs, 3, 3, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.011% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.006% MACs, inplace=True)
)
)
(conv_): Custom_Conv(
0.003 GMac, 0.437% MACs,
(conv): Sequential(
0.003 GMac, 0.437% MACs,
(0): Conv2d(0.002 GMac, 0.394% MACs, 3, 32, kernel_size=(3, 3), stride=(2, 2), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.029% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.015% MACs, inplace=True)
)
)
(bottleneck1): Sequential(
0.007 GMac, 1.181% MACs,
(0): LinearBottleneck(
0.007 GMac, 1.181% MACs,
(block): Sequential(
0.007 GMac, 1.181% MACs,
(0): Custom_Conv(
0.003 GMac, 0.510% MACs,
(conv): Sequential(
0.003 GMac, 0.510% MACs,
(0): Conv2d(0.003 GMac, 0.467% MACs, 32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.029% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.015% MACs, inplace=True)
)
)
(1): DepthConv(
0.001 GMac, 0.175% MACs,
(conv): Sequential(
0.001 GMac, 0.175% MACs,
(0): Conv2d(0.001 GMac, 0.131% MACs, 32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.029% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.015% MACs, inplace=True)
)
)
(2): Conv2d(0.003 GMac, 0.467% MACs, 32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.029% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck2): Sequential(
0.04 GMac, 6.940% MACs,
(0): LinearBottleneck(
0.04 GMac, 6.940% MACs,
(block): Sequential(
0.04 GMac, 6.940% MACs,
(0): Custom_Conv(
0.018 GMac, 3.062% MACs,
(conv): Sequential(
0.018 GMac, 3.062% MACs,
(0): Conv2d(0.016 GMac, 2.799% MACs, 32, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.175% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.087% MACs, inplace=True)
)
)
(1): DepthConv(
0.006 GMac, 1.050% MACs,
(conv): Sequential(
0.006 GMac, 1.050% MACs,
(0): Conv2d(0.005 GMac, 0.787% MACs, 192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.175% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.087% MACs, inplace=True)
)
)
(2): Conv2d(0.016 GMac, 2.799% MACs, 192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.029% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck3): Sequential(
0.068 GMac, 11.689% MACs,
(0): LinearBottleneck(
0.025 GMac, 4.385% MACs,
(block): Sequential(
0.025 GMac, 4.385% MACs,
(0): Custom_Conv(
0.018 GMac, 3.062% MACs,
(conv): Sequential(
0.018 GMac, 3.062% MACs,
(0): Conv2d(0.016 GMac, 2.799% MACs, 32, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.001 GMac, 0.175% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.001 GMac, 0.087% MACs, inplace=True)
)
)
(1): DepthConv(
0.002 GMac, 0.262% MACs,
(conv): Sequential(
0.002 GMac, 0.262% MACs,
(0): Conv2d(0.001 GMac, 0.197% MACs, 192, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=192, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.044% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.022% MACs, inplace=True)
)
)
(2): Conv2d(0.006 GMac, 1.050% MACs, 192, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.011% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.021 GMac, 3.652% MACs,
(block): Sequential(
0.021 GMac, 3.652% MACs,
(0): Custom_Conv(
0.01 GMac, 1.673% MACs,
(conv): Sequential(
0.01 GMac, 1.673% MACs,
(0): Conv2d(0.009 GMac, 1.575% MACs, 48, 288, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.066% MACs, 288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.033% MACs, inplace=True)
)
)
(1): DepthConv(
0.002 GMac, 0.394% MACs,
(conv): Sequential(
0.002 GMac, 0.394% MACs,
(0): Conv2d(0.002 GMac, 0.295% MACs, 288, 288, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=288, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.066% MACs, 288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.033% MACs, inplace=True)
)
)
(2): Conv2d(0.009 GMac, 1.575% MACs, 288, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.011% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.021 GMac, 3.652% MACs,
(block): Sequential(
0.021 GMac, 3.652% MACs,
(0): Custom_Conv(
0.01 GMac, 1.673% MACs,
(conv): Sequential(
0.01 GMac, 1.673% MACs,
(0): Conv2d(0.009 GMac, 1.575% MACs, 48, 288, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.066% MACs, 288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.033% MACs, inplace=True)
)
)
(1): DepthConv(
0.002 GMac, 0.394% MACs,
(conv): Sequential(
0.002 GMac, 0.394% MACs,
(0): Conv2d(0.002 GMac, 0.295% MACs, 288, 288, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=288, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.066% MACs, 288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.033% MACs, inplace=True)
)
)
(2): Conv2d(0.009 GMac, 1.575% MACs, 288, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.011% MACs, 48, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck4): Sequential(
0.031 GMac, 5.434% MACs,
(0): LinearBottleneck(
0.013 GMac, 2.300% MACs,
(block): Sequential(
0.013 GMac, 2.300% MACs,
(0): Custom_Conv(
0.01 GMac, 1.673% MACs,
(conv): Sequential(
0.01 GMac, 1.673% MACs,
(0): Conv2d(0.009 GMac, 1.575% MACs, 48, 288, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.066% MACs, 288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.033% MACs, inplace=True)
)
)
(1): DepthConv(
0.001 GMac, 0.098% MACs,
(conv): Sequential(
0.001 GMac, 0.098% MACs,
(0): Conv2d(0.0 GMac, 0.074% MACs, 288, 288, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=288, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.016% MACs, 288, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.008% MACs, inplace=True)
)
)
(2): Conv2d(0.003 GMac, 0.525% MACs, 288, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.004% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.009 GMac, 1.567% MACs,
(block): Sequential(
0.009 GMac, 1.567% MACs,
(0): Custom_Conv(
0.004 GMac, 0.733% MACs,
(conv): Sequential(
0.004 GMac, 0.733% MACs,
(0): Conv2d(0.004 GMac, 0.700% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.022% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.011% MACs, inplace=True)
)
)
(1): DepthConv(
0.001 GMac, 0.131% MACs,
(conv): Sequential(
0.001 GMac, 0.131% MACs,
(0): Conv2d(0.001 GMac, 0.098% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.022% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.011% MACs, inplace=True)
)
)
(2): Conv2d(0.004 GMac, 0.700% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.004% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(2): LinearBottleneck(
0.009 GMac, 1.567% MACs,
(block): Sequential(
0.009 GMac, 1.567% MACs,
(0): Custom_Conv(
0.004 GMac, 0.733% MACs,
(conv): Sequential(
0.004 GMac, 0.733% MACs,
(0): Conv2d(0.004 GMac, 0.700% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.022% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.011% MACs, inplace=True)
)
)
(1): DepthConv(
0.001 GMac, 0.131% MACs,
(conv): Sequential(
0.001 GMac, 0.131% MACs,
(0): Conv2d(0.001 GMac, 0.098% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.022% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.011% MACs, inplace=True)
)
)
(2): Conv2d(0.004 GMac, 0.700% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.004% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck5): Sequential(
0.031 GMac, 5.319% MACs,
(0): LinearBottleneck(
0.011 GMac, 1.919% MACs,
(block): Sequential(
0.011 GMac, 1.919% MACs,
(0): Custom_Conv(
0.004 GMac, 0.733% MACs,
(conv): Sequential(
0.004 GMac, 0.733% MACs,
(0): Conv2d(0.004 GMac, 0.700% MACs, 64, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.022% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.011% MACs, inplace=True)
)
)
(1): DepthConv(
0.001 GMac, 0.131% MACs,
(conv): Sequential(
0.001 GMac, 0.131% MACs,
(0): Conv2d(0.001 GMac, 0.098% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.022% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.011% MACs, inplace=True)
)
)
(2): Conv2d(0.006 GMac, 1.050% MACs, 384, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.005% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.02 GMac, 3.401% MACs,
(block): Sequential(
0.02 GMac, 3.401% MACs,
(0): Custom_Conv(
0.009 GMac, 1.624% MACs,
(conv): Sequential(
0.009 GMac, 1.624% MACs,
(0): Conv2d(0.009 GMac, 1.575% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.033% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.016% MACs, inplace=True)
)
)
(1): DepthConv(
0.001 GMac, 0.197% MACs,
(conv): Sequential(
0.001 GMac, 0.197% MACs,
(0): Conv2d(0.001 GMac, 0.148% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.033% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.016% MACs, inplace=True)
)
)
(2): Conv2d(0.009 GMac, 1.575% MACs, 576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.005% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
(bottleneck6): Sequential(
0.057 GMac, 9.861% MACs,
(0): LinearBottleneck(
0.023 GMac, 3.927% MACs,
(block): Sequential(
0.023 GMac, 3.927% MACs,
(0): Custom_Conv(
0.009 GMac, 1.624% MACs,
(conv): Sequential(
0.009 GMac, 1.624% MACs,
(0): Conv2d(0.009 GMac, 1.575% MACs, 96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.033% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.016% MACs, inplace=True)
)
)
(1): DepthConv(
0.001 GMac, 0.197% MACs,
(conv): Sequential(
0.001 GMac, 0.197% MACs,
(0): Conv2d(0.001 GMac, 0.148% MACs, 576, 576, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=576, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.033% MACs, 576, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.016% MACs, inplace=True)
)
)
(2): Conv2d(0.012 GMac, 2.099% MACs, 576, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.007% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): LinearBottleneck(
0.034 GMac, 5.934% MACs,
(block): Sequential(
0.034 GMac, 5.934% MACs,
(0): Custom_Conv(
0.017 GMac, 2.865% MACs,
(conv): Sequential(
0.017 GMac, 2.865% MACs,
(0): Conv2d(0.016 GMac, 2.799% MACs, 128, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)
(1): BatchNorm2d(0.0 GMac, 0.044% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.022% MACs, inplace=True)
)
)
(1): DepthConv(
0.002 GMac, 0.262% MACs,
(conv): Sequential(
0.002 GMac, 0.262% MACs,
(0): Conv2d(0.001 GMac, 0.197% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(1): BatchNorm2d(0.0 GMac, 0.044% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.022% MACs, inplace=True)
)
)
(2): Conv2d(0.016 GMac, 2.799% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(3): BatchNorm2d(0.0 GMac, 0.007% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
)
)
(feature_fusion): FeatureFusionModule(
0.093 GMac, 16.037% MACs,
(dwconv): DepthConv(
0.004 GMac, 0.700% MACs,
(conv): Sequential(
0.004 GMac, 0.700% MACs,
(0): Conv2d(0.003 GMac, 0.525% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
)
)
(conv_lower_res): Sequential(
0.044 GMac, 7.639% MACs,
(0): Conv2d(0.044 GMac, 7.523% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1))
(1): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(conv_higher_res): Sequential(
0.044 GMac, 7.639% MACs,
(0): Conv2d(0.044 GMac, 7.523% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1))
(1): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(relu): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
)
(classifier): Classifer(
0.1 GMac, 17.325% MACs,
(dsconv1): DepthSepConv(
0.048 GMac, 8.339% MACs,
(conv): Sequential(
0.048 GMac, 8.339% MACs,
(0): Conv2d(0.003 GMac, 0.525% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
(3): Conv2d(0.043 GMac, 7.464% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
)
)
(dsconv2): DepthSepConv(
0.048 GMac, 8.339% MACs,
(conv): Sequential(
0.048 GMac, 8.339% MACs,
(0): Conv2d(0.003 GMac, 0.525% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(1): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(2): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
(3): Conv2d(0.043 GMac, 7.464% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(4): BatchNorm2d(0.001 GMac, 0.117% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(5): ReLU(0.0 GMac, 0.058% MACs, inplace=True)
)
)
(conv): Sequential(
0.004 GMac, 0.646% MACs,
(0): Dropout(0.0 GMac, 0.000% MACs, p=0.1, inplace=False)
(1): Conv2d(0.004 GMac, 0.646% MACs, 128, 11, kernel_size=(1, 1), stride=(1, 1))
)
)
)
0.57947145 GMac 0.875615 3.3402061462402344 MB
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[INFO] Register zero_ops() for <class 'torch.nn.modules.activation.ReLU'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.Sequential'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.Custom_Conv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.DepthSepConv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.Shallow_net'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.DepthConv'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.LinearBottleneck'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.Deep_net'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.FeatureFusionModule'>. Treat it as zero Macs and zero Params.
[INFO] Register zero_ops() for <class 'torch.nn.modules.dropout.Dropout'>.
[WARN] Cannot find rule for <class '__main__.Classifer'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.ContextNetX10'>. Treat it as zero Macs and zero Params.
0.569441604 GMac 0.875615 3.3402061462402344 MB
Process finished with exit code 0
'''
| [
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.randn"
] | 1.1.0 | Ethan-ye/Efficient-Segmentation-Networks | 27272e43126a507a6d93b21cd2372f5432f61237 |
1.1 | ###################################################################################################
#ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network
#Paper-Link: https://arxiv.org/pdf/1811.11431.pdf
###################################################################################################
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from model.ESPNet_v2.ModelX4 import EESPNet, EESP
from model.ESPNet_v2.cnn_utilsX4 import *
from fvcore.nn.flop_count import flop_count #https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile #https://github.com/Lyken17/pytorch-OpCounter
__all__ = ["EESPNet_SegX4"]
class EESPNet_SegX4(nn.Module):
    def __init__(self, classes=19, s=2, pretrained=None, gpus=1):
        super().__init__()
        classificationNet = EESPNet(classes=1000, s=s)
        if gpus >= 1:
            classificationNet = nn.DataParallel(classificationNet)
        # print(classificationNet)
        # load the pretrained weights (skipped when the checkpoint file is missing)
        if pretrained:
            if not os.path.isfile(pretrained):
                print('Weight file does not exist. Training without pre-trained weights')
            else:
                print('Model initialized with pretrained weights')
                classificationNet.load_state_dict(torch.load(pretrained))

        self.net = classificationNet.module
        del classificationNet

        # delete last few layers
        del self.net.classifier
        del self.net.level5
        del self.net.level5_0

        if s <= 0.5:
            p = 0.1
        else:
            p = 0.2

        self.proj_L4_C = CBR(self.net.level4[-1].module_act.num_parameters, self.net.level3[-1].module_act.num_parameters, 1, 1)
        pspSize = 2 * self.net.level3[-1].module_act.num_parameters
        self.pspMod = nn.Sequential(EESP(pspSize, pspSize // 2, stride=1, k=4, r_lim=7),
                                    PSPModule(pspSize // 2, pspSize // 2))
        self.project_l3 = nn.Sequential(nn.Dropout2d(p=p), C(pspSize // 2, classes, 1, 1))
        self.act_l3 = BR(classes)
        self.project_l2 = CBR(self.net.level2_0.act.num_parameters + classes, classes, 1, 1)
        self.project_l1 = nn.Sequential(nn.Dropout2d(p=p), C(self.net.level1.act.num_parameters + classes, classes, 1, 1))
    def hierarchicalUpsample(self, x, factor=3):
        for i in range(factor):
            x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        return x
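    # The loop above doubles the spatial size `factor` times (8x in total for the
    # default factor=3). A single F.interpolate with scale_factor=2 ** factor would
    # give the same output size, although repeated bilinear 2x steps are not
    # numerically identical to one jump; the step-wise form mirrors the decoder's
    # stage-by-stage upsampling.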
    def forward(self, input):
        out_l1, out_l2, out_l3, out_l4 = self.net(input, seg=True)
        out_l4_proj = self.proj_L4_C(out_l4)
        up_l4_to_l3 = F.interpolate(out_l4_proj, size=out_l3.size()[2:], mode='bilinear', align_corners=True)
        merged_l3_upl4 = self.pspMod(torch.cat([out_l3, up_l4_to_l3], 1))
        proj_merge_l3_bef_act = self.project_l3(merged_l3_upl4)
        proj_merge_l3 = self.act_l3(proj_merge_l3_bef_act)
        out_up_l3 = F.interpolate(proj_merge_l3, scale_factor=2, mode='bilinear', align_corners=True)
        merge_l2 = self.project_l2(torch.cat([out_l2, out_up_l3], 1))
        out_up_l2 = F.interpolate(merge_l2, scale_factor=2, mode='bilinear', align_corners=True)
        merge_l1 = self.project_l1(torch.cat([out_l1, out_up_l2], 1))
        # if self.training:
        #     return F.interpolate(merge_l1, scale_factor=2, mode='bilinear', align_corners=True), self.hierarchicalUpsample(proj_merge_l3_bef_act)
        # else:
        #     return F.interpolate(merge_l1, scale_factor=2, mode='bilinear', align_corners=True)
        output = F.interpolate(merge_l1, scale_factor=2, mode='bilinear', align_corners=True)
        return output
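
# For reference, with a 3 x 352 x 480 input (as profiled in __main__ below): the
# encoder returns out_l1 at 176x240, out_l2 at 88x120, out_l3 at 44x60 and
# out_l4 at 22x30. out_l4 is projected and upsampled onto out_l3, fused by the
# EESP+PSP block at 44x60 and reduced to `classes` channels; the result is then
# successively upsampled and concatenated with out_l2 and out_l1 before the
# final 2x bilinear upsampling back to 352x480.
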
if __name__ == '__main__':
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = EESPNet_SegX4(classes=11, s=2).to(device)

    summary(model, (3, 352, 480))

    flops_count, params_count = get_model_complexity_info(model, (3, 352, 480),
                                                          as_strings=False,
                                                          print_per_layer_stat=True)
    print(flops_count / 1000000000, 'GMac', params_count / 1000000, params_count / 1024 / 1024 * 4, 'MB')

    x = torch.randn(2, 3, 352, 480).to(device)
    input = x
    macs, params = profile(model, inputs=(input,))
    # thop profiles a batch of 2 here, so dividing the MAC count by 2e9 yields per-image GMac
    print(macs / 2000000000, 'GMac', params / 1000000, params / 1024 / 1024 * 4, 'MB')
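
    # Rough optional latency check to complement the MAC/parameter counts above
    # (an illustrative sketch; timings depend entirely on the hardware, so the
    # numbers are only indicative).
    import time
    model.eval()
    with torch.no_grad():
        for _ in range(5):  # warm-up iterations
            model(x)
        if device.type == 'cuda':
            torch.cuda.synchronize()
        start = time.time()
        for _ in range(20):
            model(x)
        if device.type == 'cuda':
            torch.cuda.synchronize()
        print('avg forward time per batch of 2: %.1f ms' % ((time.time() - start) / 20 * 1000))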
'''
/home/ethan/anaconda3/envs/py36_cuda101/bin/python /home/ethan/codes/Efficient-Segmentation-Networks/model/ESPNet_v2/SegmentationModelX4.py
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 32, 176, 240] 864
BatchNorm2d-2 [-1, 32, 176, 240] 64
PReLU-3 [-1, 32, 176, 240] 32
CBR-4 [-1, 32, 176, 240] 0
AvgPool2d-5 [-1, 32, 88, 120] 0
Conv2d-6 [-1, 32, 177, 241] 128
BatchNorm2d-7 [-1, 32, 177, 241] 64
PReLU-8 [-1, 32, 177, 241] 32
Conv2d-9 [-1, 16, 177, 241] 128
BatchNorm2d-10 [-1, 16, 177, 241] 32
PReLU-11 [-1, 16, 177, 241] 16
CBR1-12 [-1, 16, 177, 241] 0
Conv2d-13 [-1, 16, 88, 120] 144
CDilated1-14 [-1, 16, 88, 120] 0
Conv2d-15 [-1, 16, 88, 120] 144
CDilated1-16 [-1, 16, 88, 120] 0
Conv2d-17 [-1, 16, 88, 120] 144
CDilated1-18 [-1, 16, 88, 120] 0
Conv2d-19 [-1, 16, 88, 120] 144
CDilated1-20 [-1, 16, 88, 120] 0
BatchNorm2d-21 [-1, 64, 88, 120] 128
PReLU-22 [-1, 64, 88, 120] 64
BR-23 [-1, 64, 88, 120] 0
Conv2d-24 [-1, 96, 88, 120] 1,536
BatchNorm2d-25 [-1, 96, 88, 120] 192
CB-26 [-1, 96, 88, 120] 0
EESP1-27 [-1, 96, 88, 120] 0
Conv2d-28 [-1, 3, 88, 120] 81
BatchNorm2d-29 [-1, 3, 88, 120] 6
PReLU-30 [-1, 3, 88, 120] 3
CBR-31 [-1, 3, 88, 120] 0
Conv2d-32 [-1, 128, 88, 120] 384
BatchNorm2d-33 [-1, 128, 88, 120] 256
CB-34 [-1, 128, 88, 120] 0
PReLU-35 [-1, 128, 88, 120] 128
DownSampler-36 [-1, 128, 88, 120] 0
AvgPool2d-37 [-1, 128, 44, 60] 0
Conv2d-38 [-1, 128, 89, 121] 512
BatchNorm2d-39 [-1, 128, 89, 121] 256
PReLU-40 [-1, 128, 89, 121] 128
Conv2d-41 [-1, 20, 89, 121] 640
BatchNorm2d-42 [-1, 20, 89, 121] 40
PReLU-43 [-1, 20, 89, 121] 20
CBR1-44 [-1, 20, 89, 121] 0
Conv2d-45 [-1, 20, 44, 60] 180
CDilated1-46 [-1, 20, 44, 60] 0
Conv2d-47 [-1, 20, 44, 60] 180
CDilated1-48 [-1, 20, 44, 60] 0
Conv2d-49 [-1, 20, 44, 60] 180
CDilated1-50 [-1, 20, 44, 60] 0
Conv2d-51 [-1, 20, 44, 60] 180
CDilated1-52 [-1, 20, 44, 60] 0
BatchNorm2d-53 [-1, 80, 44, 60] 160
PReLU-54 [-1, 80, 44, 60] 80
BR-55 [-1, 80, 44, 60] 0
Conv2d-56 [-1, 128, 44, 60] 2,560
BatchNorm2d-57 [-1, 128, 44, 60] 256
CB-58 [-1, 128, 44, 60] 0
EESP1-59 [-1, 128, 44, 60] 0
Conv2d-60 [-1, 3, 44, 60] 81
BatchNorm2d-61 [-1, 3, 44, 60] 6
PReLU-62 [-1, 3, 44, 60] 3
CBR-63 [-1, 3, 44, 60] 0
Conv2d-64 [-1, 256, 44, 60] 768
BatchNorm2d-65 [-1, 256, 44, 60] 512
CB-66 [-1, 256, 44, 60] 0
PReLU-67 [-1, 256, 44, 60] 256
DownSampler-68 [-1, 256, 44, 60] 0
Conv2d-69 [-1, 64, 44, 60] 4,096
BatchNorm2d-70 [-1, 64, 44, 60] 128
PReLU-71 [-1, 64, 44, 60] 64
CBR-72 [-1, 64, 44, 60] 0
Conv2d-73 [-1, 64, 44, 60] 576
CDilated-74 [-1, 64, 44, 60] 0
Conv2d-75 [-1, 64, 44, 60] 576
CDilated-76 [-1, 64, 44, 60] 0
Conv2d-77 [-1, 64, 44, 60] 576
CDilated-78 [-1, 64, 44, 60] 0
Conv2d-79 [-1, 64, 44, 60] 576
CDilated-80 [-1, 64, 44, 60] 0
BatchNorm2d-81 [-1, 256, 44, 60] 512
PReLU-82 [-1, 256, 44, 60] 256
BR-83 [-1, 256, 44, 60] 0
Conv2d-84 [-1, 256, 44, 60] 16,384
BatchNorm2d-85 [-1, 256, 44, 60] 512
CB-86 [-1, 256, 44, 60] 0
PReLU-87 [-1, 256, 44, 60] 256
EESP-88 [-1, 256, 44, 60] 0
Conv2d-89 [-1, 64, 44, 60] 4,096
BatchNorm2d-90 [-1, 64, 44, 60] 128
PReLU-91 [-1, 64, 44, 60] 64
CBR-92 [-1, 64, 44, 60] 0
Conv2d-93 [-1, 64, 44, 60] 576
CDilated-94 [-1, 64, 44, 60] 0
Conv2d-95 [-1, 64, 44, 60] 576
CDilated-96 [-1, 64, 44, 60] 0
Conv2d-97 [-1, 64, 44, 60] 576
CDilated-98 [-1, 64, 44, 60] 0
Conv2d-99 [-1, 64, 44, 60] 576
CDilated-100 [-1, 64, 44, 60] 0
BatchNorm2d-101 [-1, 256, 44, 60] 512
PReLU-102 [-1, 256, 44, 60] 256
BR-103 [-1, 256, 44, 60] 0
Conv2d-104 [-1, 256, 44, 60] 16,384
BatchNorm2d-105 [-1, 256, 44, 60] 512
CB-106 [-1, 256, 44, 60] 0
PReLU-107 [-1, 256, 44, 60] 256
EESP-108 [-1, 256, 44, 60] 0
Conv2d-109 [-1, 64, 44, 60] 4,096
BatchNorm2d-110 [-1, 64, 44, 60] 128
PReLU-111 [-1, 64, 44, 60] 64
CBR-112 [-1, 64, 44, 60] 0
Conv2d-113 [-1, 64, 44, 60] 576
CDilated-114 [-1, 64, 44, 60] 0
Conv2d-115 [-1, 64, 44, 60] 576
CDilated-116 [-1, 64, 44, 60] 0
Conv2d-117 [-1, 64, 44, 60] 576
CDilated-118 [-1, 64, 44, 60] 0
Conv2d-119 [-1, 64, 44, 60] 576
CDilated-120 [-1, 64, 44, 60] 0
BatchNorm2d-121 [-1, 256, 44, 60] 512
PReLU-122 [-1, 256, 44, 60] 256
BR-123 [-1, 256, 44, 60] 0
Conv2d-124 [-1, 256, 44, 60] 16,384
BatchNorm2d-125 [-1, 256, 44, 60] 512
CB-126 [-1, 256, 44, 60] 0
PReLU-127 [-1, 256, 44, 60] 256
EESP-128 [-1, 256, 44, 60] 0
AvgPool2d-129 [-1, 256, 22, 30] 0
Conv2d-130 [-1, 256, 45, 61] 1,024
BatchNorm2d-131 [-1, 256, 45, 61] 512
PReLU-132 [-1, 256, 45, 61] 256
Conv2d-133 [-1, 44, 45, 61] 2,816
BatchNorm2d-134 [-1, 44, 45, 61] 88
PReLU-135 [-1, 44, 45, 61] 44
CBR1-136 [-1, 44, 45, 61] 0
Conv2d-137 [-1, 44, 22, 30] 396
CDilated1-138 [-1, 44, 22, 30] 0
Conv2d-139 [-1, 44, 22, 30] 396
CDilated1-140 [-1, 44, 22, 30] 0
Conv2d-141 [-1, 44, 22, 30] 396
CDilated1-142 [-1, 44, 22, 30] 0
Conv2d-143 [-1, 44, 22, 30] 396
CDilated1-144 [-1, 44, 22, 30] 0
BatchNorm2d-145 [-1, 176, 22, 30] 352
PReLU-146 [-1, 176, 22, 30] 176
BR-147 [-1, 176, 22, 30] 0
Conv2d-148 [-1, 256, 22, 30] 11,264
BatchNorm2d-149 [-1, 256, 22, 30] 512
CB-150 [-1, 256, 22, 30] 0
EESP1-151 [-1, 256, 22, 30] 0
Conv2d-152 [-1, 3, 22, 30] 81
BatchNorm2d-153 [-1, 3, 22, 30] 6
PReLU-154 [-1, 3, 22, 30] 3
CBR-155 [-1, 3, 22, 30] 0
Conv2d-156 [-1, 512, 22, 30] 1,536
BatchNorm2d-157 [-1, 512, 22, 30] 1,024
CB-158 [-1, 512, 22, 30] 0
PReLU-159 [-1, 512, 22, 30] 512
DownSampler-160 [-1, 512, 22, 30] 0
Conv2d-161 [-1, 128, 22, 30] 16,384
BatchNorm2d-162 [-1, 128, 22, 30] 256
PReLU-163 [-1, 128, 22, 30] 128
CBR-164 [-1, 128, 22, 30] 0
Conv2d-165 [-1, 128, 22, 30] 1,152
CDilated-166 [-1, 128, 22, 30] 0
Conv2d-167 [-1, 128, 22, 30] 1,152
CDilated-168 [-1, 128, 22, 30] 0
Conv2d-169 [-1, 128, 22, 30] 1,152
CDilated-170 [-1, 128, 22, 30] 0
Conv2d-171 [-1, 128, 22, 30] 1,152
CDilated-172 [-1, 128, 22, 30] 0
BatchNorm2d-173 [-1, 512, 22, 30] 1,024
PReLU-174 [-1, 512, 22, 30] 512
BR-175 [-1, 512, 22, 30] 0
Conv2d-176 [-1, 512, 22, 30] 65,536
BatchNorm2d-177 [-1, 512, 22, 30] 1,024
CB-178 [-1, 512, 22, 30] 0
PReLU-179 [-1, 512, 22, 30] 512
EESP-180 [-1, 512, 22, 30] 0
Conv2d-181 [-1, 128, 22, 30] 16,384
BatchNorm2d-182 [-1, 128, 22, 30] 256
PReLU-183 [-1, 128, 22, 30] 128
CBR-184 [-1, 128, 22, 30] 0
Conv2d-185 [-1, 128, 22, 30] 1,152
CDilated-186 [-1, 128, 22, 30] 0
Conv2d-187 [-1, 128, 22, 30] 1,152
CDilated-188 [-1, 128, 22, 30] 0
Conv2d-189 [-1, 128, 22, 30] 1,152
CDilated-190 [-1, 128, 22, 30] 0
Conv2d-191 [-1, 128, 22, 30] 1,152
CDilated-192 [-1, 128, 22, 30] 0
BatchNorm2d-193 [-1, 512, 22, 30] 1,024
PReLU-194 [-1, 512, 22, 30] 512
BR-195 [-1, 512, 22, 30] 0
Conv2d-196 [-1, 512, 22, 30] 65,536
BatchNorm2d-197 [-1, 512, 22, 30] 1,024
CB-198 [-1, 512, 22, 30] 0
PReLU-199 [-1, 512, 22, 30] 512
EESP-200 [-1, 512, 22, 30] 0
Conv2d-201 [-1, 128, 22, 30] 16,384
BatchNorm2d-202 [-1, 128, 22, 30] 256
PReLU-203 [-1, 128, 22, 30] 128
CBR-204 [-1, 128, 22, 30] 0
Conv2d-205 [-1, 128, 22, 30] 1,152
CDilated-206 [-1, 128, 22, 30] 0
Conv2d-207 [-1, 128, 22, 30] 1,152
CDilated-208 [-1, 128, 22, 30] 0
Conv2d-209 [-1, 128, 22, 30] 1,152
CDilated-210 [-1, 128, 22, 30] 0
Conv2d-211 [-1, 128, 22, 30] 1,152
CDilated-212 [-1, 128, 22, 30] 0
BatchNorm2d-213 [-1, 512, 22, 30] 1,024
PReLU-214 [-1, 512, 22, 30] 512
BR-215 [-1, 512, 22, 30] 0
Conv2d-216 [-1, 512, 22, 30] 65,536
BatchNorm2d-217 [-1, 512, 22, 30] 1,024
CB-218 [-1, 512, 22, 30] 0
PReLU-219 [-1, 512, 22, 30] 512
EESP-220 [-1, 512, 22, 30] 0
Conv2d-221 [-1, 128, 22, 30] 16,384
BatchNorm2d-222 [-1, 128, 22, 30] 256
PReLU-223 [-1, 128, 22, 30] 128
CBR-224 [-1, 128, 22, 30] 0
Conv2d-225 [-1, 128, 22, 30] 1,152
CDilated-226 [-1, 128, 22, 30] 0
Conv2d-227 [-1, 128, 22, 30] 1,152
CDilated-228 [-1, 128, 22, 30] 0
Conv2d-229 [-1, 128, 22, 30] 1,152
CDilated-230 [-1, 128, 22, 30] 0
Conv2d-231 [-1, 128, 22, 30] 1,152
CDilated-232 [-1, 128, 22, 30] 0
BatchNorm2d-233 [-1, 512, 22, 30] 1,024
PReLU-234 [-1, 512, 22, 30] 512
BR-235 [-1, 512, 22, 30] 0
Conv2d-236 [-1, 512, 22, 30] 65,536
BatchNorm2d-237 [-1, 512, 22, 30] 1,024
CB-238 [-1, 512, 22, 30] 0
PReLU-239 [-1, 512, 22, 30] 512
EESP-240 [-1, 512, 22, 30] 0
Conv2d-241 [-1, 128, 22, 30] 16,384
BatchNorm2d-242 [-1, 128, 22, 30] 256
PReLU-243 [-1, 128, 22, 30] 128
CBR-244 [-1, 128, 22, 30] 0
Conv2d-245 [-1, 128, 22, 30] 1,152
CDilated-246 [-1, 128, 22, 30] 0
Conv2d-247 [-1, 128, 22, 30] 1,152
CDilated-248 [-1, 128, 22, 30] 0
Conv2d-249 [-1, 128, 22, 30] 1,152
CDilated-250 [-1, 128, 22, 30] 0
Conv2d-251 [-1, 128, 22, 30] 1,152
CDilated-252 [-1, 128, 22, 30] 0
BatchNorm2d-253 [-1, 512, 22, 30] 1,024
PReLU-254 [-1, 512, 22, 30] 512
BR-255 [-1, 512, 22, 30] 0
Conv2d-256 [-1, 512, 22, 30] 65,536
BatchNorm2d-257 [-1, 512, 22, 30] 1,024
CB-258 [-1, 512, 22, 30] 0
PReLU-259 [-1, 512, 22, 30] 512
EESP-260 [-1, 512, 22, 30] 0
Conv2d-261 [-1, 128, 22, 30] 16,384
BatchNorm2d-262 [-1, 128, 22, 30] 256
PReLU-263 [-1, 128, 22, 30] 128
CBR-264 [-1, 128, 22, 30] 0
Conv2d-265 [-1, 128, 22, 30] 1,152
CDilated-266 [-1, 128, 22, 30] 0
Conv2d-267 [-1, 128, 22, 30] 1,152
CDilated-268 [-1, 128, 22, 30] 0
Conv2d-269 [-1, 128, 22, 30] 1,152
CDilated-270 [-1, 128, 22, 30] 0
Conv2d-271 [-1, 128, 22, 30] 1,152
CDilated-272 [-1, 128, 22, 30] 0
BatchNorm2d-273 [-1, 512, 22, 30] 1,024
PReLU-274 [-1, 512, 22, 30] 512
BR-275 [-1, 512, 22, 30] 0
Conv2d-276 [-1, 512, 22, 30] 65,536
BatchNorm2d-277 [-1, 512, 22, 30] 1,024
CB-278 [-1, 512, 22, 30] 0
PReLU-279 [-1, 512, 22, 30] 512
EESP-280 [-1, 512, 22, 30] 0
Conv2d-281 [-1, 128, 22, 30] 16,384
BatchNorm2d-282 [-1, 128, 22, 30] 256
PReLU-283 [-1, 128, 22, 30] 128
CBR-284 [-1, 128, 22, 30] 0
Conv2d-285 [-1, 128, 22, 30] 1,152
CDilated-286 [-1, 128, 22, 30] 0
Conv2d-287 [-1, 128, 22, 30] 1,152
CDilated-288 [-1, 128, 22, 30] 0
Conv2d-289 [-1, 128, 22, 30] 1,152
CDilated-290 [-1, 128, 22, 30] 0
Conv2d-291 [-1, 128, 22, 30] 1,152
CDilated-292 [-1, 128, 22, 30] 0
BatchNorm2d-293 [-1, 512, 22, 30] 1,024
PReLU-294 [-1, 512, 22, 30] 512
BR-295 [-1, 512, 22, 30] 0
Conv2d-296 [-1, 512, 22, 30] 65,536
BatchNorm2d-297 [-1, 512, 22, 30] 1,024
CB-298 [-1, 512, 22, 30] 0
PReLU-299 [-1, 512, 22, 30] 512
EESP-300 [-1, 512, 22, 30] 0
EESPNet-301 [[-1, 32, 176, 240], [-1, 128, 88, 120], [-1, 256, 44, 60], [-1, 512, 22, 30]] 0
Conv2d-302 [-1, 256, 22, 30] 131,072
BatchNorm2d-303 [-1, 256, 22, 30] 512
PReLU-304 [-1, 256, 22, 30] 256
CBR-305 [-1, 256, 22, 30] 0
Conv2d-306 [-1, 64, 44, 60] 8,192
BatchNorm2d-307 [-1, 64, 44, 60] 128
PReLU-308 [-1, 64, 44, 60] 64
CBR-309 [-1, 64, 44, 60] 0
Conv2d-310 [-1, 64, 44, 60] 576
CDilated-311 [-1, 64, 44, 60] 0
Conv2d-312 [-1, 64, 44, 60] 576
CDilated-313 [-1, 64, 44, 60] 0
Conv2d-314 [-1, 64, 44, 60] 576
CDilated-315 [-1, 64, 44, 60] 0
Conv2d-316 [-1, 64, 44, 60] 576
CDilated-317 [-1, 64, 44, 60] 0
BatchNorm2d-318 [-1, 256, 44, 60] 512
PReLU-319 [-1, 256, 44, 60] 256
BR-320 [-1, 256, 44, 60] 0
Conv2d-321 [-1, 256, 44, 60] 16,384
BatchNorm2d-322 [-1, 256, 44, 60] 512
CB-323 [-1, 256, 44, 60] 0
PReLU-324 [-1, 256, 44, 60] 256
EESP-325 [-1, 256, 44, 60] 0
Conv2d-326 [-1, 256, 22, 30] 2,304
C-327 [-1, 256, 22, 30] 0
Conv2d-328 [-1, 256, 11, 15] 2,304
C-329 [-1, 256, 11, 15] 0
Conv2d-330 [-1, 256, 6, 8] 2,304
C-331 [-1, 256, 6, 8] 0
Conv2d-332 [-1, 256, 3, 4] 2,304
C-333 [-1, 256, 3, 4] 0
Conv2d-334 [-1, 256, 44, 60] 327,680
BatchNorm2d-335 [-1, 256, 44, 60] 512
PReLU-336 [-1, 256, 44, 60] 256
CBR-337 [-1, 256, 44, 60] 0
PSPModule-338 [-1, 256, 44, 60] 0
Dropout2d-339 [-1, 256, 44, 60] 0
Conv2d-340 [-1, 11, 44, 60] 2,816
C-341 [-1, 11, 44, 60] 0
BatchNorm2d-342 [-1, 11, 44, 60] 22
PReLU-343 [-1, 11, 44, 60] 11
BR-344 [-1, 11, 44, 60] 0
Conv2d-345 [-1, 11, 88, 120] 1,529
BatchNorm2d-346 [-1, 11, 88, 120] 22
PReLU-347 [-1, 11, 88, 120] 11
CBR-348 [-1, 11, 88, 120] 0
Dropout2d-349 [-1, 43, 176, 240] 0
Conv2d-350 [-1, 11, 176, 240] 473
C-351 [-1, 11, 176, 240] 0
================================================================
Total params: 1,239,922
Trainable params: 1,239,922
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 1.93
Forward/backward pass size (MB): 49542447760184.38
Params size (MB): 4.73
Estimated Total Size (MB): 49542447760191.05
----------------------------------------------------------------
EESPNet_SegX4(
1.838 GMac, 100.000% MACs,
(net): EESPNet(
0.762 GMac, 41.465% MACs,
(level1): CBR(
0.041 GMac, 2.207% MACs,
(conv): Conv2d(0.036 GMac, 1.986% MACs, 3, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.003 GMac, 0.147% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.074% MACs, num_parameters=32)
)
(level2_0): DownSampler(
0.054 GMac, 2.929% MACs,
(eesp): EESP1(
0.043 GMac, 2.363% MACs,
(proj_1x1): CBR1(
0.017 GMac, 0.929% MACs,
(conv0): Conv2d(0.005 GMac, 0.297% MACs, 32, 32, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), groups=32, bias=False)
(bn0): BatchNorm2d(0.003 GMac, 0.149% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act0): PReLU(0.001 GMac, 0.074% MACs, num_parameters=32)
(conv): Conv2d(0.005 GMac, 0.297% MACs, 32, 16, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=16)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated1(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), groups=16, bias=False)
)
(1): CDilated1(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(2, 2), groups=16, bias=False)
)
(2): CDilated1(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(3, 3), groups=16, bias=False)
)
(3): CDilated1(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), dilation=(4, 4), groups=16, bias=False)
)
)
(conv_1x1_exp): CB(
0.018 GMac, 0.993% MACs,
(conv): Conv2d(0.016 GMac, 0.883% MACs, 64, 96, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.002 GMac, 0.110% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=64)
)
(module_act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=96)
)
(avg): AvgPool2d(0.001 GMac, 0.074% MACs, kernel_size=3, stride=2, padding=1)
(inp_reinf): Sequential(
0.008 GMac, 0.419% MACs,
(0): CBR(
0.001 GMac, 0.052% MACs,
(conv): Conv2d(0.001 GMac, 0.047% MACs, 3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.002% MACs, num_parameters=3)
)
(1): CB(
0.007 GMac, 0.368% MACs,
(conv): Conv2d(0.004 GMac, 0.221% MACs, 3, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.003 GMac, 0.147% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(act): PReLU(0.001 GMac, 0.074% MACs, num_parameters=128)
)
(level3_0): DownSampler(
0.033 GMac, 1.785% MACs,
(eesp): EESP1(
0.027 GMac, 1.478% MACs,
(proj_1x1): CBR1(
0.017 GMac, 0.935% MACs,
(conv0): Conv2d(0.006 GMac, 0.300% MACs, 128, 128, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(bn0): BatchNorm2d(0.003 GMac, 0.150% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act0): PReLU(0.001 GMac, 0.075% MACs, num_parameters=128)
(conv): Conv2d(0.007 GMac, 0.375% MACs, 128, 20, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.023% MACs, 20, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.012% MACs, num_parameters=20)
)
(spp_dw): ModuleList(
0.002 GMac, 0.103% MACs,
(0): CDilated1(
0.0 GMac, 0.026% MACs,
(conv): Conv2d(0.0 GMac, 0.026% MACs, 20, 20, kernel_size=(3, 3), stride=(2, 2), groups=20, bias=False)
)
(1): CDilated1(
0.0 GMac, 0.026% MACs,
(conv): Conv2d(0.0 GMac, 0.026% MACs, 20, 20, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(2, 2), groups=20, bias=False)
)
(2): CDilated1(
0.0 GMac, 0.026% MACs,
(conv): Conv2d(0.0 GMac, 0.026% MACs, 20, 20, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(3, 3), groups=20, bias=False)
)
(3): CDilated1(
0.0 GMac, 0.026% MACs,
(conv): Conv2d(0.0 GMac, 0.026% MACs, 20, 20, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), dilation=(4, 4), groups=20, bias=False)
)
)
(conv_1x1_exp): CB(
0.007 GMac, 0.405% MACs,
(conv): Conv2d(0.007 GMac, 0.368% MACs, 80, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.034% MACs,
(bn): BatchNorm2d(0.0 GMac, 0.023% MACs, 80, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.011% MACs, num_parameters=80)
)
(module_act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=128)
)
(avg): AvgPool2d(0.001 GMac, 0.074% MACs, kernel_size=3, stride=2, padding=1)
(inp_reinf): Sequential(
0.004 GMac, 0.197% MACs,
(0): CBR(
0.0 GMac, 0.013% MACs,
(conv): Conv2d(0.0 GMac, 0.012% MACs, 3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=3)
)
(1): CB(
0.003 GMac, 0.184% MACs,
(conv): Conv2d(0.002 GMac, 0.110% MACs, 3, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(level3): ModuleList(
0.194 GMac, 10.564% MACs,
(0): EESP(
0.065 GMac, 3.521% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.616% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 256, 64, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=64)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(1): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=64, bias=False)
)
(2): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=64, bias=False)
)
(3): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), groups=64, bias=False)
)
)
(conv_1x1_exp): CB(
0.045 GMac, 2.427% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 256, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(module_act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(1): EESP(
0.065 GMac, 3.521% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.616% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 256, 64, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=64)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(1): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=64, bias=False)
)
(2): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=64, bias=False)
)
(3): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), groups=64, bias=False)
)
)
(conv_1x1_exp): CB(
0.045 GMac, 2.427% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 256, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(module_act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(2): EESP(
0.065 GMac, 3.521% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.616% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 256, 64, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=64)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(1): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=64, bias=False)
)
(2): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=64, bias=False)
)
(3): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(4, 4), dilation=(4, 4), groups=64, bias=False)
)
)
(conv_1x1_exp): CB(
0.045 GMac, 2.427% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 256, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(module_act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
)
(level4_0): DownSampler(
0.025 GMac, 1.357% MACs,
(eesp): EESP1(
0.022 GMac, 1.207% MACs,
(proj_1x1): CBR1(
0.013 GMac, 0.708% MACs,
(conv0): Conv2d(0.003 GMac, 0.153% MACs, 256, 256, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
(bn0): BatchNorm2d(0.001 GMac, 0.076% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act0): PReLU(0.001 GMac, 0.038% MACs, num_parameters=256)
(conv): Conv2d(0.008 GMac, 0.421% MACs, 256, 44, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.013% MACs, 44, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.007% MACs, num_parameters=44)
)
(spp_dw): ModuleList(
0.001 GMac, 0.057% MACs,
(0): CDilated1(
0.0 GMac, 0.014% MACs,
(conv): Conv2d(0.0 GMac, 0.014% MACs, 44, 44, kernel_size=(3, 3), stride=(2, 2), groups=44, bias=False)
)
(1): CDilated1(
0.0 GMac, 0.014% MACs,
(conv): Conv2d(0.0 GMac, 0.014% MACs, 44, 44, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), dilation=(2, 2), groups=44, bias=False)
)
(2): CDilated1(
0.0 GMac, 0.014% MACs,
(conv): Conv2d(0.0 GMac, 0.014% MACs, 44, 44, kernel_size=(3, 3), stride=(2, 2), padding=(2, 2), dilation=(3, 3), groups=44, bias=False)
)
(3): CDilated1(
0.0 GMac, 0.014% MACs,
(conv): Conv2d(0.0 GMac, 0.014% MACs, 44, 44, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), dilation=(4, 4), groups=44, bias=False)
)
)
(conv_1x1_exp): CB(
0.008 GMac, 0.423% MACs,
(conv): Conv2d(0.007 GMac, 0.405% MACs, 176, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.0 GMac, 0.019% MACs,
(bn): BatchNorm2d(0.0 GMac, 0.013% MACs, 176, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.006% MACs, num_parameters=176)
)
(module_act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=256)
)
(avg): AvgPool2d(0.001 GMac, 0.037% MACs, kernel_size=3, stride=2, padding=1)
(inp_reinf): Sequential(
0.002 GMac, 0.095% MACs,
(0): CBR(
0.0 GMac, 0.003% MACs,
(conv): Conv2d(0.0 GMac, 0.003% MACs, 3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.000% MACs, num_parameters=3)
)
(1): CB(
0.002 GMac, 0.092% MACs,
(conv): Conv2d(0.001 GMac, 0.055% MACs, 3, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(level4): ModuleList(
0.416 GMac, 22.623% MACs,
(0): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(1): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(2): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(3): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(4): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(5): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(6): EESP(
0.059 GMac, 3.232% MACs,
(proj_1x1): CBR(
0.011 GMac, 0.602% MACs,
(conv): Conv2d(0.011 GMac, 0.588% MACs, 512, 128, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.009% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.005% MACs, num_parameters=128)
)
(spp_dw): ModuleList(
0.003 GMac, 0.165% MACs,
(0): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(1): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
)
(2): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=128, bias=False)
)
(3): CDilated(
0.001 GMac, 0.041% MACs,
(conv): Conv2d(0.001 GMac, 0.041% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=128, bias=False)
)
)
(conv_1x1_exp): CB(
0.044 GMac, 2.391% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 512, 512, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.001 GMac, 0.055% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.037% MACs, 512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
(module_act): PReLU(0.0 GMac, 0.018% MACs, num_parameters=512)
)
)
)
(proj_L4_C): CBR(
0.087 GMac, 4.735% MACs,
(conv): Conv2d(0.087 GMac, 4.707% MACs, 512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=256)
)
(pspMod): Sequential(
0.945 GMac, 51.406% MACs,
(0): EESP(
0.076 GMac, 4.110% MACs,
(proj_1x1): CBR(
0.022 GMac, 1.204% MACs,
(conv): Conv2d(0.022 GMac, 1.177% MACs, 512, 64, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.018% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.009% MACs, num_parameters=64)
)
(spp_dw): ModuleList(
0.006 GMac, 0.331% MACs,
(0): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(1): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64, bias=False)
)
(2): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2), dilation=(2, 2), groups=64, bias=False)
)
(3): CDilated(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(3, 3), dilation=(3, 3), groups=64, bias=False)
)
)
(conv_1x1_exp): CB(
0.045 GMac, 2.427% MACs,
(conv): Conv2d(0.043 GMac, 2.354% MACs, 256, 256, kernel_size=(1, 1), stride=(1, 1), groups=4, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(br_after_cat): BR(
0.002 GMac, 0.110% MACs,
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(module_act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
(1): PSPModule(
0.869 GMac, 47.296% MACs,
(stages): ModuleList(
0.002 GMac, 0.111% MACs,
(0): C(
0.002 GMac, 0.083% MACs,
(conv): Conv2d(0.002 GMac, 0.083% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
)
(1): C(
0.0 GMac, 0.021% MACs,
(conv): Conv2d(0.0 GMac, 0.021% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
)
(2): C(
0.0 GMac, 0.006% MACs,
(conv): Conv2d(0.0 GMac, 0.006% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
)
(3): C(
0.0 GMac, 0.002% MACs,
(conv): Conv2d(0.0 GMac, 0.002% MACs, 256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)
)
)
(project): CBR(
0.867 GMac, 47.185% MACs,
(conv): Conv2d(0.865 GMac, 47.075% MACs, 1280, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.074% MACs, 256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.001 GMac, 0.037% MACs, num_parameters=256)
)
)
)
(project_l3): Sequential(
0.007 GMac, 0.405% MACs,
(0): Dropout2d(0.0 GMac, 0.000% MACs, p=0.2, inplace=False)
(1): C(
0.007 GMac, 0.405% MACs,
(conv): Conv2d(0.007 GMac, 0.405% MACs, 256, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
)
(act_l3): BR(
0.0 GMac, 0.005% MACs,
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.002% MACs, num_parameters=11)
)
(project_l2): CBR(
0.016 GMac, 0.898% MACs,
(conv): Conv2d(0.016 GMac, 0.879% MACs, 139, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.013% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(act): PReLU(0.0 GMac, 0.006% MACs, num_parameters=11)
)
(project_l1): Sequential(
0.02 GMac, 1.087% MACs,
(0): Dropout2d(0.0 GMac, 0.000% MACs, p=0.2, inplace=False)
(1): C(
0.02 GMac, 1.087% MACs,
(conv): Conv2d(0.02 GMac, 1.087% MACs, 43, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
)
)
)
1.837661544 GMac 1.240402 4.731758117675781 MB
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.activation.PReLU'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CBR'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CBR1'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CDilated1'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.ModuleList'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CB'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.BR'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.ModelX4.EESP1'>. Treat it as zero Macs and zero Params.
[INFO] Register count_avgpool() for <class 'torch.nn.modules.pooling.AvgPool2d'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.container.Sequential'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.ModelX4.DownSampler'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.CDilated'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.ModelX4.EESP'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.ModelX4.EESPNet'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.C'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'model.ESPNet_v2.cnn_utilsX4.PSPModule'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class 'torch.nn.modules.dropout.Dropout2d'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.EESPNet_SegX4'>. Treat it as zero Macs and zero Params.
1.813505596 GMac 1.227267 4.681652069091797 MB
Process finished with exit code 0
''' | [
"torch.cat",
"torch.nn.functional.interpolate",
"torch.cuda.is_available",
"torch.load",
"torch.nn.Dropout2d",
"torch.randn",
"torch.nn.DataParallel"
] | 1.1.0 | Ethan-ye/Efficient-Segmentation-Networks | 27272e43126a507a6d93b21cd2372f5432f61237 |
1.1 | # -*- coding: utf-8 -*-
###########################################################################
# https://github.com/Soulempty/BiSeNetV2-pytorch
import torch
import torch.nn as nn
from torch.nn import functional as F
from torchsummary import summary
from utils.activations import NON_LINEARITY
from fvcore.nn.flop_count import flop_count # https://github.com/facebookresearch/fvcore
from tools.flops_counter.ptflops import get_model_complexity_info
from thop import profile # https://github.com/Lyken17/pytorch-OpCounter
__all__ = ['BiSeNetV2X28']
class conv2d(nn.Module):
def __init__(self, in_dim, out_dim, k, pad, stride, groups=1, bias=False, use_bn=True, use_rl=True):
super(conv2d, self).__init__()
self.use_bn = use_bn
self.use_rl = use_rl
self.conv = nn.Conv2d(in_dim, out_dim, k, padding=pad, stride=stride, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_dim)
self.relu = nn.ReLU(inplace=True)
def forward(self, bottom):
if self.use_bn and self.use_rl:
return self.relu(self.bn(self.conv(bottom)))
elif self.use_bn:
return self.bn(self.conv(bottom))
else:
return self.conv(bottom)
class StemBlock(nn.Module):
def __init__(self):
super(StemBlock, self).__init__()
self.conv0 = conv2d(3, 3, 2, 1, 1)
self.conv1 = conv2d(3, 16, 3, 0, 2)
# self.conv_1x1 = conv2d(16, 8, 1, 0, 1)
self.conv_2x2 = conv2d(16, 8, 2, 1, 1)
self.conv_3x3 = conv2d(8, 16, 3, 0, 2)
self.mpooling = nn.MaxPool2d(3, 2, 1)
self.conv2 = conv2d(32, 16, 3, 1, 1)
def forward(self, bottom):
base = self.conv0(bottom)
base = self.conv1(base)
# conv_1 = self.conv_1x1(base)
conv_2 = self.conv_2x2(base)
conv_3 = self.conv_3x3(conv_2)
pool = self.mpooling(base)
cat = torch.cat([conv_3, pool], 1)
res = self.conv2(cat)
return res
class ContextEmbeddingBlock(nn.Module):
def __init__(self, in_dim):
super(ContextEmbeddingBlock, self).__init__()
self.gap = nn.AdaptiveAvgPool2d(1) # 1
self.bn1 = nn.BatchNorm2d(in_dim)
self.conv1 = conv2d(in_dim, in_dim, 1, 0, 1)
self.conv2 = conv2d(in_dim, in_dim, 3, 1, 1, use_bn=False, use_rl=False)
def forward(self, bottom):
gap = self.gap(bottom)
bn = self.bn1(gap)
conv1 = self.conv1(bn)
feat = bottom + conv1
res = self.conv2(feat)
return res
class GatherExpansion(nn.Module):
def __init__(self, in_dim, out_dim, stride=1, exp=6):
super(GatherExpansion, self).__init__()
exp_dim = in_dim * exp
self.stride = stride
self.conv1 = conv2d(in_dim, exp_dim, 3, 1, 1)
self.dwconv2 = conv2d(exp_dim, exp_dim, 3, 1, 1, exp_dim, use_rl=False)
self.conv_11 = conv2d(exp_dim, out_dim, 1, 0, 1, use_rl=False)
self.dwconv1 = conv2d(exp_dim, exp_dim, 3, 1, 2, exp_dim, use_rl=False)
self.dwconv3 = conv2d(in_dim, in_dim, 3, 1, 2, in_dim, use_rl=False)
self.conv_12 = conv2d(in_dim, out_dim, 1, 0, 1, use_rl=False)
self.relu = nn.ReLU(inplace=True)
def forward(self, bottom):
base = self.conv1(bottom)
if self.stride == 2:
base = self.dwconv1(base)
bottom = self.dwconv3(bottom)
bottom = self.conv_12(bottom)
x = self.dwconv2(base)
x = self.conv_11(x)
res = self.relu(x + bottom)
return res
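# Note (editor comment, not in the original source): with the default exp=6, a
# GatherExpansion block widens in_dim -> in_dim*exp channels for its depthwise
# stage (e.g. 16 -> 96 in s3_ge1), which matches the 16 -> 96 Conv2d rows in the
# profiling output quoted at the bottom of this file.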
class BGA(nn.Module):
def __init__(self, in_dim):
super(BGA, self).__init__()
self.in_dim = in_dim
self.db_dwconv = conv2d(in_dim, in_dim, 3, 1, 1, in_dim, use_rl=False)
self.db_conv1x1 = conv2d(in_dim, in_dim, 1, 0, 1, use_rl=False, use_bn=False)
self.db_conv = conv2d(in_dim, in_dim, 3, 1, 2, use_rl=False)
self.db_apooling = nn.AvgPool2d(3, 2, 1)
self.sb_dwconv = conv2d(in_dim, in_dim, 3, 1, 1, in_dim, use_rl=False)
self.sb_conv1x1 = conv2d(in_dim, in_dim, 1, 0, 1, use_rl=False, use_bn=False)
self.sb_conv = conv2d(in_dim, in_dim, 3, 1, 1, use_rl=False)
self.sb_sigmoid = nn.Sigmoid()
self.conv = conv2d(in_dim, in_dim, 3, 1, 1, use_rl=False)
def forward(self, db, sb):
db_dwc = self.db_dwconv(db)
db_out = self.db_conv1x1(db_dwc) #
db_conv = self.db_conv(db)
db_pool = self.db_apooling(db_conv)
sb_dwc = self.sb_dwconv(sb)
sb_out = self.sb_sigmoid(self.sb_conv1x1(sb_dwc)) #
sb_conv = self.sb_conv(sb)
sb_up = self.sb_sigmoid(F.interpolate(sb_conv, size=db_out.size()[2:], mode="bilinear", align_corners=True))
db_l = db_out * sb_up
sb_r = F.interpolate(sb_out * db_pool, size=db_out.size()[2:], mode="bilinear", align_corners=True)
res = self.conv(db_l + sb_r)
return res
class SegHead(nn.Module):
def __init__(self, in_dim, out_dim, classes):
super(SegHead, self).__init__()
# self.size = size
self.conv = conv2d(in_dim, out_dim, 3, 1, 1)
self.classes = conv2d(out_dim, classes, 1, 0, 1, use_bn=False, use_rl=False)
def forward(self, feat,size):
x = self.conv(feat)
x = self.classes(x)
pred = F.interpolate(x, size=size, mode="bilinear", align_corners=True)
return pred
class DetailedBranch(nn.Module):
def __init__(self):
super(DetailedBranch, self).__init__()
self.s1_conv1 = conv2d(3, 64, 3, 1, 2)
self.s1_conv2 = conv2d(64, 64, 3, 1, 1)
self.s2_conv1 = conv2d(64, 64, 3, 1, 2)
self.s2_conv2 = conv2d(64, 64, 3, 1, 1)
self.s2_conv3 = conv2d(64, 64, 3, 1, 1)
self.s3_conv1 = conv2d(64, 128, 3, 1, 2)
self.s3_conv2 = conv2d(128, 128, 3, 1, 1)
self.s3_conv3 = conv2d(128, 128, 3, 1, 1)
def forward(self, bottom):
s1_1 = self.s1_conv1(bottom)
s1_2 = self.s1_conv2(s1_1)
s2_1 = self.s2_conv1(s1_2)
s2_2 = self.s2_conv2(s2_1)
s2_3 = self.s2_conv3(s2_2)
s3_1 = self.s3_conv1(s2_3)
s3_2 = self.s3_conv2(s3_1)
s3_3 = self.s3_conv3(s3_2)
return s3_3
class SemanticBranch(nn.Module):
def __init__(self, classes):
super(SemanticBranch, self).__init__()
# self.training = True
self.stem = StemBlock()
self.s3_ge1 = GatherExpansion(16, 32, 2)
self.s3_ge2 = GatherExpansion(32, 32)
self.s4_ge1 = GatherExpansion(32, 64, 2)
self.s4_ge2 = GatherExpansion(64, 64)
self.s5_ge1 = GatherExpansion(64, 128, 2)
self.s5_ge2 = GatherExpansion(128, 128)
self.s5_ge3 = GatherExpansion(128, 128)
self.s5_ge4 = GatherExpansion(128, 128)
self.s5_ge5 = GatherExpansion(128, 128, exp=1)
# if self.training:
self.seghead1 = SegHead(16, 16, classes)
self.seghead2 = SegHead(32, 32, classes)
self.seghead3 = SegHead(64, 64, classes)
self.seghead4 = SegHead(128, 128, classes)
self.ceb = ContextEmbeddingBlock(128)
def forward(self, bottom, size):
stg12 = self.stem(bottom)
# print(stg12.size())
stg3 = self.s3_ge1(stg12)
stg3 = self.s3_ge2(stg3)
# print(stg3.size())
stg4 = self.s4_ge1(stg3)
stg4 = self.s4_ge2(stg4)
# print(stg4.size())
stg5 = self.s5_ge1(stg4)
stg5 = self.s5_ge2(stg5)
stg5 = self.s5_ge3(stg5)
stg5 = self.s5_ge4(stg5)
stg5 = self.s5_ge5(stg5)
# print(stg5.size())
out = self.ceb(stg5)
# if self.training:
seghead1 = self.seghead1(stg12,size)
seghead2 = self.seghead2(stg3,size)
seghead3 = self.seghead3(stg4,size)
seghead4 = self.seghead4(stg5,size)
return out, seghead1, seghead2, seghead3, seghead4
# else:
# return out
class BiSeNetV2X28(nn.Module):
def __init__(self, classes):
super(BiSeNetV2X28, self).__init__()
# self.training = True
self.db = DetailedBranch()
self.sb = SemanticBranch(classes)
self.bga = BGA(128)
self.seghead = SegHead(128, 128, classes)
self._init_params()
# self.criterion = nn.CrossEntropyLoss(ignore_index=255)
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, data, y=None):
size = data.size()[2:]
db = self.db(data)
# if self.training:
sb, head1, head2, head3, head4 = self.sb(data,size)
# else:
# sb = self.sb(data,size)
bga = self.bga(db, sb)
pred = self.seghead(bga,size)
# if self.training:
# main_loss = self.criterion(pred, y)
# aux1_loss = self.criterion(head1, y)
# aux2_loss = self.criterion(head2, y)
# aux3_loss = self.criterion(head3, y)
# aux4_loss = self.criterion(head4, y)
# return pred.max(1)[1], main_loss, (aux1_loss, aux2_loss, aux3_loss, aux4_loss)
return [pred,head1, head2, head3, head4]
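# Note (editor sketch, not part of the original repository): forward() returns the
# main prediction followed by the four auxiliary SegHead outputs. A minimal
# inference sketch, assuming a trained `model` and an input tensor `image` of
# shape (N, 3, H, W), would keep only the first element and take the per-pixel
# argmax, mirroring the commented-out `pred.max(1)[1]` above:
#
# model.eval()
# with torch.no_grad():
#     seg_map = model(image)[0].max(1)[1]  # (N, H, W) class indices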
if __name__ == "__main__":
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = BiSeNetV2X28(classes=11).to(device)
summary(model,(3, 352, 480))
flops_count, params_count = get_model_complexity_info(model, (3, 352, 480),
as_strings=False,
print_per_layer_stat=True)
print(flops_count/1000000000,'GMac', params_count/1000000, params_count/1024/1024*4,'MB')
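    # Note (editor comment, not in the original source): thop profiles the batch of 2
    # built below, so dividing the reported MACs by 2e9 gives per-image GMac in the
    # final print statement.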
x = torch.randn(2, 3, 352, 480).to(device)
input = x
macs, params = profile(model, inputs=(input,))
print(macs/2000000000,'GMac', params/1000000, params/1024/1024*4,'MB')
# from fvcore.nn.jit_handles import batchnorm_flop_jit
# from fvcore.nn.jit_handles import generic_activation_jit
#
# supported_ops = {
# "aten::batch_norm": batchnorm_flop_jit,
# }
# flop_dict, _ = flop_count(model, (x,), supported_ops)
# print(flop_dict)
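# The triple-quoted block below is the captured console output for a 3x352x480
# input: the torchsummary layer table, the ptflops per-layer MAC breakdown, and
# the thop totals.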
'''
/home/ethan/anaconda3/envs/py36_cuda101/bin/python /home/ethan/codes/Efficient-Segmentation-Networks/model/BiSeNetV2X28.py
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 176, 240] 1,728
BatchNorm2d-2 [-1, 64, 176, 240] 128
ReLU-3 [-1, 64, 176, 240] 0
conv2d-4 [-1, 64, 176, 240] 0
Conv2d-5 [-1, 64, 176, 240] 36,864
BatchNorm2d-6 [-1, 64, 176, 240] 128
ReLU-7 [-1, 64, 176, 240] 0
conv2d-8 [-1, 64, 176, 240] 0
Conv2d-9 [-1, 64, 88, 120] 36,864
BatchNorm2d-10 [-1, 64, 88, 120] 128
ReLU-11 [-1, 64, 88, 120] 0
conv2d-12 [-1, 64, 88, 120] 0
Conv2d-13 [-1, 64, 88, 120] 36,864
BatchNorm2d-14 [-1, 64, 88, 120] 128
ReLU-15 [-1, 64, 88, 120] 0
conv2d-16 [-1, 64, 88, 120] 0
Conv2d-17 [-1, 64, 88, 120] 36,864
BatchNorm2d-18 [-1, 64, 88, 120] 128
ReLU-19 [-1, 64, 88, 120] 0
conv2d-20 [-1, 64, 88, 120] 0
Conv2d-21 [-1, 128, 44, 60] 73,728
BatchNorm2d-22 [-1, 128, 44, 60] 256
ReLU-23 [-1, 128, 44, 60] 0
conv2d-24 [-1, 128, 44, 60] 0
Conv2d-25 [-1, 128, 44, 60] 147,456
BatchNorm2d-26 [-1, 128, 44, 60] 256
ReLU-27 [-1, 128, 44, 60] 0
conv2d-28 [-1, 128, 44, 60] 0
Conv2d-29 [-1, 128, 44, 60] 147,456
BatchNorm2d-30 [-1, 128, 44, 60] 256
ReLU-31 [-1, 128, 44, 60] 0
conv2d-32 [-1, 128, 44, 60] 0
DetailedBranch-33 [-1, 128, 44, 60] 0
Conv2d-34 [-1, 3, 353, 481] 36
BatchNorm2d-35 [-1, 3, 353, 481] 6
ReLU-36 [-1, 3, 353, 481] 0
conv2d-37 [-1, 3, 353, 481] 0
Conv2d-38 [-1, 16, 176, 240] 432
BatchNorm2d-39 [-1, 16, 176, 240] 32
ReLU-40 [-1, 16, 176, 240] 0
conv2d-41 [-1, 16, 176, 240] 0
Conv2d-42 [-1, 8, 176, 240] 128
BatchNorm2d-43 [-1, 8, 176, 240] 16
ReLU-44 [-1, 8, 176, 240] 0
conv2d-45 [-1, 8, 176, 240] 0
Conv2d-46 [-1, 8, 177, 241] 256
BatchNorm2d-47 [-1, 8, 177, 241] 16
ReLU-48 [-1, 8, 177, 241] 0
conv2d-49 [-1, 8, 177, 241] 0
Conv2d-50 [-1, 16, 88, 120] 1,152
BatchNorm2d-51 [-1, 16, 88, 120] 32
ReLU-52 [-1, 16, 88, 120] 0
conv2d-53 [-1, 16, 88, 120] 0
MaxPool2d-54 [-1, 16, 88, 120] 0
Conv2d-55 [-1, 16, 88, 120] 4,608
BatchNorm2d-56 [-1, 16, 88, 120] 32
ReLU-57 [-1, 16, 88, 120] 0
conv2d-58 [-1, 16, 88, 120] 0
StemBlock-59 [-1, 16, 88, 120] 0
Conv2d-60 [-1, 96, 88, 120] 13,824
BatchNorm2d-61 [-1, 96, 88, 120] 192
ReLU-62 [-1, 96, 88, 120] 0
conv2d-63 [-1, 96, 88, 120] 0
Conv2d-64 [-1, 96, 44, 60] 864
BatchNorm2d-65 [-1, 96, 44, 60] 192
conv2d-66 [-1, 96, 44, 60] 0
Conv2d-67 [-1, 16, 44, 60] 144
BatchNorm2d-68 [-1, 16, 44, 60] 32
conv2d-69 [-1, 16, 44, 60] 0
Conv2d-70 [-1, 32, 44, 60] 512
BatchNorm2d-71 [-1, 32, 44, 60] 64
conv2d-72 [-1, 32, 44, 60] 0
Conv2d-73 [-1, 96, 44, 60] 864
BatchNorm2d-74 [-1, 96, 44, 60] 192
conv2d-75 [-1, 96, 44, 60] 0
Conv2d-76 [-1, 32, 44, 60] 3,072
BatchNorm2d-77 [-1, 32, 44, 60] 64
conv2d-78 [-1, 32, 44, 60] 0
ReLU-79 [-1, 32, 44, 60] 0
GatherExpansion-80 [-1, 32, 44, 60] 0
Conv2d-81 [-1, 192, 44, 60] 55,296
BatchNorm2d-82 [-1, 192, 44, 60] 384
ReLU-83 [-1, 192, 44, 60] 0
conv2d-84 [-1, 192, 44, 60] 0
Conv2d-85 [-1, 192, 44, 60] 1,728
BatchNorm2d-86 [-1, 192, 44, 60] 384
conv2d-87 [-1, 192, 44, 60] 0
Conv2d-88 [-1, 32, 44, 60] 6,144
BatchNorm2d-89 [-1, 32, 44, 60] 64
conv2d-90 [-1, 32, 44, 60] 0
ReLU-91 [-1, 32, 44, 60] 0
GatherExpansion-92 [-1, 32, 44, 60] 0
Conv2d-93 [-1, 192, 44, 60] 55,296
BatchNorm2d-94 [-1, 192, 44, 60] 384
ReLU-95 [-1, 192, 44, 60] 0
conv2d-96 [-1, 192, 44, 60] 0
Conv2d-97 [-1, 192, 22, 30] 1,728
BatchNorm2d-98 [-1, 192, 22, 30] 384
conv2d-99 [-1, 192, 22, 30] 0
Conv2d-100 [-1, 32, 22, 30] 288
BatchNorm2d-101 [-1, 32, 22, 30] 64
conv2d-102 [-1, 32, 22, 30] 0
Conv2d-103 [-1, 64, 22, 30] 2,048
BatchNorm2d-104 [-1, 64, 22, 30] 128
conv2d-105 [-1, 64, 22, 30] 0
Conv2d-106 [-1, 192, 22, 30] 1,728
BatchNorm2d-107 [-1, 192, 22, 30] 384
conv2d-108 [-1, 192, 22, 30] 0
Conv2d-109 [-1, 64, 22, 30] 12,288
BatchNorm2d-110 [-1, 64, 22, 30] 128
conv2d-111 [-1, 64, 22, 30] 0
ReLU-112 [-1, 64, 22, 30] 0
GatherExpansion-113 [-1, 64, 22, 30] 0
Conv2d-114 [-1, 384, 22, 30] 221,184
BatchNorm2d-115 [-1, 384, 22, 30] 768
ReLU-116 [-1, 384, 22, 30] 0
conv2d-117 [-1, 384, 22, 30] 0
Conv2d-118 [-1, 384, 22, 30] 3,456
BatchNorm2d-119 [-1, 384, 22, 30] 768
conv2d-120 [-1, 384, 22, 30] 0
Conv2d-121 [-1, 64, 22, 30] 24,576
BatchNorm2d-122 [-1, 64, 22, 30] 128
conv2d-123 [-1, 64, 22, 30] 0
ReLU-124 [-1, 64, 22, 30] 0
GatherExpansion-125 [-1, 64, 22, 30] 0
Conv2d-126 [-1, 384, 22, 30] 221,184
BatchNorm2d-127 [-1, 384, 22, 30] 768
ReLU-128 [-1, 384, 22, 30] 0
conv2d-129 [-1, 384, 22, 30] 0
Conv2d-130 [-1, 384, 11, 15] 3,456
BatchNorm2d-131 [-1, 384, 11, 15] 768
conv2d-132 [-1, 384, 11, 15] 0
Conv2d-133 [-1, 64, 11, 15] 576
BatchNorm2d-134 [-1, 64, 11, 15] 128
conv2d-135 [-1, 64, 11, 15] 0
Conv2d-136 [-1, 128, 11, 15] 8,192
BatchNorm2d-137 [-1, 128, 11, 15] 256
conv2d-138 [-1, 128, 11, 15] 0
Conv2d-139 [-1, 384, 11, 15] 3,456
BatchNorm2d-140 [-1, 384, 11, 15] 768
conv2d-141 [-1, 384, 11, 15] 0
Conv2d-142 [-1, 128, 11, 15] 49,152
BatchNorm2d-143 [-1, 128, 11, 15] 256
conv2d-144 [-1, 128, 11, 15] 0
ReLU-145 [-1, 128, 11, 15] 0
GatherExpansion-146 [-1, 128, 11, 15] 0
Conv2d-147 [-1, 768, 11, 15] 884,736
BatchNorm2d-148 [-1, 768, 11, 15] 1,536
ReLU-149 [-1, 768, 11, 15] 0
conv2d-150 [-1, 768, 11, 15] 0
Conv2d-151 [-1, 768, 11, 15] 6,912
BatchNorm2d-152 [-1, 768, 11, 15] 1,536
conv2d-153 [-1, 768, 11, 15] 0
Conv2d-154 [-1, 128, 11, 15] 98,304
BatchNorm2d-155 [-1, 128, 11, 15] 256
conv2d-156 [-1, 128, 11, 15] 0
ReLU-157 [-1, 128, 11, 15] 0
GatherExpansion-158 [-1, 128, 11, 15] 0
Conv2d-159 [-1, 768, 11, 15] 884,736
BatchNorm2d-160 [-1, 768, 11, 15] 1,536
ReLU-161 [-1, 768, 11, 15] 0
conv2d-162 [-1, 768, 11, 15] 0
Conv2d-163 [-1, 768, 11, 15] 6,912
BatchNorm2d-164 [-1, 768, 11, 15] 1,536
conv2d-165 [-1, 768, 11, 15] 0
Conv2d-166 [-1, 128, 11, 15] 98,304
BatchNorm2d-167 [-1, 128, 11, 15] 256
conv2d-168 [-1, 128, 11, 15] 0
ReLU-169 [-1, 128, 11, 15] 0
GatherExpansion-170 [-1, 128, 11, 15] 0
Conv2d-171 [-1, 768, 11, 15] 884,736
BatchNorm2d-172 [-1, 768, 11, 15] 1,536
ReLU-173 [-1, 768, 11, 15] 0
conv2d-174 [-1, 768, 11, 15] 0
Conv2d-175 [-1, 768, 11, 15] 6,912
BatchNorm2d-176 [-1, 768, 11, 15] 1,536
conv2d-177 [-1, 768, 11, 15] 0
Conv2d-178 [-1, 128, 11, 15] 98,304
BatchNorm2d-179 [-1, 128, 11, 15] 256
conv2d-180 [-1, 128, 11, 15] 0
ReLU-181 [-1, 128, 11, 15] 0
GatherExpansion-182 [-1, 128, 11, 15] 0
Conv2d-183 [-1, 128, 11, 15] 147,456
BatchNorm2d-184 [-1, 128, 11, 15] 256
ReLU-185 [-1, 128, 11, 15] 0
conv2d-186 [-1, 128, 11, 15] 0
Conv2d-187 [-1, 128, 11, 15] 1,152
BatchNorm2d-188 [-1, 128, 11, 15] 256
conv2d-189 [-1, 128, 11, 15] 0
Conv2d-190 [-1, 128, 11, 15] 16,384
BatchNorm2d-191 [-1, 128, 11, 15] 256
conv2d-192 [-1, 128, 11, 15] 0
ReLU-193 [-1, 128, 11, 15] 0
GatherExpansion-194 [-1, 128, 11, 15] 0
AdaptiveAvgPool2d-195 [-1, 128, 1, 1] 0
BatchNorm2d-196 [-1, 128, 1, 1] 256
Conv2d-197 [-1, 128, 1, 1] 16,384
BatchNorm2d-198 [-1, 128, 1, 1] 256
ReLU-199 [-1, 128, 1, 1] 0
conv2d-200 [-1, 128, 1, 1] 0
Conv2d-201 [-1, 128, 11, 15] 147,456
conv2d-202 [-1, 128, 11, 15] 0
ContextEmbeddingBlock-203 [-1, 128, 11, 15] 0
Conv2d-204 [-1, 16, 88, 120] 2,304
BatchNorm2d-205 [-1, 16, 88, 120] 32
ReLU-206 [-1, 16, 88, 120] 0
conv2d-207 [-1, 16, 88, 120] 0
Conv2d-208 [-1, 11, 88, 120] 176
conv2d-209 [-1, 11, 88, 120] 0
SegHead-210 [-1, 11, 352, 480] 0
Conv2d-211 [-1, 32, 44, 60] 9,216
BatchNorm2d-212 [-1, 32, 44, 60] 64
ReLU-213 [-1, 32, 44, 60] 0
conv2d-214 [-1, 32, 44, 60] 0
Conv2d-215 [-1, 11, 44, 60] 352
conv2d-216 [-1, 11, 44, 60] 0
SegHead-217 [-1, 11, 352, 480] 0
Conv2d-218 [-1, 64, 22, 30] 36,864
BatchNorm2d-219 [-1, 64, 22, 30] 128
ReLU-220 [-1, 64, 22, 30] 0
conv2d-221 [-1, 64, 22, 30] 0
Conv2d-222 [-1, 11, 22, 30] 704
conv2d-223 [-1, 11, 22, 30] 0
SegHead-224 [-1, 11, 352, 480] 0
Conv2d-225 [-1, 128, 11, 15] 147,456
BatchNorm2d-226 [-1, 128, 11, 15] 256
ReLU-227 [-1, 128, 11, 15] 0
conv2d-228 [-1, 128, 11, 15] 0
Conv2d-229 [-1, 11, 11, 15] 1,408
conv2d-230 [-1, 11, 11, 15] 0
SegHead-231 [-1, 11, 352, 480] 0
SemanticBranch-232 [[-1, 128, 11, 15], [-1, 11, 352, 480], [-1, 11, 352, 480], [-1, 11, 352, 480], [-1, 11, 352, 480]] 0
Conv2d-233 [-1, 128, 44, 60] 1,152
BatchNorm2d-234 [-1, 128, 44, 60] 256
conv2d-235 [-1, 128, 44, 60] 0
Conv2d-236 [-1, 128, 44, 60] 16,384
conv2d-237 [-1, 128, 44, 60] 0
Conv2d-238 [-1, 128, 22, 30] 147,456
BatchNorm2d-239 [-1, 128, 22, 30] 256
conv2d-240 [-1, 128, 22, 30] 0
AvgPool2d-241 [-1, 128, 11, 15] 0
Conv2d-242 [-1, 128, 11, 15] 1,152
BatchNorm2d-243 [-1, 128, 11, 15] 256
conv2d-244 [-1, 128, 11, 15] 0
Conv2d-245 [-1, 128, 11, 15] 16,384
conv2d-246 [-1, 128, 11, 15] 0
Sigmoid-247 [-1, 128, 11, 15] 0
Conv2d-248 [-1, 128, 11, 15] 147,456
BatchNorm2d-249 [-1, 128, 11, 15] 256
conv2d-250 [-1, 128, 11, 15] 0
Sigmoid-251 [-1, 128, 44, 60] 0
Conv2d-252 [-1, 128, 44, 60] 147,456
BatchNorm2d-253 [-1, 128, 44, 60] 256
conv2d-254 [-1, 128, 44, 60] 0
BGA-255 [-1, 128, 44, 60] 0
Conv2d-256 [-1, 128, 44, 60] 147,456
BatchNorm2d-257 [-1, 128, 44, 60] 256
ReLU-258 [-1, 128, 44, 60] 0
conv2d-259 [-1, 128, 44, 60] 0
Conv2d-260 [-1, 11, 44, 60] 1,408
conv2d-261 [-1, 11, 44, 60] 0
SegHead-262 [-1, 11, 352, 480] 0
================================================================
Total params: 5,361,434
Trainable params: 5,361,434
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 1.93
Forward/backward pass size (MB): 62196495155808.92
Params size (MB): 20.45
Estimated Total Size (MB): 62196495155831.31
----------------------------------------------------------------
BiSeNetV2X28(
6.325 GMac, 100.000% MACs,
(db): DetailedBranch(
3.797 GMac, 60.023% MACs,
(s1_conv1): conv2d(
0.081 GMac, 1.282% MACs,
(conv): Conv2d(0.073 GMac, 1.154% MACs, 3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.005 GMac, 0.085% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.003 GMac, 0.043% MACs, inplace=True)
)
(s1_conv2): conv2d(
1.565 GMac, 24.746% MACs,
(conv): Conv2d(1.557 GMac, 24.618% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.005 GMac, 0.085% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.003 GMac, 0.043% MACs, inplace=True)
)
(s2_conv1): conv2d(
0.391 GMac, 6.187% MACs,
(conv): Conv2d(0.389 GMac, 6.155% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.021% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.011% MACs, inplace=True)
)
(s2_conv2): conv2d(
0.391 GMac, 6.187% MACs,
(conv): Conv2d(0.389 GMac, 6.155% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.021% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.011% MACs, inplace=True)
)
(s2_conv3): conv2d(
0.391 GMac, 6.187% MACs,
(conv): Conv2d(0.389 GMac, 6.155% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.021% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.011% MACs, inplace=True)
)
(s3_conv1): conv2d(
0.196 GMac, 3.093% MACs,
(conv): Conv2d(0.195 GMac, 3.077% MACs, 64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(s3_conv2): conv2d(
0.39 GMac, 6.171% MACs,
(conv): Conv2d(0.389 GMac, 6.155% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(s3_conv3): conv2d(
0.39 GMac, 6.171% MACs,
(conv): Conv2d(0.389 GMac, 6.155% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
)
(sb): SemanticBranch(
1.573 GMac, 24.866% MACs,
(stem): StemBlock(
0.109 GMac, 1.720% MACs,
(conv0): conv2d(
0.008 GMac, 0.121% MACs,
(conv): Conv2d(0.006 GMac, 0.097% MACs, 3, 3, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.016% MACs, 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.008% MACs, inplace=True)
)
(conv1): conv2d(
0.02 GMac, 0.321% MACs,
(conv): Conv2d(0.018 GMac, 0.288% MACs, 3, 16, kernel_size=(3, 3), stride=(2, 2), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.021% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.011% MACs, inplace=True)
)
(conv_1x1): conv2d(
0.006 GMac, 0.102% MACs,
(conv): Conv2d(0.005 GMac, 0.085% MACs, 16, 8, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(conv_2x2): conv2d(
0.012 GMac, 0.189% MACs,
(conv): Conv2d(0.011 GMac, 0.173% MACs, 8, 8, kernel_size=(2, 2), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 8, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(conv_3x3): conv2d(
0.013 GMac, 0.200% MACs,
(conv): Conv2d(0.012 GMac, 0.192% MACs, 8, 16, kernel_size=(3, 3), stride=(2, 2), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.005% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.003% MACs, inplace=True)
)
(mpooling): MaxPool2d(0.001 GMac, 0.011% MACs, kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(conv2): conv2d(
0.049 GMac, 0.777% MACs,
(conv): Conv2d(0.049 GMac, 0.769% MACs, 32, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.005% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.003% MACs, inplace=True)
)
)
(s3_ge1): GatherExpansion(
0.165 GMac, 2.608% MACs,
(conv1): conv2d(
0.149 GMac, 2.356% MACs,
(conv): Conv2d(0.146 GMac, 2.308% MACs, 16, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.002 GMac, 0.032% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.016% MACs, inplace=True)
)
(dwconv2): conv2d(
0.003 GMac, 0.044% MACs,
(conv): Conv2d(0.002 GMac, 0.036% MACs, 96, 96, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=96, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.008 GMac, 0.131% MACs,
(conv): Conv2d(0.008 GMac, 0.128% MACs, 96, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.003 GMac, 0.044% MACs,
(conv): Conv2d(0.002 GMac, 0.036% MACs, 96, 96, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=96, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.007% MACs,
(conv): Conv2d(0.0 GMac, 0.006% MACs, 16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=16, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.002 GMac, 0.024% MACs,
(conv): Conv2d(0.001 GMac, 0.021% MACs, 16, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(s3_ge2): GatherExpansion(
0.17 GMac, 2.681% MACs,
(conv1): conv2d(
0.148 GMac, 2.332% MACs,
(conv): Conv2d(0.146 GMac, 2.308% MACs, 32, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.016% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.008% MACs, inplace=True)
)
(dwconv2): conv2d(
0.006 GMac, 0.088% MACs,
(conv): Conv2d(0.005 GMac, 0.072% MACs, 192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.016% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.259% MACs,
(conv): Conv2d(0.016 GMac, 0.256% MACs, 192, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 192, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=192, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 32, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(s4_ge1): GatherExpansion(
0.16 GMac, 2.533% MACs,
(conv1): conv2d(
0.148 GMac, 2.332% MACs,
(conv): Conv2d(0.146 GMac, 2.308% MACs, 32, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.016% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.001 GMac, 0.008% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.022% MACs,
(conv): Conv2d(0.001 GMac, 0.018% MACs, 192, 192, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=192, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.008 GMac, 0.130% MACs,
(conv): Conv2d(0.008 GMac, 0.128% MACs, 192, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.001 GMac, 0.022% MACs,
(conv): Conv2d(0.001 GMac, 0.018% MACs, 192, 192, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=192, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.004% MACs,
(conv): Conv2d(0.0 GMac, 0.003% MACs, 32, 32, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=32, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.001 GMac, 0.023% MACs,
(conv): Conv2d(0.001 GMac, 0.021% MACs, 32, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(s4_ge2): GatherExpansion(
0.166 GMac, 2.622% MACs,
(conv1): conv2d(
0.147 GMac, 2.320% MACs,
(conv): Conv2d(0.146 GMac, 2.308% MACs, 64, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.004% MACs, inplace=True)
)
(dwconv2): conv2d(
0.003 GMac, 0.044% MACs,
(conv): Conv2d(0.002 GMac, 0.036% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.258% MACs,
(conv): Conv2d(0.016 GMac, 0.256% MACs, 384, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 384, 384, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=384, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=64, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(s5_ge1): GatherExpansion(
0.158 GMac, 2.495% MACs,
(conv1): conv2d(
0.147 GMac, 2.320% MACs,
(conv): Conv2d(0.146 GMac, 2.308% MACs, 64, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.008% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.004% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.011% MACs,
(conv): Conv2d(0.001 GMac, 0.009% MACs, 384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.002% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.008 GMac, 0.129% MACs,
(conv): Conv2d(0.008 GMac, 0.128% MACs, 384, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.001 GMac, 0.011% MACs,
(conv): Conv2d(0.001 GMac, 0.009% MACs, 384, 384, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=384, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.002% MACs, 384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.002% MACs,
(conv): Conv2d(0.0 GMac, 0.002% MACs, 64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=64, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.001 GMac, 0.022% MACs,
(conv): Conv2d(0.001 GMac, 0.021% MACs, 64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(s5_ge2): GatherExpansion(
0.164 GMac, 2.593% MACs,
(conv1): conv2d(
0.146 GMac, 2.314% MACs,
(conv): Conv2d(0.146 GMac, 2.308% MACs, 128, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.022% MACs,
(conv): Conv2d(0.001 GMac, 0.018% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.257% MACs,
(conv): Conv2d(0.016 GMac, 0.256% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 768, 768, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(s5_ge3): GatherExpansion(
0.164 GMac, 2.593% MACs,
(conv1): conv2d(
0.146 GMac, 2.314% MACs,
(conv): Conv2d(0.146 GMac, 2.308% MACs, 128, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.022% MACs,
(conv): Conv2d(0.001 GMac, 0.018% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.257% MACs,
(conv): Conv2d(0.016 GMac, 0.256% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 768, 768, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(s5_ge4): GatherExpansion(
0.164 GMac, 2.593% MACs,
(conv1): conv2d(
0.146 GMac, 2.314% MACs,
(conv): Conv2d(0.146 GMac, 2.308% MACs, 128, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.002% MACs, inplace=True)
)
(dwconv2): conv2d(
0.001 GMac, 0.022% MACs,
(conv): Conv2d(0.001 GMac, 0.018% MACs, 768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.004% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.016 GMac, 0.257% MACs,
(conv): Conv2d(0.016 GMac, 0.256% MACs, 768, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 768, 768, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=768, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(s5_ge5): GatherExpansion(
0.027 GMac, 0.433% MACs,
(conv1): conv2d(
0.024 GMac, 0.386% MACs,
(conv): Conv2d(0.024 GMac, 0.385% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv2): conv2d(
0.0 GMac, 0.004% MACs,
(conv): Conv2d(0.0 GMac, 0.003% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_11): conv2d(
0.003 GMac, 0.043% MACs,
(conv): Conv2d(0.003 GMac, 0.043% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(dwconv3): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv_12): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(seghead1): SegHead(
0.027 GMac, 0.422% MACs,
(conv): conv2d(
0.025 GMac, 0.393% MACs,
(conv): Conv2d(0.024 GMac, 0.385% MACs, 16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.005% MACs, 16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.003% MACs, inplace=True)
)
(classes): conv2d(
0.002 GMac, 0.029% MACs,
(conv): Conv2d(0.002 GMac, 0.029% MACs, 16, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(seghead2): SegHead(
0.026 GMac, 0.403% MACs,
(conv): conv2d(
0.025 GMac, 0.389% MACs,
(conv): Conv2d(0.024 GMac, 0.385% MACs, 32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(classes): conv2d(
0.001 GMac, 0.015% MACs,
(conv): Conv2d(0.001 GMac, 0.015% MACs, 32, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(seghead3): SegHead(
0.025 GMac, 0.394% MACs,
(conv): conv2d(
0.024 GMac, 0.387% MACs,
(conv): Conv2d(0.024 GMac, 0.385% MACs, 64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.001% MACs, inplace=True)
)
(classes): conv2d(
0.0 GMac, 0.007% MACs,
(conv): Conv2d(0.0 GMac, 0.007% MACs, 64, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(seghead4): SegHead(
0.025 GMac, 0.389% MACs,
(conv): conv2d(
0.024 GMac, 0.386% MACs,
(conv): Conv2d(0.024 GMac, 0.385% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(classes): conv2d(
0.0 GMac, 0.004% MACs,
(conv): Conv2d(0.0 GMac, 0.004% MACs, 128, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(ceb): ContextEmbeddingBlock(
0.024 GMac, 0.385% MACs,
(gap): AdaptiveAvgPool2d(0.0 GMac, 0.000% MACs, output_size=1)
(bn1): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv1): conv2d(
0.0 GMac, 0.000% MACs,
(conv): Conv2d(0.0 GMac, 0.000% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(conv2): conv2d(
0.024 GMac, 0.385% MACs,
(conv): Conv2d(0.024 GMac, 0.385% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
)
(bga): BGA(
0.562 GMac, 8.882% MACs,
(db_dwconv): conv2d(
0.004 GMac, 0.059% MACs,
(conv): Conv2d(0.003 GMac, 0.048% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(db_conv1x1): conv2d(
0.043 GMac, 0.684% MACs,
(conv): Conv2d(0.043 GMac, 0.684% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(db_conv): conv2d(
0.097 GMac, 1.541% MACs,
(conv): Conv2d(0.097 GMac, 1.539% MACs, 128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.003% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(db_apooling): AvgPool2d(0.0 GMac, 0.001% MACs, kernel_size=3, stride=2, padding=1)
(sb_dwconv): conv2d(
0.0 GMac, 0.004% MACs,
(conv): Conv2d(0.0 GMac, 0.003% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128, bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(sb_conv1x1): conv2d(
0.003 GMac, 0.043% MACs,
(conv): Conv2d(0.003 GMac, 0.043% MACs, 128, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(sb_conv): conv2d(
0.024 GMac, 0.385% MACs,
(conv): Conv2d(0.024 GMac, 0.385% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.001% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
(sb_sigmoid): Sigmoid(0.0 GMac, 0.000% MACs, )
(conv): conv2d(
0.39 GMac, 6.165% MACs,
(conv): Conv2d(0.389 GMac, 6.155% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
(seghead): SegHead(
0.394 GMac, 6.229% MACs,
(conv): conv2d(
0.39 GMac, 6.171% MACs,
(conv): Conv2d(0.389 GMac, 6.155% MACs, 128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn): BatchNorm2d(0.001 GMac, 0.011% MACs, 128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.005% MACs, inplace=True)
)
(classes): conv2d(
0.004 GMac, 0.059% MACs,
(conv): Conv2d(0.004 GMac, 0.059% MACs, 128, 11, kernel_size=(1, 1), stride=(1, 1), bias=False)
(bn): BatchNorm2d(0.0 GMac, 0.000% MACs, 11, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(0.0 GMac, 0.000% MACs, inplace=True)
)
)
)
6.325178309 GMac 5.47396 20.881500244140625 MB
[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.
[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.
[INFO] Register zero_ops() for <class 'torch.nn.modules.activation.ReLU'>.
[WARN] Cannot find rule for <class '__main__.conv2d'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.DetailedBranch'>. Treat it as zero Macs and zero Params.
[INFO] Register zero_ops() for <class 'torch.nn.modules.pooling.MaxPool2d'>.
[WARN] Cannot find rule for <class '__main__.StemBlock'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.GatherExpansion'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.SegHead'>. Treat it as zero Macs and zero Params.
[INFO] Register count_adap_avgpool() for <class 'torch.nn.modules.pooling.AdaptiveAvgPool2d'>.
[WARN] Cannot find rule for <class '__main__.ContextEmbeddingBlock'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.SemanticBranch'>. Treat it as zero Macs and zero Params.
[INFO] Register count_avgpool() for <class 'torch.nn.modules.pooling.AvgPool2d'>.
[WARN] Cannot find rule for <class 'torch.nn.modules.activation.Sigmoid'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.BGA'>. Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for <class '__main__.BiSeNetV2X28'>. Treat it as zero Macs and zero Params.
6.309839354 GMac 5.361434 20.452247619628906 MB
Process finished with exit code 0
'''
| [
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.Sigmoid",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.functional.interpolate",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.constant_",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.nn.init.normal_",
"torch.nn.AdaptiveAvgPool2d",
"torch.randn"
] | 1.1.0 | Ethan-ye/Efficient-Segmentation-Networks | 27272e43126a507a6d93b21cd2372f5432f61237 |
1.2 | from typing import Any, Iterator, Iterable, Tuple, List, Callable
import warnings
import _collections_abc
import random
import itertools
import lineflow as lf
from torch.utils.data import IterableDataset
from torch.utils.data import get_worker_info
class Dataset(IterableDataset):
def __init__(self, dataset: Iterable[Any]) -> None:
assert isinstance(dataset, _collections_abc.Iterable)
self._dataset = dataset
def __iter__(self) -> Iterator[Any]:
iterable, self._dataset = itertools.tee(self._dataset)
yield from iterable
def all(self) -> List[Any]:
return list(self)
def apply(self,
transformation_func: Callable[[Iterator[Any]], Iterator[Any]]
) -> 'ApplyDataset':
return ApplyDataset(self, transformation_func)
def batch(self, batch_size: int) -> 'BatchDataset':
return BatchDataset(self, batch_size)
def concat(self, *others: Tuple['Dataset']) -> 'ConcatDataset':
return ConcatDataset(self, *others)
def flat_map(self, map_func: Callable[[Any], Iterable[Any]]) -> 'FlatMapDataset':
return FlatMapDataset(self, map_func)
def filter(self, predicate: Callable[[Any], bool]) -> 'FilterDataset':
return FilterDataset(self, predicate)
def first(self) -> Any:
return next(iter(self))
def map(self, map_func: Callable[[Any], Any]) -> 'MapDataset':
return MapDataset(self, map_func)
def parallel(self) -> 'ParallelDataset':
return ParallelDataset(self)
def shard(self, num_shards, index) -> 'ShardDataset':
return ShardDataset(self, num_shards, index)
def shuffle(self, buffer_size: int = None) -> 'ShuffleDataset':
return ShuffleDataset(self, buffer_size)
def sort(self, sort_key: Callable, buffer_size: int = None) -> 'SortDataset':
return SortDataset(self, sort_key, buffer_size)
def take(self, n) -> List[Any]:
return list(itertools.islice(self, n))
def window(self, window_size: int, shift: int = None) -> 'WindowDataset':
return WindowDataset(self, window_size, shift)
def zip(self, *others: Tuple['Dataset']) -> 'ZipDataset':
return ZipDataset(self, *others)
__add__ = concat
class ApplyDataset(Dataset):
def __init__(self,
dataset: Dataset,
transformation_func: Callable[[Iterator[Any]], Iterator[Any]]
) -> None:
super(ApplyDataset, self).__init__(dataset)
assert callable(transformation_func)
self._transformation_func = transformation_func
def __iter__(self) -> Iterator[Any]:
return self._transformation_func(self._dataset)
class BatchDataset(Dataset):
def __init__(self, dataset: Dataset, batch_size: int) -> None:
super(BatchDataset, self).__init__(dataset)
self._batch_size = batch_size
def __iter__(self) -> Iterator[Any]:
batch = []
for x in self._dataset:
batch.append(x)
if len(batch) == self._batch_size:
yield batch
batch = []
if batch:
yield batch
class ConcatDataset(Dataset):
def __init__(self, dataset: Dataset, *others: Tuple[Dataset]) -> None:
super(ConcatDataset, self).__init__(dataset)
assert all(isinstance(d, Dataset) for d in others)
self._others = others
def __iter__(self):
yield from itertools.chain(self._dataset, *self._others)
class FlatMapDataset(Dataset):
def __init__(self, dataset: Dataset, map_func: Callable[[Any], Iterable[Any]]) -> None:
super(FlatMapDataset, self).__init__(dataset)
assert callable(map_func)
self._map_func = map_func
def __iter__(self) -> Iterator[Any]:
yield from lf.flat_map(self._map_func, self._dataset, lazy=True)
class FilterDataset(Dataset):
def __init__(self, dataset: Dataset, predicate: Callable[[Any], bool]) -> None:
super(FilterDataset, self).__init__(dataset)
assert callable(predicate)
self._predicate = predicate
def __iter__(self) -> Iterator[Any]:
yield from filter(self._predicate, self._dataset)
class MapDataset(Dataset):
def __init__(self, dataset: Dataset, map_func: Callable[[Any], Any]) -> None:
super(MapDataset, self).__init__(dataset)
assert callable(map_func)
self._map_func = map_func
def __iter__(self) -> Iterator[Any]:
yield from map(self._map_func, self._dataset)
class ParallelDataset(Dataset):
def __iter__(self) -> Iterator[Any]:
worker_info = get_worker_info()
if worker_info is None:
warnings.warn(
'Parallel is not activated. Please refer to '
'torch.utils.data.DataLoader.',
RuntimeWarning,
stacklevel=2
)
yield from self._dataset
else:
worker_id = worker_info.id
num_workers = worker_info.num_workers
yield from self._dataset.shard(num_workers, worker_id)
class ShardDataset(Dataset):
def __init__(self, dataset: Dataset, num_shards: int, index: int) -> None:
super(ShardDataset, self).__init__(dataset)
self._num_shards = num_shards
self._index = index
def __iter__(self) -> Iterator[Any]:
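        # shard `index` yields items index, index + num_shards, index + 2 * num_shards, ... of the source dataset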
yield from itertools.islice(self._dataset, self._index, None, self._num_shards)
class ShuffleDataset(Dataset):
def __init__(self, dataset: Dataset, buffer_size: int = None) -> None:
super(ShuffleDataset, self).__init__(dataset)
self._buffer_size = buffer_size
def __iter__(self) -> Iterator[Any]:
chunk = []
if self._buffer_size is None:
for x in self._dataset:
chunk.append(x)
random.shuffle(chunk)
yield from chunk
else:
for x in self._dataset:
chunk.append(x)
if len(chunk) == self._buffer_size:
random.shuffle(chunk)
yield from chunk
chunk = []
if chunk:
random.shuffle(chunk)
yield from chunk
class SortDataset(Dataset):
def __init__(self, dataset: Dataset, sort_key: Callable, buffer_size: int = None) -> None:
super(SortDataset, self).__init__(dataset)
assert callable(sort_key)
self._sort_key = sort_key
self._buffer_size = buffer_size
def __iter__(self) -> Iterator[Any]:
if self._buffer_size is None:
yield from sorted(self._dataset, key=self._sort_key)
else:
chunk = []
for x in self._dataset:
chunk.append(x)
if len(chunk) == self._buffer_size:
chunk.sort(key=self._sort_key)
yield from chunk
chunk = []
if chunk:
chunk.sort(key=self._sort_key)
yield from chunk
class WindowDataset(Dataset):
def __init__(self, dataset: Dataset, window_size: int, shift: int = None) -> None:
super(WindowDataset, self).__init__(dataset)
self._window_size = window_size
self._shift = shift or window_size
def __iter__(self) -> Iterator[Any]:
yield from lf.window(self._dataset, self._window_size, self._shift, lazy=True)
class ZipDataset(Dataset):
def __init__(self, dataset: Dataset, *others: Tuple[Dataset]) -> None:
super(ZipDataset, self).__init__(dataset)
assert all(isinstance(d, Dataset) for d in others)
self._others = others
def __iter__(self):
yield from zip(self._dataset, *self._others)
| [
"torch.utils.data.get_worker_info"
] | 1.2.0 | yasufumy/torchdata | ed837afa366638fb19656bcc234903d266ac2910 |
1.6 | import os
import torch
from pathlib import Path
from args import get_parser
# set root path
ROOT_PATH = Path(os.path.dirname(__file__))
# read parser
parser = get_parser()
args = parser.parse_args()
# model name
MODEL_NAME = 'LASAGNE'
# define device
CUDA = 'cuda'
CPU = 'cpu'
DEVICE = torch.device(CUDA if torch.cuda.is_available() else CPU)
# fields
INPUT = 'input'
LOGICAL_FORM = 'logical_form'
NER = 'ner'
COREF = 'coref'
GRAPH = 'graph'
MULTITASK = 'multitask'
# helper tokens
START_TOKEN = '[START]'
END_TOKEN = '[END]'
CTX_TOKEN = '[CTX]'
PAD_TOKEN = '[PAD]'
UNK_TOKEN = '[UNK]'
SEP_TOKEN = '[SEP]'
NA_TOKEN = 'NA'
# ner tag
B = 'B'
I = 'I'
O = 'O'
# model
ENCODER_OUT = 'encoder_out'
DECODER_OUT = 'decoder_out'
# training
EPOCH = 'epoch'
STATE_DICT = 'state_dict'
BEST_VAL = 'best_val'
OPTIMIZER = 'optimizer'
CURR_VAL = 'curr_val'
# question types
TOTAL = 'total'
OVERALL = 'Overall'
CLARIFICATION = 'Clarification'
COMPARATIVE = 'Comparative Reasoning (All)'
LOGICAL = 'Logical Reasoning (All)'
QUANTITATIVE = 'Quantitative Reasoning (All)'
SIMPLE_COREFERENCED = 'Simple Question (Coreferenced)'
SIMPLE_DIRECT = 'Simple Question (Direct)'
SIMPLE_ELLIPSIS = 'Simple Question (Ellipsis)'
VERIFICATION = 'Verification (Boolean) (All)'
QUANTITATIVE_COUNT = 'Quantitative Reasoning (Count) (All)'
COMPARATIVE_COUNT = 'Comparative Reasoning (Count) (All)'
# action related
ENTITY = 'entity'
RELATION = 'relation'
TYPE = 'type'
VALUE = 'value'
PREV_ANSWER = 'prev_answer'
ACTION = 'action'
# other
QUESTION_TYPE = 'question_type'
IS_CORRECT = 'is_correct'
QUESTION = 'question'
ANSWER = 'answer'
ACTIONS = 'actions'
GOLD_ACTIONS = 'gold_actions'
RESULTS = 'results'
PREV_RESULTS = 'prev_results'
CONTEXT_QUESTION = 'context_question'
CONTEXT_ENTITIES = 'context_entities'
BERT_BASE_UNCASED = 'bert-base-uncased' | [
"torch.cuda.is_available"
] | 1.6.0 | endrikacupaj/LASAGNE | 6321ab5161999905b357bd9b67906dcac04b8644 |
1.6 | import argparse
import os
import numpy as np
import torch
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import BertTokenizer, BertConfig
from model import Model
from utils.data_utils import NluDataset, glue_processor, prepare_data
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def evaluate(model, data_raw, id_to_label, mode='dev'):
model.eval()
test_data = NluDataset(data_raw)
test_dataloader = DataLoader(test_data, batch_size=32, collate_fn=test_data.collate_fn)
slot_labels = id_to_label['slot_labels']
joint_all = 0
joint_correct = 0
s_preds = []
s_labels = []
i_preds = []
i_labels = []
epoch_pbar = tqdm(test_dataloader, desc="Evaluation", disable=False)
for step, batch in enumerate(test_dataloader):
batch = [b.to(device) if not isinstance(b, int) else b for b in batch]
input_ids, segment_ids, input_mask, slot_ids, intent_id = batch
with torch.no_grad():
intent_output, slot_output = model(input_ids, segment_ids, input_mask)
# intent_evaluate
intent_output = intent_output.argmax(dim=1)
intent_output = intent_output.tolist()
intent_label = intent_id.tolist()
i_preds = i_preds + intent_output
i_labels = i_labels + intent_label
# slot_evaluate
slot_output = slot_output.argmax(dim=2)
slot_output = slot_output.tolist()
slot_ids = slot_ids.tolist()
for idx, (p, l) in enumerate(zip(slot_output, slot_ids)):
p_text, l_text = align_predictions(p, l, slot_labels)
joint_all += 1
if p_text == l_text and intent_label[idx] == intent_output[idx]:
joint_correct += 1
s_preds.append(p_text)
s_labels.append(l_text)
epoch_pbar.update(1)
epoch_pbar.close()
res = {
"joint_acc": joint_correct / joint_all,
"intent_accuracy": cal_acc(i_preds, i_labels),
"slot_accuracy_score": accuracy_score(s_labels, s_preds),
"slot_precision": precision_score(s_labels, s_preds),
"slot_recall": recall_score(s_labels, s_preds),
"slot_f1": f1_score(s_labels, s_preds),
}
print('Evaluation on ', mode, ' dataset: ', res)
return res
def cal_acc(preds, labels):
acc = sum([1 if p == l else 0 for p, l in zip(preds, labels)]) / len(labels)
return acc
def align_predictions(preds, slot_ids, id_to_label):
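    # slot ids of -100 mark positions that should be ignored (e.g. padding or extra word pieces); skipping them
    # keeps the predicted slot sequence aligned with the reference labels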
aligned_labels = []
aligned_preds = []
for p, l in zip(preds, slot_ids):
if l != -100:
aligned_preds.append(id_to_label[p])
aligned_labels.append(id_to_label[l])
return aligned_preds, aligned_labels
def set_seed(seed: int):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def main(args):
# Init
set_seed(args.seed)
processor = glue_processor[args.task_name.lower()]
tokenizer = BertTokenizer(args.vocab_path, do_lower_case=True)
# Data
dev_examples = processor.get_dev_examples(args.data_dir)
test_examples = processor.get_test_examples(args.data_dir)
labels = processor.get_labels(args.data_dir)
dev_data_raw = prepare_data(dev_examples, args.max_seq_len, tokenizer, labels)
test_data_raw = prepare_data(test_examples, args.max_seq_len, tokenizer, labels)
# Model
model_config = BertConfig.from_json_file(args.bert_config_path)
model_config.dropout = args.dropout
model_config.num_intent = len(labels['intent_labels'])
model_config.num_slot = len(labels['slot_labels'])
model = Model(model_config)
ckpt = torch.load(args.model_ckpt_path, map_location='cpu')
model.load_state_dict(ckpt, strict=False)
model.to(device)
evaluate(model, dev_data_raw, labels, 'dev')
evaluate(model, test_data_raw, labels, 'test')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--seed", default=42, type=int)
parser.add_argument("--task_name", default='nlu', type=str)
parser.add_argument("--data_dir", default='data/atis/', type=str)
parser.add_argument("--model_path", default='assets/', type=str)
parser.add_argument("--model_ckpt_path", default='outputs/model_best.bin', type=str)
parser.add_argument("--max_seq_len", default=60, type=int)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--dropout", default=0.1, type=float)
args = parser.parse_args()
args.vocab_path = os.path.join(args.model_path, 'vocab.txt')
args.bert_config_path = os.path.join(args.model_path, 'config.json')
print(args)
main(args)
| [
"torch.cuda.manual_seed_all",
"torch.no_grad",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load"
] | 1.6.1 | Dog0320/BERT-NLU | c760a09faee141526dbb241040d73d0870118f6d |
1.9 | """
learn2learn examples: https://github.com/learnables/learn2learn/tree/master/examples/vision
4CNN l2l hack:
- since SL needs 64 output units, I unfortunately hardcoded mdl.cls = nn.Linear(..., 64).
Going through the setter does change .classifier to point to the right module (see the setter decorator; I also asserted
that the pointers and the weight norms are identical, even though print(self.model) shows a mismatch in out_features),
and doing .X = Y in pytorch registers the module. So now all models have both a .classifier and a .cls module, which
means the state_dict of the model will have both. So when you create the model you either need to make sure to call
replace_final_layer so that .model.cls = nn.Linear(...) is set, and thus both the cls and classifier layers are
registered by pytorch when you load the checkpoint,
or (the solution I chose) have self.cls = self.classifier in the init so that it always has both modules.
"""
import learn2learn
import torch
from learn2learn.vision.models import CNN4Backbone, maml_init_
from torch import nn
def cnn4_cifarsfs(ways: int,
hidden_size=64,
embedding_size=64 * 4,
) -> tuple[nn.Module, dict]:
"""
Based on: https://github.com/learnables/learn2learn/blob/master/examples/vision/anil_fc100.py
"""
model_hps: dict = dict(ways=ways, hidden_size=hidden_size, embedding_size=embedding_size)
# model = learn2learn.vision.models.CNN4(output_size=ways, hidden_size=hidden_size, embedding_size=embedding_size, )
model = CNN4(output_size=ways, hidden_size=hidden_size, embedding_size=embedding_size, )
return model, model_hps
class CNN4(torch.nn.Module):
"""
[[Source]](https://github.com/learnables/learn2learn/blob/master/learn2learn/vision/models/cnn4.py)
**Description**
The convolutional network commonly used for MiniImagenet, as described by Ravi et Larochelle, 2017.
This network assumes inputs of shapes (3, 84, 84).
Instantiate `CNN4Backbone` if you only need the feature extractor.
**References**
1. Ravi and Larochelle. 2017. “Optimization as a Model for Few-Shot Learning.” ICLR.
**Arguments**
* **output_size** (int) - The dimensionality of the network's output.
* **hidden_size** (int, *optional*, default=64) - The dimensionality of the hidden representation.
* **layers** (int, *optional*, default=4) - The number of convolutional layers.
* **channels** (int, *optional*, default=3) - The number of channels in input.
* **max_pool** (bool, *optional*, default=True) - Whether ConvBlocks use max-pooling.
* **embedding_size** (int, *optional*, default=None) - Size of feature embedding.
Defaults to 25 * hidden_size (for mini-Imagenet).
**Example**
~~~python
model = CNN4(output_size=20, hidden_size=128, layers=3)
~~~
"""
def __init__(
self,
output_size,
hidden_size=64,
layers=4,
channels=3,
max_pool=True,
embedding_size=None,
):
super().__init__()
if embedding_size is None:
embedding_size = 25 * hidden_size
self.features = CNN4Backbone(
hidden_size=hidden_size,
channels=channels,
max_pool=max_pool,
layers=layers,
max_pool_factor=4 // layers,
)
self.classifier = torch.nn.Linear(
embedding_size,
output_size,
bias=True,
)
maml_init_(self.classifier)
self.hidden_size = hidden_size
assert self.cls is self.classifier
def forward(self, x):
        assert self.cls is self.classifier  # this just makes sure that we are running the final layer we want
x = self.features(x)
x = self.classifier(x)
        assert self.cls is self.classifier  # this just makes sure that we are running the final layer we want
return x
# https://stackoverflow.com/questions/71654047/how-does-one-get-the-object-in-a-python-object-inside-a-decorated-function-witho
    # unfortunately needed, otherwise pytorch seems to add it to the modules and then, if it does a backwards pass, it
    # thinks there are parameters not being trained, although self.cls is self.classifier should return True
@property
def cls(self):
return self.classifier
@cls.setter
def cls(self, new_cls):
self.classifier = new_cls
# - tests
def wider_net_test():
model, _ = cnn4_cifarsfs(ways=64, hidden_size=1024, embedding_size=1024 * 4)
x = torch.randn(8, 3, 32, 32)
y = model(x)
print(y)
print(y.size())
def _reproduce_bug():
model, _ = cnn4_cifarsfs(ways=64, hidden_size=1024, embedding_size=1024 * 4)
model.cls = model.classifier
print(model)
x = torch.randn(8, 3, 32, 32)
y = model(x)
print(y)
print(y.size())
y.sum().backward()
print()
if __name__ == '__main__':
# wider_net_test()
_reproduce_bug()
print('Done\a')
| [
"torch.nn.Linear",
"torch.randn"
] | 1.9.1 | patricks-lab/ultimate-utils | e32922d79eddba8cbe9f954a96ef2205491d8a4a |
1.9 | """
Notes:
- 1. For the conv layer we have H' = H since H' = H + 2p - k + 1 = H for p=1, k=3, i.e. same as the previous layer
- since stride=1 by default (so the kernel only moves by 1), because you want a conv to see all of the image.
- 2. For the pool layer we have H' = H/2, i.e. half of the previous layer
- since stride=kernel_size by default (so pooling downsamples more aggressively, e.g. for invariance)
- 3. H = W should be true for this model and data, unless you feed rectangular data for some reason.
For this model, if H = 84 at the input layer and H^(l)_{layer_type} is the H at layer l for that layer_type, we have:
- H^(l)_{conv} = H/2**(l-1)
- H^(l)_{pool} = H/2**(l)
since the conv layers do not change the height and each pooling layer halves every spatial dimension.
"""
from __future__ import division, print_function, absolute_import
import pdb
import copy
from argparse import Namespace
from collections import OrderedDict
import torch
import torch.nn as nn
import numpy as np
from typing import Optional
# from automl.core.operations import SPP
from uutils.torch_uu.models.spp import SPP
def helloworld(msg="hello"):
print(f'hello world with mgs: {msg}')
def get_defaul_args_for_5cnn() -> Namespace:
args: Namespace = Namespace()
args.image_size = 84
args.bn_eps = 1e-3
args.bn_momentum = 0.95
args.n_classes = 5
args.filter_size = 32
args.levels = None
args.spp = False
return args
def get_learner_from_args(args: Namespace) -> nn.Module:
return Learner(args.image_size, args.bn_eps, args.bn_momentum, args.n_classes)
def get_default_learner(image_size: int = 84,
bn_eps: float = 1e-3,
bn_momentum: float = 0.95,
n_classes: int = 5,
filter_size: int = 32,
levels: Optional = None,
spp: bool = False) -> nn.Module:
"""
    Gets a 5CNN as in the paper "Optimization as a Model for Few-Shot Learning". Finn et al. (MAML) also use
    the same hps:
"For MiniImagenet, we used 32 filters per layer to reduce overfitting, as done by (Ravi & Larochelle, 2017)."
Copy pasted from my script that ran experiments:
args.bn_momentum = 0.95
args.bn_eps = 1e-3
args.grad_clip_mode = 'clip_all_together'
args.image_size = 84
args.base_model = Learner(image_size=args.image_size, bn_eps=args.bn_eps, bn_momentum=args.bn_momentum,
n_classes=args.n_classes).to(args.device)
"""
return Learner(image_size, bn_eps, bn_momentum, n_classes, filter_size, levels, spp)
def get_default_learner_and_hps_dict(image_size: int = 84,
bn_eps: float = 1e-3,
bn_momentum: float = 0.95,
n_classes: int = 5,
filter_size: int = 32,
levels: Optional = None,
spp: bool = False,
in_channels: int = 3
) -> tuple[nn.Module, dict]:
model_hps: dict = {'image_size': image_size, 'bn_eps': bn_eps, 'bn_momentum': bn_momentum,
'n_classes': n_classes, 'filter_size': filter_size, 'levels': levels,
'spp': spp, 'in_channels': in_channels}
model: nn.Module = Learner(**model_hps)
return model, model_hps
def get_default_learner_from_default_args(args: Optional[Namespace] = None) -> nn.Module:
if args is None:
args = get_defaul_args_for_5cnn()
mdl = get_learner_from_args(args)
return mdl
def get_feature_extractor_pool_layers(L: int = 4) -> list[str]:
return [f'model.features.pool{i}' for i in range(1, L + 1)]
def get_feature_extractor_conv_layers(L: int = 4, include_cls: bool = False) -> list[str]:
"""
    Note: if the cls is present then we need B >= s*D since its output has shape
    [B, n_c] where n_c is the number of classes, so we need e.g. B >= 10*5 = 50.
s being used for B = 13 is
s_cls = B/n_c = 13/5 = 2.6
s_cls = B/n_c = 26/5 = 5.2
"""
layers: list[str] = [f'model.features.conv{i}' for i in range(1, L + 1)]
if include_cls:
layers: list[str] = layers + ['model.cls']
return layers
def get_head_cls() -> list[str]:
return ['model.cls']
def get_all_layers_minus_cls(L: int = 4) -> list[str]:
    layer_names: list[str] = []
for l in range(1, L + 1):
layer_name1: str = f'model.features.conv{l}'
layer_name2: str = f'model.features.norm{l}'
layer_name3: str = f'model.features.relu{l}'
layer_name4: str = f'model.features.pool{l}'
layer_names.append(layer_name1)
layer_names.append(layer_name2)
layer_names.append(layer_name3)
layer_names.append(layer_name4)
return layer_names
def get_last_two_layers(layer_type: str = 'conv', include_cls: bool = True,
start_L: int = 4, end_L: int = 4
) -> list[str]:
assert layer_type in ['conv', 'norm', 'relu', 'pool']
layers: list[str] = [f'model.features.{layer_type}{i}' for i in range(start_L, end_L + 1)]
if include_cls:
layers: list[str] = layers + ['model.cls']
return layers
class Learner(nn.Module):
def __init__(self, image_size,
bn_eps: float,
bn_momentum: float,
n_classes: int,
filter_size: int = 32, # Meta-LSTM & MAML use 32 filters
levels: Optional = None,
spp: bool = False,
in_channels: int = 3,
):
"""[summary]
Args:
image_size ([type]): [description]
bn_eps ([type]): [description]
bn_momentum ([type]): [description]
n_classes ([type]): [description]
levels ([type], optional): [description]. Defaults to None.
spp (bool, optional): [description]. Defaults to False.
"""
super().__init__()
self.spp = spp
# - note: "model" is also a Module
self.model = nn.ModuleDict({'features': nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(in_channels=in_channels, out_channels=filter_size, kernel_size=3, padding=1)),
('norm1', nn.BatchNorm2d(filter_size, bn_eps, bn_momentum)),
('relu1', nn.ReLU(inplace=False)),
('pool1', nn.MaxPool2d(kernel_size=2)),
('conv2', nn.Conv2d(in_channels=filter_size, out_channels=filter_size, kernel_size=3, padding=1)),
('norm2', nn.BatchNorm2d(filter_size, bn_eps, bn_momentum)),
('relu2', nn.ReLU(inplace=False)),
('pool2', nn.MaxPool2d(kernel_size=2)),
('conv3', nn.Conv2d(in_channels=filter_size, out_channels=filter_size, kernel_size=3, padding=1)),
('norm3', nn.BatchNorm2d(filter_size, bn_eps, bn_momentum)),
('relu3', nn.ReLU(inplace=False)),
('pool3', nn.MaxPool2d(kernel_size=2)),
('conv4', nn.Conv2d(in_channels=filter_size, out_channels=filter_size, kernel_size=3, padding=1)),
('norm4', nn.BatchNorm2d(filter_size, bn_eps, bn_momentum)),
('relu4', nn.ReLU(inplace=False)),
('pool4', nn.MaxPool2d(kernel_size=2))]))
})
if spp:
spp_ = SPP(filter_size, levels)
self.model.update({'spp': spp_})
self.model.update({'cls': nn.Linear(spp_.output_size, n_classes)})
else:
clr_in = image_size // 2 ** 4
self.model.update({'cls': nn.Linear(filter_size * clr_in * clr_in, n_classes)})
# self.criterion = nn.CrossEntropyLoss()
@property
def cls(self):
return self.model.cls
@cls.setter
def cls(self, new_cls):
        self.model.cls = new_cls
def forward(self, x):
out = self.model.features(x)
if self.spp:
out = self.model.spp(out)
else:
out = torch.reshape(out, [out.size(0), -1])
outputs = self.model.cls(out)
return outputs
def get_flat_params(self):
# return torch_uu.cat([p.view(-1) for p in self.model.parameters()], 0)
pass
def copy_flat_params(self, cI):
# idx = 0
# for p in self.model.parameters():
# plen = p.view(-1).size(0)
# p.data.copy_(cI[idx: idx+plen].view_as(p))
# idx += plen
pass
def transfer_params(self, learner_w_grad, cI):
# Use load_state_dict only to copy the running mean/var in batchnorm, the values of the parameters
# are going to be replaced by cI
# self.load_state_dict(learner_w_grad.state_dict())
# # replace nn.Parameters with tensors from cI (NOT nn.Parameters anymore).
# idx = 0
# for m in self.model.modules():
# if isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.Linear):
# wlen = m._parameters['weight'].view(-1).size(0)
# m._parameters['weight'] = cI[idx: idx+wlen].view_as(m._parameters['weight']).clone()
# idx += wlen
# if m._parameters['bias'] is not None:
# blen = m._parameters['bias'].view(-1).size(0)
# m._parameters['bias'] = cI[idx: idx+blen].view_as(m._parameters['bias']).clone()
# idx += blen
pass
def reset_batch_stats(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.reset_running_stats()
def load_model_5CNN_opt_as_model_for_few_shot(model_hps: dict) -> nn.Module:
# - get the hps of the model & build the instance
from uutils.torch_uu.models.learner_from_opt_as_few_shot_paper import Learner
model: nn.Module = Learner(**model_hps)
return model
def replace_final_layer(args: Namespace, n_classes: int):
if hasattr(args.model, 'cls'):
args.model.cls = nn.Linear(args.model.cls.in_features, n_classes)
# -- tests
def pass_cifarfs_data_through_5cnn_model_test():
"""
shape is torch.Size([3, 32, 32])
:return:
"""
# args = Namespace()
# args.data_root = Path('~/data/CIFAR-FS/').expanduser()
# args.data_aug = True
# imagenet = CIFAR100(args.data_root, args.data_aug, 'train')
# print(len(imagenet))
# print(imagenet.__getitem__(500)[0].shape)
B = 4
CHW = [3, 32, 32]
x = torch.randn([B] + CHW)
mdl = get_default_learner()
y = mdl(x)
print(y.shape)
if __name__ == '__main__':
pass_cifarfs_data_through_5cnn_model_test()
print('Done!\a') | [
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.randn"
] | 1.9.1 | patricks-lab/ultimate-utils | e32922d79eddba8cbe9f954a96ef2205491d8a4a |
1.0 | import numpy as np
import torch.nn as nn
from .inventory import model_urls
from .layer_factory import convbnrelu, InvertedResidualBlock, conv1x1
from .model_zoo import load_url
from ..misc.utils import make_list
__all__ = ["mobilenetv2"]
class MobileNetv2(nn.Module):
"""MobileNet-v2 definition.
More information about the model: https://arxiv.org/abs/1801.04381
Args:
return_idx (list or int): indices of the layers to be returned
during the forward pass.
Attributes:
mobilenet_config (list): list of definitions of each layer that includes
expansion rate, number of output channels,
number of repeats, stride.
in_planes (int): number of channels in the stem block.
"""
# expansion rate, output channels, number of repeats, stride
mobilenet_config = [
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
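    # e.g. [6, 24, 2, 2] above means: expand by 6x, produce 24 output channels, repeat the block twice,
    # and use stride 2 in the first repeat (later repeats use stride 1, see the loop in __init__)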
in_planes = 32 # number of input channels
num_layers = len(mobilenet_config)
def __init__(self, return_idx=[6]):
super(MobileNetv2, self).__init__()
self.return_idx = make_list(return_idx)
self.layer1 = convbnrelu(
3, self.in_planes, kernel_size=3, stride=2, act=nn.ReLU6(inplace=True)
)
c_layer = 2
for t, c, n, s in self.mobilenet_config:
layers = []
for idx in range(n):
layers.append(
InvertedResidualBlock(
self.in_planes,
c,
expansion_factor=t,
stride=s if idx == 0 else 1,
)
)
self.in_planes = c
setattr(self, "layer{}".format(c_layer), nn.Sequential(*layers))
c_layer += 1
self._out_c = [self.mobilenet_config[idx][1] for idx in self.return_idx]
def forward(self, x):
outs = []
x = self.layer1(x)
outs.append(self.layer2(x)) # 16, x / 2
outs.append(self.layer3(outs[-1])) # 24, x / 4
outs.append(self.layer4(outs[-1])) # 32, x / 8
outs.append(self.layer5(outs[-1])) # 64, x / 16
outs.append(self.layer6(outs[-1])) # 96, x / 16
outs.append(self.layer7(outs[-1])) # 160, x / 32
outs.append(self.layer8(outs[-1])) # 320, x / 32
return [outs[idx] for idx in self.return_idx]
def mobilenetv2(pretrained=True, **kwargs):
"""Constructs the mobilenet-v2 network.
Args:
pretrained (bool): whether to load pre-trained weights.
Returns:
`nn.Module` instance.
"""
model = MobileNetv2(**kwargs)
if pretrained:
model.load_state_dict(load_url(model_urls["mobilenetv2"]))
return model
| [
"torch.nn.ReLU6",
"torch.nn.Sequential"
] | 1.0.0 | DrSleep/DenseTorch | f90bef075429d763fc08338dea8222d28b0a4516 |
1.0 | import pytest
import torch
import kornia as kornia
from torch.autograd import gradcheck
from torch.testing import assert_allclose
import utils # test utilities
from common import device_type
class TestPinholeCamera:
def _create_intrinsics(self, batch_size, fx, fy, cx, cy):
intrinsics = torch.eye(4).expand(batch_size, -1, -1)
intrinsics[..., 0, 0] = fx
intrinsics[..., 1, 1] = fy
intrinsics[..., 0, 2] = cx
intrinsics[..., 1, 2] = cy
return intrinsics
def _create_extrinsics(self, batch_size, tx, ty, tz):
extrinsics = torch.eye(4).expand(batch_size, -1, -1)
extrinsics[..., 0, -1] = tx
extrinsics[..., 1, -1] = ty
extrinsics[..., 2, -1] = tz
return extrinsics
def test_smoke(self):
intrinsics = torch.eye(4)[None]
extrinsics = torch.eye(4)[None]
height = torch.ones(1)
width = torch.ones(1)
pinhole = kornia.PinholeCamera(intrinsics, extrinsics, height, width)
assert isinstance(pinhole, kornia.PinholeCamera)
def test_pinhole_camera_attributes(self):
batch_size = 1
height, width = 4, 6
fx, fy, cx, cy = 1, 2, width / 2, height / 2
tx, ty, tz = 1, 2, 3
intrinsics = self._create_intrinsics(batch_size, fx, fy, cx, cy)
extrinsics = self._create_extrinsics(batch_size, tx, ty, tz)
height = torch.ones(batch_size) * height
width = torch.ones(batch_size) * width
pinhole = kornia.PinholeCamera(intrinsics, extrinsics, height, width)
assert pinhole.batch_size == batch_size
assert pinhole.fx.item() == fx
assert pinhole.fy.item() == fy
assert pinhole.cx.item() == cx
assert pinhole.cy.item() == cy
assert pinhole.tx.item() == tx
assert pinhole.ty.item() == ty
assert pinhole.tz.item() == tz
assert pinhole.height.item() == height
assert pinhole.width.item() == width
assert pinhole.rt_matrix.shape == (batch_size, 3, 4)
assert pinhole.camera_matrix.shape == (batch_size, 3, 3)
assert pinhole.rotation_matrix.shape == (batch_size, 3, 3)
assert pinhole.translation_vector.shape == (batch_size, 3, 1)
def test_pinhole_camera_translation_setters(self):
batch_size = 1
height, width = 4, 6
fx, fy, cx, cy = 1, 2, width / 2, height / 2
tx, ty, tz = 1, 2, 3
intrinsics = self._create_intrinsics(batch_size, fx, fy, cx, cy)
extrinsics = self._create_extrinsics(batch_size, tx, ty, tz)
height = torch.ones(batch_size) * height
width = torch.ones(batch_size) * width
pinhole = kornia.PinholeCamera(intrinsics, extrinsics, height, width)
assert pinhole.tx.item() == tx
assert pinhole.ty.item() == ty
assert pinhole.tz.item() == tz
# add offset
pinhole.tx += 3.
pinhole.ty += 2.
pinhole.tz += 1.
assert pinhole.tx.item() == tx + 3.
assert pinhole.ty.item() == ty + 2.
assert pinhole.tz.item() == tz + 1.
# set to zero
pinhole.tx = 0.
pinhole.ty = 0.
pinhole.tz = 0.
assert pinhole.tx.item() == 0.
assert pinhole.ty.item() == 0.
assert pinhole.tz.item() == 0.
def test_pinhole_camera_attributes_batch2(self):
batch_size = 2
height, width = 4, 6
fx, fy, cx, cy = 1, 2, width / 2, height / 2
tx, ty, tz = 1, 2, 3
intrinsics = self._create_intrinsics(batch_size, fx, fy, cx, cy)
extrinsics = self._create_extrinsics(batch_size, tx, ty, tz)
height = torch.ones(batch_size) * height
width = torch.ones(batch_size) * width
pinhole = kornia.PinholeCamera(intrinsics, extrinsics, height, width)
assert pinhole.batch_size == batch_size
assert pinhole.fx.shape[0] == batch_size
assert pinhole.fy.shape[0] == batch_size
assert pinhole.cx.shape[0] == batch_size
assert pinhole.cy.shape[0] == batch_size
assert pinhole.tx.shape[0] == batch_size
assert pinhole.ty.shape[0] == batch_size
assert pinhole.tz.shape[0] == batch_size
assert pinhole.height.shape[0] == batch_size
assert pinhole.width.shape[0] == batch_size
assert pinhole.rt_matrix.shape == (batch_size, 3, 4)
assert pinhole.camera_matrix.shape == (batch_size, 3, 3)
assert pinhole.rotation_matrix.shape == (batch_size, 3, 3)
assert pinhole.translation_vector.shape == (batch_size, 3, 1)
def test_pinhole_camera_scale(self):
batch_size = 2
height, width = 4, 6
fx, fy, cx, cy = 1, 2, width / 2, height / 2
tx, ty, tz = 1, 2, 3
scale_val = 2.0
intrinsics = self._create_intrinsics(batch_size, fx, fy, cx, cy)
extrinsics = self._create_extrinsics(batch_size, tx, ty, tz)
height = torch.ones(batch_size) * height
width = torch.ones(batch_size) * width
scale_factor = torch.ones(batch_size) * scale_val
pinhole = kornia.PinholeCamera(intrinsics, extrinsics, height, width)
pinhole_scale = pinhole.scale(scale_factor)
assert utils.check_equal_torch(
pinhole_scale.intrinsics[..., 0, 0],
pinhole.intrinsics[..., 0, 0] * scale_val) # fx
assert utils.check_equal_torch(
pinhole_scale.intrinsics[..., 1, 1],
pinhole.intrinsics[..., 1, 1] * scale_val) # fy
assert utils.check_equal_torch(
pinhole_scale.intrinsics[..., 0, 2],
pinhole.intrinsics[..., 0, 2] * scale_val) # cx
assert utils.check_equal_torch(
pinhole_scale.intrinsics[..., 1, 2],
pinhole.intrinsics[..., 1, 2] * scale_val) # cy
assert utils.check_equal_torch(
pinhole_scale.height,
pinhole.height * scale_val)
assert utils.check_equal_torch(
pinhole_scale.width,
pinhole.width * scale_val)
def test_pinhole_camera_scale_inplace(self):
batch_size = 2
height, width = 4, 6
fx, fy, cx, cy = 1, 2, width / 2, height / 2
tx, ty, tz = 1, 2, 3
scale_val = 2.0
intrinsics = self._create_intrinsics(batch_size, fx, fy, cx, cy)
extrinsics = self._create_extrinsics(batch_size, tx, ty, tz)
height = torch.ones(batch_size) * height
width = torch.ones(batch_size) * width
scale_factor = torch.ones(batch_size) * scale_val
pinhole = kornia.PinholeCamera(intrinsics, extrinsics, height, width)
pinhole_scale = pinhole.clone()
pinhole_scale.scale_(scale_factor)
assert utils.check_equal_torch(
pinhole_scale.intrinsics[..., 0, 0],
pinhole.intrinsics[..., 0, 0] * scale_val) # fx
assert utils.check_equal_torch(
pinhole_scale.intrinsics[..., 1, 1],
pinhole.intrinsics[..., 1, 1] * scale_val) # fy
assert utils.check_equal_torch(
pinhole_scale.intrinsics[..., 0, 2],
pinhole.intrinsics[..., 0, 2] * scale_val) # cx
assert utils.check_equal_torch(
pinhole_scale.intrinsics[..., 1, 2],
pinhole.intrinsics[..., 1, 2] * scale_val) # cy
assert utils.check_equal_torch(
pinhole_scale.height, pinhole.height * scale_val)
assert utils.check_equal_torch(
pinhole_scale.width, pinhole.width * scale_val)
'''@pytest.mark.parametrize("batch_size", [1, 2, 5, 6])
def test_scale_pinhole(batch_size, device_type):
# generate input data
device = torch.device(device_type)
pinholes = torch.rand(batch_size, 12).to(device)
scales = torch.rand(batch_size).to(device)
pinholes_scale = kornia.scale_pinhole(pinholes, scales)
assert utils.check_equal_torch(
pinholes_scale[..., :6] / scales.unsqueeze(-1), pinholes[..., :6])
# evaluate function gradient
pinholes = utils.tensor_to_gradcheck_var(pinholes) # to var
scales = utils.tensor_to_gradcheck_var(scales) # to var
assert gradcheck(kornia.scale_pinhole, (pinholes, scales,),
raise_exception=True)
@pytest.mark.parametrize("batch_size", [1, 2, 5, 6])
def test_pinhole_matrix(batch_size, device_type):
# generate input data
image_height, image_width = 32., 32.
cx, cy = image_width / 2, image_height / 2
fx, fy = 1., 1.
rx, ry, rz = 0., 0., 0.
tx, ty, tz = 0., 0., 0.
offset_x = 10. # we will apply a 10units offset to `i` camera
eps = 1e-6
pinhole = utils.create_pinhole(
fx, fy, cx, cy, image_height, image_width, rx, ry, rx, tx, ty, tz)
pinhole = pinhole.repeat(batch_size, 1).to(torch.device(device_type))
pinhole_matrix = kornia.pinhole_matrix(pinhole)
ones = torch.ones(batch_size)
assert bool((pinhole_matrix[:, 0, 0] == fx * ones).all())
assert bool((pinhole_matrix[:, 1, 1] == fy * ones).all())
assert bool((pinhole_matrix[:, 0, 2] == cx * ones).all())
assert bool((pinhole_matrix[:, 1, 2] == cy * ones).all())
# functional
assert kornia.PinholeMatrix()(pinhole).shape == (batch_size, 4, 4)
# evaluate function gradient
pinhole = utils.tensor_to_gradcheck_var(pinhole) # to var
assert gradcheck(kornia.pinhole_matrix, (pinhole,),
raise_exception=True)
@pytest.mark.parametrize("batch_size", [1, 2, 5, 6])
def test_inverse_pinhole_matrix(batch_size, device_type):
# generate input data
image_height, image_width = 32., 32.
cx, cy = image_width / 2, image_height / 2
fx, fy = 1., 1.
rx, ry, rz = 0., 0., 0.
tx, ty, tz = 0., 0., 0.
offset_x = 10. # we will apply a 10units offset to `i` camera
eps = 1e-6
pinhole = utils.create_pinhole(
fx, fy, cx, cy, image_height, image_width, rx, ry, rx, tx, ty, tz)
pinhole = pinhole.repeat(batch_size, 1).to(torch.device(device_type))
pinhole_matrix = kornia.inverse_pinhole_matrix(pinhole)
ones = torch.ones(batch_size)
assert utils.check_equal_torch(pinhole_matrix[:, 0, 0], (1. / fx) * ones)
assert utils.check_equal_torch(pinhole_matrix[:, 1, 1], (1. / fy) * ones)
assert utils.check_equal_torch(
pinhole_matrix[:, 0, 2], (-1. * cx / fx) * ones)
assert utils.check_equal_torch(
pinhole_matrix[:, 1, 2], (-1. * cy / fx) * ones)
# functional
assert kornia.InversePinholeMatrix()(pinhole).shape == (batch_size, 4, 4)
# evaluate function gradient
pinhole = utils.tensor_to_gradcheck_var(pinhole) # to var
assert gradcheck(kornia.pinhole_matrix, (pinhole,),
raise_exception=True)
@pytest.mark.parametrize("batch_size", [1, 2, 5, 6])
def test_homography_i_H_ref(batch_size, device_type):
# generate input data
device = torch.device(device_type)
image_height, image_width = 32., 32.
cx, cy = image_width / 2, image_height / 2
fx, fy = 1., 1.
rx, ry, rz = 0., 0., 0.
tx, ty, tz = 0., 0., 0.
offset_x = 10. # we will apply a 10units offset to `i` camera
eps = 1e-6
pinhole_ref = utils.create_pinhole(
fx, fy, cx, cy, image_height, image_width, rx, ry, rx, tx, ty, tz)
pinhole_ref = pinhole_ref.repeat(batch_size, 1).to(device)
pinhole_i = utils.create_pinhole(
fx,
fy,
cx,
cy,
image_height,
image_width,
rx,
ry,
rx,
tx + offset_x,
ty,
tz)
pinhole_i = pinhole_i.repeat(batch_size, 1).to(device)
# compute homography from ref to i
i_H_ref = kornia.homography_i_H_ref(pinhole_i, pinhole_ref) + eps
i_H_ref_inv = torch.inverse(i_H_ref)
# compute homography from i to ref
ref_H_i = kornia.homography_i_H_ref(pinhole_ref, pinhole_i) + eps
assert utils.check_equal_torch(i_H_ref_inv, ref_H_i)
# evaluate function gradient
assert gradcheck(kornia.homography_i_H_ref,
(utils.tensor_to_gradcheck_var(pinhole_ref) + eps,
utils.tensor_to_gradcheck_var(pinhole_i) + eps,),
raise_exception=True)'''
class TestNormalizePixelCoordinates:
def test_small(self):
height, width = 3, 4
grid = kornia.utils.create_meshgrid(
height, width, normalized_coordinates=False)
expected = kornia.utils.create_meshgrid(
height, width, normalized_coordinates=True)
grid_norm = kornia.normalize_pixel_coordinates(
grid, height, width)
assert_allclose(grid_norm, expected)
def test_jit(self):
@torch.jit.script
def op_script(input: torch.Tensor, height: int,
width: int) -> torch.Tensor:
return kornia.normalize_pixel_coordinates(input, height, width)
height, width = 3, 4
grid = kornia.utils.create_meshgrid(
height, width, normalized_coordinates=False)
actual = op_script(grid, height, width)
expected = kornia.normalize_pixel_coordinates(
grid, height, width)
assert_allclose(actual, expected)
def test_jit_trace(self):
@torch.jit.script
def op_script(input, height, width):
return kornia.normalize_pixel_coordinates(input, height, width)
# 1. Trace op
height, width = 3, 4
grid = kornia.utils.create_meshgrid(
height, width, normalized_coordinates=False)
op_traced = torch.jit.trace(
op_script,
(grid, torch.tensor(height), torch.tensor(width),))
# 2. Generate new input
height, width = 2, 5
grid = kornia.utils.create_meshgrid(
height, width, normalized_coordinates=False).repeat(2, 1, 1, 1)
# 3. Evaluate
actual = op_traced(
grid, torch.tensor(height), torch.tensor(width))
expected = kornia.normalize_pixel_coordinates(
grid, height, width)
assert_allclose(actual, expected)
| [
"torch.eye",
"torch.testing.assert_allclose",
"torch.tensor",
"torch.ones"
] | 1.0.0 | jiangwei221/kornia | a211d4952355e440b944b1bda8eed4c2a7457c2d |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import pytest
import torch
from scipy.linalg import sqrtm as scipy_sqrtm
from torch.utils.data import Dataset
from torchmetrics.image.fid import FID, sqrtm
from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE
torch.manual_seed(42)
@pytest.mark.parametrize("matrix_size", [2, 10, 100, 500])
def test_matrix_sqrt(matrix_size):
""" test that metrix sqrt function works as expected """
def generate_cov(n):
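        # (X - mean)^T (X - mean) of random data yields a symmetric positive semi-definite, covariance-like matrix;
        # the product of two such matrices is the kind of input whose square root FID needs to compute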
data = torch.randn(2 * n, n)
return (data - data.mean(dim=0)).T @ (data - data.mean(dim=0))
cov1 = generate_cov(matrix_size)
cov2 = generate_cov(matrix_size)
scipy_res = scipy_sqrtm((cov1 @ cov2).numpy()).real
tm_res = sqrtm(cov1 @ cov2)
assert torch.allclose(torch.tensor(scipy_res).float(), tm_res, atol=1e-3)
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason="test requires torch-fidelity")
def test_no_train():
""" Assert that metric never leaves evaluation mode """
class MyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.metric = FID()
def forward(self, x):
return x
model = MyModel()
model.train()
assert model.training
assert not model.metric.inception.training, 'FID metric was changed to training mode which should not happen'
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason='test requires torch-fidelity')
def test_fid_pickle():
""" Assert that we can initialize the metric and pickle it"""
metric = FID()
assert metric
# verify metrics work after being loaded from pickled state
pickled_metric = pickle.dumps(metric)
metric = pickle.loads(pickled_metric)
def test_fid_raises_errors_and_warnings():
""" Test that expected warnings and errors are raised """
with pytest.warns(
UserWarning,
match='Metric `FID` will save all extracted features in buffer.'
' For large datasets this may lead to large memory footprint.'
):
_ = FID()
if _TORCH_FIDELITY_AVAILABLE:
with pytest.raises(ValueError, match='Integer input to argument `feature` must be one of .*'):
_ = FID(feature=2)
else:
with pytest.raises(
ValueError,
match='FID metric requires that Torch-fidelity is installed.'
'Either install as `pip install torchmetrics[image-quality]`'
' or `pip install torch-fidelity`'
):
_ = FID()
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason='test requires torch-fidelity')
def test_fid_same_input():
""" if real and fake are update on the same data the fid score should be 0 """
metric = FID(feature=192)
for _ in range(2):
img = torch.randint(0, 255, (5, 3, 299, 299), dtype=torch.uint8)
metric.update(img, real=True)
metric.update(img, real=False)
assert torch.allclose(torch.cat(metric.real_features, dim=0), torch.cat(metric.fake_features, dim=0))
val = metric.compute()
assert torch.allclose(val, torch.zeros_like(val), atol=1e-3)
class _ImgDataset(Dataset):
def __init__(self, imgs):
self.imgs = imgs
def __getitem__(self, idx):
return self.imgs[idx]
def __len__(self):
return self.imgs.shape[0]
@pytest.mark.skipif(not torch.cuda.is_available(), reason='test is too slow without gpu')
@pytest.mark.skipif(not _TORCH_FIDELITY_AVAILABLE, reason='test requires torch-fidelity')
def test_compare_fid(tmpdir, feature=2048):
""" check that the hole pipeline give the same result as torch-fidelity """
from torch_fidelity import calculate_metrics
metric = FID(feature=feature).cuda()
# Generate some synthetic data
img1 = torch.randint(0, 180, (100, 3, 299, 299), dtype=torch.uint8)
img2 = torch.randint(100, 255, (100, 3, 299, 299), dtype=torch.uint8)
batch_size = 10
for i in range(img1.shape[0] // batch_size):
metric.update(img1[batch_size * i:batch_size * (i + 1)].cuda(), real=True)
for i in range(img2.shape[0] // batch_size):
metric.update(img2[batch_size * i:batch_size * (i + 1)].cuda(), real=False)
torch_fid = calculate_metrics(
_ImgDataset(img1), _ImgDataset(img2), fid=True, feature_layer_fid=str(feature), batch_size=batch_size
)
tm_res = metric.compute()
assert torch.allclose(tm_res.cpu(), torch.tensor([torch_fid['frechet_inception_distance']]), atol=1e-3)
| [
"torch.cat",
"torch.manual_seed",
"torch.randint",
"torch.cuda.is_available",
"torch.tensor",
"torch.zeros_like",
"torch.randn"
] | 1.3.1 | vatch123/metrics | 1841cad3839f5d1907a1bb8bb6a266de5c5333f9 |
1.4 | """ Classifier head and layer factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from torch.nn import functional as F
from .adaptive_avgmax_pool import SelectAdaptivePool2d
def _create_pool(num_features, num_classes, pool_type='avg', use_conv=False):
flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling
if not pool_type:
assert num_classes == 0 or use_conv,\
'Pooling can only be disabled if classifier is also removed or conv classifier is used'
flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling)
global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten_in_pool)
num_pooled_features = num_features * global_pool.feat_mult()
return global_pool, num_pooled_features
def _create_fc(num_features, num_classes, use_conv=False):
if num_classes <= 0:
fc = nn.Identity() # pass-through (no classifier)
elif use_conv:
fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
else:
fc = nn.Linear(num_features, num_classes, bias=True)
return fc
def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False):
global_pool, num_pooled_features = _create_pool(num_features, num_classes, pool_type, use_conv=use_conv)
fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)
return global_pool, fc
class ClassifierHead(nn.Module):
"""Classifier head w/ configurable global pooling and dropout."""
def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0., use_conv=False):
super(ClassifierHead, self).__init__()
self.drop_rate = drop_rate
self.global_pool, num_pooled_features = _create_pool(in_chs, num_classes, pool_type, use_conv=use_conv)
self.fc = _create_fc(num_pooled_features, num_classes, use_conv=use_conv)
self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()
def forward(self, x):
x = self.global_pool(x)
if self.drop_rate:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
x = self.fc(x)
x = self.flatten(x)
return x
| [
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.Conv2d",
"torch.nn.Flatten"
] | 1.4.0 | Robert-JunWang/pytorch-image-models | 7c67d6aca992f039eece0af5f7c29a43d48c00e4 |
1.8 | from torch import nn
from torch.nn import functional as F
from model.layers import SNConv2d
class ReshapeNet(nn.Module):
"""The "initial reconstruction network" of SCSNet"""
def __init__(self, in_channels, block_size=4):
super().__init__()
self.block_size = block_size
self.conv = nn.Conv2d(in_channels, block_size ** 2, kernel_size=1)
def forward(self, x):
x = self.conv(x)
out = self._permute(x)
return out
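    # _permute turns the (block_size ** 2)-channel map from the 1x1 conv back into a single-channel image:
    # the channel vector at each spatial position becomes one block_size x block_size patch, so a
    # [B, block_size**2, H, W] tensor is reshaped into [B, 1, H * block_size, W * block_size].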
def _permute(self, x):
B, C, H, W = x.shape
x = x.permute(0, 2, 3, 1)
x = x.view(B, H, W, self.block_size, self.block_size)
x = x.permute(0, 1, 3, 2, 4).contiguous()
out = x.view(-1, 1, H * self.block_size, W * self.block_size)
return out
class UpsampleNet(nn.Module):
def __init__(self, sampling_ratio, upsamplenet_config):
super().__init__()
kernel_size = 4
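        # one input channel per compressed measurement of a kernel_size x kernel_size block,
        # i.e. sampling_ratio * 16 channels for the default 4x4 blocks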
first_out_channels = int(sampling_ratio * kernel_size ** 2)
config = upsamplenet_config
self.up1 = UpResBlock(
in_channels=first_out_channels,
out_channels=config["out_channels_1"],
middle_channels=None,
upsample=True,
use_transpose_conv=config["use_transpose_conv"],
spectral_norm=config["spectral_norm"],
)
self.up2 = UpResBlock(
in_channels=config["out_channels_1"],
out_channels=config["out_channels_2"],
middle_channels=None,
upsample=True,
use_transpose_conv=config["use_transpose_conv"],
spectral_norm=config["spectral_norm"],
)
def forward(self, x):
x = self.up1(x)
out = self.up2(x) # passed to UNet
return out
class UpResBlock(nn.Module):
def __init__(
self,
in_channels,
out_channels,
middle_channels=None,
upsample=True,
use_transpose_conv=False,
norm_type="instance",
spectral_norm=True,
init_type="xavier",
):
super().__init__()
self.upsample = upsample
self.use_transpose_conv = use_transpose_conv
if middle_channels is None:
middle_channels = out_channels
if use_transpose_conv:
assert upsample is True
self.conv1 = nn.ConvTranspose2d(
in_channels,
middle_channels,
kernel_size=2,
stride=2,
padding=1,
bias=False,
)
self.conv2 = nn.ConvTranspose2d(
middle_channels,
out_channels,
kernel_size=2,
stride=2,
padding=1,
bias=False,
)
else: # if transpose conv is not used.
# The `_residual_block` method will decide whether or not it upsamples depending on `upsample == True/False`
conv = SNConv2d if spectral_norm else nn.Conv2d
self.conv1 = conv(
in_channels,
middle_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.conv2 = conv(
middle_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(middle_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
def _upsample(self, x, conv_layer):
if self.use_transpose_conv:
return conv_layer(x)
else:
return conv_layer(
F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False)
)
def _residual_block(self, x):
x = self._upsample(x, self.conv1) if self.upsample else self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
out = self.relu(x)
return out
def _shortcut(self, x):
return self._upsample(x, self.conv1) if self.upsample else self.conv1(x)
def forward(self, x):
return self._residual_block(x) + self._shortcut(x)
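# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file).
# spectral_norm=False keeps the example to plain nn.Conv2d; with upsample=True and
# bilinear interpolation each block doubles the spatial resolution.
if __name__ == "__main__":
    import torch
    block = UpResBlock(in_channels=16, out_channels=32, upsample=True,
                       use_transpose_conv=False, spectral_norm=False)
    out = block(torch.randn(2, 16, 8, 8))
    print(out.shape)  # expected: torch.Size([2, 32, 16, 16])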
| [
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.functional.interpolate",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.8.1 | stephenllh/bcs-unet | be534a25e28cbe3501278d0ee6e2417b2cd737d3 |
1.8 | import os
from pathlib import Path
import time
import argparse
import warnings
import numpy as np
import cv2
import scipy.ndimage
import scipy.io
import math
import torch
import pytorch_lightning as pl
from .learner import ReconNetLearner
from utils import voltage2pixel, load_config
parser = argparse.ArgumentParser()
parser.add_argument(
"-s",
"--sampling_ratio",
type=float,
required=True,
help="Sampling ratio in percentage",
)
args = parser.parse_args()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
warnings.simplefilter("ignore")
def setup():
inference_config = load_config("../config/inference_config.yaml")
sr = args.sampling_ratio
checkpoint_folder = f"../logs/ReconNet_STL10_{int(sr * 100):04d}/best"
if not os.path.exists(checkpoint_folder):
run_name = os.listdir(Path(checkpoint_folder).parent)[-1]
checkpoint_path = (
f"{Path(checkpoint_folder).parent}/{run_name}/checkpoints/last.ckpt"
)
message = (
f"The checkpoint from the run '{run_name}' is selected by default. "
+ "If this is not intended, change the name of the preferred checkpoint folder to 'best'."
)
print(message)
else:
checkpoint_path = f"{checkpoint_folder}/checkpoints/last.ckpt"
learner = ReconNetLearner.load_from_checkpoint(checkpoint_path=checkpoint_path)
trainer = pl.Trainer(
gpus=1 if inference_config["gpu"] else 0,
logger=False,
default_root_dir="../",
)
return learner, trainer
class RealDataset:
def __init__(self, sampling_ratio, inference_config):
self.real_data = inference_config["real_data"]
self.phi = np.load(inference_config["measurement_matrix"])
self.c = int(sampling_ratio / 100 * 16)
def __getitem__(self, idx):
real_data = self.real_data[idx]
path = os.path.join("../inference_input", real_data["filename"])
y_input = scipy.io.loadmat(path)["y"]
y_input = y_input[
np.mod(np.arange(len(y_input)), len(y_input) // 64) < self.c
] # discard extra measurements
y_input = torch.FloatTensor(y_input).permute(1, 0)
y_input -= y_input.min()
y_input /= real_data["max"]
        # Permute is necessary because the measurements were sampled in "channel-last" order,
        # so we rearrange them to channel-first to match PyTorch's convention
y_input = y_input.view(-1, self.c)
y_input = y_input.permute(1, 0).contiguous()
y_input = y_input.view(
-1, int(math.sqrt(y_input.shape[-1])), int(math.sqrt(y_input.shape[-1]))
)
y_input = voltage2pixel(
y_input, self.phi[: self.c], real_data["min"], real_data["max"]
)
return y_input
def __len__(self):
return len(self.real_data)
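# Hedged shape trace for RealDataset.__getitem__ (added for illustration; the concrete
# numbers are assumptions): with c = 4 retained measurements per 4x4 block and 1024
# blocks (a 128x128 target image), the view/permute chain above behaves as
#   loadmat(...)["y"]            -> (4096, 1)   block-major, "channel-last"
#   .permute(1, 0)               -> (1, 4096)
#   .view(-1, c)                 -> (1024, 4)
#   .permute(1, 0).contiguous()  -> (4, 1024)
#   .view(-1, 32, 32)            -> (4, 32, 32)  channel-first, passed to voltage2pixel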
def deploy(learner):
"""Real experimental data"""
inference_config = load_config("../config/inference_config.yaml")
sr = args.sampling_ratio
directory = f"../inference_images/ReconNet/SPI/{int(sr * 100):04d}"
os.makedirs(directory, exist_ok=True)
real_dataset = RealDataset(sr, inference_config)
for x in real_dataset:
prediction = learner(x.unsqueeze(0))
prediction = prediction.squeeze().squeeze().cpu().detach().numpy()
prediction = scipy.ndimage.zoom(prediction, 4, order=0, mode="nearest")
cv2.imwrite(f"{directory}/{time.time()}.png", prediction * 255)
print("Finished reconstructing SPI images.")
if __name__ == "__main__":
learner, trainer = setup()
deploy(learner)
| [
"torch.FloatTensor"
] | 1.8.1 | stephenllh/bcs-unet | be534a25e28cbe3501278d0ee6e2417b2cd737d3 |
1.3 | import os
import argparse
import time
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import pickle as pkl
parser = argparse.ArgumentParser('ODE demo')
parser.add_argument('--method', type=str, choices=['dopri5', 'adams'], default='dopri5')
parser.add_argument('--data_size', type=int, default=1000)
parser.add_argument('--batch_time', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--niters', type=int, default=2000)
parser.add_argument('--test_freq', type=int, default=20)
parser.add_argument('--viz', action='store_true')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--adjoint', action='store_true')
parser.add_argument('--niki', action='store_true')
args = parser.parse_args()
if args.adjoint:
from torchdiffeq import odeint_adjoint as odeint
else:
from torchdiffeq import odeint
device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
true_y0 = torch.tensor([[2., 0.]]).to(device)
t = torch.linspace(0., 25., args.data_size).to(device)
true_A = torch.tensor([[-0.1, 2.0], [-2.0, -0.1]]).to(device)
class Lambda(nn.Module):
def forward(self, t, y):
return torch.mm(y**3, true_A)
with torch.no_grad():
true_y = odeint(Lambda(), true_y0, t, method='dopri5')
def get_batch():
s = torch.from_numpy(np.random.choice(np.arange(args.data_size - args.batch_time, dtype=np.int64), args.batch_size, replace=False))
batch_y0 = true_y[s] # (M, D)
batch_t = t[:args.batch_time] # (T)
batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0) # (T, M, D)
return batch_y0.to(device), batch_t.to(device), batch_y.to(device)
def get_noised_batch():
s = torch.from_numpy(np.random.choice(np.arange(args.data_size - args.batch_time, dtype=np.int64), args.batch_size, replace=False))
batch_y0 = true_y[s] # (M, D)
batch_y0 += torch.normal(mean=0., std=0.2, size=batch_y0.shape)
batch_t = t[:args.batch_time] # (T)
batch_y = torch.stack([true_y[s + i] for i in range(args.batch_time)], dim=0) # (T, M, D)
batch_y += torch.normal(mean=0., std=0.2, size=batch_y.shape)
return batch_y0.to(device), batch_t.to(device), batch_y.to(device)
def makedirs(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
if args.viz:
makedirs('png')
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 4), facecolor='white')
ax_traj = fig.add_subplot(131, frameon=False)
ax_phase = fig.add_subplot(132, frameon=False)
ax_vecfield = fig.add_subplot(133, frameon=False)
plt.show(block=False)
def visualize(true_y, pred_y, odefunc, itr):
if args.viz:
ax_traj.cla()
ax_traj.set_title('Trajectories')
ax_traj.set_xlabel('t')
ax_traj.set_ylabel('x,y')
ax_traj.plot(t.cpu().numpy(), true_y.cpu().numpy()[:, 0, 0], t.cpu().numpy(), true_y.cpu().numpy()[:, 0, 1], 'g-')
ax_traj.plot(t.cpu().numpy(), pred_y.cpu().numpy()[:, 0, 0], '--', t.cpu().numpy(), pred_y.cpu().numpy()[:, 0, 1], 'b--')
ax_traj.set_xlim(t.cpu().min(), t.cpu().max())
ax_traj.set_ylim(-2, 2)
ax_traj.legend()
ax_phase.cla()
ax_phase.set_title('Phase Portrait')
ax_phase.set_xlabel('x')
ax_phase.set_ylabel('y')
ax_phase.plot(true_y.cpu().numpy()[:, 0, 0], true_y.cpu().numpy()[:, 0, 1], 'g-')
ax_phase.plot(pred_y.cpu().numpy()[:, 0, 0], pred_y.cpu().numpy()[:, 0, 1], 'b--')
ax_phase.set_xlim(-2, 2)
ax_phase.set_ylim(-2, 2)
ax_vecfield.cla()
ax_vecfield.set_title('Learned Vector Field')
ax_vecfield.set_xlabel('x')
ax_vecfield.set_ylabel('y')
y, x = np.mgrid[-2:2:21j, -2:2:21j]
dydt = odefunc(0, torch.Tensor(np.stack([x, y], -1).reshape(21 * 21, 2)).to(device)).cpu().detach().numpy()
mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
dydt = (dydt / mag)
dydt = dydt.reshape(21, 21, 2)
ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
ax_vecfield.set_xlim(-2, 2)
ax_vecfield.set_ylim(-2, 2)
fig.tight_layout()
plt.savefig('png/{:03d}'.format(itr))
plt.draw()
plt.pause(0.001)
class ODEFunc(nn.Module):
def __init__(self):
super(ODEFunc, self).__init__()
self.net = nn.Sequential(
nn.Linear(2, 50),
nn.Tanh(),
nn.Linear(50, 2),
)
for m in self.net.modules():
if isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=0.1)
nn.init.constant_(m.bias, val=0)
def forward(self, t, y):
return self.net(y**3)
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
if __name__ == '__main__':
ii = 0
func = ODEFunc().to(device)
optimizer = optim.RMSprop(func.parameters(), lr=1e-3)
end = time.time()
time_meter = RunningAverageMeter(0.97)
loss_meter = RunningAverageMeter(0.97)
total_int_time = 0.
total_der_time = 0.
losses = []
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
batch_y0, batch_t, batch_y = get_noised_batch() # batch_y0 is just for convenience; batch_y has the same information
a = time.time()
if not args.niki:
pred_y = odeint(func, batch_y0, batch_t).to(device)
        # Niki's alternative approach: integrate a single step in parallel from every
        # observed state in batch_y, instead of the full trajectory from batch_y0
else:
parallel_predictions = odeint(func, batch_y[:-1], batch_t[:2])[1]
pred_y = torch.vstack([batch_y0[np.newaxis], parallel_predictions])
b = time.time()
total_int_time += b - a
# print('together, we get', n_pred_y.shape)
# pred_ys =
# print("times are", batch_t.shape)
# print("starting points are", batch_y0.shape)
# print("the predicted y is", pred_y.shape)
# print("the true y is", batch_y.shape)
# raise SystemExit
loss = torch.mean(torch.abs(pred_y - batch_y))
c = time.time()
loss.backward()
d = time.time()
total_der_time += d - c
optimizer.step()
time_meter.update(time.time() - end)
if not args.niki:
loss_meter.update(loss.item())
losses.append(loss.item())
else:
with torch.no_grad():
untracked_pred_y = odeint(func, batch_y0, batch_t).to(device)
untracked_loss = torch.mean(torch.abs(untracked_pred_y - batch_y))
loss_meter.update(untracked_loss.item())
losses.append(untracked_loss.item())
if itr % args.test_freq == 0:
with torch.no_grad():
pred_y = odeint(func, true_y0, t)
loss = torch.mean(torch.abs(pred_y - true_y))
print('Iter {:04d} | Total Loss {:.6f}'.format(itr, loss.item()))
visualize(true_y, pred_y, func, ii)
ii += 1
end = time.time()
print("data size", args.data_size)
print("batch time", args.batch_time)
print("Niki integration?", args.niki)
print("adjoint?", args.adjoint)
print("the average integration time was", total_int_time / args.niters)
print("the average differentiation time was", total_der_time / args.niters)
print("the loss meter says:", "cur:", loss_meter.val, "avg:", loss_meter.avg)
filename = f'data_size_{args.data_size}_batch_time_{args.batch_time}_niki_{args.niki}_adjoint_{args.adjoint}'
with open(filename, 'wb') as f:
pkl.dump(losses, f)
| [
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.vstack",
"torch.nn.init.constant_",
"torch.normal",
"torch.abs",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.Tanh",
"torch.linspace",
"torch.mm",
"torch.no_grad"
] | 1.3.0 | nikihowe/torchdiffeq | 6d717af9d4e836294be314a9610e3baee764e31b |
1.6 | """
This script tests the approach on the BUCC 2018 shared task on finding parallel sentences:
https://comparable.limsi.fr/bucc2018/bucc2018-task.html
You can download the necessary files from there.
We have used it in our paper (https://arxiv.org/pdf/2004.09813.pdf) in Section 4.2 to evaluate different multilingual models.
This script requires that you have FAISS installed:
https://github.com/facebookresearch/faiss
"""
from sentence_transformers import SentenceTransformer, models
from collections import defaultdict
import os
import pickle
from sklearn.decomposition import PCA
import torch
from bitext_mining_utils import *
#Model we want to use for bitext mining. LaBSE achieves state-of-the-art performance
model_name = 'LaBSE'
model = SentenceTransformer(model_name)
#Input files for the BUCC2018 shared task
source_file = "bucc2018/de-en/de-en.training.de"
target_file = "bucc2018/de-en/de-en.training.en"
labels_file = "bucc2018/de-en/de-en.training.gold"
# We base the scoring on k nearest neighbors for each element
knn_neighbors = 4
# Min score for text pairs. Note, score can be larger than 1
min_threshold = 1
#Do we want to use exact search or approximate nearest neighbor search (ANN)?
#Exact search: Slower, but we don't miss any parallel sentences
#ANN: Faster, but the recall will be lower
use_ann_search = True
#Number of clusters for ANN. Optimal number depends on dataset size
ann_num_clusters = 32768
#How many clusters to explore during search. Higher number = better recall, but slower
ann_num_cluster_probe = 5
#To save memory, we can use PCA to reduce the dimensionality from 768 to, for example, 128 dimensions
#The encoded embeddings will hence require 6 times less memory. However, we observe a small drop in performance.
use_pca = False
pca_dimensions = 128
#We store the embeddings on disc, so that they can later be loaded from disc
source_embedding_file = '{}_{}_{}.emb'.format(model_name, os.path.basename(source_file), pca_dimensions if use_pca else model.get_sentence_embedding_dimension())
target_embedding_file = '{}_{}_{}.emb'.format(model_name, os.path.basename(target_file), pca_dimensions if use_pca else model.get_sentence_embedding_dimension())
#Use PCA to reduce the dimensionality of the sentence embedding model
if use_pca:
# We use a smaller number of training sentences to learn the PCA
train_sent = []
num_train_sent = 20000
with open(source_file, encoding='utf8') as fSource, open(target_file, encoding='utf8') as fTarget:
for line_source, line_target in zip(fSource, fTarget):
id, sentence = line_source.strip().split("\t", maxsplit=1)
train_sent.append(sentence)
id, sentence = line_target.strip().split("\t", maxsplit=1)
train_sent.append(sentence)
if len(train_sent) >= num_train_sent:
break
print("Encode training embeddings for PCA")
train_matrix = model.encode(train_sent, show_progress_bar=True, convert_to_numpy=True)
pca = PCA(n_components=pca_dimensions)
pca.fit(train_matrix)
dense = models.Dense(in_features=model.get_sentence_embedding_dimension(), out_features=pca_dimensions, bias=False, activation_function=torch.nn.Identity())
dense.linear.weight = torch.nn.Parameter(torch.tensor(pca.components_))
model.add_module('dense', dense)
print("Read source file")
source = {}
with open(source_file, encoding='utf8') as fIn:
for line in fIn:
id, sentence = line.strip().split("\t", maxsplit=1)
source[id] = sentence
print("Read target file")
target = {}
with open(target_file, encoding='utf8') as fIn:
for line in fIn:
id, sentence = line.strip().split("\t", maxsplit=1)
target[id] = sentence
labels = defaultdict(lambda: defaultdict(bool))
num_total_parallel = 0
with open(labels_file) as fIn:
for line in fIn:
src_id, trg_id = line.strip().split("\t")
if src_id in source and trg_id in target:
labels[src_id][trg_id] = True
labels[trg_id][src_id] = True
num_total_parallel += 1
print("Source Sentences:", len(source))
print("Target Sentences:", len(target))
print("Num Parallel:", num_total_parallel)
### Encode source sentences
source_ids = list(source.keys())
source_sentences = [source[id] for id in source_ids]
if not os.path.exists(source_embedding_file):
print("Encode source sentences")
source_embeddings = model.encode(source_sentences, show_progress_bar=True, convert_to_numpy=True)
with open(source_embedding_file, 'wb') as fOut:
pickle.dump(source_embeddings, fOut)
else:
with open(source_embedding_file, 'rb') as fIn:
source_embeddings = pickle.load(fIn)
### Encode target sentences
target_ids = list(target.keys())
target_sentences = [target[id] for id in target_ids]
if not os.path.exists(target_embedding_file):
print("Encode target sentences")
target_embeddings = model.encode(target_sentences, show_progress_bar=True, convert_to_numpy=True)
with open(target_embedding_file, 'wb') as fOut:
pickle.dump(target_embeddings, fOut)
else:
with open(target_embedding_file, 'rb') as fIn:
target_embeddings = pickle.load(fIn)
##### Now we start to search for parallel (translated) sentences
# Normalize embeddings
x = source_embeddings
y = target_embeddings
print("Shape Source:", x.shape)
print("Shape Target:", y.shape)
x = x / np.linalg.norm(x, axis=1, keepdims=True)
y = y / np.linalg.norm(y, axis=1, keepdims=True)
# Perform kNN in both directions
x2y_sim, x2y_ind = kNN(x, y, knn_neighbors, use_ann_search, ann_num_clusters, ann_num_cluster_probe)
x2y_mean = x2y_sim.mean(axis=1)
y2x_sim, y2x_ind = kNN(y, x, knn_neighbors, use_ann_search, ann_num_clusters, ann_num_cluster_probe)
y2x_mean = y2x_sim.mean(axis=1)
# Compute forward and backward scores
margin = lambda a, b: a / b
fwd_scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
bwd_scores = score_candidates(y, x, y2x_ind, y2x_mean, x2y_mean, margin)
fwd_best = x2y_ind[np.arange(x.shape[0]), fwd_scores.argmax(axis=1)]
bwd_best = y2x_ind[np.arange(y.shape[0]), bwd_scores.argmax(axis=1)]
indices = np.stack([np.concatenate([np.arange(x.shape[0]), bwd_best]), np.concatenate([fwd_best, np.arange(y.shape[0])])], axis=1)
scores = np.concatenate([fwd_scores.max(axis=1), bwd_scores.max(axis=1)])
seen_src, seen_trg = set(), set()
#Extract the list of parallel sentences
bitext_list = []
for i in np.argsort(-scores):
src_ind, trg_ind = indices[i]
src_ind = int(src_ind)
trg_ind = int(trg_ind)
if scores[i] < min_threshold:
break
if src_ind not in seen_src and trg_ind not in seen_trg:
seen_src.add(src_ind)
seen_trg.add(trg_ind)
bitext_list.append([scores[i], source_ids[src_ind], target_ids[trg_ind]])
# Measure Performance by computing the threshold
# that leads to the best F1 score performance
bitext_list = sorted(bitext_list, key=lambda x: x[0], reverse=True)
n_extract = n_correct = 0
threshold = 0
best_f1 = best_recall = best_precision = 0
average_precision = 0
for idx in range(len(bitext_list)):
score, id1, id2 = bitext_list[idx]
n_extract += 1
if labels[id1][id2] or labels[id2][id1]:
n_correct += 1
precision = n_correct / n_extract
recall = n_correct / num_total_parallel
f1 = 2 * precision * recall / (precision + recall)
average_precision += precision
if f1 > best_f1:
best_f1 = f1
best_precision = precision
best_recall = recall
threshold = (bitext_list[idx][0] + bitext_list[min(idx + 1, len(bitext_list)-1)][0]) / 2
print("Best Threshold:", threshold)
print("Recall:", best_recall)
print("Precision:", best_precision)
print("F1:", best_f1)
| [
"torch.nn.Identity",
"torch.tensor"
] | 1.6.0 | danielperezr88/sentence-transformers | 56a7990c56c484e7948cf6400b54f27114bb267c |
1.4 | # -*- coding: utf-8 -*
import torch
import torch.nn as nn
from videoanalyst.model.backbone.backbone_base import (TRACK_BACKBONES,
VOS_BACKBONES)
from videoanalyst.model.common_opr.common_block import conv_bn_relu
from videoanalyst.model.module_base import ModuleBase
@VOS_BACKBONES.register
@TRACK_BACKBONES.register
class TinyConv(ModuleBase):  # register TinyConv with both TRACK_BACKBONES and VOS_BACKBONES
r"""
TinyNet
Customized, extremely pruned ConvNet
Hyper-parameters
----------------
pretrain_model_path: string
        Path to the pretrained backbone parameter file;
        the parameter is loaded in _update_params_
"""
default_hyper_params = {"pretrain_model_path": ""}
def __init__(self):
super(TinyConv, self).__init__()
self.conv1 = conv_bn_relu(3, 32, stride=2, kszie=3, pad=0)
self.pool1 = nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True)
self.conv2a = conv_bn_relu(32, 64, stride=1, kszie=1, pad=0)
self.conv2b = conv_bn_relu(64, 64, stride=2, kszie=7, pad=0, groups=64)
self.conv3a = conv_bn_relu(64, 64, stride=1, kszie=3, pad=0)
self.conv3b = conv_bn_relu(64,
64,
stride=1,
kszie=1,
pad=0,
has_relu=False)
# initialization
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
stddev = m.stddev if hasattr(m, 'stddev') else 0.1
X = stats.truncnorm(-2, 2, scale=stddev)
values = torch.as_tensor(X.rvs(m.weight.numel()),
dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.conv2a(x)
x = self.conv2b(x)
x = self.conv3a(x)
x = self.conv3b(x)
return x
| [
"torch.nn.init.constant_",
"torch.no_grad",
"torch.nn.MaxPool2d"
] | 1.4.0 | ShiAngWang/video_analyst | de4f86363cc408695428b423e8d6e346aa35149b |
1.1 | import torch
from MerCBO.graphGP.kernels.diffusionkernel import DiffusionKernel
from MerCBO.graphGP.models.gp_regression import GPRegression
from MerCBO.graphGP.inference.inference import Inference
from MerCBO.graphGP.sampler.tool_partition import group_input
from MerCBO.acquisition.acquisition_functions import expected_improvement
def acquisition_expectation(x, inference_samples, partition_samples, n_vertices, acquisition_func=expected_improvement,
reference=None):
"""
    The acquisition function value is averaged over the GP posterior samples (one inference object per sample)
:param x: 1d or 2d tensor
:param inference_samples: inference method for each posterior sample
:param partition_samples:
:param n_vertices:
:param acquisition_func:
:param reference:
:return:
"""
if x.dim() == 1:
x = x.unsqueeze(0)
acquisition_sample_list = []
for s in range(len(inference_samples)):
hyper = inference_samples[s].model.param_to_vec()
grouped_x = group_input(x, sorted_partition=partition_samples[s], n_vertices=n_vertices)
pred_dist = inference_samples[s].predict(grouped_x, hyper=hyper, verbose=False)
pred_mean_sample = pred_dist[0].detach()
pred_var_sample = pred_dist[1].detach()
acquisition_sample_list.append(acquisition_func(pred_mean_sample[:, 0], pred_var_sample[:, 0],
reference=reference))
return torch.stack(acquisition_sample_list, 1).sum(1, keepdim=True)
def inference_sampling(input_data, output_data, n_vertices, hyper_samples, log_beta_samples, partition_samples,
freq_samples, basis_samples):
"""
:param input_data:
:param output_data:
:param n_vertices:
:param hyper_samples:
:param log_beta_samples:
:param partition_samples:
:param freq_samples:
:param basis_samples:
:return:
"""
inference_samples = []
for s in range(len(hyper_samples)):
grouped_log_beta = torch.stack([torch.sum(log_beta_samples[s][subset]) for subset in partition_samples[s]])
kernel = DiffusionKernel(grouped_log_beta=grouped_log_beta,
fourier_freq_list=freq_samples[s], fourier_basis_list=basis_samples[s])
model = GPRegression(kernel=kernel)
model.vec_to_param(hyper_samples[s])
grouped_input_data = group_input(input_data=input_data, sorted_partition=partition_samples[s],
n_vertices=n_vertices)
inference = Inference((grouped_input_data, output_data), model=model)
inference_samples.append(inference)
return inference_samples
def prediction_statistic(x, inference_samples, partition_samples, n_vertices):
if x.dim() == 1:
x = x.unsqueeze(0)
mean_sample_list = []
std_sample_list = []
var_sample_list = []
for s in range(len(inference_samples)):
grouped_x = group_input(input_data=x, sorted_partition=partition_samples[s], n_vertices=n_vertices)
# print(grouped_x)
hyper = inference_samples[s].model.param_to_vec()
# pred_dist = inference_samples[s].predict(grouped_x)
pred_dist = inference_samples[s].predict(grouped_x, hyper=hyper)
pred_mean_sample = pred_dist[0]
pred_var_sample = pred_dist[1]
pred_std_sample = pred_var_sample ** 0.5
mean_sample_list.append(pred_mean_sample.data)
std_sample_list.append(pred_std_sample.data)
var_sample_list.append(pred_var_sample.data)
return torch.cat(mean_sample_list, 1).mean(1, keepdim=True),\
torch.cat(std_sample_list, 1).mean(1, keepdim=True),\
torch.cat(var_sample_list, 1).mean(1, keepdim=True)
| [
"torch.cat",
"torch.stack",
"torch.sum"
] | 1.1.0 | aryandeshwal/MerCBO | 526dfbc05bb7be3a77a30d8943233707f1636f14 |
1.10 | from ..base_module import RegressionModel, PairedModel
from .base_model import CnnModel
import torch.nn as nn
class RegressionCnnModel(RegressionModel):
def __init__(self, cfg, train_df = None, val_df = None, test_df = None):
super().__init__(cfg, CnnModel(cfg), train_df, val_df, test_df)
def forward(self, input, attn_mask):
output = self.model(input, attn_mask).squeeze()
return output
def training_step(self, batch, batch_idx):
y = batch.pop('target')
output = self(batch['input_ids'], batch['attention_mask'])
loss = self.criterion(y, output)
self.log('train_loss', loss)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
y = batch.pop('target')
output1 = self(batch['more_toxic_ids'], batch['more_toxic_mask'])
output2 = self(batch['less_toxic_ids'], batch['less_toxic_mask'])
loss = nn.MarginRankingLoss(margin = self.cfg['margin'])(output1, output2, y)
acc = (output1 > output2).float().mean()
return {'loss': loss, 'acc': acc}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
output = self(batch['input_ids'], batch['attention_mask']).squeeze().cpu()
return output
class PairedCnnModel(PairedModel):
def __init__(self, cfg, train_df = None, val_df = None, test_df = None):
super().__init__(cfg, CnnModel(cfg), train_df, val_df, test_df)
def forward(self, input, attn_mask):
output = self.model(input, attn_mask).squeeze()
return output
def training_step(self, batch, batch_idx):
y = batch.pop('target')
output1 = self(batch['more_toxic_ids'], batch['more_toxic_mask'])
output2 = self(batch['less_toxic_ids'], batch['less_toxic_mask'])
loss = nn.MarginRankingLoss(margin = self.cfg['margin'])(output1, output2, y)
acc = (output1 > output2).float().mean()
self.log('train_loss', loss)
self.log('train_acc', acc)
return {'loss': loss, 'acc': acc}
def validation_step(self, batch, batch_idx):
y = batch.pop('target')
output1 = self(batch['more_toxic_ids'], batch['more_toxic_mask'])
output2 = self(batch['less_toxic_ids'], batch['less_toxic_mask'])
loss = nn.MarginRankingLoss(margin = self.cfg['margin'])(output1, output2, y)
acc = (output1 > output2).float().mean()
return {'loss': loss, 'acc': acc}
def predict_step(self, batch, batch_idx, dataloader_idx=0):
output = self(batch['input_ids'], batch['attention_mask']).squeeze().cpu()
return output | [
"torch.nn.MarginRankingLoss"
] | 1.10.0 | alexvishnevskiy/jigsaw | 7fc2c4cd3700a54e9c5cbc02870bf4057b0a9fe3 |
1.4 | # Copyright (c) 2021, Soohwan Kim. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from kospeech.models.jasper.model import Jasper
batch_size = 3
sequence_length = 14321
dimension = 80
cuda = torch.cuda.is_available()
device = torch.device('cuda' if cuda else 'cpu')
inputs = torch.rand(batch_size, sequence_length, dimension).to(device) # BxTxD
input_lengths = torch.LongTensor([14321, 14300, 13000]).to(device)
print("Jasper 10x3 Model Test..")
model = Jasper(num_classes=10, version='10x5').to(device)
output = model.recognize(inputs, input_lengths)
print(output)
print(output.size())
print("Jasper 5x3 Model Test..")
model = Jasper(num_classes=10, version='5x3').to(device)
output = model.recognize(inputs, input_lengths)
print(output)
print(output.size())
| [
"torch.device",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.rand"
] | 1.4.0 | daiyaanarfeen/kospeech | 5aff5c7647e5cceceddf7b22c991777fc3792400 |
1.10 | import random
from pathlib import Path
import cv2
import numpy as np
import torch
from vp_suite.base.base_dataset import VPDataset, VPData
import vp_suite.constants as constants
from vp_suite.utils.utils import set_from_kwarg
class KITTIRawDataset(VPDataset):
r"""
Dataset class for the "raw data" regime of the "KITTI Vision Benchmark Suite", as described in
"Vision meets Robotics: The KITTI Dataset" by Geiger et al. (http://www.cvlibs.net/publications/Geiger2013IJRR.pdf).
Each sequence shows a short clip of 'driving around the mid-size city of Karlsruhe, in rural areas and on highways'.
"""
NAME = "KITTI raw"
REFERENCE = "http://www.cvlibs.net/datasets/kitti/raw_data.php"
IS_DOWNLOADABLE = "With Registered Account"
DEFAULT_DATA_DIR = constants.DATA_PATH / "kitti_raw"
VALID_SPLITS = ["train", "val", "test"]
MIN_SEQ_LEN = 994 #: Minimum number of frames across all sequences (6349 in longest).
ACTION_SIZE = 0
DATASET_FRAME_SHAPE = (375, 1242, 3)
FPS = 10 #: Frames per Second.
AVAILABLE_CAMERAS = [f"image_{i:02d}" for i in range(4)] #: Available cameras: [`greyscale_left`, `greyscale_right`, `color_left`, `color_right`].
camera = "image_02" #: Chosen camera, can be set to any of the `AVAILABLE_CAMERAS`.
trainval_to_test_ratio = 0.8 #: The ratio of files that will be training/validation data (rest will be test data).
train_to_val_ratio = 0.9
trainval_test_seed = 1234 #: The random seed used to separate training/validation and testing data.
def __init__(self, split, **dataset_kwargs):
super(KITTIRawDataset, self).__init__(split, **dataset_kwargs)
self.NON_CONFIG_VARS.extend(["sequences", "sequences_with_frame_index",
"AVAILABLE_CAMERAS"])
# set attributes
set_from_kwarg(self, dataset_kwargs, "camera")
set_from_kwarg(self, dataset_kwargs, "trainval_to_test_ratio")
set_from_kwarg(self, dataset_kwargs, "train_to_val_ratio")
set_from_kwarg(self, dataset_kwargs, "trainval_test_seed")
set_from_kwarg(self, dataset_kwargs, "train_val_seed")
# get video filepaths
dd = Path(self.data_dir)
sequence_dirs = [sub for d in dd.iterdir() for sub in d.iterdir() if dd.is_dir() and sub.is_dir()]
if len(sequence_dirs) < 3:
raise ValueError(f"Dataset {self.NAME}: found less than 3 sequences "
f"-> can't split dataset -> can't use it")
# slice accordingly
slice_idx = max(1, int(len(sequence_dirs) * self.trainval_to_test_ratio))
random.Random(self.trainval_test_seed).shuffle(sequence_dirs)
if self.split == "test":
sequence_dirs = sequence_dirs[slice_idx:]
else:
sequence_dirs = sequence_dirs[:slice_idx]
slice_idx = max(1, int(len(sequence_dirs) * self.train_to_val_ratio))
random.Random(self.train_val_seed).shuffle(sequence_dirs)
if self.split == "train":
sequence_dirs = sequence_dirs[:slice_idx]
else:
sequence_dirs = sequence_dirs[slice_idx:]
# retrieve sequence lengths and store
self.sequences = []
for sequence_dir in sorted(sequence_dirs):
sequence_len = len(list(sequence_dir.rglob(f"{self.camera}/data/*.png")))
self.sequences.append((sequence_dir, sequence_len))
self.sequences_with_frame_index = [] # mock value, must not be used for iteration till sequence length is set
def _set_seq_len(self):
# Determine per video which frame indices are valid
for sequence_path, frame_count in self.sequences:
valid_start_idx = range(0, frame_count - self.seq_len + 1,
self.seq_len + self.seq_step - 1)
for idx in valid_start_idx:
self.sequences_with_frame_index.append((sequence_path, idx))
def __getitem__(self, i) -> VPData:
sequence_path, start_idx = self.sequences_with_frame_index[i]
all_img_paths = sorted(list(sequence_path.rglob(f"{self.camera}/data/*.png")))
seq_img_paths = all_img_paths[start_idx:start_idx+self.seq_len:self.seq_step] # t items of [h, w, c]
seq_imgs = [cv2.cvtColor(cv2.imread(str(fp.resolve())), cv2.COLOR_BGR2RGB) for fp in seq_img_paths]
vid = np.stack(seq_imgs, axis=0) # [t, *self.DATASET_FRAME_SHAPE]
vid = self.preprocess(vid) # [t, *self.img_shape]
actions = torch.zeros((self.total_frames, 1)) # [t, a], actions should be disregarded in training logic
data = {"frames": vid, "actions": actions, "origin": f"{sequence_path}, start frame: {start_idx}"}
return data
def __len__(self):
return len(self.sequences_with_frame_index)
@classmethod
def download_and_prepare_dataset(cls):
d_path = cls.DEFAULT_DATA_DIR
d_path.mkdir(parents=True, exist_ok=True)
# download and extract sequences if we can't find them in our folder yet
try:
_ = next(d_path.rglob(f"**/*.png"))
print(f"Found image data in {str(d_path.resolve())} -> Won't download {cls.NAME}")
except StopIteration:
from vp_suite.utils.utils import run_shell_command
import vp_suite.constants as constants
prep_script = (constants.PKG_RESOURCES / 'get_dataset_kitti_raw.sh').resolve()
run_shell_command(f"{prep_script} {cls.DEFAULT_DATA_DIR}")
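# Hedged worked example for _set_seq_len (added for illustration; the values are assumptions):
# with seq_len=6, seq_step=2 and a clip of frame_count=20 frames,
#   valid_start_idx = range(0, 20 - 6 + 1, 6 + 2 - 1) = [0, 7, 14],
# so the clip contributes three non-overlapping windows, and __getitem__ then keeps
# every 2nd frame of each window via all_img_paths[start : start + 6 : 2].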
| [
"torch.zeros"
] | 1.10.1 | angelvillar96/vp-suite | 3e7c7d852862bad09a771d754fc56a71abf0a25f |
1.7 | import copy
import logging
import math
import numpy as np
import PIL
import scipy
import torch
from .preprocess import Preprocess
from .. import utils
LOG = logging.getLogger(__name__)
class RotateBy90(Preprocess):
def __init__(self, angle_perturbation=0.0, fixed_angle=None):
super().__init__()
self.angle_perturbation = angle_perturbation
self.fixed_angle = fixed_angle
def __call__(self, image, anns, meta):
meta = copy.deepcopy(meta)
anns = copy.deepcopy(anns)
w, h = image.size
if self.fixed_angle is not None:
angle = self.fixed_angle
else:
rnd1 = float(torch.rand(1).item())
angle = int(rnd1 * 4.0) * 90.0
sym_rnd2 = (float(torch.rand(1).item()) - 0.5) * 2.0
angle += sym_rnd2 * self.angle_perturbation
LOG.debug('rotation angle = %f', angle)
assert meta['rotation']['angle'] == 0.0
meta['rotation']['angle'] = angle
meta['rotation']['width'] = w
meta['rotation']['height'] = h
# rotate image
if angle != 0.0:
im_np = np.asarray(image)
if im_np.shape[0] == im_np.shape[1] and angle == 90:
im_np = np.swapaxes(im_np, 0, 1)
im_np = np.flip(im_np, axis=0)
elif im_np.shape[0] == im_np.shape[1] and angle == 270:
im_np = np.swapaxes(im_np, 0, 1)
im_np = np.flip(im_np, axis=1)
elif im_np.shape[0] == im_np.shape[1] and angle == 180:
im_np = np.flip(im_np, axis=0)
im_np = np.flip(im_np, axis=1)
else:
im_np = scipy.ndimage.rotate(im_np, angle=angle, cval=127, reshape=False)
image = PIL.Image.fromarray(im_np)
LOG.debug('rotated by = %f degrees', angle)
# rotate keypoints
cangle = math.cos(angle / 180.0 * math.pi)
sangle = math.sin(angle / 180.0 * math.pi)
for ann in anns:
xy = ann['keypoints'][:, :2]
x_old = xy[:, 0].copy() - (w - 1) / 2
y_old = xy[:, 1].copy() - (h - 1) / 2
xy[:, 0] = (w - 1) / 2 + cangle * x_old + sangle * y_old
xy[:, 1] = (h - 1) / 2 - sangle * x_old + cangle * y_old
ann['bbox'] = utils.rotate_box(ann['bbox'], w - 1, h - 1, angle)
LOG.debug('meta before: %s', meta)
meta['valid_area'] = utils.rotate_box(meta['valid_area'], w - 1, h - 1, angle)
# fix valid area to be inside original image dimensions
original_valid_area = meta['valid_area'].copy()
meta['valid_area'][0] = np.clip(meta['valid_area'][0], 0, w)
meta['valid_area'][1] = np.clip(meta['valid_area'][1], 0, h)
new_rb_corner = original_valid_area[:2] + original_valid_area[2:]
new_rb_corner[0] = np.clip(new_rb_corner[0], 0, w)
new_rb_corner[1] = np.clip(new_rb_corner[1], 0, h)
meta['valid_area'][2:] = new_rb_corner - meta['valid_area'][:2]
LOG.debug('meta after: %s', meta)
return image, anns, meta
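# Hedged sanity check of the keypoint rotation above (added for illustration):
# for angle = 90 deg we get cangle = 0 and sangle = 1, so a keypoint 10 px to the
# right of the image centre (x_old' = 10, y_old' = 0 in centred coordinates) maps to
#   x_new' = 0*10 + 1*0  =  0
#   y_new' = -1*10 + 0*0 = -10
# i.e. 10 px above the centre in image coordinates (where y grows downwards).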
| [
"torch.rand"
] | 1.7.1 | adujardin/openpifpaf | 4fa79162f5529f5b0de72e2312aab54d410bee3f |
1.6 | import os, time
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, auc, precision_recall_curve
from skimage.measure import label, regionprops
from tqdm import tqdm
from visualize import *
from model import load_decoder_arch, load_encoder_arch, positionalencoding2d, activation
from utils import *
from custom_datasets import *
from custom_models import *
OUT_DIR = './viz/'
gamma = 0.0
theta = torch.nn.Sigmoid()
log_theta = torch.nn.LogSigmoid()
def train_meta_epoch(c, epoch, loader, encoder, decoders, optimizer, pool_layers, N):
P = c.condition_vec
L = c.pool_layers
decoders = [decoder.train() for decoder in decoders]
adjust_learning_rate(c, optimizer, epoch)
I = len(loader)
iterator = iter(loader)
for sub_epoch in range(c.sub_epochs):
train_loss = 0.0
train_count = 0
for i in range(I):
# warm-up learning rate
lr = warmup_learning_rate(c, epoch, i+sub_epoch*I, I*c.sub_epochs, optimizer)
# sample batch
try:
image, _, _ = next(iterator)
except StopIteration:
iterator = iter(loader)
image, _, _ = next(iterator)
# encoder prediction
image = image.to(c.device) # single scale
with torch.no_grad():
_ = encoder(image)
# train decoder
e_list = list()
c_list = list()
for l, layer in enumerate(pool_layers):
if 'vit' in c.enc_arch:
e = activation[layer].transpose(1, 2)[...,1:]
e_hw = int(np.sqrt(e.size(2)))
e = e.reshape(-1, e.size(1), e_hw, e_hw) # BxCxHxW
else:
e = activation[layer].detach() # BxCxHxW
#
B, C, H, W = e.size()
S = H*W
E = B*S
#
p = positionalencoding2d(P, H, W).to(c.device).unsqueeze(0).repeat(B, 1, 1, 1)
c_r = p.reshape(B, P, S).transpose(1, 2).reshape(E, P) # BHWxP
e_r = e.reshape(B, C, S).transpose(1, 2).reshape(E, C) # BHWxC
perm = torch.randperm(E).to(c.device) # BHW
decoder = decoders[l]
#
FIB = E//N # number of fiber batches
assert FIB > 0, 'MAKE SURE WE HAVE ENOUGH FIBERS, otherwise decrease N or batch-size!'
for f in range(FIB): # per-fiber processing
idx = torch.arange(f*N, (f+1)*N)
c_p = c_r[perm[idx]] # NxP
e_p = e_r[perm[idx]] # NxC
if 'cflow' in c.dec_arch:
z, log_jac_det = decoder(e_p, [c_p,])
else:
z, log_jac_det = decoder(e_p)
#
decoder_log_prob = get_logp(C, z, log_jac_det)
log_prob = decoder_log_prob / C # likelihood per dim
loss = -log_theta(log_prob)
optimizer.zero_grad()
loss.mean().backward()
optimizer.step()
train_loss += t2np(loss.sum())
train_count += len(loss)
#
mean_train_loss = train_loss / train_count
if c.verbose:
print('Epoch: {:d}.{:d} \t train loss: {:.4f}, lr={:.6f}'.format(epoch, sub_epoch, mean_train_loss, lr))
#
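# Hedged shape sketch of the "fiber" batching above (added for illustration; the
# numbers are assumptions): with B=8, C=256, H=W=16 and P=128, the encoder map yields
# E = 8*16*16 = 2048 fibers; each fiber is one C-dim feature vector e_p conditioned on
# its P-dim positional code c_p, and with N=256 the decoder sees FIB = 2048 // 256 = 8
# mini-batches per encoder batch.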
def test_meta_epoch(c, epoch, loader, encoder, decoders, pool_layers, N):
# test
if c.verbose:
print('\nCompute loss and scores on test set:')
#
P = c.condition_vec
decoders = [decoder.eval() for decoder in decoders]
height = list()
width = list()
image_list = list()
gt_label_list = list()
gt_mask_list = list()
test_dist = [list() for layer in pool_layers]
test_loss = 0.0
test_count = 0
start = time.time()
with torch.no_grad():
for i, (image, label, mask) in enumerate(tqdm(loader, disable=c.hide_tqdm_bar)):
# save
if c.viz:
image_list.extend(t2np(image))
gt_label_list.extend(t2np(label))
gt_mask_list.extend(t2np(mask))
# data
image = image.to(c.device) # single scale
_ = encoder(image) # BxCxHxW
# test decoder
e_list = list()
for l, layer in enumerate(pool_layers):
if 'vit' in c.enc_arch:
e = activation[layer].transpose(1, 2)[...,1:]
e_hw = int(np.sqrt(e.size(2)))
e = e.reshape(-1, e.size(1), e_hw, e_hw) # BxCxHxW
else:
e = activation[layer] # BxCxHxW
#
B, C, H, W = e.size()
S = H*W
E = B*S
#
if i == 0: # get stats
height.append(H)
width.append(W)
#
p = positionalencoding2d(P, H, W).to(c.device).unsqueeze(0).repeat(B, 1, 1, 1)
c_r = p.reshape(B, P, S).transpose(1, 2).reshape(E, P) # BHWxP
e_r = e.reshape(B, C, S).transpose(1, 2).reshape(E, C) # BHWxC
#
m = F.interpolate(mask, size=(H, W), mode='nearest')
m_r = m.reshape(B, 1, S).transpose(1, 2).reshape(E, 1) # BHWx1
#
decoder = decoders[l]
FIB = E//N + int(E%N > 0) # number of fiber batches
for f in range(FIB):
if f < (FIB-1):
idx = torch.arange(f*N, (f+1)*N)
else:
idx = torch.arange(f*N, E)
#
c_p = c_r[idx] # NxP
e_p = e_r[idx] # NxC
m_p = m_r[idx] > 0.5 # Nx1
#
if 'cflow' in c.dec_arch:
z, log_jac_det = decoder(e_p, [c_p,])
else:
z, log_jac_det = decoder(e_p)
#
decoder_log_prob = get_logp(C, z, log_jac_det)
log_prob = decoder_log_prob / C # likelihood per dim
loss = -log_theta(log_prob)
test_loss += t2np(loss.sum())
test_count += len(loss)
test_dist[l] = test_dist[l] + log_prob.detach().cpu().tolist()
#
fps = len(loader.dataset) / (time.time() - start)
mean_test_loss = test_loss / test_count
if c.verbose:
print('Epoch: {:d} \t test_loss: {:.4f} and {:.2f} fps'.format(epoch, mean_test_loss, fps))
#
return height, width, image_list, test_dist, gt_label_list, gt_mask_list
def test_meta_fps(c, epoch, loader, encoder, decoders, pool_layers, N):
# test
if c.verbose:
print('\nCompute loss and scores on test set:')
#
P = c.condition_vec
decoders = [decoder.eval() for decoder in decoders]
height = list()
width = list()
image_list = list()
gt_label_list = list()
gt_mask_list = list()
test_dist = [list() for layer in pool_layers]
test_loss = 0.0
test_count = 0
A = len(loader.dataset)
with torch.no_grad():
# warm-up
for i, (image, _, _) in enumerate(tqdm(loader, disable=c.hide_tqdm_bar)):
# data
image = image.to(c.device) # single scale
_ = encoder(image) # BxCxHxW
# measure encoder only
torch.cuda.synchronize()
start = time.time()
for i, (image, _, _) in enumerate(tqdm(loader, disable=c.hide_tqdm_bar)):
# data
image = image.to(c.device) # single scale
_ = encoder(image) # BxCxHxW
# measure encoder + decoder
torch.cuda.synchronize()
time_enc = time.time() - start
start = time.time()
for i, (image, _, _) in enumerate(tqdm(loader, disable = c.hide_tqdm_bar)):
# data
image = image.to(c.device) # single scale
_ = encoder(image) # BxCxHxW
# test decoder
e_list = list()
for l, layer in enumerate(pool_layers):
if 'vit' in c.enc_arch:
e = activation[layer].transpose(1, 2)[...,1:]
e_hw = int(np.sqrt(e.size(2)))
e = e.reshape(-1, e.size(1), e_hw, e_hw) # BxCxHxW
else:
e = activation[layer] # BxCxHxW
#
B, C, H, W = e.size()
S = H*W
E = B*S
#
if i == 0: # get stats
height.append(H)
width.append(W)
#
p = positionalencoding2d(P, H, W).to(c.device).unsqueeze(0).repeat(B, 1, 1, 1)
c_r = p.reshape(B, P, S).transpose(1, 2).reshape(E, P) # BHWxP
e_r = e.reshape(B, C, S).transpose(1, 2).reshape(E, C) # BHWxC
#
decoder = decoders[l]
FIB = E//N + int(E%N > 0) # number of fiber batches
for f in range(FIB):
if f < (FIB-1):
idx = torch.arange(f*N, (f+1)*N)
else:
idx = torch.arange(f*N, E)
#
c_p = c_r[idx] # NxP
e_p = e_r[idx] # NxC
#
if 'cflow' in c.dec_arch:
z, log_jac_det = decoder(e_p, [c_p,])
else:
z, log_jac_det = decoder(e_p)
#
torch.cuda.synchronize()
time_all = time.time() - start
fps_enc = A / time_enc
fps_all = A / time_all
print('Encoder/All {:.2f}/{:.2f} fps'.format(fps_enc, fps_all))
#
return height, width, image_list, test_dist, gt_label_list, gt_mask_list
def train(c):
run_date = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
L = c.pool_layers # number of pooled layers
print('Number of pool layers =', L)
encoder, pool_layers, pool_dims = load_encoder_arch(c, L)
encoder = encoder.to(c.device).eval()
#print(encoder)
# NF decoder
decoders = [load_decoder_arch(c, pool_dim) for pool_dim in pool_dims]
decoders = [decoder.to(c.device) for decoder in decoders]
params = list(decoders[0].parameters())
for l in range(1, L):
params += list(decoders[l].parameters())
# optimizer
optimizer = torch.optim.Adam(params, lr=c.lr)
# data
kwargs = {'num_workers': c.workers, 'pin_memory': True} if c.use_cuda else {}
# task data
if c.dataset == 'mvtec':
train_dataset = MVTecDataset(c, is_train=True)
test_dataset = MVTecDataset(c, is_train=False)
elif c.dataset == 'stc':
train_dataset = StcDataset(c, is_train=True)
test_dataset = StcDataset(c, is_train=False)
else:
raise NotImplementedError('{} is not supported dataset!'.format(c.dataset))
#
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=c.batch_size, shuffle=True, drop_last=True, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=c.batch_size, shuffle=False, drop_last=False, **kwargs)
N = 256 # hyperparameter that increases batch size for the decoder model by N
print('train/test loader length', len(train_loader.dataset), len(test_loader.dataset))
print('train/test loader batches', len(train_loader), len(test_loader))
# stats
det_roc_obs = Score_Observer('DET_AUROC')
seg_roc_obs = Score_Observer('SEG_AUROC')
seg_pro_obs = Score_Observer('SEG_AUPRO')
if c.action_type == 'norm-test':
c.meta_epochs = 1
for epoch in range(c.meta_epochs):
if c.action_type == 'norm-test' and c.checkpoint:
load_weights(encoder, decoders, c.checkpoint)
elif c.action_type == 'norm-train':
print('Train meta epoch: {}'.format(epoch))
train_meta_epoch(c, epoch, train_loader, encoder, decoders, optimizer, pool_layers, N)
else:
raise NotImplementedError('{} is not supported action type!'.format(c.action_type))
#height, width, test_image_list, test_dist, gt_label_list, gt_mask_list = test_meta_fps(
# c, epoch, test_loader, encoder, decoders, pool_layers, N)
height, width, test_image_list, test_dist, gt_label_list, gt_mask_list = test_meta_epoch(
c, epoch, test_loader, encoder, decoders, pool_layers, N)
# PxEHW
print('Heights/Widths', height, width)
test_map = [list() for p in pool_layers]
for l, p in enumerate(pool_layers):
test_norm = torch.tensor(test_dist[l], dtype=torch.double) # EHWx1
test_norm-= torch.max(test_norm) # normalize likelihoods to (-Inf:0] by subtracting a constant
test_prob = torch.exp(test_norm) # convert to probs in range [0:1]
            test_mask = test_prob.reshape(-1, height[l], width[l])
# upsample
test_map[l] = F.interpolate(test_mask.unsqueeze(1),
size=c.crp_size, mode='bilinear', align_corners=True).squeeze().numpy()
# score aggregation
score_map = np.zeros_like(test_map[0])
for l, p in enumerate(pool_layers):
score_map += test_map[l]
score_mask = score_map
# invert probs to anomaly scores
super_mask = score_mask.max() - score_mask
# calculate detection AUROC
score_label = np.max(super_mask, axis=(1, 2))
gt_label = np.asarray(gt_label_list, dtype=np.bool)
det_roc_auc = roc_auc_score(gt_label, score_label)
_ = det_roc_obs.update(100.0*det_roc_auc, epoch)
# calculate segmentation AUROC
gt_mask = np.squeeze(np.asarray(gt_mask_list, dtype=np.bool), axis=1)
seg_roc_auc = roc_auc_score(gt_mask.flatten(), super_mask.flatten())
save_best_seg_weights = seg_roc_obs.update(100.0*seg_roc_auc, epoch)
if save_best_seg_weights and c.action_type != 'norm-test':
save_weights(encoder, decoders, c.model, run_date) # avoid unnecessary saves
# calculate segmentation AUPRO
# from https://github.com/YoungGod/DFR:
if c.pro: # and (epoch % 4 == 0): # AUPRO is expensive to compute
max_step = 1000
expect_fpr = 0.3 # default 30%
max_th = super_mask.max()
min_th = super_mask.min()
delta = (max_th - min_th) / max_step
ious_mean = []
ious_std = []
pros_mean = []
pros_std = []
threds = []
fprs = []
binary_score_maps = np.zeros_like(super_mask, dtype=np.bool)
for step in range(max_step):
thred = max_th - step * delta
# segmentation
binary_score_maps[super_mask <= thred] = 0
binary_score_maps[super_mask > thred] = 1
pro = [] # per region overlap
iou = [] # per image iou
# pro: find each connected gt region, compute the overlapped pixels between the gt region and predicted region
# iou: for each image, compute the ratio, i.e. intersection/union between the gt and predicted binary map
for i in range(len(binary_score_maps)): # for i th image
# pro (per region level)
label_map = label(gt_mask[i], connectivity=2)
props = regionprops(label_map)
for prop in props:
x_min, y_min, x_max, y_max = prop.bbox # find the bounding box of an anomaly region
cropped_pred_label = binary_score_maps[i][x_min:x_max, y_min:y_max]
# cropped_mask = gt_mask[i][x_min:x_max, y_min:y_max] # bug!
cropped_mask = prop.filled_image # corrected!
intersection = np.logical_and(cropped_pred_label, cropped_mask).astype(np.float32).sum()
pro.append(intersection / prop.area)
# iou (per image level)
intersection = np.logical_and(binary_score_maps[i], gt_mask[i]).astype(np.float32).sum()
union = np.logical_or(binary_score_maps[i], gt_mask[i]).astype(np.float32).sum()
if gt_mask[i].any() > 0: # when the gt have no anomaly pixels, skip it
iou.append(intersection / union)
# against steps and average metrics on the testing data
ious_mean.append(np.array(iou).mean())
#print("per image mean iou:", np.array(iou).mean())
ious_std.append(np.array(iou).std())
pros_mean.append(np.array(pro).mean())
pros_std.append(np.array(pro).std())
# fpr for pro-auc
gt_masks_neg = ~gt_mask
fpr = np.logical_and(gt_masks_neg, binary_score_maps).sum() / gt_masks_neg.sum()
fprs.append(fpr)
threds.append(thred)
# as array
threds = np.array(threds)
pros_mean = np.array(pros_mean)
pros_std = np.array(pros_std)
fprs = np.array(fprs)
ious_mean = np.array(ious_mean)
ious_std = np.array(ious_std)
# best per image iou
best_miou = ious_mean.max()
#print(f"Best IOU: {best_miou:.4f}")
# default 30% fpr vs pro, pro_auc
idx = fprs <= expect_fpr # find the indexs of fprs that is less than expect_fpr (default 0.3)
fprs_selected = fprs[idx]
fprs_selected = rescale(fprs_selected) # rescale fpr [0,0.3] -> [0, 1]
pros_mean_selected = pros_mean[idx]
seg_pro_auc = auc(fprs_selected, pros_mean_selected)
_ = seg_pro_obs.update(100.0*seg_pro_auc, epoch)
#
save_results(det_roc_obs, seg_roc_obs, seg_pro_obs, c.model, c.class_name, run_date)
# export visualuzations
if c.viz:
precision, recall, thresholds = precision_recall_curve(gt_label, score_label)
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
det_threshold = thresholds[np.argmax(f1)]
print('Optimal DET Threshold: {:.2f}'.format(det_threshold))
precision, recall, thresholds = precision_recall_curve(gt_mask.flatten(), super_mask.flatten())
a = 2 * precision * recall
b = precision + recall
f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
seg_threshold = thresholds[np.argmax(f1)]
print('Optimal SEG Threshold: {:.2f}'.format(seg_threshold))
export_groundtruth(c, test_image_list, gt_mask)
export_scores(c, test_image_list, super_mask, seg_threshold)
export_test_images(c, test_image_list, gt_mask, super_mask, seg_threshold)
export_hist(c, gt_mask, super_mask, seg_threshold)
| [
"torch.randperm",
"torch.exp",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.max",
"torch.cuda.synchronize",
"torch.nn.Sigmoid",
"torch.arange",
"torch.no_grad",
"torch.optim.Adam",
"torch.nn.functional.interpolate",
"torch.nn.LogSigmoid"
] | 1.6.0 | Msmhasani/cflow-ad | bc8bcf796723ba885587a72a6fbbf45ecb4b7bf4 |
1.0 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/ARM/Ch.21/radon_vary_intercept_nofloor_chr.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'J' in data, 'variable not found in data: key=J'
assert 'N' in data, 'variable not found in data: key=N'
assert 'county' in data, 'variable not found in data: key=county'
assert 'u' in data, 'variable not found in data: key=u'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
J = data["J"]
N = data["N"]
county = data["county"]
u = data["u"]
y = data["y"]
def init_params(data):
params = {}
# assign init values for parameters
params["sigma_a"] = pyro.sample("sigma_a", dist.Uniform(0., 100.))
params["sigma_y"] = pyro.sample("sigma_y", dist.Uniform(0., 100.))
return params
def model(data, params):
# initialize data
J = data["J"]
N = data["N"]
county = data["county"].long() - 1
u = data["u"]
y = data["y"]
# init parameters
sigma_a = params["sigma_a"]
sigma_y = params["sigma_y"]
# initialize transformed parameters
mu_a = pyro.sample("mu_a", dist.Normal(0., 1))
with pyro.plate("J", J):
eta = pyro.sample("eta", dist.Normal(0., 1))
b = pyro.sample("b", dist.Normal(0., 1))
a = mu_a + sigma_a * eta
with pyro.plate("data", N):
y_hat = a[county] + u * b * 0.1
y = pyro.sample("y", dist.Normal(y_hat, sigma_y), obs=y)
| [
"torch.zeros",
"torch.ones"
] | 1.0.1 | jpchen/pyro-models | b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b |
1.0 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/ARM/Ch.13/y_x.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'N' in data, 'variable not found in data: key=N'
assert 'x' in data, 'variable not found in data: key=x'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
N = data["N"]
x = data["x"]
y = data["y"]
def init_params(data):
params = {}
params["beta"] = init_vector("beta", dims=(2)) # vector
params["sigma"] = pyro.sample("sigma", dist.Uniform(0., 1000.))
return params
def model(data, params):
# initialize data
N = data["N"]
x = data["x"]
y = data["y"]
# init parameters
beta = params["beta"]
sigma = params["sigma"]
with pyro.plate("data", N):
pyro.sample('obs', dist.Normal(beta[0] + beta[1] * x, sigma), obs=y)
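# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original file):
# a single stochastic forward pass on synthetic data of the shapes the model expects.
if __name__ == "__main__":
    N = 20
    data = {"N": N, "x": torch.randn(N), "y": torch.randn(N)}
    model(data, init_params(data))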
| [
"torch.zeros",
"torch.ones"
] | 1.0.1 | jpchen/pyro-models | b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b |
1.0 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/ARM/Ch.12/radon_no_pool.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'N' in data, 'variable not found in data: key=N'
assert 'J' in data, 'variable not found in data: key=J'
assert 'county' in data, 'variable not found in data: key=county'
assert 'x' in data, 'variable not found in data: key=x'
assert 'y' in data, 'variable not found in data: key=y'
# initialize data
N = data["N"]
J = data["J"]
county = data["county"]
x = data["x"]
y = data["y"]
def init_params(data):
params = {}
return params
def model(data, params):
# initialize data
N = data["N"]
J = data["J"]
county = data["county"].long() - 1
x = data["x"]
y = data["y"]
# model block
mu_a = pyro.sample("mu_a", dist.Normal(0., 1.))
sigma_a = pyro.sample("sigma_a", dist.HalfCauchy(2.5))
with pyro.plate("J", J):
a = pyro.sample("a", dist.Normal(mu_a, sigma_a))
beta = pyro.sample("beta", dist.Normal(0., 1.))
sigma_y = pyro.sample("sigma_y", dist.HalfCauchy(2.5))
with pyro.plate("data", N):
y_hat = beta * x + a[county]
y = pyro.sample('y', dist.Normal(y_hat, sigma_y), obs=y)
| [
"torch.zeros",
"torch.ones"
] | 1.0.1 | jpchen/pyro-models | b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b |
1.0 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# model file: example-models/ARM/Ch.10/ideo_reparam.stan
import torch
import pyro
import pyro.distributions as dist
def init_vector(name, dims=None):
return pyro.sample(name, dist.Normal(torch.zeros(dims), 0.2 * torch.ones(dims)).to_event(1))
def validate_data_def(data):
assert 'N' in data, 'variable not found in data: key=N'
assert 'party' in data, 'variable not found in data: key=party'
assert 'score1' in data, 'variable not found in data: key=score1'
assert 'z1' in data, 'variable not found in data: key=z1'
assert 'z2' in data, 'variable not found in data: key=z2'
# initialize data
N = data["N"]
party = data["party"]
score1 = data["score1"]
z1 = data["z1"]
z2 = data["z2"]
def init_params(data):
params = {}
# assign init values for parameters
params["beta"] = init_vector("beta", dims=(4)) # vector
params["sigma"] = pyro.sample("sigma", dist.Uniform(0., 100.))
return params
def model(data, params):
# initialize data
N = data["N"]
party = data["party"]
score1 = data["score1"]
z1 = data["z1"]
z2 = data["z2"]
# init parameters
beta = params["beta"]
sigma = params["sigma"]
# initialize transformed parameters
# model block
with pyro.plate("data", N):
score1 = pyro.sample('score1', dist.Normal(beta[0] + beta[1] * party + beta[2] * z1 + beta[3] * z2, sigma), obs=score1)
| [
"torch.zeros",
"torch.ones"
] | 1.0.1 | jpchen/pyro-models | b9e6ae6271e6cd622fbb4d34d67c450d5a954c9b |
1.5 | """
test:
- running some numbers through two versions of sru, checking they come out the same
- saving an sru model with an older version, and loading it with the new version
"""
import torch
import sru
import pytest
import sys
EPSILON = 1e-6
ARTIFACT_DIR = 'test/regression/artifacts'
@pytest.mark.parametrize(
"sru_prev_version",
["2.3.5"]
)
def test_regression(sru_prev_version):
"""
IMPORTANT:
You need to run:
test/regression/build_artifact.sh [SRU VERSION]
and add the resulting artifact in test/regression/artifacts into github,
for each sru_prev_version you want to test
"""
torch.manual_seed(2) # so the model is initialized differently than first stage
artifact_path = f'{ARTIFACT_DIR}/{sru_prev_version}.pt'
artifact_dict = torch.load(artifact_path)
assert artifact_dict['sru.__version__'] == sru_prev_version
model = sru.SRU(**artifact_dict['sru_kwargs']).eval()
output_artifact = artifact_dict['outputs']
model.load_state_dict(artifact_dict['model_state'])
with torch.no_grad():
output_current = model(artifact_dict['inputs'])
assert len(output_artifact) == len(output_current) == 2
max_diff0 = (output_artifact[0] - output_current[0]).abs().max().item()
max_diff1 = (output_artifact[1] - output_current[1]).abs().max().item()
assert max_diff0 <= EPSILON
assert max_diff1 <= EPSILON
| [
"torch.manual_seed",
"torch.no_grad",
"torch.load"
] | 1.5.1 | visionscaper/sru | 6e0038ec675be0a37d870865f7f8fa22f1ad2254 |
1.9 | import torch
from PIL import Image
import io
def get_yolov5():
# local best.pt
model = torch.hub.load('./yolov5', 'custom', path='./model/best.pt', source='local') # local repo
model.conf = 0.5
return model
def get_image_from_bytes(binary_image, max_size=1024):
input_image = Image.open(io.BytesIO(binary_image)).convert("RGB")
width, height = input_image.size
resize_factor = min(max_size / width, max_size / height)
resized_image = input_image.resize(
(
int(input_image.width * resize_factor),
int(input_image.height * resize_factor),
)
)
return resized_image
| [
"torch.hub.load"
] | 1.9.1 | DanielChuDC/yolov5-fastapi | 27eef7d52cf72cda0c14856a745a8798d51d9383 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import pytest
import torch
import flash
from flash.core.utilities.imports import _IMAGE_AVAILABLE, _IMAGE_TESTING, _TORCHVISION_AVAILABLE, _VISSL_AVAILABLE
from flash.image import ImageClassificationData, ImageEmbedder
from tests.helpers.task_tester import TaskTester
if _TORCHVISION_AVAILABLE:
from torchvision.datasets import FakeData
else:
FakeData = object
class TestImageEmbedder(TaskTester):
task = ImageEmbedder
task_kwargs = dict(
backbone="resnet18",
)
is_testing = _IMAGE_TESTING
is_available = _IMAGE_AVAILABLE
# TODO: Resolve JIT script issues
scriptable = False
@property
def example_forward_input(self):
return torch.rand(1, 3, 64, 64)
def check_forward_output(self, output: Any):
assert isinstance(output, torch.Tensor)
assert output.shape == torch.Size([1, 512])
@pytest.mark.skipif(torch.cuda.device_count() > 1, reason="VISSL integration doesn't support multi-GPU")
@pytest.mark.skipif(not (_IMAGE_AVAILABLE and _VISSL_AVAILABLE), reason="vissl not installed.")
@pytest.mark.parametrize(
"backbone, training_strategy, head, pretraining_transform, embedding_size",
[
("resnet18", "simclr", "simclr_head", "simclr_transform", 512),
("resnet18", "barlow_twins", "barlow_twins_head", "barlow_twins_transform", 512),
("resnet18", "swav", "swav_head", "swav_transform", 512),
("vit_small_patch16_224", "simclr", "simclr_head", "simclr_transform", 384),
("vit_small_patch16_224", "barlow_twins", "barlow_twins_head", "barlow_twins_transform", 384),
],
)
def test_vissl_training(backbone, training_strategy, head, pretraining_transform, embedding_size):
datamodule = ImageClassificationData.from_datasets(
train_dataset=FakeData(16),
predict_dataset=FakeData(8),
batch_size=4,
)
embedder = ImageEmbedder(
backbone=backbone,
training_strategy=training_strategy,
head=head,
pretraining_transform=pretraining_transform,
)
trainer = flash.Trainer(
max_steps=3,
max_epochs=1,
gpus=torch.cuda.device_count(),
)
trainer.fit(embedder, datamodule=datamodule)
predictions = trainer.predict(embedder, datamodule=datamodule)
for prediction_batch in predictions:
for prediction in prediction_batch:
assert prediction.size(0) == embedding_size
@pytest.mark.skipif(not (_IMAGE_AVAILABLE and _VISSL_AVAILABLE), reason="vissl not installed.")
@pytest.mark.parametrize(
"backbone, training_strategy, head, pretraining_transform, expected_exception",
[
("resnet18", "simclr", "simclr_head", None, ValueError),
("resnet18", "simclr", None, "simclr_transform", KeyError),
],
)
def test_vissl_training_with_wrong_arguments(
backbone, training_strategy, head, pretraining_transform, expected_exception
):
with pytest.raises(expected_exception):
ImageEmbedder(
backbone=backbone,
training_strategy=training_strategy,
head=head,
pretraining_transform=pretraining_transform,
)
@pytest.mark.skipif(not _IMAGE_TESTING, reason="torch vision not installed.")
@pytest.mark.parametrize(
"backbone, embedding_size",
[
("resnet18", 512),
("vit_small_patch16_224", 384),
],
)
def test_only_embedding(backbone, embedding_size):
datamodule = ImageClassificationData.from_datasets(
predict_dataset=FakeData(8),
batch_size=4,
transform_kwargs=dict(image_size=(224, 224)),
)
embedder = ImageEmbedder(backbone=backbone)
trainer = flash.Trainer()
predictions = trainer.predict(embedder, datamodule=datamodule)
for prediction_batch in predictions:
for prediction in prediction_batch:
assert prediction.size(0) == embedding_size
@pytest.mark.skipif(not _IMAGE_TESTING, reason="torch vision not installed.")
def test_not_implemented_steps():
embedder = ImageEmbedder(backbone="resnet18")
with pytest.raises(NotImplementedError):
embedder.training_step([], 0)
with pytest.raises(NotImplementedError):
embedder.validation_step([], 0)
with pytest.raises(NotImplementedError):
embedder.test_step([], 0)
| [
"torch.Size",
"torch.rand",
"torch.cuda.device_count"
] | 1.7.1 | ar90n/lightning-flash | 61e1a2d3b72f8fbbffe6ace14fb5b5bb35c5f131 |
1.0 | import torch
from torch import nn
from visdialch.utils import DynamicRNN
class DiscriminativeDecoder(nn.Module):
def __init__(self, config, vocabulary):
super().__init__()
self.config = config
self.word_embed = nn.Embedding(
len(vocabulary),
config["word_embedding_size"],
padding_idx=vocabulary.PAD_INDEX,
)
self.option_rnn = nn.LSTM(
config["word_embedding_size"],
config["lstm_hidden_size"],
config["lstm_num_layers"],
batch_first=True,
dropout=config["dropout"],
)
# Options are variable length padded sequences, use DynamicRNN.
self.option_rnn = DynamicRNN(self.option_rnn)
def forward(self, encoder_output, batch):
"""Given `encoder_output` + candidate option sequences, predict a score
for each option sequence.
Parameters
----------
encoder_output: torch.Tensor
Output from the encoder through its forward pass.
(batch_size, num_rounds, lstm_hidden_size)
"""
options = batch["opt"]
batch_size, num_rounds, num_options, max_sequence_length = (
options.size()
)
options = options.view(
batch_size * num_rounds * num_options, max_sequence_length
)
options_length = batch["opt_len"]
options_length = options_length.view(
batch_size * num_rounds * num_options
)
# Pick options with non-zero length (relevant for test split).
nonzero_options_length_indices = options_length.nonzero().squeeze()
nonzero_options_length = options_length[nonzero_options_length_indices]
nonzero_options = options[nonzero_options_length_indices]
# shape: (batch_size * num_rounds * num_options, max_sequence_length,
# word_embedding_size)
# FOR TEST SPLIT, shape: (batch_size * 1, num_options,
# max_sequence_length, word_embedding_size)
nonzero_options_embed = self.word_embed(nonzero_options)
# shape: (batch_size * num_rounds * num_options, lstm_hidden_size)
# FOR TEST SPLIT, shape: (batch_size * 1, num_options,
# lstm_hidden_size)
_, (nonzero_options_embed, _) = self.option_rnn(
nonzero_options_embed, nonzero_options_length
)
options_embed = torch.zeros(
batch_size * num_rounds * num_options,
nonzero_options_embed.size(-1),
device=nonzero_options_embed.device,
)
options_embed[nonzero_options_length_indices] = nonzero_options_embed
# Repeat encoder output for every option.
# shape: (batch_size, num_rounds, num_options, max_sequence_length)
encoder_output = encoder_output.unsqueeze(2).repeat(
1, 1, num_options, 1
)
# Shape now same as `options`, can calculate dot product similarity.
# shape: (batch_size * num_rounds * num_options, lstm_hidden_state)
encoder_output = encoder_output.view(
batch_size * num_rounds * num_options,
self.config["lstm_hidden_size"],
)
# shape: (batch_size * num_rounds * num_options)
scores = torch.sum(options_embed * encoder_output, 1)
# shape: (batch_size, num_rounds, num_options)
scores = scores.view(batch_size, num_rounds, num_options)
return scores
| [
"torch.nn.LSTM",
"torch.sum"
] | 1.0.0 | shubhamagarwal92/visdial-challenge-starter-pytorch | 474ceb338b5f5dbed8236fc59212a4debcb40576 |
1.6 | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import copy
import gzip
import json
import lzma
import os
import re
import shutil
import sys
import tarfile
import tempfile
import time
import urllib
from contextlib import closing, contextmanager
from dataclasses import dataclass
from functools import partial
from hashlib import sha256
from pathlib import Path
from typing import Dict, Optional, Union
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import numpy as np
import posixpath
import pyarrow as pa
import requests
from tqdm.auto import tqdm
from .. import __version__, config
from .filelock import FileLock
from .logging import WARNING, get_logger
logger = get_logger(__name__) # pylint: disable=invalid-name
INCOMPLETE_SUFFIX = ".incomplete"
def init_hf_modules(hf_modules_cache: Optional[Union[Path, str]] = None) -> str:
"""
Add hf_modules_cache to the python path.
By default hf_modules_cache='~/.cache/huggingface/modules'.
It can also be set with the environment variable HF_MODULES_CACHE.
This is used to add modules such as `datasets_modules`
"""
hf_modules_cache = hf_modules_cache if hf_modules_cache is not None else config.HF_MODULES_CACHE
hf_modules_cache = str(hf_modules_cache)
if hf_modules_cache not in sys.path:
sys.path.append(hf_modules_cache)
os.makedirs(hf_modules_cache, exist_ok=True)
if not os.path.exists(os.path.join(hf_modules_cache, "__init__.py")):
with open(os.path.join(hf_modules_cache, "__init__.py"), "w"):
pass
return hf_modules_cache
@contextmanager
def temp_seed(seed: int, set_pytorch=False, set_tensorflow=False):
"""Temporarily set the random seed. This works for python numpy, pytorch and tensorflow."""
np_state = np.random.get_state()
np.random.seed(seed)
if set_pytorch and config.TORCH_AVAILABLE:
import torch
torch_state = torch.random.get_rng_state()
torch.random.manual_seed(seed)
if torch.cuda.is_available():
torch_cuda_states = torch.cuda.get_rng_state_all()
torch.cuda.manual_seed_all(seed)
if set_tensorflow and config.TF_AVAILABLE:
import tensorflow as tf
from tensorflow.python import context as tfpycontext
tf_state = tf.random.get_global_generator()
temp_gen = tf.random.Generator.from_seed(seed)
tf.random.set_global_generator(temp_gen)
if not tf.executing_eagerly():
raise ValueError("Setting random seed for TensorFlow is only available in eager mode")
tf_context = tfpycontext.context() # eager mode context
tf_seed = tf_context._seed
tf_rng_initialized = hasattr(tf_context, "_rng")
if tf_rng_initialized:
tf_rng = tf_context._rng
tf_context._set_global_seed(seed)
try:
yield
finally:
np.random.set_state(np_state)
if set_pytorch and config.TORCH_AVAILABLE:
torch.random.set_rng_state(torch_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state_all(torch_cuda_states)
if set_tensorflow and config.TF_AVAILABLE:
tf.random.set_global_generator(tf_state)
tf_context._seed = tf_seed
if tf_rng_initialized:
tf_context._rng = tf_rng
else:
delattr(tf_context, "_rng")
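# Illustrative usage (not part of the original module): draws inside the block are
# reproducible, and the previous RNG states are restored on exit, e.g.
#     with temp_seed(42, set_pytorch=True):
#         noise = np.random.rand(3)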
def is_remote_url(url_or_filename: str) -> bool:
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3", "gs", "hdfs", "ftp")
def is_local_path(url_or_filename: str) -> bool:
# On unix the scheme of a local path is empty (for both absolute and relative),
# while on windows the scheme is the drive name (ex: "c") for absolute paths.
# for details on the windows behavior, see https://bugs.python.org/issue42215
return urlparse(url_or_filename).scheme == "" or os.path.ismount(urlparse(url_or_filename).scheme + ":/")
def is_relative_path(url_or_filename: str) -> bool:
return urlparse(url_or_filename).scheme == "" and not os.path.isabs(url_or_filename)
def hf_bucket_url(identifier: str, filename: str, use_cdn=False, dataset=True) -> str:
if dataset:
endpoint = config.CLOUDFRONT_DATASETS_DISTRIB_PREFIX if use_cdn else config.S3_DATASETS_BUCKET_PREFIX
else:
endpoint = config.CLOUDFRONT_METRICS_DISTRIB_PREFIX if use_cdn else config.S3_METRICS_BUCKET_PREFIX
return "/".join((endpoint, identifier, filename))
def head_hf_s3(
identifier: str, filename: str, use_cdn=False, dataset=True, max_retries=0
) -> Union[requests.Response, Exception]:
try:
return http_head(
hf_bucket_url(identifier=identifier, filename=filename, use_cdn=use_cdn, dataset=dataset),
max_retries=max_retries,
)
except Exception as e:
return e
def hf_github_url(path: str, name: str, dataset=True, version: Optional[str] = None) -> str:
from .. import SCRIPTS_VERSION
version = version or os.getenv("HF_SCRIPTS_VERSION", SCRIPTS_VERSION)
if dataset:
return config.REPO_DATASETS_URL.format(version=version, path=path, name=name)
else:
return config.REPO_METRICS_URL.format(version=version, path=path, name=name)
def hf_hub_url(path: str, name: str, version: Optional[str] = None) -> str:
version = version or config.HUB_DEFAULT_VERSION
return config.HUB_DATASETS_URL.format(path=path, name=name, version=version)
def url_or_path_join(base_name: str, *pathnames: str) -> str:
if is_remote_url(base_name):
return posixpath.join(base_name, *pathnames)
else:
return Path(base_name).joinpath(*pathnames).as_posix()
def url_or_path_parent(url_or_path: str) -> str:
if is_remote_url(url_or_path):
return url_or_path[: url_or_path.rindex("/")]
else:
return os.path.dirname(url_or_path)
def hash_url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name
so that TF 2.0 can identify it as a HDF5 file
(see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380)
"""
url_bytes = url.encode("utf-8")
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode("utf-8")
etag_hash = sha256(etag_bytes)
filename += "." + etag_hash.hexdigest()
if url.endswith(".py"):
filename += ".py"
return filename
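# Illustrative example (hash values are schematic, not real digests):
#     hash_url_to_filename("https://example.com/train.py", etag="abc")
#     -> "<sha256 of url>.<sha256 of etag>.py"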
@dataclass
class DownloadConfig:
"""Configuration for our cached path manager
Args:
cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-download the file even if it's already cached in the cache dir.
        resume_download: if True, resume the download if an incompletely received file is found.
user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed
file in a folder along the archive.
force_extract: if True when extract_compressed_file is True and the archive was already extracted,
            re-extract the archive and override the folder where it was extracted.
max_retries: the number of times to retry an HTTP request if it fails. Defaults to 1.
use_auth_token (Optional ``Union[str, bool]``): Optional string or boolean to use as Bearer token
for remote files on the Datasets Hub. If True, will get token from ~/.huggingface.
"""
cache_dir: Optional[Union[str, Path]] = None
force_download: bool = False
resume_download: bool = False
local_files_only: bool = False
proxies: Optional[Dict] = None
user_agent: Optional[str] = None
extract_compressed_file: bool = False
force_extract: bool = False
use_etag: bool = True
num_proc: Optional[int] = None
max_retries: int = 1
use_auth_token: Optional[Union[str, bool]] = None
def copy(self) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
def cached_path(
url_or_filename,
download_config=None,
**download_kwargs,
) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
ValueError: if it couldn't parse the url or filename correctly
requests.exceptions.ConnectionError: in case of internet connection issue
"""
if download_config is None:
download_config = DownloadConfig(**download_kwargs)
cache_dir = download_config.cache_dir or os.path.join(config.HF_DATASETS_CACHE, "downloads")
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=download_config.force_download,
proxies=download_config.proxies,
resume_download=download_config.resume_download,
user_agent=download_config.user_agent,
local_files_only=download_config.local_files_only,
use_etag=download_config.use_etag,
max_retries=download_config.max_retries,
use_auth_token=download_config.use_auth_token,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif is_local_path(url_or_filename):
# File, but it doesn't exist.
raise FileNotFoundError("Local file {} doesn't exist".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
if download_config.extract_compressed_file and output_path is not None:
if (
not is_zipfile(output_path)
and not tarfile.is_tarfile(output_path)
and not is_gzip(output_path)
and not is_xz(output_path)
and not is_rarfile(output_path)
):
return output_path
# Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
abs_output_path = os.path.abspath(output_path)
output_path_extracted = os.path.join(cache_dir, "extracted", hash_url_to_filename(abs_output_path))
if (
os.path.isdir(output_path_extracted)
and os.listdir(output_path_extracted)
and not download_config.force_extract
) or (os.path.isfile(output_path_extracted) and not download_config.force_extract):
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted, exist_ok=True)
if tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
elif is_gzip(output_path):
os.rmdir(output_path_extracted)
with gzip.open(output_path, "rb") as gzip_file:
with open(output_path_extracted, "wb") as extracted_file:
shutil.copyfileobj(gzip_file, extracted_file)
elif is_zipfile(output_path): # put zip file to the last, b/c it is possible wrongly detected as zip
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif is_xz(output_path):
os.rmdir(output_path_extracted)
with lzma.open(output_path) as compressed_file:
with open(output_path_extracted, "wb") as extracted_file:
shutil.copyfileobj(compressed_file, extracted_file)
elif is_rarfile(output_path):
if config.RARFILE_AVAILABLE:
import rarfile
rf = rarfile.RarFile(output_path)
rf.extractall(output_path_extracted)
rf.close()
else:
raise EnvironmentError("Please pip install rarfile")
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
return output_path_extracted
return output_path
def get_datasets_user_agent(user_agent: Optional[Union[str, dict]] = None) -> str:
ua = "datasets/{}; python/{}".format(__version__, config.PY_VERSION)
ua += "; pyarrow/{}".format(pa.__version__)
if config.TORCH_AVAILABLE:
ua += "; torch/{}".format(config.TORCH_VERSION)
if config.TF_AVAILABLE:
ua += "; tensorflow/{}".format(config.TF_VERSION)
if config.BEAM_AVAILABLE:
ua += "; apache_beam/{}".format(config.BEAM_VERSION)
if isinstance(user_agent, dict):
ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
elif isinstance(user_agent, str):
ua += "; " + user_agent
return ua
def get_authentication_headers_for_url(url: str, use_auth_token: Optional[Union[str, bool]] = None) -> dict:
"""Handle the HF authentication"""
headers = {}
if url.startswith("https://huggingface.co/"):
token = None
if isinstance(use_auth_token, str):
token = use_auth_token
elif bool(use_auth_token):
from huggingface_hub import hf_api
token = hf_api.HfFolder.get_token()
if token:
headers["authorization"] = "Bearer {}".format(token)
return headers
def _request_with_retry(
verb: str, url: str, max_retries: int = 0, base_wait_time: float = 0.5, max_wait_time: float = 2, **params
) -> requests.Response:
"""Wrapper around requests to retry in case it fails with a ConnectTimeout, with exponential backoff
Args:
verb (str): HTTP verb, such as 'GET' or 'HEAD'
        url (str): The URL of the resource to fetch
max_retries (int): Maximum number of retries, defaults to 0 (no retries)
base_wait_time (float): Duration (in seconds) to wait before retrying the first time. Wait time between
retries then grows exponentially, capped by max_wait_time.
max_wait_time (float): Maximum amount of time between two retries, in seconds
**params: Params to pass to `requests.request`
"""
tries, success = 0, False
while not success:
tries += 1
try:
response = requests.request(verb.upper(), url, **params)
success = True
except requests.exceptions.ConnectTimeout as err:
if tries > max_retries:
raise err
else:
                logger.info(f"{verb} request to {url} timed out, retrying... [{tries}/{max_retries}]")
                sleep_time = min(max_wait_time, base_wait_time * 2 ** (tries - 1))  # Exponential backoff
time.sleep(sleep_time)
return response
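# Illustrative retry schedule (assuming base_wait_time=0.5 and max_wait_time=2):
# successive waits of 0.5s, 1.0s, 2.0s, 2.0s, ... between attempts, i.e. exponential
# growth capped at max_wait_time.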
def ftp_head(url, timeout=2.0):
try:
with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
r.read(1)
except Exception:
return False
return True
def ftp_get(url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, timeout=2.0):
try:
logger.info(f"Getting through FTP {url} into {temp_file.name}")
with closing(urllib.request.urlopen(url, timeout=timeout)) as r:
shutil.copyfileobj(r, temp_file)
except urllib.error.URLError as e:
raise ConnectionError(e)
def http_get(url, temp_file, proxies=None, resume_size=0, headers=None, cookies=None, max_retries=0):
headers = copy.deepcopy(headers) or {}
headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
if resume_size > 0:
headers["Range"] = "bytes=%d-" % (resume_size,)
response = _request_with_retry(
verb="GET", url=url, stream=True, proxies=proxies, headers=headers, cookies=cookies, max_retries=max_retries
)
if response.status_code == 416: # Range not satisfiable
return
content_length = response.headers.get("Content-Length")
total = resume_size + int(content_length) if content_length is not None else None
not_verbose = bool(logger.getEffectiveLevel() > WARNING)
progress = tqdm(
unit="B",
unit_scale=True,
total=total,
initial=resume_size,
desc="Downloading",
disable=not_verbose,
)
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def http_head(
url, proxies=None, headers=None, cookies=None, allow_redirects=True, timeout=10, max_retries=0
) -> requests.Response:
headers = copy.deepcopy(headers) or {}
headers["user-agent"] = get_datasets_user_agent(user_agent=headers.get("user-agent"))
response = _request_with_retry(
verb="HEAD",
url=url,
proxies=proxies,
headers=headers,
cookies=cookies,
allow_redirects=allow_redirects,
timeout=timeout,
max_retries=max_retries,
)
return response
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=10,
resume_download=False,
user_agent=None,
local_files_only=False,
use_etag=True,
max_retries=0,
use_auth_token=None,
) -> str:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
Local path (string)
Raises:
FileNotFoundError: in case of non-recoverable file
(non-existent or no cache on disk)
ConnectionError: in case of unreachable url
and no cache on disk
"""
if cache_dir is None:
cache_dir = config.HF_DATASETS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
original_url = url # Some parameters may be added
connected = False
response = None
cookies = None
etag = None
    # Try a first time to find the file on the local file system without eTag (None)
# if we don't ask for 'force_download' then we spare a request
filename = hash_url_to_filename(original_url, etag=None)
cache_path = os.path.join(cache_dir, filename)
if os.path.exists(cache_path) and not force_download and not use_etag:
return cache_path
# Prepare headers for authentication
headers = get_authentication_headers_for_url(url, use_auth_token=use_auth_token)
if user_agent is not None:
headers["user-agent"] = user_agent
# We don't have the file locally or we need an eTag
if not local_files_only:
if url.startswith("ftp://"):
connected = ftp_head(url)
try:
response = http_head(
url,
allow_redirects=True,
proxies=proxies,
timeout=etag_timeout,
max_retries=max_retries,
headers=headers,
)
if response.status_code == 200: # ok
etag = response.headers.get("ETag") if use_etag else None
for k, v in response.cookies.items():
# In some edge cases, we need to get a confirmation token
if k.startswith("download_warning") and "drive.google.com" in url:
url += "&confirm=" + v
cookies = response.cookies
connected = True
# In some edge cases, head request returns 400 but the connection is actually ok
elif (
(response.status_code == 400 and "firebasestorage.googleapis.com" in url)
or (response.status_code == 405 and "drive.google.com" in url)
or (
response.status_code == 403
and re.match(r"^https?://github.com/.*?/.*?/releases/download/.*?/.*?$", url)
)
):
connected = True
logger.info("Couldn't get ETag version for url {}".format(url))
except (EnvironmentError, requests.exceptions.Timeout):
# not connected
pass
# connected == False = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if not connected:
if os.path.exists(cache_path):
return cache_path
if local_files_only:
raise FileNotFoundError(
f"Cannot find the requested files in the cached path at {cache_path} and outgoing traffic has been"
" disabled. To enable file online look-ups, set 'local_files_only' to False."
)
elif response is not None and response.status_code == 404:
raise FileNotFoundError("Couldn't find file at {}".format(url))
raise ConnectionError("Couldn't reach {}".format(url))
# Try a second time
filename = hash_url_to_filename(original_url, etag)
cache_path = os.path.join(cache_dir, filename)
if os.path.exists(cache_path) and not force_download:
return cache_path
# From now on, connected is True.
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path, "a+b") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("ftp://"):
ftp_get(url, temp_file, proxies=proxies, resume_size=resume_size, headers=headers, cookies=cookies)
else:
http_get(
url,
temp_file,
proxies=proxies,
resume_size=resume_size,
headers=headers,
cookies=cookies,
max_retries=max_retries,
)
logger.info("storing %s in cache at %s", url, cache_path)
shutil.move(temp_file.name, cache_path)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w", encoding="utf-8") as meta_file:
json.dump(meta, meta_file)
return cache_path
def is_gzip(path: str) -> bool:
"""from https://stackoverflow.com/a/60634210"""
with gzip.open(path, "r") as fh:
try:
fh.read(1)
return True
except OSError:
return False
def is_xz(path: str) -> bool:
"""https://tukaani.org/xz/xz-file-format-1.0.4.txt"""
with open(path, "rb") as f:
try:
header_magic_bytes = f.read(6)
except OSError:
return False
if header_magic_bytes == b"\xfd7zXZ\x00":
return True
else:
return False
def is_rarfile(path: str) -> bool:
"""https://github.com/markokr/rarfile/blob/master/rarfile.py"""
RAR_ID = b"Rar!\x1a\x07\x00"
RAR5_ID = b"Rar!\x1a\x07\x01\x00"
with open(path, "rb", 1024) as fd:
buf = fd.read(len(RAR5_ID))
if buf.startswith(RAR_ID) or buf.startswith(RAR5_ID):
return True
else:
return False
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr)
return fn
return docstring_decorator
| [
"torch.cuda.get_rng_state_all",
"torch.cuda.manual_seed_all",
"torch.cuda.set_rng_state_all",
"torch.random.get_rng_state",
"torch.random.manual_seed",
"torch.cuda.is_available",
"torch.random.set_rng_state"
] | 1.6.0 | source-data/datasets | 987df6b4e9e20fc0c92bc9df48137d170756fd7b |
1.3 | import mmcv
import numpy as np
import torch
from os import path as osp
from torch.nn import functional as F
from basicsr.data.transforms import mod_crop, totensor
def read_img_seq(path, require_mod_crop=False, scale=1):
"""Read a sequence of images from a given folder path.
Args:
path (list[str] | str): List of image paths or image folder path.
require_mod_crop (bool): Require mod crop for each image.
Default: False.
scale (int): Scale factor for mod_crop. Default: 1.
Returns:
Tensor: size (t, c, h, w), RGB, [0, 1].
"""
if isinstance(path, list):
img_paths = path
else:
img_paths = sorted([osp.join(path, v) for v in mmcv.scandir(path)])
imgs = [mmcv.imread(v).astype(np.float32) / 255. for v in img_paths]
if require_mod_crop:
imgs = [mod_crop(img, scale) for img in imgs]
imgs = totensor(imgs, bgr2rgb=True, float32=True)
imgs = torch.stack(imgs, dim=0)
return imgs
def generate_frame_indices(crt_idx,
max_frame_num,
num_frames,
padding='reflection'):
"""Generate an index list for reading `num_frames` frames from a sequence
of images.
Args:
crt_idx (int): Current center index.
max_frame_num (int): Max number of the sequence of images (from 1).
num_frames (int): Reading num_frames frames.
padding (str): Padding mode, one of
'replicate' | 'reflection' | 'reflection_circle' | 'circle'
Examples: current_idx = 0, num_frames = 5
The generated frame indices under different padding mode:
replicate: [0, 0, 0, 1, 2]
reflection: [2, 1, 0, 1, 2]
reflection_circle: [4, 3, 0, 1, 2]
circle: [3, 4, 0, 1, 2]
Returns:
list[int]: A list of indices.
"""
assert num_frames % 2 == 1, 'num_frames should be an odd number.'
assert padding in ('replicate', 'reflection', 'reflection_circle',
'circle'), f'Wrong padding mode: {padding}.'
max_frame_num = max_frame_num - 1 # start from 0
num_pad = num_frames // 2
indices = []
for i in range(crt_idx - num_pad, crt_idx + num_pad + 1):
if i < 0:
if padding == 'replicate':
pad_idx = 0
elif padding == 'reflection':
pad_idx = -i
elif padding == 'reflection_circle':
pad_idx = crt_idx + num_pad - i
else:
pad_idx = num_frames + i
elif i > max_frame_num:
if padding == 'replicate':
pad_idx = max_frame_num
elif padding == 'reflection':
pad_idx = max_frame_num * 2 - i
elif padding == 'reflection_circle':
pad_idx = (crt_idx - num_pad) - (i - max_frame_num)
else:
pad_idx = i - num_frames
else:
pad_idx = i
indices.append(pad_idx)
return indices
def paired_paths_from_lmdb(folders, keys):
"""Generate paired paths from lmdb files.
Contents of lmdb. Taking the `lq.lmdb` for example, the file structure is:
lq.lmdb
├── data.mdb
├── lock.mdb
├── meta_info.txt
The data.mdb and lock.mdb are standard lmdb files and you can refer to
https://lmdb.readthedocs.io/en/release/ for more details.
The meta_info.txt is a specified txt file to record the meta information
of our datasets. It will be automatically created when preparing
datasets by our provided dataset tools.
Each line in the txt file records
1)image name (with extension),
2)image shape,
3)compression level, separated by a white space.
Example: `baboon.png (120,125,3) 1`
We use the image name without extension as the lmdb key.
Note that we use the same key for the corresponding lq and gt images.
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
Note that this key is different from lmdb keys.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
'The len of folders should be 2 with [input_folder, gt_folder]. '
f'But got {len(folders)}')
assert len(keys) == 2, (
'The len of keys should be 2 with [input_key, gt_key]. '
f'But got {len(keys)}')
input_folder, gt_folder = folders
input_key, gt_key = keys
if not (input_folder.endswith('.lmdb') and gt_folder.endswith('.lmdb')):
raise ValueError(
f'{input_key} folder and {gt_key} folder should both in lmdb '
f'formats. But received {input_key}: {input_folder}; '
f'{gt_key}: {gt_folder}')
# ensure that the two meta_info files are the same
with open(osp.join(input_folder, 'meta_info.txt')) as fin:
input_lmdb_keys = [line.split('.')[0] for line in fin]
with open(osp.join(gt_folder, 'meta_info.txt')) as fin:
gt_lmdb_keys = [line.split('.')[0] for line in fin]
if set(input_lmdb_keys) != set(gt_lmdb_keys):
raise ValueError(
f'Keys in {input_key}_folder and {gt_key}_folder are different.')
else:
paths = []
for lmdb_key in sorted(input_lmdb_keys):
paths.append(
dict([(f'{input_key}_path', lmdb_key),
(f'{gt_key}_path', lmdb_key)]))
return paths
def paired_paths_from_meta_info_file(folders, keys, meta_info_file,
filename_tmpl):
"""Generate paired paths from an meta information file.
Each line in the meta information file contains the image names and
image shape (usually for gt), separated by a white space.
    Example of a meta information file:
```
0001_s001.png (480,480,3)
0001_s002.png (480,480,3)
```
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
meta_info_file (str): Path to the meta information file.
filename_tmpl (str): Template for each filename. Note that the
template excludes the file extension. Usually the filename_tmpl is
for files in the input folder.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
'The len of folders should be 2 with [input_folder, gt_folder]. '
f'But got {len(folders)}')
assert len(keys) == 2, (
'The len of keys should be 2 with [input_key, gt_key]. '
f'But got {len(keys)}')
input_folder, gt_folder = folders
input_key, gt_key = keys
with open(meta_info_file, 'r') as fin:
gt_names = [line.split(' ')[0] for line in fin]
paths = []
for gt_name in gt_names:
basename, ext = osp.splitext(osp.basename(gt_name))
input_name = f'{filename_tmpl.format(basename)}{ext}'
input_path = osp.join(input_folder, input_name)
gt_path = osp.join(gt_folder, gt_name)
paths.append(
dict([(f'{input_key}_path', input_path),
(f'{gt_key}_path', gt_path)]))
return paths
def paired_paths_from_folder(folders, keys, filename_tmpl):
"""Generate paired paths from folders.
Args:
folders (list[str]): A list of folder path. The order of list should
be [input_folder, gt_folder].
keys (list[str]): A list of keys identifying folders. The order should
            be consistent with folders, e.g., ['lq', 'gt'].
filename_tmpl (str): Template for each filename. Note that the
template excludes the file extension. Usually the filename_tmpl is
for files in the input folder.
Returns:
list[str]: Returned path list.
"""
assert len(folders) == 2, (
'The len of folders should be 2 with [input_folder, gt_folder]. '
f'But got {len(folders)}')
assert len(keys) == 2, (
'The len of keys should be 2 with [input_key, gt_key]. '
f'But got {len(keys)}')
input_folder, gt_folder = folders
input_key, gt_key = keys
input_paths = list(mmcv.scandir(input_folder))
gt_paths = list(mmcv.scandir(gt_folder))
assert len(input_paths) == len(gt_paths), (
f'{input_key} and {gt_key} datasets have different number of images: '
f'{len(input_paths)}, {len(gt_paths)}.')
paths = []
for gt_path in gt_paths:
basename, ext = osp.splitext(osp.basename(gt_path))
input_name = f'{filename_tmpl.format(basename)}{ext}'
input_path = osp.join(input_folder, input_name)
assert input_name in input_paths, (f'{input_name} is not in '
f'{input_key}_paths.')
gt_path = osp.join(gt_folder, gt_path)
paths.append(
dict([(f'{input_key}_path', input_path),
(f'{gt_key}_path', gt_path)]))
return paths
def generate_gaussian_kernel(kernel_size=13, sigma=1.6):
"""Generate Gaussian kernel used in `duf_downsample`.
Args:
kernel_size (int): Kernel size. Default: 13.
sigma (float): Sigma of the Gaussian kernel. Default: 1.6.
Returns:
np.array: The Gaussian kernel.
"""
from scipy.ndimage import filters as filters
kernel = np.zeros((kernel_size, kernel_size))
# set element at the middle to one, a dirac delta
kernel[kernel_size // 2, kernel_size // 2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter
return filters.gaussian_filter(kernel, sigma)
def duf_downsample(x, kernel_size=13, scale=4):
"""Downsamping with Gaussian kernel used in the DUF official code.
Args:
x (Tensor): Frames to be downsampled, with shape (b, t, c, h, w).
kernel_size (int): Kernel size. Default: 13.
scale (int): Downsampling factor. Supported scale: (2, 3, 4).
Default: 4.
Returns:
Tensor: DUF downsampled frames.
"""
assert scale in (2, 3,
4), f'Only support scale (2, 3, 4), but got {scale}.'
squeeze_flag = False
if x.ndim == 4:
squeeze_flag = True
x = x.unsqueeze(0)
b, t, c, h, w = x.size()
x = x.view(-1, 1, h, w)
pad_w, pad_h = kernel_size // 2 + scale * 2, kernel_size // 2 + scale * 2
x = F.pad(x, (pad_w, pad_w, pad_h, pad_h), 'reflect')
gaussian_filter = generate_gaussian_kernel(kernel_size, 0.4 * scale)
gaussian_filter = torch.from_numpy(gaussian_filter).type_as(x).unsqueeze(
0).unsqueeze(0)
x = F.conv2d(x, gaussian_filter, stride=scale)
x = x[:, :, 2:-2, 2:-2]
x = x.view(b, t, c, x.size(2), x.size(3))
if squeeze_flag:
x = x.squeeze(0)
return x
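# Note (added for clarity): with the reflect padding, stride-`scale` convolution and the
# final 2-pixel crop above, an input whose spatial size is divisible by `scale` comes out
# with spatial size (h // scale, w // scale).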
| [
"torch.stack",
"torch.from_numpy",
"torch.nn.functional.pad",
"torch.nn.functional.conv2d"
] | 1.3 | yicrane/BasicSR | 5924d3bc20334381798099b7e841a26b6be90a4b |
1.6 | import os
import logging
import argparse
import numpy as np
from tqdm import tqdm
import torch
from torch.serialization import default_restore_location
from seq2seq import models, utils
from seq2seq.data.dictionary import Dictionary
from seq2seq.data.dataset import Seq2SeqDataset, BatchSampler
from seq2seq.beam import BeamSearch, BeamSearchNode
def get_args():
""" Defines generation-specific hyper-parameters. """
parser = argparse.ArgumentParser('Sequence to Sequence Model')
parser.add_argument('--cuda', default=False, help='Use a GPU')
parser.add_argument('--seed', default=42, type=int, help='pseudo random number generator seed')
# Add data arguments
parser.add_argument('--data', default='assignments/03/prepared', help='path to data directory')
parser.add_argument('--dicts', required=True, help='path to directory containing source and target dictionaries')
parser.add_argument('--checkpoint-path', default='checkpoints_asg4/checkpoint_best.pt', help='path to the model file')
parser.add_argument('--batch-size', default=None, type=int, help='maximum number of sentences in a batch')
parser.add_argument('--output', default='model_translations.txt', type=str,
help='path to the output file destination')
parser.add_argument('--max-len', default=100, type=int, help='maximum length of generated sequence')
# Add beam search arguments
parser.add_argument('--beam-size', default=5, type=int, help='number of hypotheses expanded in beam search')
# alpha hyperparameter for length normalization (described as lp in https://arxiv.org/pdf/1609.08144.pdf equation 14)
parser.add_argument('--alpha', default=0.0, type=float, help='alpha for softer length normalization')
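    # For reference (added note): equation 14 of that paper defines the length penalty as
    # lp(Y) = ((5 + |Y|) ** alpha) / ((5 + 1) ** alpha); alpha = 0.0 disables normalization.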
return parser.parse_args()
def main(args):
""" Main translation function' """
# Load arguments from checkpoint
torch.manual_seed(args.seed)
state_dict = torch.load(args.checkpoint_path, map_location=lambda s, l: default_restore_location(s, 'cpu'))
args_loaded = argparse.Namespace(**{**vars(state_dict['args']), **vars(args)})
args = args_loaded
utils.init_logging(args)
# Load dictionaries
src_dict = Dictionary.load(os.path.join(args.dicts, 'dict.{:s}'.format(args.source_lang)))
logging.info('Loaded a source dictionary ({:s}) with {:d} words'.format(args.source_lang, len(src_dict)))
tgt_dict = Dictionary.load(os.path.join(args.dicts, 'dict.{:s}'.format(args.target_lang)))
logging.info('Loaded a target dictionary ({:s}) with {:d} words'.format(args.target_lang, len(tgt_dict)))
# Load dataset
test_dataset = Seq2SeqDataset(
src_file=os.path.join(args.data, 'test.{:s}'.format(args.source_lang)),
tgt_file=os.path.join(args.data, 'test.{:s}'.format(args.target_lang)),
src_dict=src_dict, tgt_dict=tgt_dict)
test_loader = torch.utils.data.DataLoader(test_dataset, num_workers=1, collate_fn=test_dataset.collater,
batch_sampler=BatchSampler(test_dataset, 9999999,
args.batch_size, 1, 0, shuffle=False,
seed=args.seed))
# Build model and criterion
model = models.build_model(args, src_dict, tgt_dict)
if args.cuda:
model = model.cuda()
model.eval()
model.load_state_dict(state_dict['model'])
logging.info('Loaded a model from checkpoint {:s}'.format(args.checkpoint_path))
progress_bar = tqdm(test_loader, desc='| Generation', leave=False)
# Iterate over the test set
all_hyps = {}
for i, sample in enumerate(progress_bar):
        # Create a beam search object for every input sentence in the batch
batch_size = sample['src_tokens'].shape[0]
searches = [BeamSearch(args.beam_size, args.max_len - 1, tgt_dict.unk_idx) for i in range(batch_size)]
with torch.no_grad():
# Compute the encoder output
encoder_out = model.encoder(sample['src_tokens'], sample['src_lengths'])
# __QUESTION 1: What is "go_slice" used for and what do its dimensions represent?
go_slice = \
torch.ones(sample['src_tokens'].shape[0], 1).fill_(tgt_dict.eos_idx).type_as(sample['src_tokens'])
if args.cuda:
go_slice = utils.move_to_cuda(go_slice)
#import pdb;pdb.set_trace()
# Compute the decoder output at the first time step
decoder_out, _ = model.decoder(go_slice, encoder_out)
# __QUESTION 2: Why do we keep one top candidate more than the beam size?
log_probs, next_candidates = torch.topk(torch.log(torch.softmax(decoder_out, dim=2)),
args.beam_size+1, dim=-1)
# Create number of beam_size beam search nodes for every input sentence
for i in range(batch_size):
for j in range(args.beam_size):
best_candidate = next_candidates[i, :, j]
backoff_candidate = next_candidates[i, :, j+1]
best_log_p = log_probs[i, :, j]
backoff_log_p = log_probs[i, :, j+1]
next_word = torch.where(best_candidate == tgt_dict.unk_idx, backoff_candidate, best_candidate)
log_p = torch.where(best_candidate == tgt_dict.unk_idx, backoff_log_p, best_log_p)
log_p = log_p[-1]
# Store the encoder_out information for the current input sentence and beam
emb = encoder_out['src_embeddings'][:,i,:]
lstm_out = encoder_out['src_out'][0][:,i,:]
final_hidden = encoder_out['src_out'][1][:,i,:]
final_cell = encoder_out['src_out'][2][:,i,:]
try:
mask = encoder_out['src_mask'][i,:]
except TypeError:
mask = None
node = BeamSearchNode(searches[i], emb, lstm_out, final_hidden, final_cell,
mask, torch.cat((go_slice[i], next_word)), log_p, 1)
# __QUESTION 3: Why do we add the node with a negative score?
searches[i].add(-node.eval(args.alpha), node)
#import pdb;pdb.set_trace()
# Start generating further tokens until max sentence length reached
for _ in range(args.max_len-1):
# Get the current nodes to expand
nodes = [n[1] for s in searches for n in s.get_current_beams()]
if nodes == []:
break # All beams ended in EOS
# Reconstruct prev_words, encoder_out from current beam search nodes
prev_words = torch.stack([node.sequence for node in nodes])
encoder_out["src_embeddings"] = torch.stack([node.emb for node in nodes], dim=1)
lstm_out = torch.stack([node.lstm_out for node in nodes], dim=1)
final_hidden = torch.stack([node.final_hidden for node in nodes], dim=1)
final_cell = torch.stack([node.final_cell for node in nodes], dim=1)
encoder_out["src_out"] = (lstm_out, final_hidden, final_cell)
try:
encoder_out["src_mask"] = torch.stack([node.mask for node in nodes], dim=0)
except TypeError:
encoder_out["src_mask"] = None
with torch.no_grad():
# Compute the decoder output by feeding it the decoded sentence prefix
decoder_out, _ = model.decoder(prev_words, encoder_out)
# see __QUESTION 2
log_probs, next_candidates = torch.topk(torch.log(torch.softmax(decoder_out, dim=2)), args.beam_size+1, dim=-1)
# Create number of beam_size next nodes for every current node
for i in range(log_probs.shape[0]):
for j in range(args.beam_size):
best_candidate = next_candidates[i, :, j]
backoff_candidate = next_candidates[i, :, j+1]
best_log_p = log_probs[i, :, j]
backoff_log_p = log_probs[i, :, j+1]
next_word = torch.where(best_candidate == tgt_dict.unk_idx, backoff_candidate, best_candidate)
log_p = torch.where(best_candidate == tgt_dict.unk_idx, backoff_log_p, best_log_p)
log_p = log_p[-1]
next_word = torch.cat((prev_words[i][1:], next_word[-1:]))
# Get parent node and beam search object for corresponding sentence
node = nodes[i]
search = node.search
# __QUESTION 4: How are "add" and "add_final" different?
# What would happen if we did not make this distinction?
# Store the node as final if EOS is generated
if next_word[-1] == tgt_dict.eos_idx:
node = BeamSearchNode(
search, node.emb, node.lstm_out, node.final_hidden,
node.final_cell, node.mask, torch.cat((prev_words[i][0].view([1]),
next_word)), node.logp, node.length
)
search.add_final(-node.eval(args.alpha), node)
# Add the node to current nodes for next iteration
else:
node = BeamSearchNode(
search, node.emb, node.lstm_out, node.final_hidden,
node.final_cell, node.mask, torch.cat((prev_words[i][0].view([1]),
next_word)), node.logp + log_p, node.length + 1
)
search.add(-node.eval(args.alpha), node)
# #import pdb;pdb.set_trace()
# __QUESTION 5: What happens internally when we prune our beams?
# How do we know we always maintain the best sequences?
for search in searches:
search.prune()
# Segment into sentences
best_sents = torch.stack([search.get_best(n=1)[0][1].sequence[1:].cpu() for search in searches])
decoded_batch = best_sents.numpy()
#import pdb;pdb.set_trace()
output_sentences = [decoded_batch[row, :] for row in range(decoded_batch.shape[0])]
# __QUESTION 6: What is the purpose of this for loop?
temp = list()
for sent in output_sentences:
first_eos = np.where(sent == tgt_dict.eos_idx)[0]
if len(first_eos) > 0:
temp.append(sent[:first_eos[0]])
else:
temp.append(sent)
output_sentences = temp
# Convert arrays of indices into strings of words
output_sentences = [tgt_dict.string(sent) for sent in output_sentences]
for ii, sent in enumerate(output_sentences):
all_hyps[int(sample['id'].data[ii])] = sent
# Write to file
if args.output is not None:
with open(args.output, 'w') as out_file:
for sent_id in range(len(all_hyps.keys())):
out_file.write(all_hyps[sent_id] + '\n')
if __name__ == '__main__':
args = get_args()
main(args)
| [
"torch.cat",
"torch.stack",
"torch.no_grad",
"torch.softmax",
"torch.ones",
"torch.manual_seed",
"torch.where",
"torch.serialization.default_restore_location"
] | 1.6.0 | aditen/atmt | 7bd17fecc095e019c9e79ec02788e1e979d7a8e8 |
1.1 | import math
import torch
from torch.optim.optimizer import Optimizer
from .types import OptFloat, OptLossClosure, Params
__all__ = ('SGDP',)
class SGDP(Optimizer):
r"""Implements SGDP algorithm.
It has been proposed in `Slowing Down the Weight Norm Increase in
Momentum-based Optimizers`__
Arguments:
params: iterable of parameters to optimize or dicts defining
parameter groups
lr: learning rate (default: 1e-3)
momentum: momentum factor (default: 0)
dampening: dampening for momentum (default: 0)
eps: term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay: weight decay (L2 penalty) (default: 0)
        delta: threshold that determines whether a set of parameters is scale
invariant or not (default: 0.1)
wd_ratio: relative weight decay applied on scale-invariant parameters
compared to that applied on scale-variant parameters (default: 0.1)
nesterov: enables Nesterov momentum (default: False)
Example:
>>> import torch_optimizer as optim
>>> optimizer = optim.SGDP(model.parameters(), lr=0.1)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ https://arxiv.org/abs/2006.08217
Note:
Reference code: https://github.com/clovaai/AdamP
"""
def __init__(
self,
params: Params,
lr: float = 1e-3,
momentum: float = 0,
dampening: float = 0,
eps: float = 1e-8,
weight_decay: float = 0,
delta: float = 0.1,
wd_ratio: float = 0.1,
nesterov: bool = False,
) -> None:
if lr <= 0.0:
raise ValueError('Invalid learning rate: {}'.format(lr))
if eps < 0.0:
raise ValueError('Invalid epsilon value: {}'.format(eps))
if momentum < 0.0:
raise ValueError('Invalid momentum value: {}'.format(momentum))
if dampening < 0.0:
raise ValueError('Invalid dampening value: {}'.format(dampening))
if weight_decay < 0:
raise ValueError(
'Invalid weight_decay value: {}'.format(weight_decay)
)
if delta < 0:
raise ValueError('Invalid delta value: {}'.format(delta))
if wd_ratio < 0:
raise ValueError('Invalid wd_ratio value: {}'.format(wd_ratio))
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
eps=eps,
weight_decay=weight_decay,
delta=delta,
wd_ratio=wd_ratio,
nesterov=nesterov,
)
super(SGDP, self).__init__(params, defaults)
@staticmethod
def _channel_view(x):
return x.view(x.size(0), -1)
@staticmethod
def _layer_view(x):
return x.view(1, -1)
@staticmethod
def _cosine_similarity(x, y, eps, view_func):
x = view_func(x)
y = view_func(y)
x_norm = x.norm(dim=1).add_(eps)
y_norm = y.norm(dim=1).add_(eps)
dot = (x * y).sum(dim=1)
return dot.abs() / x_norm / y_norm
def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
wd = 1
expand_size = [-1] + [1] * (len(p.shape) - 1)
for view_func in [self._channel_view, self._layer_view]:
cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)
if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
p_n = p.data / view_func(p.data).norm(dim=1).view(
expand_size
).add_(eps)
perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(
expand_size
)
wd = wd_ratio
return perturb, wd
return perturb, wd
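    # Clarifying note: when the gradient is nearly orthogonal to the weights (cosine
    # similarity below delta / sqrt(dim)), the parameter is treated as scale-invariant:
    # the radial component of the update is removed and weight decay is scaled by wd_ratio.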
def step(self, closure: OptLossClosure = None) -> OptFloat:
r"""Performs a single optimization step.
Arguments:
closure: A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
# State initialization
if len(state) == 0:
state['momentum'] = torch.zeros_like(p.data)
# SGD
buf = state['momentum']
buf.mul_(momentum).add_(grad, alpha=1 - dampening)
if nesterov:
d_p = grad + momentum * buf
else:
d_p = buf
# Projection
wd_ratio = 1
if len(p.shape) > 1:
d_p, wd_ratio = self._projection(
p,
grad,
d_p,
group['delta'],
group['wd_ratio'],
group['eps'],
)
# Weight decay
if weight_decay != 0:
p.data.mul_(
1
- group['lr']
* group['weight_decay']
* wd_ratio
/ (1 - momentum)
)
# Step
p.data.add_(d_p, alpha=-group['lr'])
return loss
| [
"torch.zeros_like"
] | 1.1.0 | muupan/pytorch-optimizer | efeea8fe4d06c5f4612f1f5bc34acf0c7d7682e1 |
1.2 | import glob
import random
import time
import os
import os.path as osp
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
from torchvision.ops import nms
def mkdir_if_missing(dir):
os.makedirs(dir, exist_ok=True)
def float3(x): # format floats to 3 decimals
return float(format(x, '.3f'))
def init_seeds(seed=0):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def load_classes(path):
"""
Loads class labels at 'path'
"""
fp = open(path, 'r')
names = fp.read().split('\n')
return list(filter(None, names)) # filter removes empty strings (such as last line)
def model_info(model):
"""
Prints out a line-by-line description of a PyTorch model ending with a summary.
"""
n_p = sum(x.numel() for x in model.parameters()) # number parameters
n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients
print('\n%5s %50s %9s %12s %20s %12s %12s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))
for i, (name, p) in enumerate(model.named_parameters()):
name = name.replace('module_list.', '')
print('%5g %50s %9s %12g %20s %12.3g %12.3g' % (
i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))
print('Model Summary: %g layers, %g parameters, %g gradients\n' % (i + 1, n_p, n_g))
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
"""
Plots one bounding box on image img.
"""
tl = line_thickness or round(0.0004 * max(img.shape[0:2])) + 1 # line thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.03)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.03)
torch.nn.init.constant_(m.bias.data, 0.0)
def xyxy2xywh(x):
# Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]
# x, y are coordinates of center
# (x1, y1) and (x2, y2) are coordinates of bottom left and top right respectively.
y = torch.zeros_like(x) if x.dtype is torch.float32 else np.zeros_like(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
# x, y are coordinates of center
# (x1, y1) and (x2, y2) are coordinates of bottom left and top right respectively.
y = torch.zeros_like(x) if x.dtype is torch.float32 else np.zeros_like(x)
y[:, 0] = (x[:, 0] - x[:, 2] / 2) # Bottom left x
y[:, 1] = (x[:, 1] - x[:, 3] / 2) # Bottom left y
y[:, 2] = (x[:, 0] + x[:, 2] / 2) # Top right x
y[:, 3] = (x[:, 1] + x[:, 3] / 2) # Top right y
return y
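# Worked example (illustrative): a box with center (50, 50), width 20 and height 10,
# i.e. [50, 50, 20, 10] in xywh format, converts to [40, 45, 60, 55] in xyxy format.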
def scale_coords(img_size, coords, img0_shape):
# Rescale x1, y1, x2, y2 from 416 to image size
gain_w = float(img_size[0]) / img0_shape[1] # gain = old / new
gain_h = float(img_size[1]) / img0_shape[0]
gain = min(gain_w, gain_h)
pad_x = (img_size[0] - img0_shape[1] * gain) / 2 # width padding
pad_y = (img_size[1] - img0_shape[0] * gain) / 2 # height padding
coords[:, [0, 2]] -= pad_x
coords[:, [1, 3]] -= pad_y
coords[:, 0:4] /= gain
coords[:, :4] = torch.clamp(coords[:, :4], min=0)
return coords
def ap_per_class(tp, conf, pred_cls, target_cls):
""" Computes the average precision, given the recall and precision curves.
Method originally from https://github.com/rafaelpadilla/Object-Detection-Metrics.
# Arguments
tp: True positives (list).
conf: Objectness value from 0-1 (list).
pred_cls: Predicted object classes (list).
target_cls: True object classes (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# lists/pytorch to numpy
tp, conf, pred_cls, target_cls = np.array(tp), np.array(conf), np.array(pred_cls), np.array(target_cls)
# Sort by objectness
i = np.argsort(-conf)
tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
# Find unique classes
unique_classes = np.unique(np.concatenate((pred_cls, target_cls), 0))
# Create Precision-Recall curve and compute AP for each class
ap, p, r = [], [], []
for c in unique_classes:
i = pred_cls == c
n_gt = sum(target_cls == c) # Number of ground truth objects
n_p = sum(i) # Number of predicted objects
if (n_p == 0) and (n_gt == 0):
continue
elif (n_p == 0) or (n_gt == 0):
ap.append(0)
r.append(0)
p.append(0)
else:
# Accumulate FPs and TPs
fpc = np.cumsum(1 - tp[i])
tpc = np.cumsum(tp[i])
# Recall
recall_curve = tpc / (n_gt + 1e-16)
r.append(tpc[-1] / (n_gt + 1e-16))
# Precision
precision_curve = tpc / (tpc + fpc)
p.append(tpc[-1] / (tpc[-1] + fpc[-1]))
# AP from recall-precision curve
ap.append(compute_ap(recall_curve, precision_curve))
return np.array(ap), unique_classes.astype('int32'), np.array(r), np.array(p)
def compute_ap(recall, precision):
""" Computes the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
# Arguments
recall: The recall curve (list).
precision: The precision curve (list).
# Returns
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
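# --- Illustrative usage (not part of the original module) ---
# A small, hand-checkable sketch of compute_ap; the helper name `_demo_compute_ap`
# is hypothetical. With precision 1.0 up to recall 0.5 and 0.5 up to recall 1.0,
# the interpolated area under the PR curve is 0.5 * 1.0 + 0.5 * 0.5 = 0.75.
def _demo_compute_ap():
    recall = [0.5, 1.0]
    precision = [1.0, 0.5]
    ap = compute_ap(recall, precision)
    assert abs(ap - 0.75) < 1e-6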
def bbox_iou(box1, box2, x1y1x2y2=False):
"""
Returns the IoU of two bounding boxes
"""
N, M = len(box1), len(box2)
if x1y1x2y2:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
else:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
# get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1.unsqueeze(1), b2_x1)
inter_rect_y1 = torch.max(b1_y1.unsqueeze(1), b2_y1)
inter_rect_x2 = torch.min(b1_x2.unsqueeze(1), b2_x2)
inter_rect_y2 = torch.min(b1_y2.unsqueeze(1), b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1, 0) * torch.clamp(inter_rect_y2 - inter_rect_y1, 0)
# Union Area
    b1_area = ((b1_x2 - b1_x1) * (b1_y2 - b1_y1)).view(-1, 1).expand(N, M)
b2_area = ((b2_x2 - b2_x1) * (b2_y2 - b2_y1)).view(1,-1).expand(N,M)
return inter_area / (b1_area + b2_area - inter_area + 1e-16)
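# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of bbox_iou; by default boxes are given as (cx, cy, w, h).
# The helper name `_demo_bbox_iou` is hypothetical.
def _demo_bbox_iou():
    box1 = torch.tensor([[2.0, 2.0, 4.0, 4.0]])  # corners (0, 0) - (4, 4)
    box2 = torch.tensor([[3.0, 3.0, 4.0, 4.0]])  # corners (1, 1) - (5, 5)
    iou = bbox_iou(box1, box2)                   # shape (1, 1): N x M matrix
    # intersection 3 * 3 = 9, union 16 + 16 - 9 = 23 -> IoU ~ 0.391
    assert abs(iou.item() - 9.0 / 23.0) < 1e-4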
def bbox_giou(box1, box2, x1y1x2y2=False):
"""
    Returns the GIoU (Generalized IoU) of two sets of bounding boxes
"""
N, M = len(box1), len(box2)
    # First convert both box sets to (x1, y1, x2, y2) format
if x1y1x2y2:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
else:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
# get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1.unsqueeze(1), b2_x1)
inter_rect_y1 = torch.max(b1_y1.unsqueeze(1), b2_y1)
inter_rect_x2 = torch.min(b1_x2.unsqueeze(1), b2_x2)
inter_rect_y2 = torch.min(b1_y2.unsqueeze(1), b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1, 0) * torch.clamp(inter_rect_y2 - inter_rect_y1, 0)
# Union Area
    b1_area = ((b1_x2 - b1_x1) * (b1_y2 - b1_y1)).view(-1, 1).expand(N, M)
b2_area = ((b2_x2 - b2_x1) * (b2_y2 - b2_y1)).view(1,-1).expand(N,M)
union = b1_area + b2_area - inter_area + 1e-16
iou = inter_area / union
    # GIoU: penalize IoU by the relative area of the smallest enclosing (convex) box
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
c_area = cw * ch + 1e-16 # convex area
return iou - (c_area - union) / c_area # GIoU
def bbox_diou(box1, box2, x1y1x2y2=False):
"""
    Returns the DIoU (Distance IoU) of two sets of bounding boxes
"""
N, M = len(box1), len(box2)
    # First convert both box sets to (x1, y1, x2, y2) format
if x1y1x2y2:
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
else:
# Transform from center and width to exact coordinates
b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
# get the coordinates of the intersection rectangle
inter_rect_x1 = torch.max(b1_x1.unsqueeze(1), b2_x1)
inter_rect_y1 = torch.max(b1_y1.unsqueeze(1), b2_y1)
inter_rect_x2 = torch.min(b1_x2.unsqueeze(1), b2_x2)
inter_rect_y2 = torch.min(b1_y2.unsqueeze(1), b2_y2)
# Intersection area
inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1, 0) * torch.clamp(inter_rect_y2 - inter_rect_y1, 0)
# Union Area
    b1_area = ((b1_x2 - b1_x1) * (b1_y2 - b1_y1)).view(-1, 1).expand(N, M)
b2_area = ((b2_x2 - b2_x1) * (b2_y2 - b2_y1)).view(1,-1).expand(N,M)
iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
# DIoU
cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
# convex diagonal squared
c2 = cw ** 2 + ch ** 2 + 1e-16
# centerpoint distance squared
rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
    return iou - rho2 / c2  # DIoU; when used as a loss, remember DIoULoss = 1 - DIoU
def build_targets_max(target, anchor_wh, nA, nC, nGh, nGw):
"""
returns nT, nCorrect, tx, ty, tw, th, tconf, tcls
"""
nB = len(target) # number of images in batch
txy = torch.zeros(nB, nA, nGh, nGw, 2).cuda() # batch size, anchors, grid size
twh = torch.zeros(nB, nA, nGh, nGw, 2).cuda()
tconf = torch.LongTensor(nB, nA, nGh, nGw).fill_(0).cuda()
tcls = torch.ByteTensor(nB, nA, nGh, nGw, nC).fill_(0).cuda() # nC = number of classes
tid = torch.LongTensor(nB, nA, nGh, nGw, 1).fill_(-1).cuda()
for b in range(nB):
t = target[b]
t_id = t[:, 1].clone().long().cuda()
t = t[:,[0,2,3,4,5]]
nTb = len(t) # number of targets
if nTb == 0:
continue
#gxy, gwh = t[:, 1:3] * nG, t[:, 3:5] * nG
gxy, gwh = t[: , 1:3].clone() , t[:, 3:5].clone()
gxy[:, 0] = gxy[:, 0] * nGw
gxy[:, 1] = gxy[:, 1] * nGh
gwh[:, 0] = gwh[:, 0] * nGw
gwh[:, 1] = gwh[:, 1] * nGh
gi = torch.clamp(gxy[:, 0], min=0, max=nGw -1).long()
gj = torch.clamp(gxy[:, 1], min=0, max=nGh -1).long()
        # Get grid box indices and prevent overflows (e.g. index 13.01 on a 13-cell grid)
#gi, gj = torch.clamp(gxy.long(), min=0, max=nG - 1).t()
#gi, gj = gxy.long().t()
# iou of targets-anchors (using wh only)
box1 = gwh
box2 = anchor_wh.unsqueeze(1)
inter_area = torch.min(box1, box2).prod(2)
iou = inter_area / (box1.prod(1) + box2.prod(2) - inter_area + 1e-16)
# Select best iou_pred and anchor
iou_best, a = iou.max(0) # best anchor [0-2] for each target
# Select best unique target-anchor combinations
if nTb > 1:
_, iou_order = torch.sort(-iou_best) # best to worst
# Unique anchor selection
u = torch.stack((gi, gj, a), 0)[:, iou_order]
# _, first_unique = np.unique(u, axis=1, return_index=True) # first unique indices
first_unique = return_torch_unique_index(u, torch.unique(u, dim=1)) # torch alternative
i = iou_order[first_unique]
# best anchor must share significant commonality (iou) with target
i = i[iou_best[i] > 0.60] # TODO: examine arbitrary threshold
if len(i) == 0:
continue
a, gj, gi, t = a[i], gj[i], gi[i], t[i]
t_id = t_id[i]
if len(t.shape) == 1:
t = t.view(1, 5)
else:
if iou_best < 0.60:
continue
tc, gxy, gwh = t[:, 0].long(), t[:, 1:3].clone(), t[:, 3:5].clone()
gxy[:, 0] = gxy[:, 0] * nGw
gxy[:, 1] = gxy[:, 1] * nGh
gwh[:, 0] = gwh[:, 0] * nGw
gwh[:, 1] = gwh[:, 1] * nGh
# XY coordinates
txy[b, a, gj, gi] = gxy - gxy.floor()
# Width and height
twh[b, a, gj, gi] = torch.log(gwh / anchor_wh[a]) # yolo method
# twh[b, a, gj, gi] = torch.sqrt(gwh / anchor_wh[a]) / 2 # power method
# One-hot encoding of label
tcls[b, a, gj, gi, tc] = 1
tconf[b, a, gj, gi] = 1
tid[b, a, gj, gi] = t_id.unsqueeze(1)
tbox = torch.cat([txy, twh], -1)
return tconf, tbox, tid
def build_targets_thres(target, anchor_wh, nA, nC, nGh, nGw):
ID_THRESH = 0.5
FG_THRESH = 0.5
BG_THRESH = 0.4
nB = len(target) # number of images in batch
assert(len(anchor_wh)==nA)
tbox = torch.zeros(nB, nA, nGh, nGw, 4).cuda() # batch size, anchors, grid size
tconf = torch.LongTensor(nB, nA, nGh, nGw).fill_(0).cuda()
tid = torch.LongTensor(nB, nA, nGh, nGw, 1).fill_(-1).cuda()
for b in range(nB):
t = target[b]
t_id = t[:, 1].clone().long().cuda()
t = t[:,[0,2,3,4,5]]
nTb = len(t) # number of targets
if nTb == 0:
continue
gxy, gwh = t[: , 1:3].clone() , t[:, 3:5].clone()
gxy[:, 0] = gxy[:, 0] * nGw
gxy[:, 1] = gxy[:, 1] * nGh
gwh[:, 0] = gwh[:, 0] * nGw
gwh[:, 1] = gwh[:, 1] * nGh
gxy[:, 0] = torch.clamp(gxy[:, 0], min=0, max=nGw -1)
gxy[:, 1] = torch.clamp(gxy[:, 1], min=0, max=nGh -1)
gt_boxes = torch.cat([gxy, gwh], dim=1) # Shape Ngx4 (xc, yc, w, h)
anchor_mesh = generate_anchor(nGh, nGw, anchor_wh)
        anchor_list = anchor_mesh.permute(0, 2, 3, 1).contiguous().view(-1, 4)  # Shape (nA x nGh x nGw) x 4
#print(anchor_list.shape, gt_boxes.shape)
iou_pdist = bbox_iou(anchor_list, gt_boxes) # Shape (nA x nGh x nGw) x Ng
iou_max, max_gt_index = torch.max(iou_pdist, dim=1) # Shape (nA x nGh x nGw), both
iou_map = iou_max.view(nA, nGh, nGw)
gt_index_map = max_gt_index.view(nA, nGh, nGw)
#nms_map = pooling_nms(iou_map, 3)
id_index = iou_map > ID_THRESH
fg_index = iou_map > FG_THRESH
bg_index = iou_map < BG_THRESH
ign_index = (iou_map < FG_THRESH) * (iou_map > BG_THRESH)
tconf[b][fg_index] = 1
tconf[b][bg_index] = 0
tconf[b][ign_index] = -1
gt_index = gt_index_map[fg_index]
gt_box_list = gt_boxes[gt_index]
gt_id_list = t_id[gt_index_map[id_index]]
#print(gt_index.shape, gt_index_map[id_index].shape, gt_boxes.shape)
if torch.sum(fg_index) > 0:
tid[b][id_index] = gt_id_list.unsqueeze(1)
fg_anchor_list = anchor_list.view(nA, nGh, nGw, 4)[fg_index]
delta_target = encode_delta(gt_box_list, fg_anchor_list)
tbox[b][fg_index] = delta_target
return tconf, tbox, tid
def generate_anchor(nGh, nGw, anchor_wh):
nA = len(anchor_wh)
yy, xx =torch.meshgrid(torch.arange(nGh), torch.arange(nGw))
xx, yy = xx.cuda(), yy.cuda()
mesh = torch.stack([xx, yy], dim=0) # Shape 2, nGh, nGw
mesh = mesh.unsqueeze(0).repeat(nA,1,1,1).float() # Shape nA x 2 x nGh x nGw
anchor_offset_mesh = anchor_wh.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, nGh,nGw) # Shape nA x 2 x nGh x nGw
anchor_mesh = torch.cat([mesh, anchor_offset_mesh], dim=1) # Shape nA x 4 x nGh x nGw
return anchor_mesh
def encode_delta(gt_box_list, fg_anchor_list):
px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:,1], \
fg_anchor_list[:, 2], fg_anchor_list[:,3]
gx, gy, gw, gh = gt_box_list[:, 0], gt_box_list[:, 1], \
gt_box_list[:, 2], gt_box_list[:, 3]
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw/pw)
dh = torch.log(gh/ph)
return torch.stack([dx, dy, dw, dh], dim=1)
def decode_delta(delta, fg_anchor_list):
px, py, pw, ph = fg_anchor_list[:, 0], fg_anchor_list[:,1], \
fg_anchor_list[:, 2], fg_anchor_list[:,3]
dx, dy, dw, dh = delta[:, 0], delta[:, 1], delta[:, 2], delta[:, 3]
gx = pw * dx + px
gy = ph * dy + py
gw = pw * torch.exp(dw)
gh = ph * torch.exp(dh)
return torch.stack([gx, gy, gw, gh], dim=1)
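# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of the (dx, dy, log dw, log dh) delta encoding round trip used
# for the box regression targets; the helper name `_demo_delta_roundtrip` is
# hypothetical. Both boxes and anchors are given as (xc, yc, w, h).
def _demo_delta_roundtrip():
    anchors = torch.tensor([[10.0, 10.0, 8.0, 6.0]])
    gt_boxes = torch.tensor([[12.0, 9.0, 10.0, 5.0]])
    delta = encode_delta(gt_boxes, anchors)
    recovered = decode_delta(delta, anchors)
    assert torch.allclose(recovered, gt_boxes, atol=1e-5)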
def decode_delta_map(delta_map, anchors):
'''
:param: delta_map, shape (nB, nA, nGh, nGw, 4)
:param: anchors, shape (nA,4)
'''
nB, nA, nGh, nGw, _ = delta_map.shape
anchor_mesh = generate_anchor(nGh, nGw, anchors)
    anchor_mesh = anchor_mesh.permute(0, 2, 3, 1).contiguous()  # Shape nA x nGh x nGw x 4
anchor_mesh = anchor_mesh.unsqueeze(0).repeat(nB,1,1,1,1)
pred_list = decode_delta(delta_map.view(-1,4), anchor_mesh.view(-1,4))
pred_map = pred_list.view(nB, nA, nGh, nGw, 4)
return pred_map
def pooling_nms(heatmap, kernel=1):
pad = (kernel -1 ) // 2
hmax = F.max_pool2d(heatmap, (kernel, kernel), stride=1, padding=pad)
keep = (hmax == heatmap).float()
return keep * heatmap
def non_max_suppression(prediction, conf_thres=0.5, nms_thres=0.4, method='standard'):
"""
Removes detections with lower object confidence score than 'conf_thres'
Non-Maximum Suppression to further filter detections.
Returns detections with shape:
(x1, y1, x2, y2, object_conf, class_score, class_pred)
Args:
prediction,
conf_thres,
nms_thres,
method = 'standard' or 'fast'
"""
output = [None for _ in range(len(prediction))]
for image_i, pred in enumerate(prediction):
# Filter out confidence scores below threshold
# Get score and class with highest confidence
v = pred[:, 4] > conf_thres
v = v.nonzero().squeeze()
if len(v.shape) == 0:
v = v.unsqueeze(0)
pred = pred[v]
# If none are remaining => process next image
nP = pred.shape[0]
if not nP:
continue
# From (center x, center y, width, height) to (x1, y1, x2, y2)
pred[:, :4] = xywh2xyxy(pred[:, :4])
# Non-maximum suppression
if method == 'standard':
nms_indices = nms(pred[:, :4], pred[:, 4], nms_thres)
elif method == 'fast':
nms_indices = fast_nms(pred[:, :4], pred[:, 4], iou_thres=nms_thres, conf_thres=conf_thres)
else:
raise ValueError('Invalid NMS type!')
det_max = pred[nms_indices]
if len(det_max) > 0:
# Add max detections to outputs
output[image_i] = det_max if output[image_i] is None else torch.cat((output[image_i], det_max))
return output
def fast_nms(boxes, scores, iou_thres:float=0.5, top_k:int=200, second_threshold:bool=False, conf_thres:float=0.5):
'''
Vectorized, approximated, fast NMS, adopted from YOLACT:
https://github.com/dbolya/yolact/blob/master/layers/functions/detection.py
The original version is for multi-class NMS, here we simplify the code for single-class NMS
'''
scores, idx = scores.sort(0, descending=True)
idx = idx[:top_k].contiguous()
scores = scores[:top_k]
num_dets = idx.size()
boxes = boxes[idx, :]
iou = jaccard(boxes, boxes)
iou.triu_(diagonal=1)
iou_max, _ = iou.max(dim=0)
keep = (iou_max <= iou_thres)
if second_threshold:
        keep = keep & (scores > conf_thres)
return idx[keep]
@torch.jit.script
def intersect(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [n,A,4].
box_b: (tensor) bounding boxes, Shape: [n,B,4].
Return:
(tensor) intersection area, Shape: [n,A,B].
"""
n = box_a.size(0)
A = box_a.size(1)
B = box_b.size(1)
max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),
box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))
min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),
box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
return inter[:, :, :, 0] * inter[:, :, :, 1]
def jaccard(box_a, box_b, iscrowd:bool=False):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes. If iscrowd=True, put the crowd in box_b.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
use_batch = True
if box_a.dim() == 2:
use_batch = False
box_a = box_a[None, ...]
box_b = box_b[None, ...]
inter = intersect(box_a, box_b)
area_a = ((box_a[:, :, 2]-box_a[:, :, 0]) *
(box_a[:, :, 3]-box_a[:, :, 1])).unsqueeze(2).expand_as(inter) # [A,B]
area_b = ((box_b[:, :, 2]-box_b[:, :, 0]) *
(box_b[:, :, 3]-box_b[:, :, 1])).unsqueeze(1).expand_as(inter) # [A,B]
union = area_a + area_b - inter
out = inter / area_a if iscrowd else inter / union
return out if use_batch else out.squeeze(0)
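# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of fast_nms on three (x1, y1, x2, y2) boxes; the helper name
# `_demo_fast_nms` is hypothetical. Boxes 0 and 1 overlap heavily (IoU ~ 0.81),
# so only the higher-scoring one survives; box 2 is disjoint and is kept.
def _demo_fast_nms():
    boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                          [1.0, 1.0, 10.0, 10.0],
                          [20.0, 20.0, 30.0, 30.0]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep = fast_nms(boxes, scores, iou_thres=0.5)
    assert keep.tolist() == [0, 2]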
def return_torch_unique_index(u, uv):
n = uv.shape[1] # number of columns
first_unique = torch.zeros(n, device=u.device).long()
for j in range(n):
first_unique[j] = (uv[:, j:j + 1] == u).all(0).nonzero()[0]
return first_unique
def strip_optimizer_from_checkpoint(filename='weights/best.pt'):
# Strip optimizer from *.pt files for lighter files (reduced by 2/3 size)
a = torch.load(filename, map_location='cpu')
a['optimizer'] = []
torch.save(a, filename.replace('.pt', '_lite.pt'))
def plot_results():
"""
Plot YOLO training results from the file 'results.txt'
Example of what this is trying to plot can be found at:
https://user-images.githubusercontent.com/26833433/63258271-fe9d5300-c27b-11e9-9a15-95038daf4438.png
An example results.txt file:
import os; os.system('wget https://storage.googleapis.com/ultralytics/yolov3/results_v1.txt')
"""
plt.figure(figsize=(14, 7))
s = ['X + Y', 'Width + Height', 'Confidence', 'Classification', 'Total Loss', 'mAP', 'Recall', 'Precision']
files = sorted(glob.glob('results*.txt'))
for f in files:
results = np.loadtxt(f, usecols=[2, 3, 4, 5, 6, 9, 10, 11]).T # column 11 is mAP
x = range(1, results.shape[1])
for i in range(8):
plt.subplot(2, 4, i + 1)
plt.plot(x, results[i, x], marker='.', label=f)
plt.title(s[i])
if i == 0:
plt.legend()
| [
"torch.cat",
"torch.cuda.manual_seed",
"torch.stack",
"torch.LongTensor",
"torch.load",
"torch.exp",
"torch.sum",
"torch.nn.init.constant_",
"torch.ByteTensor",
"torch.manual_seed",
"torch.nn.init.normal_",
"torch.zeros_like",
"torch.zeros",
"torch.cuda.manual_seed_all",
"torch.min",
"torch.max",
"torch.clamp",
"torch.log",
"torch.sort",
"torch.unique",
"torch.arange",
"torch.nn.functional.max_pool2d"
] | 1.2.0 | tjulitianyi1997/Towards-Realtime-MOT-1 | cda44e18022fd90411cb7f8911cb7ed9fd9b140d |
1.3 | import numbers
import warnings
from typing import Any, Callable, List, Optional, Union
import torch
import torch.nn as nn
from torch.optim import Optimizer
from ignite.contrib.handlers.base_logger import (
BaseLogger,
BaseOptimizerParamsHandler,
BaseOutputHandler,
BaseWeightsHistHandler,
BaseWeightsScalarHandler,
)
from ignite.engine import Engine, EventEnum
from ignite.handlers import global_step_from_engine
__all__ = [
"TensorboardLogger",
"OptimizerParamsHandler",
"OutputHandler",
"WeightsScalarHandler",
"WeightsHistHandler",
"GradsScalarHandler",
"GradsHistHandler",
"global_step_from_engine",
]
class TensorboardLogger(BaseLogger):
"""
TensorBoard handler to log metrics, model/optimizer parameters, gradients during the training and validation.
By default, this class favors `tensorboardX <https://github.com/lanpa/tensorboardX>`_ package if installed:
.. code-block:: bash
pip install tensorboardX
otherwise, it falls back to using
`PyTorch's SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_
(>=v1.2.0).
Args:
*args: Positional arguments accepted from
`SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_.
**kwargs: Keyword arguments accepted from
`SummaryWriter
<https://pytorch.org/docs/stable/tensorboard.html>`_.
For example, `log_dir` to setup path to the directory where to log.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log training loss at each iteration
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
# Attach the logger to the evaluator on the training dataset and log NLL, Accuracy metrics after each epoch
# We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer` instead of `train_evaluator`.
tb_logger.attach_output_handler(
train_evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="training",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch of the
# `trainer` instead of `evaluator`.
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
                global_step_transform=global_step_from_engine(trainer),
)
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
tb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer,
param_name='lr' # optional
)
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model)
)
# Attach the logger to the trainer to log model's weights as a histogram after each epoch
tb_logger.attach(
trainer,
event_name=Events.EPOCH_COMPLETED,
log_handler=WeightsHistHandler(model)
)
# Attach the logger to the trainer to log model's gradients norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model)
)
# Attach the logger to the trainer to log model's gradients as a histogram after each epoch
tb_logger.attach(
trainer,
event_name=Events.EPOCH_COMPLETED,
log_handler=GradsHistHandler(model)
)
        # We need to close the logger when we are done
tb_logger.close()
It is also possible to use the logger as context manager:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
with TensorboardLogger(log_dir="experiments/tb_logs") as tb_logger:
trainer = Engine(update_fn)
# Attach the logger to the trainer to log training loss at each iteration
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED,
tag="training",
output_transform=lambda loss: {"loss": loss}
)
"""
def __init__(self, *args: Any, **kwargs: Any):
try:
from tensorboardX import SummaryWriter
except ImportError:
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise RuntimeError(
"This contrib module requires either tensorboardX or torch >= 1.2.0. "
"You may install tensorboardX with command: \n pip install tensorboardX \n"
"or upgrade PyTorch using your package manager of choice (pip or conda)."
)
self.writer = SummaryWriter(*args, **kwargs)
def close(self):
self.writer.close()
def _create_output_handler(self, *args: Any, **kwargs: Any):
return OutputHandler(*args, **kwargs)
def _create_opt_params_handler(self, *args: Any, **kwargs: Any):
return OptimizerParamsHandler(*args, **kwargs)
class OutputHandler(BaseOutputHandler):
"""Helper handler to log engine's output and/or metrics
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# each epoch. We setup `global_step_transform=global_step_from_engine(trainer)` to take the epoch
# of the `trainer`:
tb_logger.attach(
evaluator,
log_handler=OutputHandler(
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
),
event_name=Events.EPOCH_COMPLETED
)
# or equivalently
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metric_names=["nll", "accuracy"],
global_step_transform=global_step_from_engine(trainer)
)
Another example, where model is evaluated every 500 iterations:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
@trainer.on(Events.ITERATION_COMPLETED(every=500))
def evaluate(engine):
evaluator.run(validation_set, max_epochs=1)
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
def global_step_transform(*args, **kwargs):
return trainer.state.iteration
# Attach the logger to the evaluator on the validation dataset and log NLL, Accuracy metrics after
# every 500 iterations. Since evaluator engine does not have access to the training iteration, we
# provide a global_step_transform to return the trainer.state.iteration for the global_step, each time
# evaluator metrics are plotted on Tensorboard.
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag="validation",
metrics=["nll", "accuracy"],
global_step_transform=global_step_transform
)
Args:
tag (str): common title for all produced plots. For example, "training"
metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
metrics.
output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
For example, `output_transform = lambda output: output`
This function can also return a dictionary, e.g `{"loss": loss1, "another_loss": loss2}` to label the plot
with corresponding keys.
global_step_transform (callable, optional): global step transform function to output a desired global step.
Input of the function is `(engine, event_name)`. Output of function should be an integer.
Default is None, global_step based on attached engine. If provided,
uses function output as global_step. To setup global step from another engine, please use
:meth:`~ignite.contrib.handlers.tensorboard_logger.global_step_from_engine`.
Note:
Example of `global_step_transform`:
.. code-block:: python
def global_step_transform(engine, event_name):
return engine.state.get_event_attrib_value(event_name)
"""
def __init__(
self,
tag: str,
metric_names: Optional[List[str]] = None,
output_transform: Optional[Callable] = None,
global_step_transform: Optional[Callable] = None,
):
super(OutputHandler, self).__init__(tag, metric_names, output_transform, global_step_transform)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, EventEnum]):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'OutputHandler' works only with TensorboardLogger")
metrics = self._setup_output_metrics(engine)
global_step = self.global_step_transform(engine, event_name)
if not isinstance(global_step, int):
raise TypeError(
"global_step must be int, got {}."
" Please check the output of global_step_transform.".format(type(global_step))
)
for key, value in metrics.items():
if isinstance(value, numbers.Number) or isinstance(value, torch.Tensor) and value.ndimension() == 0:
logger.writer.add_scalar("{}/{}".format(self.tag, key), value, global_step)
elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
for i, v in enumerate(value):
logger.writer.add_scalar("{}/{}/{}".format(self.tag, key, i), v.item(), global_step)
else:
warnings.warn("TensorboardLogger output_handler can not log metrics value type {}".format(type(value)))
class OptimizerParamsHandler(BaseOptimizerParamsHandler):
"""Helper handler to log optimizer parameters
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log optimizer's parameters, e.g. learning rate at each iteration
tb_logger.attach(
trainer,
log_handler=OptimizerParamsHandler(optimizer),
event_name=Events.ITERATION_STARTED
)
# or equivalently
tb_logger.attach_opt_params_handler(
trainer,
event_name=Events.ITERATION_STARTED,
optimizer=optimizer
)
Args:
optimizer (torch.optim.Optimizer or object): torch optimizer or any object with attribute ``param_groups``
as a sequence.
param_name (str): parameter name
tag (str, optional): common title for all produced plots. For example, "generator"
"""
def __init__(self, optimizer: Optimizer, param_name: str = "lr", tag: Optional[str] = None):
super(OptimizerParamsHandler, self).__init__(optimizer, param_name, tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, EventEnum]):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler OptimizerParamsHandler works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = "{}/".format(self.tag) if self.tag else ""
params = {
"{}{}/group_{}".format(tag_prefix, self.param_name, i): float(param_group[self.param_name])
for i, param_group in enumerate(self.optimizer.param_groups)
}
for k, v in params.items():
logger.writer.add_scalar(k, v, global_step)
class WeightsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's weights as scalars.
    Handler iterates over named parameters of the model, applies a reduction function to each parameter
    to produce a scalar, and then logs the scalar.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
# Attach the logger to the trainer to log model's weights norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsScalarHandler(model, reduction=torch.norm)
)
Args:
model (torch.nn.Module): model to log weights
reduction (callable): function to reduce parameters into scalar
tag (str, optional): common title for all produced plots. For example, "generator"
"""
def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
super(WeightsScalarHandler, self).__init__(model, reduction, tag=tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, EventEnum]):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'WeightsScalarHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = "{}/".format(self.tag) if self.tag else ""
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace(".", "/")
logger.writer.add_scalar(
"{}weights_{}/{}".format(tag_prefix, self.reduction.__name__, name), self.reduction(p.data), global_step
)
class WeightsHistHandler(BaseWeightsHistHandler):
"""Helper handler to log model's weights as histograms.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
        # Attach the logger to the trainer to log model's weights as histograms after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=WeightsHistHandler(model)
)
Args:
model (torch.nn.Module): model to log weights
tag (str, optional): common title for all produced plots. For example, "generator"
"""
def __init__(self, model: nn.Module, tag: Optional[str] = None):
super(WeightsHistHandler, self).__init__(model, tag=tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, EventEnum]):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'WeightsHistHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = "{}/".format(self.tag) if self.tag else ""
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace(".", "/")
logger.writer.add_histogram(
tag="{}weights/{}".format(tag_prefix, name),
values=p.data.detach().cpu().numpy(),
global_step=global_step,
)
class GradsScalarHandler(BaseWeightsScalarHandler):
"""Helper handler to log model's gradients as scalars.
    Handler iterates over the gradients of named parameters of the model, applies a reduction function to
    each gradient to produce a scalar, and then logs the scalar.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
        # Attach the logger to the trainer to log model's gradients norm after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsScalarHandler(model, reduction=torch.norm)
)
Args:
model (torch.nn.Module): model to log weights
reduction (callable): function to reduce parameters into scalar
tag (str, optional): common title for all produced plots. For example, "generator"
"""
def __init__(self, model: nn.Module, reduction: Callable = torch.norm, tag: Optional[str] = None):
super(GradsScalarHandler, self).__init__(model, reduction, tag=tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, EventEnum]):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'GradsScalarHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = "{}/".format(self.tag) if self.tag else ""
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace(".", "/")
logger.writer.add_scalar(
"{}grads_{}/{}".format(tag_prefix, self.reduction.__name__, name), self.reduction(p.grad), global_step
)
class GradsHistHandler(BaseWeightsHistHandler):
"""Helper handler to log model's gradients as histograms.
Examples:
.. code-block:: python
from ignite.contrib.handlers.tensorboard_logger import *
# Create a logger
tb_logger = TensorboardLogger(log_dir="experiments/tb_logs")
        # Attach the logger to the trainer to log model's gradients as histograms after each iteration
tb_logger.attach(
trainer,
event_name=Events.ITERATION_COMPLETED,
log_handler=GradsHistHandler(model)
)
Args:
model (torch.nn.Module): model to log weights
tag (str, optional): common title for all produced plots. For example, "generator"
"""
def __init__(self, model: nn.Module, tag: Optional[str] = None):
super(GradsHistHandler, self).__init__(model, tag=tag)
def __call__(self, engine: Engine, logger: TensorboardLogger, event_name: Union[str, EventEnum]):
if not isinstance(logger, TensorboardLogger):
raise RuntimeError("Handler 'GradsHistHandler' works only with TensorboardLogger")
global_step = engine.state.get_event_attrib_value(event_name)
tag_prefix = "{}/".format(self.tag) if self.tag else ""
for name, p in self.model.named_parameters():
if p.grad is None:
continue
name = name.replace(".", "/")
logger.writer.add_histogram(
tag="{}grads/{}".format(tag_prefix, name), values=p.grad.detach().cpu().numpy(), global_step=global_step
)
| [
"torch.utils.tensorboard.SummaryWriter"
] | 1.3 | ibotdotout/ignite | d2da93d2ff0aab139218e578dee1d0dc8c6481db |
1.9 | from typing import Any
from typing import Callable
import torch
from torch import Tensor
from torch.testing import assert_close
def assert_monotone(
fn: Callable[[Tensor], Tensor],
x1: Tensor,
x2: Tensor,
increasing: bool = False,
allow_equal: bool = False,
) -> None:
"""Assert ``fn`` is monotone function for :math:`x_1 > x_2`:
.. math::
f(x_1) > f(x_2)
Args:
fn (callable[[torch.Tensor], torch.Tensor]): Function to test.
x1 (torch.Tensor): The first tensor.
x2 (torch.Tensor): The second tensor.
        increasing (bool, default=False): Whether to test increasing monotonicity.
            Only False (i.e. decreasing monotonicity) is supported.
allow_equal (bool, default=False): Whether to allow :math:`f(x_1) = f(x_2)`.
Only False is supported.
Returns:
None
Raises:
AssertionError: If fn is not monotone.
"""
assert not increasing, "not supported"
assert not allow_equal, "not supported"
assert (x1 > x2).all(), "x1 > x2 must be satisfied"
assert (fn(x1) < fn(x2)).all()
def assert_convex(
fn: Callable[[Tensor], Tensor], x1: Tensor, x2: Tensor, alpha: float
) -> None:
"""Assert convexity.
.. math::
f(\\alpha * x_1 + (1 - \\alpha) * x_2) \\leq
\\alpha * f(x_1) + (1 - \\alpha) * f(x_2)
Args:
fn (callable[[torch.Tensor], torch.Tensor]): Function to test.
It should return a tensor with a single element.
x1 (torch.Tensor): The first tensor.
x2 (torch.Tensor): The second tensor.
alpha (float): The parameter alpha.
"""
y = fn(alpha * x1 + (1 - alpha) * x2)
y1 = fn(x1)
y2 = fn(x2)
assert y <= alpha * y1 + (1 - alpha) * y2
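# --- Illustrative usage (not part of the original module) ---
# A minimal sketch checking convexity of a simple quadratic; the helper name
# `_demo_assert_convex` is hypothetical.
def _demo_assert_convex() -> None:
    def fn(x: Tensor) -> Tensor:
        return (x ** 2).mean()
    x1 = torch.full((3,), 2.0)
    x2 = torch.full((3,), -1.0)
    assert_convex(fn, x1, x2, alpha=0.3)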
def assert_cash_invariant(
fn: Callable[[Tensor], Tensor], x: Tensor, c: float, **kwargs: Any
) -> None:
"""Assert cash invariance.
.. math::
f(x + c) = f(x) - c
Args:
fn (callable): Function to test cash invariance.
x (torch.Tensor): The input tensor.
c (float): The parameter c.
"""
assert_close(fn(x + c), fn(x) - c, **kwargs)
def assert_cash_equivalent(
fn: Callable[[Tensor], Tensor], x: Tensor, c: float, **kwargs: Any
) -> None:
"""Assert ``c`` is the cash equivalent of ``x``.
``fn(x) = fn(torch.full_like(x, c))``
Args:
fn (callable): Function to test cash equivalent.
x (torch.Tensor): The input tensor.
c (float): The parameter c.
**kwargs: Keyword arguments to pass to ``assert_close``.
"""
assert_close(fn(x), fn(torch.full_like(x, c)), **kwargs)
| [
"torch.full_like"
] | 1.9.0 | YieldLabs/pfhedge | a5ba9d054a8418cb8b27bb67d81a8fc8fb83ef57 |
1.8 | import sys
import time
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.active_tamer.policies import SACPolicy
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.human_feedback import HumanFeedback
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.online_learning_interface import FeedbackInterface
from stable_baselines3.common.type_aliases import (
GymEnv,
MaybeCallback,
RolloutReturn,
Schedule,
TrainFreq,
)
from stable_baselines3.common.utils import polyak_update, should_collect_more_steps
from stable_baselines3.common.vec_env import VecEnv
class ProbeTamerOptim(OffPolicyAlgorithm):
"""
    TAMER + Soft Actor-Critic (SAC): probe the user for feedback every time the abstract state changes.
Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor,
This implementation borrows code from original implementation (https://github.com/haarnoja/sac)
from OpenAI Spinning Up (https://github.com/openai/spinningup), from the softlearning repo
(https://github.com/rail-berkeley/softlearning/)
and from Stable Baselines (https://github.com/hill-a/stable-baselines)
Paper: https://arxiv.org/abs/1801.01290
Introduction to SAC: https://spinningup.openai.com/en/latest/algorithms/sac.html
Note: we use double q target and not value target as discussed
in https://github.com/hill-a/stable-baselines/issues/270
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: learning rate for adam optimizer,
the same learning rate will be used for all networks (Q-Values, Actor and Value function)
it can be a function of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
:param tau: the soft update coefficient ("Polyak update", between 0 and 1)
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
like ``(5, "step")`` or ``(2, "episode")``.
:param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
Set to ``-1`` means to do as many gradient steps as steps done in the environment
during the rollout.
:param action_noise: the action noise type (None by default), this can help
for hard exploration problem. Cf common.noise for the different action noise type.
:param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
If ``None``, it will be automatically selected.
:param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param ent_coef: Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off.
Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value)
:param target_update_interval: update the target network every ``target_network_update_freq``
gradient steps.
:param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``)
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling
during the warm up phase (before learning starts)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[SACPolicy]],
env: Union[GymEnv, str],
abstract_state,
trained_model = None,
learning_rate: Union[float, Schedule] = 3e-4,
buffer_size: int = 1_000_000, # 1e6
learning_starts: int = 5,
batch_size: int = 2,
tau: float = 0.005,
gamma: float = 0.99,
train_freq: Union[int, Tuple[int, str]] = 1,
gradient_steps: int = 1,
action_noise: Optional[ActionNoise] = None,
replay_buffer_class: Optional[ReplayBuffer] = None,
replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
optimize_memory_usage: bool = False,
ent_coef: Union[str, float] = "auto",
target_update_interval: int = 1,
target_entropy: Union[str, float] = "auto",
use_sde: bool = False,
sde_sample_freq: int = -1,
use_sde_at_warmup: bool = False,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
save_every: int = 2500,
_init_setup_model: bool = True,
model_name: str = "ProbeTamerOptim",
render: bool = False,
):
super(ProbeTamerOptim, self).__init__(
policy,
env,
SACPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
action_noise,
replay_buffer_class=replay_buffer_class,
replay_buffer_kwargs=replay_buffer_kwargs,
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
use_sde_at_warmup=use_sde_at_warmup,
optimize_memory_usage=optimize_memory_usage,
save_every=save_every,
supported_action_spaces=(gym.spaces.Box),
model_name=model_name,
render=render,
)
self.target_entropy = target_entropy
self.log_ent_coef = None # type: Optional[th.Tensor]
# Entropy coefficient / Entropy temperature
# Inverse of the reward scale
self.ent_coef = ent_coef
self.target_update_interval = target_update_interval
self.ent_coef_optimizer = None
self.abstract_state = abstract_state
self.curr_abstract_state = 0
self.trained_model = trained_model
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(ProbeTamerOptim, self)._setup_model()
self._create_aliases()
# Target entropy is used when learning the entropy coefficient
if self.target_entropy == "auto":
# automatically set target entropy if needed
self.target_entropy = -np.prod(self.env.action_space.shape).astype(
np.float32
)
else:
# Force conversion
# this will also throw an error for unexpected string
self.target_entropy = float(self.target_entropy)
# The entropy coefficient or entropy can be learned automatically
# see Automating Entropy Adjustment for Maximum Entropy RL section
# of https://arxiv.org/abs/1812.05905
if isinstance(self.ent_coef, str) and self.ent_coef.startswith("auto"):
# Default initial value of ent_coef when learned
init_value = 1.0
if "_" in self.ent_coef:
init_value = float(self.ent_coef.split("_")[1])
assert (
init_value > 0.0
), "The initial value of ent_coef must be greater than 0"
# Note: we optimize the log of the entropy coeff which is slightly different from the paper
# as discussed in https://github.com/rail-berkeley/softlearning/issues/37
self.log_ent_coef = th.log(
th.ones(1, device=self.device) * init_value
).requires_grad_(True)
self.ent_coef_optimizer = th.optim.Adam(
[self.log_ent_coef], lr=self.lr_schedule(1)
)
else:
# Force conversion to float
# this will throw an error if a malformed string (different from 'auto')
# is passed
self.ent_coef_tensor = th.tensor(float(self.ent_coef)).to(self.device)
def _create_aliases(self) -> None:
self.actor = self.policy.actor
self.critic = self.policy.critic
self.critic_target = self.policy.critic_target
def train(
self,
gradient_steps: int,
human_feedback_gui: HumanFeedback = None,
batch_size: int = 64,
) -> None:
# Switch to train mode (this affects batch norm / dropout)
self.policy.set_training_mode(True)
# Update optimizers learning rate
optimizers = [self.actor.optimizer, self.critic.optimizer]
if self.ent_coef_optimizer is not None:
optimizers += [self.ent_coef_optimizer]
# Update learning rate according to lr schedule
self._update_learning_rate(optimizers)
ent_coef_losses, ent_coefs = [], []
actor_losses, critic_losses = [], []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(
batch_size, env=self._vec_normalize_env
)
# We need to sample because `log_std` may have changed between two gradient steps
if self.use_sde:
self.actor.reset_noise()
# Action by the current actor for the sampled state
actions_pi, log_prob = self.actor.action_log_prob(replay_data.observations)
log_prob = log_prob.reshape(-1, 1)
ent_coef_loss = None
if self.ent_coef_optimizer is not None:
# Important: detach the variable from the graph
# so we don't change it with other losses
# see https://github.com/rail-berkeley/softlearning/issues/60
ent_coef = th.exp(self.log_ent_coef.detach())
ent_coef_loss = -(
self.log_ent_coef * (log_prob + self.target_entropy).detach()
).mean()
ent_coef_losses.append(ent_coef_loss.item())
else:
ent_coef = self.ent_coef_tensor
ent_coefs.append(ent_coef.item())
# Optimize entropy coefficient, also called
# entropy temperature or alpha in the paper
if ent_coef_loss is not None:
self.ent_coef_optimizer.zero_grad()
ent_coef_loss.backward()
self.ent_coef_optimizer.step()
with th.no_grad():
# Select action according to policy
next_actions, next_log_prob = self.actor.action_log_prob(
replay_data.next_observations
)
# Compute the next Q values: min over all critics targets
next_q_values = th.cat(
self.critic_target(replay_data.next_observations, next_actions),
dim=1,
)
next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
# add entropy term
next_q_values = next_q_values - ent_coef * next_log_prob.reshape(-1, 1)
# td error + entropy term
target_q_values = (
replay_data.rewards
+ (1 - replay_data.dones) * self.gamma * next_q_values
)
# Get current Q-values estimates for each critic network
# using action from the replay buffer
current_q_values = self.critic(
replay_data.observations, replay_data.actions
)
# Compute critic loss
critic_loss = 0.5 * sum(
[
F.mse_loss(current_q, target_q_values)
for current_q in current_q_values
]
)
critic_losses.append(critic_loss.item())
# Optimize the critic
self.critic.optimizer.zero_grad()
critic_loss.backward()
self.critic.optimizer.step()
# Compute actor loss
# Alternative: actor_loss = th.mean(log_prob - qf1_pi)
# Mean over all critic networks
q_values_pi = th.cat(
self.critic.forward(replay_data.observations, actions_pi), dim=1
)
min_qf_pi, _ = th.min(q_values_pi, dim=1, keepdim=True)
actor_loss = (ent_coef * log_prob - min_qf_pi).mean()
actor_losses.append(actor_loss.item())
if human_feedback_gui:
human_feedback_gui.updateLoss(actor_loss.item())
# Optimize the actor
self.actor.optimizer.zero_grad()
actor_loss.backward()
self.actor.optimizer.step()
# Update target networks
if gradient_step % self.target_update_interval == 0:
polyak_update(
self.critic.parameters(), self.critic_target.parameters(), self.tau
)
self._n_updates += gradient_steps
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/ent_coef", np.mean(ent_coefs))
self.logger.record("train/actor_loss", np.mean(actor_losses))
self.logger.record("train/critic_loss", np.mean(critic_losses))
if len(ent_coef_losses) > 0:
self.logger.record("train/ent_coef_loss", np.mean(ent_coef_losses))
def learn(
self,
total_timesteps: int,
human_feedback_gui: FeedbackInterface,
human_feedback: HumanFeedback,
callback: MaybeCallback = None,
log_interval: int = 1,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "ProbeTamerOptim",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(ProbeTamerOptim, self).learn(
total_timesteps=total_timesteps,
human_feedback_gui=human_feedback_gui,
human_feedback=human_feedback,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(ProbeTamerOptim, self)._excluded_save_params() + [
"actor",
"critic",
"critic_target",
]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
if self.ent_coef_optimizer is not None:
saved_pytorch_variables = ["log_ent_coef"]
state_dicts.append("ent_coef_optimizer")
else:
saved_pytorch_variables = ["ent_coef_tensor"]
return state_dicts, saved_pytorch_variables
def collect_rollouts(
self,
env: VecEnv,
callback: BaseCallback,
train_freq: TrainFreq,
replay_buffer: ReplayBuffer,
action_noise: Optional[ActionNoise] = None,
learning_starts: int = 0,
log_interval: Optional[int] = None,
human_feedback: HumanFeedback = None,
human_feedback_gui: FeedbackInterface = None,
) -> RolloutReturn:
"""
Collect experiences and store them into a ``ReplayBuffer``.
:param env: The training environment
:param callback: Callback that will be called at each step
(and at the beginning and end of the rollout)
:param train_freq: How much experience to collect
by doing rollouts of current policy.
Either ``TrainFreq(<n>, TrainFrequencyUnit.STEP)``
or ``TrainFreq(<n>, TrainFrequencyUnit.EPISODE)``
with ``<n>`` being an integer greater than 0.
:param action_noise: Action noise that will be used for exploration
Required for deterministic policy (e.g. TD3). This can also be used
in addition to the stochastic policy for SAC.
:param learning_starts: Number of steps before learning for the warm-up phase.
:param replay_buffer:
:param log_interval: Log data every ``log_interval`` episodes
:return:
"""
# Switch to eval mode (this affects batch norm / dropout)
self.policy.set_training_mode(False)
episode_rewards, total_timesteps = [], []
num_collected_steps, num_collected_episodes = 0, 0
assert isinstance(env, VecEnv), "You must pass a VecEnv"
        assert env.num_envs == 1, "ProbeTamerOptim only supports a single environment"
assert train_freq.frequency > 0, "Should at least collect one step or episode."
if self.use_sde:
self.actor.reset_noise()
callback.on_rollout_start()
continue_training = True
while should_collect_more_steps(
train_freq, num_collected_steps, num_collected_episodes
):
done = False
episode_reward, episode_timesteps = 0.0, 0
while not done:
if (
self.use_sde
and self.sde_sample_freq > 0
and num_collected_steps % self.sde_sample_freq == 0
):
# Sample a new noise matrix
self.actor.reset_noise()
# Select action randomly or according to policy
action, buffer_action = self._sample_action(
learning_starts, action_noise
)
# Rescale and perform action
if self.render:
env.render()
new_obs, reward, done, infos = env.step(action)
next_abstract_state = self.abstract_state(new_obs)
simulated_human_reward = 0
human_feedback_received = False
if self.curr_abstract_state != next_abstract_state:
self.curr_abstract_state = next_abstract_state
critic_rewards = self.trained_model.critic.forward(
th.from_numpy(self._last_obs).to(self.device),
th.from_numpy(action).to(self.device),
)
simulated_human_reward, _ = th.min(
th.cat(critic_rewards, dim=1), dim=1, keepdim=True
)
simulated_human_reward = simulated_human_reward.cpu()[0][0]
human_feedback_received = True
self.num_timesteps += 1
episode_timesteps += 1
num_collected_steps += 1
# Give access to local variables
callback.update_locals(locals())
# Only stop training if return value is False, not when it is None.
if callback.on_step() is False:
return RolloutReturn(
0.0,
num_collected_steps,
num_collected_episodes,
continue_training=False,
)
# Retrieve reward and episode length if using Monitor wrapper
self._update_info_buffer(infos, done)
if human_feedback_received:
self.apply_uniform_credit_assignment(
replay_buffer, float(simulated_human_reward), 40, 0
)
# reward[0] += simulated_human_reward
episode_reward += reward[0]
# Store data in replay buffer (normalized action and unnormalized observation)
self._store_transition(
replay_buffer, buffer_action, new_obs, reward, done, infos
)
if human_feedback_gui:
human_feedback_gui.updateReward(episode_reward)
self._update_current_progress_remaining(
self.num_timesteps, self._total_timesteps
)
# For DQN, check if the target network should be updated
# and update the exploration schedule
# For SAC/TD3, the update is done as the same time as the gradient update
# see https://github.com/hill-a/stable-baselines/issues/900
self._on_step()
if not should_collect_more_steps(
train_freq, num_collected_steps, num_collected_episodes
):
break
if done:
num_collected_episodes += 1
self._episode_num += 1
episode_rewards.append(episode_reward)
total_timesteps.append(episode_timesteps)
if action_noise is not None:
action_noise.reset()
# Log training infos
if log_interval is not None and self._episode_num % log_interval == 0:
self._dump_logs()
mean_reward = np.mean(episode_rewards) if num_collected_episodes > 0 else 0.0
callback.on_rollout_end()
return RolloutReturn(
mean_reward, num_collected_steps, num_collected_episodes, continue_training
)
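# --- Illustrative usage (not part of the original class) ---
# A minimal sketch of the soft ("Polyak") target update performed in train() above;
# the helper name `_demo_polyak_update` is hypothetical and the plain tensors stand
# in for the critic / critic_target parameters.
def _demo_polyak_update() -> None:
    online_params = [th.ones(2)]
    target_params = [th.zeros(2)]
    polyak_update(online_params, target_params, tau=0.005)
    # target <- tau * online + (1 - tau) * target = 0.005 everywhere
    assert th.allclose(target_params[0], th.full((2,), 0.005))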
| [
"torch.cat",
"torch.min",
"torch.no_grad",
"torch.ones",
"torch.nn.functional.mse_loss",
"torch.from_numpy"
] | 1.8.1 | corgiTrax/stable-baselines3 | 95dc5e30ab6a21225da4b718953e83870e4f146b |
1.1 | import torch
class FGM(object):
"""
refer to the paper: FGM(Fast Gradient Method)
Adversarial training methods for semi-supervised text classification
"""
def __init__(self, model):
self.model = model
self.backup = {}
    def attack(self, epsilon=1e-6, emb_name="embedding"):
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
self.backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = epsilon * param.grad / norm
param.data.add_(r_at)
    def restore(self, emb_name="embedding"):
        for name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in name:
assert name in self.backup
param.data = self.backup[name]
self.backup = {}
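# Editor's note: the docstring above references the FGM paper but the file does not show how the
# class is meant to be driven. The helper below is an illustrative sketch of the usual pattern
# (clean backward -> attack -> adversarial backward -> restore); `model`, `optimizer`, `loss_fn`
# and the batch tensors are placeholders, not names from the original project.
def fgm_training_step(model, optimizer, loss_fn, inputs, labels):
    """Illustrative sketch of one FGM adversarial training step (not part of the original file)."""
    fgm = FGM(model)
    loss = loss_fn(model(inputs), labels)
    loss.backward()                    # gradients w.r.t. the clean batch
    fgm.attack()                       # perturb the embedding weights along grad / ||grad||
    loss_adv = loss_fn(model(inputs), labels)
    loss_adv.backward()                # accumulate gradients from the perturbed pass
    fgm.restore()                      # put the original embedding weights back
    optimizer.step()
    optimizer.zero_grad()
    return loss.item(), loss_adv.item()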
class PGD(object):
"""
refer to the paper: PGD(Projected Gradient Descent)
Towards Deep Learning Models Resistant to Adversarial Attacks
"""
def __init__(self, model):
self.model = model
self.emb_backup = {}
self.grad_backup = {}
def attack(self, epsilon=1., alpha=0.3, emb_name="embedding", is_first_attack=False):
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
if is_first_attack:
self.emb_backup[name] = param.data.clone()
norm = torch.norm(param.grad)
if norm != 0 and not torch.isnan(norm):
r_at = alpha * param.grad / norm
param.data.add_(r_at)
param.data = self.project(name, param.data, epsilon)
def restore(self, emb_name="embedding"):
for name, param in self.model.named_parameters():
if param.requires_grad and emb_name in name:
assert name in self.emb_backup
param.data = self.emb_backup[name]
self.emb_backup = {}
def project(self, param_name, param_data, epsilon):
r = param_data - self.emb_backup[param_name]
if torch.norm(r) > epsilon:
r = epsilon * r / torch.norm(r)
return self.emb_backup[param_name] + r
def backup_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
self.grad_backup[name] = param.grad.clone()
def restore_grad(self):
for name, param in self.model.named_parameters():
if param.requires_grad:
param.grad = self.grad_backup[name]
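# Editor's note: an illustrative sketch of the usual multi-step PGD training pattern described in
# the docstring above (K inner ascent steps projected into the epsilon ball); `model`, `optimizer`,
# `loss_fn` and the batch tensors are placeholders, not names from the original project.
def pgd_training_step(model, optimizer, loss_fn, inputs, labels, k=3):
    """Illustrative sketch of one PGD adversarial training step (not part of the original file)."""
    pgd = PGD(model)
    loss = loss_fn(model(inputs), labels)
    loss.backward()                            # gradients w.r.t. the clean batch
    pgd.backup_grad()                          # keep the clean gradients
    for t in range(k):
        pgd.attack(is_first_attack=(t == 0))   # ascend on the embedding, projected into the epsilon ball
        if t != k - 1:
            model.zero_grad()
        else:
            pgd.restore_grad()                 # restore the clean gradients before the last backward
        loss_adv = loss_fn(model(inputs), labels)
        loss_adv.backward()
    pgd.restore()                              # put the original embedding weights back
    optimizer.step()
    optimizer.zero_grad()
    return loss.item(), loss_adv.item()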
| [
"torch.norm",
"torch.isnan"
] | 1.1 | zhengmidon/jingju_baseline | 4c6ef80ac14b4640efb1f81cde38df2ac35eacd2 |
1.1 | import re
import librosa
import numpy as np
import torch
from scipy.interpolate import interp1d
from sklearn.preprocessing import normalize
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
skeleton_line_pairs = [(0, 1, 'b'), (1, 2, 'darkred'), (2, 3, 'r'), (3, 4, 'orange'), (1, 5, 'darkgreen'),
(5, 6, 'limegreen'), (6, 7, 'darkseagreen')]
dir_vec_pairs = [(0, 1, 0.26), (1, 2, 0.18), (2, 3, 0.14), (1, 4, 0.22), (4, 5, 0.36),
(5, 6, 0.33), (1, 7, 0.22), (7, 8, 0.36), (8, 9, 0.33)] # adjacency and bone length
def normalize_string(s):
""" lowercase, trim, and remove non-letter characters """
s = s.lower().strip()
s = re.sub(r"([,.!?])", r" \1 ", s) # isolate some marks
s = re.sub(r"(['])", r"", s) # remove apostrophe
s = re.sub(r"[^a-zA-Z,.!?]+", r" ", s) # replace other characters with whitespace
s = re.sub(r"\s+", r" ", s).strip()
return s
def remove_tags_marks(text):
reg_expr = re.compile('<.*?>|[.,:;!?]+')
clean_text = re.sub(reg_expr, '', text)
return clean_text
def extract_melspectrogram(y, sr=16000):
melspec = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024, hop_length=512, power=2)
log_melspec = librosa.power_to_db(melspec, ref=np.max) # mels x time
log_melspec = log_melspec.astype('float16')
return log_melspec
def calc_spectrogram_length_from_motion_length(n_frames, fps):
ret = (n_frames / fps * 16000 - 1024) / 512 + 1
return int(round(ret))
def resample_pose_seq(poses, duration_in_sec, fps):
n = len(poses)
x = np.arange(0, n)
y = poses
f = interp1d(x, y, axis=0, kind='linear', fill_value='extrapolate')
expected_n = duration_in_sec * fps
x_new = np.arange(0, n, n / expected_n)
interpolated_y = f(x_new)
if hasattr(poses, 'dtype'):
interpolated_y = interpolated_y.astype(poses.dtype)
return interpolated_y
def time_stretch_for_words(words, start_time, speech_speed_rate):
for i in range(len(words)):
if words[i][1] > start_time:
words[i][1] = start_time + (words[i][1] - start_time) / speech_speed_rate
words[i][2] = start_time + (words[i][2] - start_time) / speech_speed_rate
return words
def make_audio_fixed_length(audio, expected_audio_length):
n_padding = expected_audio_length - len(audio)
if n_padding > 0:
audio = np.pad(audio, (0, n_padding), mode='symmetric')
else:
audio = audio[0:expected_audio_length]
return audio
def convert_dir_vec_to_pose(vec):
vec = np.array(vec)
if vec.shape[-1] != 3:
vec = vec.reshape(vec.shape[:-1] + (-1, 3))
if len(vec.shape) == 2:
joint_pos = np.zeros((10, 3))
for j, pair in enumerate(dir_vec_pairs):
joint_pos[pair[1]] = joint_pos[pair[0]] + pair[2] * vec[j]
elif len(vec.shape) == 3:
joint_pos = np.zeros((vec.shape[0], 10, 3))
for j, pair in enumerate(dir_vec_pairs):
joint_pos[:, pair[1]] = joint_pos[:, pair[0]] + pair[2] * vec[:, j]
elif len(vec.shape) == 4: # (batch, seq, 9, 3)
joint_pos = np.zeros((vec.shape[0], vec.shape[1], 10, 3))
for j, pair in enumerate(dir_vec_pairs):
joint_pos[:, :, pair[1]] = joint_pos[:, :, pair[0]] + pair[2] * vec[:, :, j]
else:
assert False
return joint_pos
def convert_pose_seq_to_dir_vec(pose):
if pose.shape[-1] != 3:
pose = pose.reshape(pose.shape[:-1] + (-1, 3))
if len(pose.shape) == 3:
dir_vec = np.zeros((pose.shape[0], len(dir_vec_pairs), 3))
for i, pair in enumerate(dir_vec_pairs):
dir_vec[:, i] = pose[:, pair[1]] - pose[:, pair[0]]
dir_vec[:, i, :] = normalize(dir_vec[:, i, :], axis=1) # to unit length
elif len(pose.shape) == 4: # (batch, seq, ...)
dir_vec = np.zeros((pose.shape[0], pose.shape[1], len(dir_vec_pairs), 3))
for i, pair in enumerate(dir_vec_pairs):
dir_vec[:, :, i] = pose[:, :, pair[1]] - pose[:, :, pair[0]]
for j in range(dir_vec.shape[0]): # batch
for i in range(len(dir_vec_pairs)):
dir_vec[j, :, i, :] = normalize(dir_vec[j, :, i, :], axis=1) # to unit length
else:
assert False
return dir_vec
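# Editor's note: a small illustrative round trip through the two converters above (not part of the
# original file). The input shape (seq_len, 10, 3) is an assumption matching `dir_vec_pairs`.
def _dir_vec_round_trip_example(pose_seq):
    """Convert a pose sequence to unit bone directions and back; the reconstruction is
    root-relative and uses the fixed bone lengths from `dir_vec_pairs`."""
    vecs = convert_pose_seq_to_dir_vec(pose_seq)   # (seq_len, 9, 3) unit direction vectors
    recon = convert_dir_vec_to_pose(vecs)          # (seq_len, 10, 3), joint 0 at the origin
    return vecs, recon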
| [
"torch.cuda.is_available"
] | 1.1.0 | er1ca/Gesture-Generation-from-Trimodal-Context | 6d988a7211a4d8294e1ef4b45c45ee25d12455d2 |
1.1 | import logging
import torch
import torch.nn.functional as F
loss_i = 0
def custom_loss(output, target, args, epoch):
n_element = output.numel()
    # mse (reconstruction error)
mse_loss = F.mse_loss(output, target)
mse_loss *= args.loss_regression_weight
# continuous motion
diff = [abs(output[:, n, :] - output[:, n-1, :]) for n in range(1, output.shape[1])]
cont_loss = torch.sum(torch.stack(diff)) / n_element
cont_loss *= args.loss_kld_weight
# motion variance
norm = torch.norm(output, 2, 1) # output shape (batch, seq, dim)
var_loss = -torch.sum(norm) / n_element
var_loss *= args.loss_reg_weight
loss = mse_loss + cont_loss + var_loss
# debugging code
global loss_i
if loss_i == 1000:
logging.debug('(custom loss) mse %.5f, cont %.5f, var %.5f'
% (mse_loss.item(), cont_loss.item(), var_loss.item()))
loss_i = 0
loss_i += 1
return loss
def train_iter_seq2seq(args, epoch, in_text, in_lengths, target_poses, net, optim):
# zero gradients
optim.zero_grad()
# generation
outputs = net(in_text, in_lengths, target_poses, None)
# loss
loss = custom_loss(outputs, target_poses, args, epoch)
loss.backward()
# optimize
torch.nn.utils.clip_grad_norm_(net.parameters(), 5)
optim.step()
return {'loss': loss.item()}
| [
"torch.nn.functional.mse_loss",
"torch.norm",
"torch.stack",
"torch.sum"
] | 1.1.0 | er1ca/Gesture-Generation-from-Trimodal-Context | 6d988a7211a4d8294e1ef4b45c45ee25d12455d2 |
1.3 | import inspect
import logging
import warnings
from typing import Callable, Dict, List, Optional, Sequence, Union
import numpy as np
import pandas as pd
import torch
from scvi._compat import Literal
logger = logging.getLogger(__name__)
Number = Union[int, float]
class DifferentialComputation:
"""
Unified class for differential computation.
This class takes a function from a model like `SCVI` or `TOTALVI` and takes outputs
from this function with respect to the adata input and computed Bayes factors as
described in [Lopez18]_, [Xu20]_, or [Boyeau19]_.
Parameters
----------
model_fn
Function in model API to get values from.
adata
AnnData setup with scvi
"""
def __init__(self, model_fn, adata):
self.adata = adata
self.model_fn = model_fn
def get_bayes_factors(
self,
idx1: Union[List[bool], np.ndarray],
idx2: Union[List[bool], np.ndarray],
mode: Literal["vanilla", "change"] = "vanilla",
batchid1: Optional[Sequence[Union[Number, str]]] = None,
batchid2: Optional[Sequence[Union[Number, str]]] = None,
use_observed_batches: Optional[bool] = False,
n_samples: int = 5000,
use_permutation: bool = False,
m_permutation: int = 10000,
change_fn: Optional[Union[str, Callable]] = None,
m1_domain_fn: Optional[Callable] = None,
delta: Optional[float] = 0.5,
cred_interval_lvls: Optional[Union[List[float], np.ndarray]] = None,
) -> Dict[str, np.ndarray]:
r"""
A unified method for differential expression inference.
Two modes coexist:
- the `"vanilla"` mode follows protocol described in [Lopez18]_ and [Xu20]_
In this case, we perform hypothesis testing based on the hypotheses
.. math::
M_1: h_1 > h_2 ~\text{and}~ M_2: h_1 \leq h_2.
DE can then be based on the study of the Bayes factors
.. math::
\log p(M_1 | x_1, x_2) / p(M_2 | x_1, x_2).
- the `"change"` mode (described in [Boyeau19]_)
This mode consists of estimating an effect size random variable (e.g., log fold-change) and
performing Bayesian hypothesis testing on this variable.
The `change_fn` function computes the effect size variable :math:`r` based on two inputs
corresponding to the posterior quantities (e.g., normalized expression) in both populations.
Hypotheses:
.. math::
M_1: r \in R_1 ~\text{(effect size r in region inducing differential expression)}
.. math::
M_2: r \notin R_1 ~\text{(no differential expression)}
To characterize the region :math:`R_1`, which induces DE, the user has two choices.
1. A common case is when the region :math:`[-\delta, \delta]` does not induce differential
expression. If the user specifies a threshold delta, we suppose that :math:`R_1 = \mathbb{R} \setminus [-\delta, \delta]`
        2. Specify a specific indicator function:
.. math::
f: \mathbb{R} \mapsto \{0, 1\} ~\text{s.t.}~ r \in R_1 ~\text{iff.}~ f(r) = 1.
Decision-making can then be based on the estimates of
.. math::
p(M_1 \mid x_1, x_2).
Both modes require to sample the posterior distributions.
To that purpose, we sample the posterior in the following way:
1. The posterior is sampled `n_samples` times for each subpopulation.
2. For computational efficiency (posterior sampling is quite expensive), instead of
comparing the obtained samples element-wise, we can permute posterior samples.
Remember that computing the Bayes Factor requires sampling :math:`q(z_A \mid x_A)` and :math:`q(z_B \mid x_B)`.
Currently, the code covers several batch handling configurations:
        1. If ``use_observed_batches=True``, then batches are considered as observations
and cells' normalized means are conditioned on real batch observations.
2. If case (cell group 1) and control (cell group 2) are conditioned on the same
           batch ids. This requires ``set(batchid1) == set(batchid2)`` or ``batchid1 == batchid2 == None``.
3. If case and control are conditioned on different batch ids that do not intersect
i.e., ``set(batchid1) != set(batchid2)`` and ``len(set(batchid1).intersection(set(batchid2))) == 0``.
This function does not cover other cases yet and will warn users in such cases.
Parameters
----------
mode
one of ["vanilla", "change"]
idx1
bool array masking subpopulation cells 1. Should be True where cell is
from associated population
idx2
bool array masking subpopulation cells 2. Should be True where cell is
from associated population
batchid1
List of batch ids for which you want to perform DE Analysis for
subpopulation 1. By default, all ids are taken into account
batchid2
List of batch ids for which you want to perform DE Analysis for
subpopulation 2. By default, all ids are taken into account
use_observed_batches
Whether posterior values are conditioned on observed
batches
n_samples
Number of posterior samples
use_permutation
Activates step 2 described above.
Simply formulated, pairs obtained from posterior sampling
will be randomly permuted so that the number of pairs used
to compute Bayes Factors becomes `m_permutation`.
m_permutation
Number of times we will "mix" posterior samples in step 2.
Only makes sense when `use_permutation=True`
change_fn
function computing effect size based on both posterior values
m1_domain_fn
custom indicator function of effect size regions
inducing differential expression
delta
specific case of region inducing differential expression.
            In this case, we suppose that :math:`[-\delta, \delta]` does not induce differential expression
(LFC case)
cred_interval_lvls
List of credible interval levels to compute for the posterior
LFC distribution
Returns
-------
Differential expression properties
"""
# if not np.array_equal(self.indices, np.arange(len(self.dataset))):
# logger.warning(
# "Differential expression requires a Posterior object created with all indices."
# )
eps = 1e-8 # used for numerical stability
# Normalized means sampling for both populations
scales_batches_1 = self.scale_sampler(
selection=idx1,
batchid=batchid1,
use_observed_batches=use_observed_batches,
n_samples=n_samples,
)
scales_batches_2 = self.scale_sampler(
selection=idx2,
batchid=batchid2,
use_observed_batches=use_observed_batches,
n_samples=n_samples,
)
px_scale_mean1 = scales_batches_1["scale"].mean(axis=0)
px_scale_mean2 = scales_batches_2["scale"].mean(axis=0)
# Sampling pairs
# The objective of code section below is to ensure than the samples of normalized
# means we consider are conditioned on the same batch id
batchid1_vals = np.unique(scales_batches_1["batch"])
batchid2_vals = np.unique(scales_batches_2["batch"])
create_pairs_from_same_batches = (
set(batchid1_vals) == set(batchid2_vals)
) and not use_observed_batches
if create_pairs_from_same_batches:
# First case: same batch normalization in two groups
logger.debug("Same batches in both cell groups")
n_batches = len(set(batchid1_vals))
n_samples_per_batch = (
m_permutation // n_batches if m_permutation is not None else None
)
scales_1 = []
scales_2 = []
for batch_val in set(batchid1_vals):
# Select scale samples that originate from the same batch id
scales_1_batch = scales_batches_1["scale"][
scales_batches_1["batch"] == batch_val
]
scales_2_batch = scales_batches_2["scale"][
scales_batches_2["batch"] == batch_val
]
# Create more pairs
scales_1_local, scales_2_local = pairs_sampler(
scales_1_batch,
scales_2_batch,
use_permutation=use_permutation,
m_permutation=n_samples_per_batch,
)
scales_1.append(scales_1_local)
scales_2.append(scales_2_local)
scales_1 = np.concatenate(scales_1, axis=0)
scales_2 = np.concatenate(scales_2, axis=0)
else:
logger.debug("Ignoring batch conditionings to compare means")
if len(set(batchid1_vals).intersection(set(batchid2_vals))) >= 1:
warnings.warn(
"Batchids of cells groups 1 and 2 are different but have an non-null "
"intersection. Specific handling of such situations is not implemented "
"yet and batch correction is not trustworthy."
)
scales_1, scales_2 = pairs_sampler(
scales_batches_1["scale"],
scales_batches_2["scale"],
use_permutation=use_permutation,
m_permutation=m_permutation,
)
# Core of function: hypotheses testing based on the posterior samples we obtained above
if mode == "vanilla":
logger.debug("Differential expression using vanilla mode")
proba_m1 = np.mean(scales_1 > scales_2, 0)
proba_m2 = 1.0 - proba_m1
res = dict(
proba_m1=proba_m1,
proba_m2=proba_m2,
bayes_factor=np.log(proba_m1 + eps) - np.log(proba_m2 + eps),
scale1=px_scale_mean1,
scale2=px_scale_mean2,
)
elif mode == "change":
logger.debug("Differential expression using change mode")
# step 1: Construct the change function
def lfc(x, y):
return np.log2(x) - np.log2(y)
if change_fn == "log-fold" or change_fn is None:
change_fn = lfc
elif not callable(change_fn):
raise ValueError("'change_fn' attribute not understood")
# step2: Construct the DE area function
if m1_domain_fn is None:
delta = delta if delta is not None else 0.5
def m1_domain_fn(samples):
return np.abs(samples) >= delta
change_fn_specs = inspect.getfullargspec(change_fn)
domain_fn_specs = inspect.getfullargspec(m1_domain_fn)
if (len(change_fn_specs.args) != 2) | (len(domain_fn_specs.args) != 1):
raise ValueError(
"change_fn should take exactly two parameters as inputs; m1_domain_fn one parameter."
)
try:
change_distribution = change_fn(scales_1, scales_2)
is_de = m1_domain_fn(change_distribution)
except TypeError:
raise TypeError(
"change_fn or m1_domain_fn have has wrong properties."
"Please ensure that these functions have the right signatures and"
"outputs and that they can process numpy arrays"
)
proba_m1 = np.mean(is_de, 0)
change_distribution_props = describe_continuous_distrib(
samples=change_distribution,
credible_intervals_levels=cred_interval_lvls,
)
change_distribution_props = {
"lfc_" + key: val for (key, val) in change_distribution_props.items()
}
res = dict(
proba_de=proba_m1,
proba_not_de=1.0 - proba_m1,
bayes_factor=np.log(proba_m1 + eps) - np.log(1.0 - proba_m1 + eps),
scale1=px_scale_mean1,
scale2=px_scale_mean2,
**change_distribution_props,
)
else:
raise NotImplementedError("Mode {mode} not recognized".format(mode=mode))
return res
@torch.no_grad()
def scale_sampler(
self,
selection: Union[List[bool], np.ndarray],
n_samples: Optional[int] = 5000,
n_samples_per_cell: Optional[int] = None,
batchid: Optional[Sequence[Union[Number, str]]] = None,
use_observed_batches: Optional[bool] = False,
give_mean: Optional[bool] = False,
) -> dict:
"""
Samples the posterior scale using the variational posterior distribution.
Parameters
----------
selection
Mask or list of cell ids to select
n_samples
            Number of samples in total per batch (fill either `n_samples`
            or `n_samples_per_cell`)
n_samples_per_cell
            Number of times we sample from each observation per batch
            (fill either `n_samples` or `n_samples_per_cell`)
batchid
Biological batch for which to sample from.
Default (None) sample from all batches
use_observed_batches
Whether normalized means are conditioned on observed
batches or if observed batches are to be used
give_mean
Return mean of values
Returns
-------
type
Dictionary containing:
`scale`
Posterior aggregated scale samples of shape (n_samples, n_vars)
where n_samples correspond to either:
- n_bio_batches * n_cells * n_samples_per_cell
or
- n_samples_total
`batch`
associated batch ids
"""
# Get overall number of desired samples and desired batches
if batchid is None and not use_observed_batches:
categorical_mappings = self.adata.uns["_scvi"]["categorical_mappings"]
batchid = categorical_mappings["_scvi_batch"]["mapping"]
if use_observed_batches:
if batchid is not None:
raise ValueError("Unconsistent batch policy")
batchid = [None]
if n_samples is None and n_samples_per_cell is None:
n_samples = 5000
elif n_samples_per_cell is not None and n_samples is None:
n_samples = n_samples_per_cell * len(selection)
if (n_samples_per_cell is not None) and (n_samples is not None):
warnings.warn(
"n_samples and n_samples_per_cell were provided. Ignoring n_samples_per_cell"
)
n_samples = int(n_samples / len(batchid))
if n_samples == 0:
warnings.warn(
"very small sample size, please consider increasing `n_samples`"
)
n_samples = 2
# Selection of desired cells for sampling
if selection is None:
raise ValueError("selections should be a list of cell subsets indices")
selection = np.asarray(selection)
if selection.dtype is np.dtype("bool"):
if len(selection) < self.adata.shape[0]:
raise ValueError("Mask must be same length as adata.")
selection = np.asarray(np.where(selection)[0].ravel())
# Sampling loop
px_scales = []
batch_ids = []
for batch_idx in batchid:
idx = np.random.choice(np.arange(self.adata.shape[0])[selection], n_samples)
px_scales.append(
self.model_fn(self.adata, indices=idx, transform_batch=batch_idx)
)
batch_idx = batch_idx if batch_idx is not None else np.nan
batch_ids.append([batch_idx] * px_scales[-1].shape[0])
px_scales = np.concatenate(px_scales)
batch_ids = np.concatenate(batch_ids).reshape(-1)
if px_scales.shape[0] != batch_ids.shape[0]:
raise ValueError("sampled scales and batches have inconsistent shapes")
if give_mean:
px_scales = px_scales.mean(0)
return dict(scale=px_scales, batch=batch_ids)
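# Editor's note: an illustrative sketch (not part of the original file) of how the class above is
# typically driven in "change" mode. `model.get_normalized_expression` is an assumption -- any
# callable matching the `model_fn(adata, indices=..., transform_batch=...)` signature works.
def _differential_computation_example(model, adata, idx1, idx2):
    dc = DifferentialComputation(model.get_normalized_expression, adata)
    res = dc.get_bayes_factors(idx1, idx2, mode="change", delta=0.5, n_samples=1000)
    # res contains, among others, the posterior DE probability and the Bayes factor per feature
    return res["proba_de"], res["bayes_factor"]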
def pairs_sampler(
arr1: Union[List[float], np.ndarray, torch.Tensor],
arr2: Union[List[float], np.ndarray, torch.Tensor],
use_permutation: bool = True,
m_permutation: int = None,
sanity_check_perm: bool = False,
weights1: Union[List[float], np.ndarray, torch.Tensor] = None,
weights2: Union[List[float], np.ndarray, torch.Tensor] = None,
) -> tuple:
"""
Creates more pairs.
In a context where we want to estimate a double sum, virtually increases the number
of samples by considering more pairs so as to better estimate the double summation operation
Parameters
----------
arr1
samples from population 1
arr2
samples from population 2
use_permutation
Whether to mix samples from both populations
m_permutation
        number of permuted pairs to sample when `use_permutation` is True
    sanity_check_perm
        If True, resulting mixed arrays arr1 and arr2 are mixed together.
        In most cases, this parameter should remain False
weights1
probabilities associated to array 1 for random sampling
weights2
probabilities associated to array 2 for random sampling
Returns
-------
type
new_arr1, new_arr2
"""
if use_permutation is True:
# prepare the pairs for sampling
n_arr1 = arr1.shape[0]
n_arr2 = arr2.shape[0]
if not sanity_check_perm:
# case1: no permutation, sample from A and then from B
u, v = (
np.random.choice(n_arr1, size=m_permutation, p=weights1),
np.random.choice(n_arr2, size=m_permutation, p=weights2),
)
first_set = arr1[u]
second_set = arr2[v]
else:
# case2: permutation, sample from A+B twice (sanity check)
u, v = (
np.random.choice(n_arr1 + n_arr2, size=m_permutation),
np.random.choice(n_arr1 + n_arr2, size=m_permutation),
)
concat_arr = np.concatenate((arr1, arr2))
first_set = concat_arr[u]
second_set = concat_arr[v]
else:
first_set = arr1
second_set = arr2
return first_set, second_set
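# Editor's note: illustrative usage of `pairs_sampler` on arbitrary dummy data (not part of the
# original file).
def _pairs_sampler_example():
    arr1 = np.random.rand(100, 5)
    arr2 = np.random.rand(100, 5)
    # draw 10000 random pairs instead of comparing the 100 x 100 samples element-wise
    a, b = pairs_sampler(arr1, arr2, use_permutation=True, m_permutation=10000)
    return a.shape, b.shape  # both (10000, 5)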
def credible_intervals(
ary: np.ndarray, confidence_level: Union[float, List[float], np.ndarray] = 0.94
) -> np.ndarray:
"""
Calculate highest posterior density (HPD) of array for given credible_interval.
Taken from the arviz package
The HPD is the minimum width Bayesian credible interval (BCI). This implementation works only
for unimodal distributions.
Parameters
----------
ary
posterior samples
confidence_level
confidence level
Returns
-------
type
intervals minima, intervals maxima
"""
if ary.ndim > 1:
hpd = np.array(
[
credible_intervals(row, confidence_level=confidence_level)
for row in ary.T
]
)
return hpd
# Make a copy of trace
ary = ary.copy()
n = len(ary)
ary = np.sort(ary)
interval_idx_inc = int(np.floor(confidence_level * n))
n_intervals = n - interval_idx_inc
interval_width = ary[interval_idx_inc:] - ary[:n_intervals]
if len(interval_width) == 0:
raise ValueError(
"Too few elements for interval calculation. "
"Check that credible_interval meets condition 0 =< credible_interval < 1"
)
min_idx = np.argmin(interval_width)
hdi_min = ary[min_idx]
hdi_max = ary[min_idx + interval_idx_inc]
return np.array([hdi_min, hdi_max])
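# Editor's note: illustrative usage of `credible_intervals` on dummy samples (not part of the
# original file).
def _credible_interval_example():
    samples = np.random.normal(loc=0.0, scale=1.0, size=5000)
    low, high = credible_intervals(samples, confidence_level=0.94)
    return low, high  # roughly (-1.9, 1.9) for a standard normal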
def describe_continuous_distrib(
samples: Union[np.ndarray, torch.Tensor],
credible_intervals_levels: Optional[Union[List[float], np.ndarray]] = None,
) -> dict:
"""
Computes properties of distribution based on its samples.
Parameters
----------
samples
samples of shape (n_samples, n_features)
credible_intervals_levels
Confidence in (0, 1)
of credible intervals to be computed
Returns
-------
type
properties of distribution
"""
dist_props = dict(
mean=samples.mean(0),
median=np.median(samples, 0),
std=samples.std(0),
min=samples.min(0),
max=samples.max(0),
)
credible_intervals_levels = (
[] if credible_intervals_levels is None else credible_intervals_levels
)
for confidence in credible_intervals_levels:
intervals = credible_intervals(samples, confidence_level=confidence)
interval_min, interval_max = intervals[:, 0], intervals[:, 1]
conf_str = str(confidence)[:5]
dist_props["confidence_interval_{}_min".format(conf_str)] = interval_min
dist_props["confidence_interval_{}_max".format(conf_str)] = interval_max
return dist_props
def save_cluster_xlsx(
filepath: str, de_results: List[pd.DataFrame], cluster_names: List
):
"""
Saves multi-clusters DE in an xlsx sheet.
Parameters
----------
filepath
xslx save path
de_results
list of pandas Dataframes for each cluster
cluster_names
list of cluster names
"""
writer = pd.ExcelWriter(filepath, engine="xlsxwriter")
for i, x in enumerate(cluster_names):
de_results[i].to_excel(writer, sheet_name=str(x))
writer.close()
| [
"torch.no_grad"
] | 1.3 | gitter-badger/scvi-tools | 8948405f6b393baede73ccd6a0a5ac0824e16c0d |
1.1 | import torch
import copy
from utils.misc import deprecated
def unprocessed_collate(batch):
"""
A dummy function to prevent Pytorch's data loader from converting and stacking batch data.
:param batch:
:return:
"""
return batch # List of data tuples (sequence, timeline, label)
@deprecated
def custom_collate(batch):
"""This helper function only works for batch training many-to-one RNN."""
data = [item[0] for item in batch]
start = [item[1] for item in batch]
end = [item[2] for item in batch]
target = [item[3] for item in batch]
target = torch.LongTensor(target)
return [data, start, end, target]
@deprecated
def pad_tensor(vec, pad, dim):
"""
    Warning: DO NOT use this function to pad sequences, otherwise the model will not learn properly
:param vec: tensor to pad
:param pad: the size to pad to
:param dim: dimension to pad
:return: a new tensor padded to 'pad' in dimension 'dim'
"""
pad_size = list(vec.shape)
pad_size[dim] = pad - vec.size(dim)
return torch.cat([vec.float(), torch.zeros(*pad_size)], dim=dim)
@deprecated
class PadCollate:
"""
    A variant of collate_fn that pads according to the longest sequence in
    a batch of sequences. Warning: DO NOT use this helper with the torch data loader,
    or the model will not learn properly.
"""
def __init__(self, dim=0):
"""
:param dim: the dimension to be padded (dimension of time in sequences)
"""
self.dim = dim
def pad_collate(self, batch):
"""
:param batch: list of (tensor, label)
:returns
xs: a tensor of all examples in 'batch' after padding
ys: a LongTensor of all labels in batch
"""
# find longest sequence
max_len = max(map(lambda x: x[0].shape[self.dim], batch))
# pad according to max_len
        batch = list(map(lambda x: (pad_tensor(x[0], pad=max_len, dim=self.dim), x[1]), batch))  # materialize so it can be deep-copied and iterated twice
temp = copy.deepcopy(batch)
# stack all
xs = torch.stack(list(map(lambda x: x[0], batch)), dim=0)
ys = torch.LongTensor(list(map(lambda x: x[1], temp)))
return xs, ys
def __call__(self, batch):
return self.pad_collate(batch)
def get_gaussian_confidence(t, event_time, sigma):
"""Event can be either start or end of an action. Time is actually frame index for actual sequences."""
return torch.exp(-0.5 * torch.pow((t - event_time) / sigma, 2.))
def get_confidence_matrix(label_tensor: torch.tensor, num_classes: int, sigma: float):
seq_len = len(label_tensor)
confidence_mat = torch.zeros(seq_len, num_classes + 1, 2)
last_label_idx = num_classes # denotes unknown action
start_frames = []
end_frames = []
for frame_idx, label_idx in enumerate(label_tensor, 0):
assert label_idx <= num_classes, 'Unexpected index of action class found.'
if label_idx != last_label_idx and label_idx != num_classes: # transition of action class
if last_label_idx == num_classes:
# start of an action
start_frames.append((label_idx, frame_idx))
elif frame_idx != seq_len - 1:
# end of an action
end_frames.append((label_idx, frame_idx))
last_label_idx = label_idx
elif frame_idx == seq_len - 1 and label_idx != num_classes:
# end of an action is the same as end of the sequence (trimmed action sequence)
end_frames.append((label_idx, frame_idx))
for start_frame in start_frames:
confidence_mat[:, start_frame[0], 0] += get_gaussian_confidence(torch.FloatTensor(range(0, seq_len)),
start_frame[1],
sigma)
for end_frame in end_frames:
confidence_mat[:, end_frame[0], 1] += get_gaussian_confidence(torch.FloatTensor(range(0, seq_len)),
end_frame[1],
sigma)
confidence_mat = torch.clamp(confidence_mat, min=0, max=1)
return confidence_mat
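# Editor's note: illustrative usage of `get_confidence_matrix` on a toy label sequence (not part
# of the original file); 3 action classes, index 3 marks "no action".
def _confidence_matrix_example():
    labels = torch.LongTensor([3, 3, 0, 0, 0, 3, 3, 2, 2, 3])      # one class label per frame
    conf = get_confidence_matrix(labels, num_classes=3, sigma=1.0)
    return conf.shape  # torch.Size([10, 4, 2]): frames x (classes + unknown) x (start, end)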
@deprecated
def get_confidence_matrix_old(seq_len: int, timeline: torch.tensor, label_idx: int, num_classes: int, sigma: float):
# This version does not work with multi-class sequence
timeline = timeline.squeeze()
confidence_mat = torch.zeros(seq_len, num_classes + 1, 2)
start = int(timeline[0].long().numpy())
end = int(timeline[-1].long().numpy())
confidence_mat[:, label_idx, :] = timeline.expand(2, seq_len).permute(1, 0) # TODO timeline == seq_len
confidence_mat[:, label_idx, 0] = get_gaussian_confidence(confidence_mat[:, label_idx, 0],
start,
sigma)
confidence_mat[:, label_idx, 1] = get_gaussian_confidence(confidence_mat[:, label_idx, 1],
end,
sigma)
return confidence_mat
def is_model_on_gpu(model: torch.nn.Module):
return next(model.parameters()).is_cuda
| [
"torch.zeros",
"torch.LongTensor",
"torch.clamp",
"torch.pow"
] | 1.1.0 | howieraem/KinectActionDetection | ff64030e9fa2eb3d512b5cc1dae79e6a07ab8e5c |
1.10 | ####################################################################################################################################################
####################################################################################################################################################
"""
Dataloader definitions for all the datasets used in our paper.
The datasets need to be downloaded manually and placed inside a same folder.
Specify your folder location in the following line
# directory containing all the datasets
ROOT_DATA_DIR = Path("")
"""
####################################################################################################################################################
####################################################################################################################################################
import os
import random
import numpy as np
from PIL import Image
from pathlib import Path
import torch
from torch.utils.data import Dataset
import torchvision.transforms.functional as TF
from torchvision.datasets import FashionMNIST as TFashionMNIST
from torchvision.datasets import CIFAR10 as TCIFAR10
from torchvision.datasets import SVHN as TSVHN
from torchvision.datasets import Omniglot as TOmniglot
from torchvision.datasets import Places365 as TPlaces365
from torchvision.datasets import LSUN as TLSUN
from torchvision.datasets import MNIST as TMNIST
import albumentations as album
from collections import defaultdict
####################################################################################################################################################
####################################################################################################################################################
# directory containing all the datasets
ROOT_DATA_DIR = Path("")
####################################################################################################################################################
class BaseDatasetCar(Dataset):
"""
Base class for all dataset classes for the vehicle interior.
"""
def __init__(self, root_dir, car, split, make_scene_impossible, make_instance_impossible, augment=False, nbr_of_samples_per_class=-1):
# path to the main folder
self.root_dir = Path(root_dir)
# which car are we using?
self.car = car
# train or test split
self.split = split
# are we using training data
self.is_train = True if "train" in self.split else False
# normal or impossible reconstruction loss?
self.make_scene_impossible = make_scene_impossible
self.make_instance_impossible = make_instance_impossible
# pre-process the data if necessary
self._pre_process_dataset()
# load the data into the memory
self._get_data()
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.RandomBrightnessContrast(always_apply=False, p=0.4, brightness_limit=(0.0, 0.33), contrast_limit=(0.0, 0.33), brightness_by_max=False),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
        # dict to map the concatenation of the three seat position classes to a single integer
self.label_str_to_int = {'0_0_0': 0, '0_0_3': 1, '0_3_0': 2, '3_0_0': 3, '0_3_3': 4, '3_0_3': 5, '3_3_0': 6, '3_3_3': 7}
        # the reverse of the above, to transform int labels back into strings
self.int_to_label_str = {v:k for k,v in self.label_str_to_int.items()}
def _get_subset_of_data(self):
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.labels):
                # make sure it's a string
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
                # if we are above the threshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# only take the subset of indices based on how many samples per class to keep
self.images = [x for idx, x in enumerate(self.images) if idx in keep_indices]
self.labels = [x for idx, x in enumerate(self.labels) if idx in keep_indices]
def __len__(self):
"""
Return the total number of samples in the dataset.
"""
# number of images to use
return len(self.images)
def _get_data(self):
# get all folders with the sceneries
if self.car.lower() == "all":
self.folders = sorted(list(self.root_dir.glob(f"*/pp_{self.split}_64/*")))
else:
self.folders = sorted(list(self.root_dir.glob(f"{self.car}/pp_{self.split}_64/*")))
# placeholder for all images and labels
self.images = []
self.labels = []
# for each folder
for idx, folder in enumerate(self.folders):
# get classification labels for each seat from folder name
classif_labels = self._get_classif_label(folder)
# each scene will be an array of images
self.images.append([])
# get all the images for this scene
files = sorted(list(folder.glob("*.png")))
# for each file
for file in files:
# open the image specified by the path
# make sure it is a grayscale image
img = np.array(Image.open(file).convert("L"))
# append the image to the placeholder
self.images[idx].append(img)
# append label to placeholder
self.labels.append(classif_labels)
def _get_classif_label(self, file_path):
# get the filename only of the path
name = file_path.stem
# split at GT
gts = name.split("GT")[-1]
# split the elements at _
# first element is empty string, remove it
clean_gts = gts.split("_")[1:]
# convert the strings to ints
clean_gts = [int(x) for x in clean_gts]
# convert sviro labels to compare with other datasets
for index, value in enumerate(clean_gts):
# everyday objects and child seats to background
if value in [1,2,4,5,6]:
clean_gts[index] = 0
return clean_gts
def _get_classif_str(self, label):
return str(label[0]) + "_" + str(label[1]) + "_" + str(label[2])
def _pre_process_dataset(self):
# get all the subfolders inside the dataset folder
data_folder_variations = self.root_dir.glob("*")
# for each variation
for folder in data_folder_variations:
# for each split
for pre_processed_split in ["pp_train_64", "pp_test_64"]:
# create the path
path_to_preprocessed_split = folder / pre_processed_split
path_to_vanilla_split = folder / pre_processed_split.split("_")[1]
# if no pre-processing for these settings exists, then create them
if not path_to_preprocessed_split.exists():
print("-" * 37)
print(f"Pre-process and save data for folder: {folder} and split: {pre_processed_split} and downscale size: 64 ...")
self.pre_process_and_save_data(path_to_preprocessed_split, path_to_vanilla_split)
print("Pre-processing and saving finished.")
print("-" * 37)
def pre_process_and_save_data(self, path_to_preprocessed_split, path_to_vanilla_split):
"""
        To speed up training, it is beneficial to do the rudimentary pre-processing once and save the data.
"""
# create the folders to save the pre-processed data
path_to_preprocessed_split.mkdir()
# get all the files in all the subfolders
files = list(path_to_vanilla_split.glob(f"**/*.png"))
# for each image
for curr_file in files:
# open the image specified by the path
img = Image.open(curr_file).convert("L")
# center crop the image using the smaller size (i.e. width or height)
# to define the new size of the image (basically we remove only either the width or height)
img = TF.center_crop(img, np.min(img.size))
# then resize the image to the one we want to use for training
img = TF.resize(img, 64)
# create the folder for the experiment
save_folder = path_to_preprocessed_split / curr_file.parent.stem
save_folder.mkdir(exist_ok=True)
# save the processed image
img.save(save_folder / curr_file.name)
def _get_positive(self, rand_indices, positive_label, positive_images):
# get all the potential candidates which have the same label
masked = [idx for idx, x in enumerate(self.labels) if x==positive_label]
# if there is no other image with the same label
if not masked:
new_rand_indices = random.sample(range(0,len(positive_images)), 2)
positive_input_image = positive_images[new_rand_indices[0]]
positive_output_image = positive_images[new_rand_indices[1]] if self.make_scene_impossible else positive_images[new_rand_indices[0]]
positive_input_image = TF.to_tensor(positive_input_image)
positive_output_image = TF.to_tensor(positive_output_image)
else:
# choose one index randomly from the masked subset
index = np.random.choice(masked)
positive_input_image = self.images[index][rand_indices[0]]
positive_output_image = self.images[index][rand_indices[1]] if self.make_scene_impossible else self.images[index][rand_indices[0]]
positive_input_image = TF.to_tensor(positive_input_image)
positive_output_image = TF.to_tensor(positive_output_image)
return positive_input_image, positive_output_image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
images = self.images[index]
label = self.labels[index]
str_label = self._get_classif_str(label)
# randomly selected
# .) the input images
# .) the output images
rand_indices = random.sample(range(0,len(images)), 2)
# get the image to be used as input
input_image = images[rand_indices[0]]
# get the image to be used for the reconstruction error
output_image = images[rand_indices[1]] if self.make_scene_impossible else images[rand_indices[0]]
# make sure its a tensor
input_image = TF.to_tensor(input_image)
output_image = TF.to_tensor(output_image)
if self.make_instance_impossible:
_, output_image = self._get_positive(rand_indices, label, images)
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: input_image = torch.from_numpy(self.augment(image=np.array(input_image)[0])["image"][None,:])
return {"image":input_image, "target":output_image, "gt": self.label_str_to_int[str_label]}
####################################################################################################################################################
class SVIRO(BaseDatasetCar):
"""
https://sviro.kl.dfki.de
You only need the grayscale images for the whole scene.
Make sure to have a folder structure as follows:
SVIRO
├── aclass
│ ├── train
│ │ └──── grayscale_wholeImage
│ └── test
│ └──── grayscale_wholeImage
⋮
⋮
⋮
└── zoe
├── train
│ └──── grayscale_wholeImage
└── test
└──── grayscale_wholeImage
"""
def __init__(self, car, which_split, make_instance_impossible, augment):
# path to the main folder
root_dir = ROOT_DATA_DIR / "SVIRO"
# call the init function of the parent class
super().__init__(root_dir=root_dir, car=car, split=which_split, make_scene_impossible=False, make_instance_impossible=make_instance_impossible, augment=augment)
def _get_data(self):
# get all the png files, i.e. experiments
if self.car.lower() == "all":
self.files = sorted(list(self.root_dir.glob(f"*/{self.split}/grayscale_wholeImage_pp_640_64/*.png")))
else:
self.files = sorted(list(self.root_dir.glob(f"{self.car}/{self.split}/grayscale_wholeImage_pp_640_64/*.png")))
# placeholder for all images and labels
self.images = []
self.labels = []
# for each file
for file in self.files:
# get classification labels for each seat from folder name
classif_labels = self._get_classif_label(file)
# do not get child seats or everyday objects
if 1 in classif_labels or 2 in classif_labels or 4 in classif_labels or 5 in classif_labels or 6 in classif_labels:
continue
# open the image specified by the path
# make sure it is a grayscale image
img = np.array(Image.open(file).convert("L"))
# each scene will be an array of images
# append the image to the placeholder
self.images.append([img])
# append label to placeholder
self.labels.append(classif_labels)
def _get_classif_label(self, file_path):
# get the filename only of the path
name = file_path.stem
# split at GT
gts = name.split("GT")[-1]
# split the elements at _
# first element is empty string, remove it
clean_gts = gts.split("_")[1:]
# convert the strings to ints
clean_gts = [int(x) for x in clean_gts]
return clean_gts
def _pre_process_dataset(self):
# get all the subfolders inside the dataset folder
data_folder_variations = self.root_dir.glob("*/*")
# for each variation
for folder in data_folder_variations:
# create the path
path_to_preprocessed_split = folder / "grayscale_wholeImage_pp_640_64"
path_to_vanilla_split = folder / "grayscale_wholeImage"
# if no pre-processing for these settings exists, then create them
if not path_to_preprocessed_split.exists():
print("-" * 37)
print(f"Pre-process and save data for folder: {folder} and downscale size: 64 ...")
self.pre_process_and_save_data(path_to_preprocessed_split, path_to_vanilla_split)
print("Pre-processing and saving finished.")
print("-" * 37)
def pre_process_and_save_data(self, path_to_preprocessed_split, path_to_vanilla_split):
"""
        To speed up training, it is beneficial to do the rudimentary pre-processing once and save the data.
"""
# create the folders to save the pre-processed data
path_to_preprocessed_split.mkdir()
# get all the files in all the subfolders
files = list(path_to_vanilla_split.glob("*.png"))
# for each image
for curr_file in files:
# open the image specified by the path
img = Image.open(curr_file).convert("L")
# center crop the image using the smaller size (i.e. width or height)
# to define the new size of the image (basically we remove only either the width or height)
img = TF.center_crop(img, np.min(img.size))
# then resize the image to the one we want to use for training
img = TF.resize(img, 64)
# create the path to the file
save_path = path_to_preprocessed_split / curr_file.name
# save the processed image
img.save(save_path)
def _get_positive(self, positive_label):
# get all the potential candidates from the real images which have the same label as the synthetic one
masked = [idx for idx, x in enumerate(self.labels) if x==positive_label]
# choose one index randomly from the masked subset
index = np.random.choice(masked)
input_image = self.images[index][0]
input_image = TF.to_tensor(input_image)
return input_image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index][0]
label = self.labels[index]
str_label = self._get_classif_str(label)
# transform it for pytorch (normalized and transposed)
image = TF.to_tensor(image)
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt": self.label_str_to_int[str_label]}
####################################################################################################################################################
class SVIROUncertainty(BaseDatasetCar):
"""
https://sviro.kl.dfki.de
Make sure to have a folder structure as follows:
SVIRO-Illumination
└── sharan
├── train
├── test-adults
├── test-objects
└── test-adults-and-objects
"""
def __init__(self, car, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = ROOT_DATA_DIR / "SVIRO-Uncertainty"
# call the init function of the parent class
super().__init__(root_dir=root_dir, car=car, split=which_split, make_scene_impossible=False, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
def _get_data(self):
# get all the png files, i.e. experiments
self.files = sorted(list(self.root_dir.glob(f"{self.car}/pp_{self.split}_64/*/ir.png")))
# placeholder for all images and labels
self.images = []
self.labels = []
# for each file
for file in self.files:
# get classification labels for each seat from folder name
classif_labels = self._get_classif_label(file.parent)
# open the image specified by the path
# make sure it is a grayscale image
img = np.array(Image.open(file).convert("L"))
# each scene will be an array of images
# append the image to the placeholder
self.images.append([img])
# append label to placeholder
self.labels.append(classif_labels)
def _pre_process_dataset(self):
# get all the subfolders inside the dataset folder
data_folder_variations = self.root_dir.glob("*")
# for each variation
for folder in data_folder_variations:
# for each split
for pre_processed_split in ["pp_train-adults_64", "pp_train-adults-and-seats_64", "pp_test-adults_64", "pp_test-objects_64", "pp_test-seats_64", "pp_test-adults-and-objects_64", "pp_test-adults-and-seats_64", "pp_test-adults-and-seats-and-objects_64"]:
# create the path
path_to_preprocessed_split = folder / pre_processed_split
path_to_vanilla_split = folder / pre_processed_split.split("_")[1]
# if no pre-processing for these settings exists, then create them
if not path_to_preprocessed_split.exists():
print("-" * 37)
print(f"Pre-process and save data for folder: {folder} and split: {pre_processed_split} and downscale size: 64 ...")
self.pre_process_and_save_data(path_to_preprocessed_split, path_to_vanilla_split)
print("Pre-processing and saving finished.")
print("-" * 37)
def pre_process_and_save_data(self, path_to_preprocessed_split, path_to_vanilla_split):
"""
        To speed up training, it is beneficial to do the rudimentary pre-processing once and save the data.
"""
# create the folders to save the pre-processed data
path_to_preprocessed_split.mkdir()
# get all the files in all the subfolders
files = list(path_to_vanilla_split.glob(f"**/ir.png")) + list(path_to_vanilla_split.glob(f"**/rgb.png"))
# for each image
for curr_file in files:
# open the image specified by the path
img = Image.open(curr_file).convert("L") if "ir" in curr_file.name else Image.open(curr_file).convert("RGB")
# center crop the image using the smaller size (i.e. width or height)
# to define the new size of the image (basically we remove only either the width or height)
img = TF.center_crop(img, np.min(img.size))
# then resize the image to the one we want to use for training
img = TF.resize(img, 64)
# create the folder for the experiment
save_folder = path_to_preprocessed_split / curr_file.parent.stem
save_folder.mkdir(exist_ok=True)
# save the processed image
img.save(save_folder / curr_file.name)
def _get_positive(self, positive_label):
# get all the potential candidates from the real images which have the same label as the synthetic one
masked = [idx for idx, x in enumerate(self.labels) if x==positive_label]
# choose one index randomly from the masked subset
index = np.random.choice(masked)
input_image = self.images[index][0]
input_image = TF.to_tensor(input_image)
return input_image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index][0]
label = self.labels[index]
str_label = self._get_classif_str(label)
# transform it for pytorch (normalized and transposed)
image = TF.to_tensor(image)
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt": self.label_str_to_int[str_label]}
####################################################################################################################################################
class Fashion(TFashionMNIST):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, train=self.is_train, download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def _get_subset_of_data(self):
self.images = self.data
self.labels = self.targets
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.labels):
                # make sure it's an int
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
                # if we are above the threshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# only take the subset of indices based on how many samples per class to keep
self.data = [x for idx, x in enumerate(self.images) if idx in keep_indices]
self.targets = [x for idx, x in enumerate(self.labels) if idx in keep_indices]
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.targets)-1)
if int(self.targets[index]) == positive_label:
image = self.data[index]
image = Image.fromarray(image.numpy(), mode='L')
image = TF.resize(image, [64, 64])
image = TF.to_tensor(image)
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.data[index]
label = int(self.targets[index])
# doing this so that it is consistent with all other datasets to return a PIL Image
image = Image.fromarray(image.numpy(), mode='L')
# transform it for pytorch (normalized and transposed)
image = TF.resize(image, [64, 64])
image = TF.to_tensor(image)
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class MNIST(TMNIST):
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/MNIST")
# train or test split, digits or letters
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, train=self.is_train, download=False)
# only get a subset of the data
self._get_subset_of_data()
# dict to transform integers to string labels
self.int_to_label_str = {x:str(x) for x in range(10)}
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.targets):
                # make sure it's an int
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
                # if we are above the threshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.targets[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
current_image = Image.fromarray(self.data[idx].numpy(), mode="L")
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# get label
current_label = self.targets[idx]
# keep it
self.images.append(current_image)
self.labels.append(current_label)
del self.targets
del self.data
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (we need 0-channel input, not 1-channel input)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class GTSRB(Dataset):
string_labels_to_integer_dict = dict()
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# train or test
self.is_train = True if which_split.lower() == "train" else False
if which_split.lower() == "train":
self.folder = "train_png"
elif which_split.lower() == "test":
self.folder = "test_png"
elif which_split.lower() == "ood":
self.folder = "ood_png"
else:
raise ValueError
# path to the main folder
self.root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/GTSRB") / self.folder
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.RandomBrightnessContrast(always_apply=False, p=0.4, brightness_limit=(0.0, 0.33), contrast_limit=(0.0, 0.33), brightness_by_max=False),
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
"""
Return the total number of samples in the dataset.
"""
# number of images to use
return len(self.images)
def _get_subset_of_data(self):
self.all_images = list(self.root_dir.glob("*/*.png"))
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, img in enumerate(self.all_images):
# get the label
label = self._get_label_from_path(img)
# increase the counter for this label
counter[label] += 1
                # if we are above the threshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.all_images[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
current_image = Image.open(self.all_images[idx]).convert("L")
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# get label
current_label = self._get_label_from_path(self.all_images[idx])
# keep it
self.images.append(current_image)
self.labels.append(current_label)
def _get_label_from_path(self, path):
# derive the class label from the parent folder name (note: self.folder holds the "*_png" directory names)
if self.folder == "ood_png":
if int(path.parent.name) < 10:
return int(path.parent.name)
else:
return int(path.parent.name)-10
else:
return int(path.parent.name)-10
def _get_positive(self, positive_label):
# get all the potential candidates from the real images which have the same label as the synthetic one
masked = [idx for idx, x in enumerate(self.labels) if x==positive_label]
# choose one index randomly from the masked subset
index = np.random.choice(masked)
input_image = self.images[index]
return input_image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = self.labels[index]
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (the augmenter works on the HxW array from channel 0; the channel dimension is added back afterwards)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class CIFAR10(TCIFAR10):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/CIFAR10")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, train=self.is_train, download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.RandomBrightnessContrast(always_apply=False, p=0.4, brightness_limit=(0.0, 0.33), contrast_limit=(0.0, 0.33), brightness_by_max=False),
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.targets):
# make sure the label is an int
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
# if we are above the threshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.data[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
current_image = Image.fromarray(self.data[idx]).convert("L")
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# get label
current_label = self.targets[idx]
# keep it
self.images.append(current_image)
self.labels.append(current_label)
del self.targets
del self.data
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (the augmenter works on the HxW array from channel 0; the channel dimension is added back afterwards)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class SVHN(TSVHN):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/SVHN")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, split="train" if self.is_train else "test", download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.RandomBrightnessContrast(always_apply=False, p=0.4, brightness_limit=(0.0, 0.33), contrast_limit=(0.0, 0.33), brightness_by_max=False),
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.targets = self.labels
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, label in enumerate(self.targets):
# make sure the label is an int
label = self._get_classif_str(label)
# increase the counter for this label
counter[label] += 1
# if we are above the threshold for this label
if counter[label] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.data[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
current_image = Image.fromarray(np.transpose(self.data[idx], (1, 2, 0))).convert("L")
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# get label
current_label = self.targets[idx]
# keep it
self.images.append(current_image)
self.labels.append(current_label)
del self.targets
del self.data
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (the augmenter works on the HxW array from channel 0; the channel dimension is added back afterwards)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class Omniglot(TOmniglot):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/Omniglot")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, background=self.is_train, download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, (_, character_class) in enumerate(self._flat_character_images):
# increase the counter for this label
counter[character_class] += 1
# if we are above the threshold for this label
if counter[character_class] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self._flat_character_images[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
image_name, character_class = self._flat_character_images[idx]
image_path = os.path.join(self.target_folder, self._characters[character_class], image_name)
current_image = Image.open(image_path, mode='r').convert('L')
# transform it for pytorch (normalized and transposed)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# keep it
self.images.append(current_image)
self.labels.append(character_class)
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (the augmenter works on the HxW array from channel 0; the channel dimension is added back afterwards)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class Places365(TPlaces365):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/Places365")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, split="train-standard" if self.is_train else "val", small=True, download=False)
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx, (_, target) in enumerate(self.imgs):
# increase the counter for this label
counter[target] += 1
# if we are above the threshold for this label
if counter[target] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx, _ in enumerate(self.imgs[0:10_000])]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
# get the image
file, target = self.imgs[idx]
current_image = self.loader(file)
# transform it for pytorch (normalized and transposed)
current_image = TF.rgb_to_grayscale(current_image, num_output_channels=1)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# keep it
self.images.append(current_image)
self.labels.append(target)
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (the augmenter works on the HxW array from channel 0; the channel dimension is added back afterwards)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
class LSUN(TLSUN):
# dict to transform integers to string labels
int_to_label_str = {x:str(x) for x in range(10)}
def __init__(self, which_split, make_instance_impossible, nbr_of_samples_per_class, augment):
# path to the main folder
root_dir = Path(f"/data/local_data/workingdir_g02/sds/data/LSUN")
# train or test split
self.split = which_split
self.is_train = True if self.split.lower() == "train" else False
# normal or impossible reconstruction loss?
self.make_instance_impossible = make_instance_impossible
# number of samples to keep for each class
self.nbr_of_samples_per_class = nbr_of_samples_per_class
# call the init function of the parent class
super().__init__(root=root_dir, classes="train" if self.is_train else "test")
# only get a subset of the data
self._get_subset_of_data()
# augmentations if needed
if augment:
self.augment = album.Compose(
[
album.Blur(always_apply=False, p=0.4, blur_limit=7),
album.MultiplicativeNoise(always_apply=False, p=0.4, elementwise=True, multiplier=(0.75, 1.25)),
]
)
else:
self.augment = False
def _get_classif_str(self, label):
return int(label)
def __len__(self):
return len(self.images)
def _get_subset_of_data(self):
self.images = []
self.labels = []
# if we are training
if self.nbr_of_samples_per_class > 0:
# keep track of samples per class
counter = defaultdict(int)
# and the corresponding indices
keep_indices = []
# for each label
for idx in range(self.length):
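# walk the cumulative per-class boundaries in self.indices to find which class db this global index belongs to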
target = 0
for ind in self.indices:
if idx < ind:
break
target += 1
# increase the counter for this label
counter[target] += 1
# if we are above the threshold for this label
if counter[target] >= (self.nbr_of_samples_per_class+1):
# then skip it
continue
else:
# otherwise keep track of the label
keep_indices.append(idx)
# testing
else:
keep_indices = [idx for idx in range(10_000)]
# only take the subset of indices based on how many samples per class to keep
for idx in keep_indices:
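# map the global index to its class db (target) and to the local index within that db (idx - sub)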
target = 0
sub = 0
for ind in self.indices:
if idx < ind:
break
target += 1
sub = ind
db = self.dbs[target]
idx = idx - sub
current_image, _ = db[idx]
# transform it for pytorch (normalized and transposed)
current_image = TF.rgb_to_grayscale(current_image, num_output_channels=1)
current_image = TF.resize(current_image, [64, 64])
current_image = TF.to_tensor(current_image)
# keep it
self.images.append(current_image)
self.labels.append(target)
def _get_positive(self, positive_label):
while True:
index = random.randint(0, len(self.labels)-1)
if int(self.labels[index]) == positive_label:
image = self.images[index]
return image
def __getitem__(self, index):
"""
Return an element from the dataset based on the index.
Parameters:
index -- an integer for data indexing
"""
# get the image and labels
image = self.images[index]
label = int(self.labels[index])
if self.make_instance_impossible:
output_image = self._get_positive(label)
else:
output_image = image.clone()
# augment image if necessary (the augmenter works on the HxW array from channel 0; the channel dimension is added back afterwards)
if self.augment: image = torch.from_numpy(self.augment(image=np.array(image)[0])["image"][None,:])
return {"image":image, "target":output_image, "gt":label}
####################################################################################################################################################
def print_dataset_statistics(dataset, which_dataset, which_split):
# if a vehicle dataset
if which_dataset.lower() in ["sviro", "sviro_uncertainty"]:
# get the int label for all labels
labels = np.array([dataset.label_str_to_int["_".join([str(y) for y in x])] for x in dataset.labels])
int_to_label_str = dataset.int_to_label_str
elif hasattr(dataset, "labels"):
labels = np.array(dataset.labels)
int_to_label_str = None
elif hasattr(dataset, "targets"):
labels = np.array(dataset.targets)
int_to_label_str = None
else:
print("No targets or labels attribute.")
return
unique_labels, labels_counts = np.unique(labels, return_counts=True)
if int_to_label_str is None:
int_to_label_str = {x:str(x) for x in unique_labels}
print("=" * 37)
print("Dataset used: \t", dataset)
print("Split: \t\t", which_split)
print("Samples: \t", len(dataset))
print("-" * 37)
# print each label and its number of occurrences
for label, count in zip(unique_labels, labels_counts):
print(f"Label {int_to_label_str[label]}: {count}")
print("=" * 37)
####################################################################################################################################################
def create_dataset(which_dataset, which_factor, which_split, make_scene_impossible=False, make_instance_impossible=False, augment=False, batch_size=64, shuffle=True, nbr_of_samples_per_class=-1, print_dataset=True):
# create the dataset
if which_dataset.lower() == "sviro":
dataset = SVIRO(car=which_factor, which_split=which_split, make_instance_impossible=make_instance_impossible, augment=augment)
elif which_dataset.lower() == "sviro_uncertainty":
dataset = SVIROUncertainty(car=which_factor, which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "fashion":
dataset = Fashion(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "mnist":
dataset = MNIST(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "gtsrb":
dataset = GTSRB(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "cifar10":
dataset = CIFAR10(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "svhn":
dataset = SVHN(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "omniglot":
dataset = Omniglot(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "places365":
dataset = Places365(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
elif which_dataset.lower() == "lsun":
dataset = LSUN(which_split=which_split, make_instance_impossible=make_instance_impossible, nbr_of_samples_per_class=nbr_of_samples_per_class, augment=augment)
else:
raise ValueError
if len(dataset) == 0:
raise ValueError("The length of the dataset is zero. There is probably a problem with the folder structure for the dataset you want to consider. Have you downloaded the dataset and used the correct folder name and folder tree structure?")
# for reproducibility
# https://pytorch.org/docs/1.9.0/notes/randomness.html?highlight=reproducibility
g = torch.Generator()
g.manual_seed(0)
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
# create loader for the defined dataset
train_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=4,
pin_memory=True,
worker_init_fn=seed_worker,
generator=g,
)
if print_dataset:
print_dataset_statistics(dataset, which_dataset, which_split)
return train_loader
#################################################################################################################################################### | [
"torch.initial_seed",
"torch.Generator",
"torch.utils.data.DataLoader"
] | 1.10 | SteveCruz/icpr2022-autoencoder-attractors | 0935179b514fd49e1d2410005d91ff49db9978ac |
1.0 | import os
import sys
import torch
import mydatasets
import torch.autograd as autograd
import argparse
import torchtext.data as data
torch.manual_seed(3)
parser = argparse.ArgumentParser(description='Predictor api')
parser.add_argument('--snapshot', type=str, default='saved-models/best-cnn.pt', help='filename of model snapshot [default: None]')
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('--input', help='Input file name', required=True)
parser.add_argument('--output', type=str, default = 'output.txt', help='File path of the predictions')
# Path to training data for vocab
parser.add_argument('--dataset-dir', default='data', help = 'dataset directory path which contains negative/positive/neutral files')
parser.add_argument('--rnn', action = 'store_true', default = False, help = 'activate char rnn')
parser.add_argument('--max-length', type=int, default=600, help='The maximum number of characters in sequence.')
parser.add_argument('--min-freq', type=int, default=20, help='The minimum frequency of a character to be a vocab member')
parser.add_argument('-batch-size', type=int, default=64, help='batch size for training [default: 64]')
args = parser.parse_args()
tokenizer = lambda sent: list(sent[::-1])
# To rebuild the same vocab at prediction time, the training data is reloaded below;
# saving the vocab to disk would be a simpler alternative, but the dataset is small enough that this is acceptable.
print("\nLoading data...")
text_field = data.Field(lower=True, tokenize = tokenizer, fix_length = args.max_length)
label_field = data.Field(sequential=False)
train_iter, dev_iter = mydatasets.load_twitter_dataset_vocab(text_field, label_field, args.dataset_dir,
args.min_freq, args.batch_size, args.rnn)
# Suited for running inference on one instance at a time.
def predict(text, model, text_field, label_field, cuda_flag):
assert isinstance(text, str)
model.eval()
text = text_field.tokenize(text)
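# numericalize: map each character to its vocab index and wrap it as a batch of size one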
text = [[text_field.vocab.stoi[x] for x in text]]
x = torch.tensor(text)
x = autograd.Variable(x)
if args.rnn:
output = model(x, torch.Tensor([len(text)]))
else:
output = model(x)
_, predicted = torch.max(output, 1)
return label_field.vocab.itos[predicted.data[0]+1]
if __name__ == '__main__':
model = torch.load(args.snapshot, map_location=lambda storage, loc: storage)
model.cpu()
model.eval()
output_file = open(args.output, 'w')
input_file = open(args.input)
for line in input_file:
line = line.strip()
prediction = predict(line, model, text_field, label_field, True)
output_file.write(prediction + "\n")
output_file.close()
input_file.close()
| [
"torch.max",
"torch.autograd.Variable",
"torch.manual_seed",
"torch.tensor",
"torch.load"
] | 1.0.0 | rangwani-harsh/char-cnn-char-rnn-sentiment-analysis | 48238232ba053f8c12e66383fd65fc075c532dad |
1.7 | import argparse
import torch
import torch.nn as nn
import torch.utils.data as data
import torchvision
from tqdm import tqdm
import utils
from model import DummyModel
def main():
parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
# Type of experiment
parser.add_argument('--test_type', type=str, default='accuracy', help="Experiment type")
# Training environment configuration
parser.add_argument('--num_gpus', type=int, default=2, help="Number of GPUs to use")
parser.add_argument('--deterministic', action='store_true', default=False, help="Deterministic mode for reproducibility")
parser.add_argument('--random_seed', type=int, default=7343, help="Random seed (optional)")
# Dataset and trained model (for resume training)
parser.add_argument('--dataset', type=str, help="Name of the dataset to use")
parser.add_argument('--model_checkpoint', type=str, help="Path to the model checkpoints")
args = parser.parse_args()
if args.deterministic:
utils.set_deterministic(args.random_seed)
run(args)
def run(args):
model = DummyModel()
rgb_mean = (0.4914, 0.4822, 0.4465)
rgb_std = (0.2023, 0.1994, 0.2010)
dataset_test = torchvision.datasets.CIFAR10('./data/datasets', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(rgb_mean, rgb_std),
]))
evaluator = Evaluator(args, model)
evaluator.load_checkpoint(args.model_checkpoint)
evaluator.eval(dataset_test)
class Evaluator:
def __init__(self, args, model):
self.args = args
self.model = nn.DataParallel(model, device_ids=[i for i in range(self.args.num_gpus)]).cuda(0)
def eval(self, dataset):
test_type = self.args.test_type.lower()
if test_type == 'accuracy':
self.test_accuracy(dataset)
else:
print('Unknown test type')
def test_accuracy(self, dataset):
print('Accuracy test started.')
self.model.eval()
criterion = nn.CrossEntropyLoss()
loader = data.DataLoader(dataset, 256)
total_loss = 0
total_correct_samples = 0
total_samples = 0
total_steps = 0
for i, batch in enumerate(tqdm(loader)):
images, labels = batch
images = images.cuda(0)
labels = labels.cuda(0)
batch_size = labels.size(0)
logits = self.model(images)
loss = criterion(logits, labels)
correct_samples = self.count_correct(logits, labels.data)
total_loss += loss.item()
total_correct_samples += correct_samples
total_samples += batch_size
total_steps += 1
total_loss = total_loss / total_steps
total_acc = total_correct_samples / total_samples
print('Accuracy test completed.')
print(f' Test loss: {total_loss}')
print(f' Test accuracy: {total_acc}')
def count_correct(self, logits, labels):
_, preds = torch.max(logits, 1)
num_correct = preds.eq(labels).sum().item()
return num_correct
def load_checkpoint(self, checkpoint_name):
path = utils.get_data_path('model_checkpoints', checkpoint_name)
map_location = {'cuda:%d' % 0: 'cuda:%d' % 0}
self.model.load_state_dict(torch.load(path, map_location))
if __name__ == '__main__':
main()
| [
"torch.max",
"torch.nn.CrossEntropyLoss",
"torch.load",
"torch.utils.data.DataLoader"
] | 1.7.0 | agemor/pytorch-project-template | 9b43db0578d6ea0aa40d2fec577cb50e86e57c7d |
1.7 | from pathlib import Path
from copy import deepcopy
from argparse import ArgumentParser
import torch
from torch.nn import BCEWithLogitsLoss
from torchvision.models import resnet
import pytorch_lightning as pl
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.metrics.classification import Precision, Recall, F1
from datasets.oscd_datamodule import ChangeDetectionDataModule
from models.segmentation import get_segmentation_model
from models.moco2_module import MocoV2
class SiamSegment(LightningModule):
def __init__(self, backbone, feature_indices, feature_channels):
super().__init__()
self.model = get_segmentation_model(backbone, feature_indices, feature_channels)
self.criterion = BCEWithLogitsLoss()
self.prec = Precision(num_classes=1, threshold=0.5)
self.rec = Recall(num_classes=1, threshold=0.5)
self.f1 = F1(num_classes=1, threshold=0.5)
def forward(self, x1, x2):
return self.model(x1, x2)
def training_step(self, batch, batch_idx):
img_1, img_2, mask, pred, loss, prec, rec, f1 = self.shared_step(batch)
self.log('train/loss', loss, prog_bar=True)
self.log('train/precision', prec, on_step=False, on_epoch=True, prog_bar=True)
self.log('train/recall', rec, on_step=False, on_epoch=True, prog_bar=True)
self.log('train/f1', f1, on_step=False, on_epoch=True, prog_bar=True)
tensorboard = self.logger.experiment
global_step = self.trainer.global_step
tensorboard.add_image('train/img_1', img_1[0], global_step)
tensorboard.add_image('train/img_2', img_2[0], global_step)
tensorboard.add_image('train/mask', mask[0], global_step)
tensorboard.add_image('train/out', pred[0], global_step)
return loss
def validation_step(self, batch, batch_idx):
img_1, img_2, mask, pred, loss, prec, rec, f1 = self.shared_step(batch)
self.log('val/loss', loss, prog_bar=True)
self.log('val/precision', prec, on_step=False, on_epoch=True, prog_bar=True)
self.log('val/recall', rec, on_step=False, on_epoch=True, prog_bar=True)
self.log('val/f1', f1, on_step=False, on_epoch=True, prog_bar=True)
tensorboard = self.logger.experiment
global_step = self.trainer.global_step
tensorboard.add_image('val/img_1', img_1[0], global_step)
tensorboard.add_image('val/img_2', img_2[0], global_step)
tensorboard.add_image('val/mask', mask[0], global_step)
tensorboard.add_image('val/out', pred[0], global_step)
return loss
def shared_step(self, batch):
img_1, img_2, mask = batch
out = self(img_1, img_2)
pred = torch.sigmoid(out)
loss = self.criterion(out, mask)
prec = self.prec(pred, mask.long())
rec = self.rec(pred, mask.long())
f1 = self.f1(pred, mask.long())
return img_1, img_2, mask, pred, loss, prec, rec, f1
def configure_optimizers(self):
# params = self.model.parameters()
params = set(self.model.parameters()).difference(self.model.encoder.parameters())
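# optimize only the parameters outside the encoder so the (possibly pre-trained) backbone is not updated by this optimizer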
optimizer = torch.optim.Adam(params, lr=1e-3, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
return [optimizer], [scheduler]
if __name__ == '__main__':
pl.seed_everything(42)
parser = ArgumentParser()
parser.add_argument('--gpus', type=int, default=1)
parser.add_argument('--data_dir', type=str)
parser.add_argument('--patch_size', type=int, default=96)
parser.add_argument('--backbone_type', type=str, default='imagenet')
parser.add_argument('--ckpt_path', type=str, default=None)
args = parser.parse_args()
datamodule = ChangeDetectionDataModule(args.data_dir)
if args.backbone_type == 'random':
backbone = resnet.resnet18(pretrained=False)
elif args.backbone_type == 'imagenet':
backbone = resnet.resnet18(pretrained=True)
elif args.backbone_type == 'pretrain':
model = MocoV2.load_from_checkpoint(args.ckpt_path)
backbone = deepcopy(model.encoder_q)
else:
raise ValueError()
model = SiamSegment(backbone, feature_indices=(0, 4, 5, 6, 7), feature_channels=(64, 64, 128, 256, 512))
model.example_input_array = (torch.zeros((1, 3, 96, 96)), torch.zeros((1, 3, 96, 96)))
experiment_name = args.backbone_type
logger = TensorBoardLogger(save_dir=str(Path.cwd() / 'logs' / 'oscd'), name=experiment_name)
checkpoint_callback = ModelCheckpoint(filename='{epoch}', save_weights_only=True)
trainer = Trainer(gpus=args.gpus, logger=logger, callbacks=[checkpoint_callback], max_epochs=100, weights_summary='full')
trainer.fit(model, datamodule=datamodule)
| [
"torch.zeros",
"torch.sigmoid",
"torch.optim.Adam",
"torch.optim.lr_scheduler.ExponentialLR",
"torch.nn.BCEWithLogitsLoss"
] | 1.7.1 | SaeidAbdolian/seasonal-contrast | 5395c027922569f5c5b1785ad1ccddd839749c36 |
0.27 | # Copyright 2021 MosaicML. All Rights Reserved.
import functools
import numpy as np
import pytest
import torch
from PIL import Image
from torch.utils.data import DataLoader
from composer.algorithms import ColOut, ColOutHparams
from composer.algorithms.colout.colout import ColOutTransform, colout_batch
from composer.core import Event, State
from composer.loggers import Logger
from tests.common import RandomImageDataset
def verify_shape_image(orig: Image.Image, new: Image.Image, p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed PIL Image."""
H_o, W_o = orig.height, orig.width
H_n, W_n = new.height, new.width
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert (H_n, W_n) == (H_t, W_t), f"Image shape mismatch: {(H_n, W_n)} != {(H_t, W_t)}"
def verify_shape_tensor(orig: torch.Tensor, new: torch.Tensor, p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed image tensor."""
C, H_o, W_o = orig.shape
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert new.shape == (C, H_t, W_t), f"Image tensor shape mismatch: {new.shape} != {(C, H_t, W_t)}"
def verify_shape_batch(orig: torch.Tensor, new: torch.Tensor, p_row: float, p_col: float) -> None:
"""Verify the shape of a transformed batch of images."""
N, C, H_o, W_o = orig.shape
H_t = int((1 - p_row) * H_o)
W_t = int((1 - p_col) * W_o)
assert new.shape == (N, C, H_t, W_t), f"Image batch shape mismatch: {new.shape} != {(N, C, H_t, W_t)}"
@pytest.fixture(params=[False, True])
def batch(request) -> bool:
"""Algorithm batch parameter."""
return request.param
@pytest.fixture(params=[0, 0.15])
def p_row(request) -> float:
"""Algorithm p_row parameter."""
return request.param
@pytest.fixture
def p_col(p_row) -> float:
"""Algorithm p_col parameter."""
return p_row
@pytest.fixture(params=[1, 3])
def C(request) -> int:
"""Number of image channels.
Testing BW and RGB.
"""
return request.param
@pytest.fixture
def H(request) -> int:
"""Default image height."""
return 32
@pytest.fixture
def W(H) -> int:
"""Default image width (equal to height)"""
return H
@pytest.fixture
def fake_image(H: int, W: int, C: int) -> Image.Image:
"""Fake PIL Image."""
rng = np.random.RandomState(0)
return Image.fromarray((255 * rng.uniform(size=(H, W, C)).squeeze()).astype(np.uint8))
@pytest.fixture
def fake_image_tensor(H: int, W: int, C: int) -> torch.Tensor:
"""Fake image tensor."""
torch.manual_seed(0)
return torch.rand(C, H, W)
@pytest.fixture
def fake_image_batch(H: int, W: int, C: int) -> torch.Tensor:
"""Fake batch of images."""
torch.manual_seed(0)
return torch.rand(16, C, H, W)
@pytest.fixture
def colout_algorithm(p_row: float, p_col: float, batch: bool) -> ColOut:
"""Reusable algorithm instance."""
return ColOut(p_row, p_col, batch)
class TestColOutTransform:
def test_single_image_drop_size(self, fake_image: Image.Image, p_row: float, p_col: float):
"""Test application to single PIL image."""
transform = ColOutTransform(p_row, p_col)
new_image = transform(fake_image)
verify_shape_image(fake_image, new_image, p_row, p_col)
@pytest.mark.parametrize("W", [48])
def test_rectangular_image(self, fake_image: Image.Image, p_row: float, p_col: float):
"""Test application to a rectangular PIL image."""
transform = ColOutTransform(p_row, p_col)
new_image = transform(fake_image)
verify_shape_image(fake_image, new_image, p_row, p_col) # type: ignore
def test_single_image_tensor_drop_size(self, fake_image_tensor: torch.Tensor, p_row: float, p_col: float):
"""Test application to a single torch image tensor."""
transform = ColOutTransform(p_row, p_col)
new_image = transform(fake_image_tensor)
verify_shape_tensor(fake_image_tensor, new_image, p_row, p_col) # type: ignore
def test_reproducibility_image(self, fake_image_tensor: torch.Tensor, p_row: float, p_col: float):
"""Test that transform is reproducible given the same seed."""
transform_1 = ColOutTransform(p_row, p_col)
transform_2 = ColOutTransform(p_row, p_col)
torch.manual_seed(42)
new_image_1 = transform_1(fake_image_tensor)
torch.manual_seed(42)
new_image_2 = transform_2(fake_image_tensor)
assert torch.allclose(new_image_1, new_image_2)
class TestColOutFunctional:
def test_reproducibility_batch(self, fake_image_batch: torch.Tensor, p_row: float, p_col: float):
"""Test that batch augmentation is reproducible given the same seed."""
transform_1 = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
transform_2 = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
torch.manual_seed(42)
new_batch_1 = transform_1(fake_image_batch)
torch.manual_seed(42)
new_batch_2 = transform_2(fake_image_batch)
assert isinstance(new_batch_1, torch.Tensor)
assert isinstance(new_batch_2, torch.Tensor)
assert torch.allclose(new_batch_1, new_batch_2)
def test_batch_drop_size(self, fake_image_batch: torch.Tensor, p_row: float, p_col: float):
"""Test application to a batch of images."""
colout = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
new_batch = colout(fake_image_batch)
assert isinstance(new_batch, torch.Tensor)
verify_shape_batch(fake_image_batch, new_batch, p_row, p_col)
@pytest.mark.parametrize("p_col", [0.05, 0.25])
def test_rectangle_batch_drop_size(self, fake_image_batch: torch.Tensor, p_row: float, p_col: float):
"""Test that unequal values of p_row and p_col work properly."""
colout = functools.partial(colout_batch, p_row=p_row, p_col=p_col)
new_batch = colout(fake_image_batch)
assert isinstance(new_batch, torch.Tensor)
verify_shape_batch(fake_image_batch, new_batch, p_row, p_col)
class TestColOutAlgorithm:
@pytest.mark.parametrize("event,batch", [(Event.AFTER_DATALOADER, True), (Event.FIT_START, False)])
def test_match_correct(self, event: Event, colout_algorithm: ColOut, minimal_state: State):
"""Algo should match AFTER_DATALOADER if batch else FIT_START."""
assert colout_algorithm.match(event, minimal_state)
@pytest.mark.parametrize("event,batch", [(Event.FIT_START, True), (Event.AFTER_DATALOADER, False),
(Event.EPOCH_END, True)])
def test_match_incorrect(self, event: Event, colout_algorithm: ColOut, minimal_state: State):
"""Algo should NOT match FIT_START if batch else AFTER_DATALOADER."""
assert not colout_algorithm.match(event, minimal_state)
@pytest.mark.parametrize("batch", [True])
def test_apply_batch(self, fake_image_batch: torch.Tensor, colout_algorithm: ColOut, minimal_state: State,
empty_logger: Logger):
"""Apply the algorithm to a fake batch."""
p_row = colout_algorithm.p_row
p_col = colout_algorithm.p_col
minimal_state.batch = (fake_image_batch, torch.Tensor())
colout_algorithm.apply(Event.AFTER_DATALOADER, minimal_state, empty_logger)
last_input, _ = minimal_state.batch
verify_shape_batch(fake_image_batch, last_input, p_row, p_col)
@pytest.mark.parametrize("batch", [False])
def test_apply_sample(self, colout_algorithm: ColOut, minimal_state: State, empty_logger: Logger):
"""Test that augmentation is added to dataset and functioning properly."""
p_row = colout_algorithm.p_row
p_col = colout_algorithm.p_col
dataset = RandomImageDataset(is_PIL=True)
dataloader = DataLoader(dataset)
original_image, _ = dataset[0]
assert isinstance(original_image, Image.Image)
minimal_state.train_dataloader = dataloader
colout_algorithm.apply(Event.INIT, minimal_state, empty_logger)
new_image, _ = dataset[0]
assert isinstance(new_image, Image.Image)
verify_shape_image(original_image, new_image, p_row, p_col)
def test_colout_hparams():
hparams = ColOutHparams()
algorithm = hparams.initialize_object()
assert isinstance(algorithm, ColOut)
@pytest.mark.parametrize("p_row,p_col", [(1.5, 0.15), (0.15, 1.5)])
def test_invalid_hparams(p_row: float, p_col: float):
"""Test that invalid hyperparameters error.
Ideally this could be caught by the Hparams, but that's not yet supported in yahp.
"""
with pytest.raises(ValueError):
ColOut(p_row, p_col, False)
| [
"torch.rand",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.allclose",
"torch.Tensor"
] | 0.27 | vahidfazelrezai/composer | a18a1bc3d965b0877f782e1d43a39a4ce6721c24 |
0.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.base.base_net import BaseNet
class CIFAR10_LeNet(BaseNet):
def __init__(self):
super().__init__()
# 这里的这个fc的输出维度和mnist的不同
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.leaky_relu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.fc1(x)
return x
class CIFAR10_LeNet_Autoencoder(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
# Encoder (must match the Deep SVDD network above)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False)
# Decoder
self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv4.weight, gain=nn.init.calculate_gain('leaky_relu'))
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.leaky_relu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.bn1d(self.fc1(x))
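# reshape the bottleneck vector into a (rep_dim / 16) x 4 x 4 feature map before decoding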
x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4)
x = F.leaky_relu(x)
x = self.deconv1(x)
x = F.interpolate(F.leaky_relu(self.bn2d4(x)), scale_factor=2)
x = self.deconv2(x)
x = F.interpolate(F.leaky_relu(self.bn2d5(x)), scale_factor=2)
x = self.deconv3(x)
x = F.interpolate(F.leaky_relu(self.bn2d6(x)), scale_factor=2)
x = self.deconv4(x)
x = torch.sigmoid(x)
return x
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ConvTranspose2d",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.nn.init.calculate_gain",
"torch.nn.functional.leaky_relu"
] | 0.4.1 | Flsahkong/Deep-SVDD-PyTorch | c20442fb394f679222ae49d299bcb3c95e2d67c8 |
1.4 | import os, sys
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0"
from opt import get_opts
import torch
import torchvision.transforms as transforms
from collections import defaultdict
from torch.utils.data import DataLoader
from datasets import dataset_dict
# models
from models.nerf import Embedding, NeRF
from models.rendering import render_rays
# optimizer, scheduler, visualization
from utils import *
# losses
from losses import loss_dict, FeatureLoss
# metrics
from metrics import *
# pytorch-lightning
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.loggers.test_tube import TestTubeLogger
def image_loader(image_name, imsize):
image = Image.open(image_name)
loader = transforms.Compose([
transforms.Resize(imsize), # scale imported image
transforms.ToTensor()] # transform it into a torch tensor
)
# fake batch dimension required to fit network's input dimensions
image = loader(image).unsqueeze(0)
return image.to(torch.float)
class NeRFSystem(LightningModule):
def __init__(self, hparams):
super(NeRFSystem, self).__init__()
self.hparams = hparams
self.stage = hparams.stage
if self.stage == 'style':
style_img_path = self.hparams.style_img
print(style_img_path)
self.style_img = image_loader(
image_name=style_img_path,
imsize=self.hparams.img_wh[0]
)
self.loss = FeatureLoss(
style_img=self.style_img,
style_weight=1000000,
content_weight=1
)
else:
self.loss = loss_dict[hparams.loss_type]()
self.embedding_xyz = Embedding(3, 10) # 10 is the default number
self.embedding_dir = Embedding(3, 4) # 4 is the default number
self.embeddings = [self.embedding_xyz, self.embedding_dir]
self.nerf_coarse = NeRF(stage=self.stage)
if self.stage == 'style':
load_ckpt(self.nerf_coarse, hparams.ckpt_path, model_name='nerf_coarse')
self.models = [self.nerf_coarse]
if hparams.N_importance > 0:
self.nerf_fine = NeRF(stage=self.stage)
if self.stage == 'style':
load_ckpt(self.nerf_fine, hparams.ckpt_path, model_name='nerf_fine')
self.models += [self.nerf_fine]
def decode_batch(self, batch):
rays = batch['rays'] # (B, 8)
rgbs = batch['rgbs'] # (B, 3)
# TODO you can also collect the valid mask here during val/style
# validation step (stage = style):
# - rays.shape = [1, w*h, 8]
# - rgbs.shape =[1, w*h, 3]
# train step (stage = style):
# - rays.shape = [1, w*h, 8]
# - rgbs.shape =[1, w*h, 3]
return rays, rgbs
def forward(self, rays):
"""Do batched inference on rays using chunk."""
B = rays.shape[0]
results = defaultdict(list)
for i in range(0, B, self.hparams.chunk):
rendered_ray_chunks = \
render_rays(self.models,
self.embeddings,
rays[i:i+self.hparams.chunk],
self.hparams.N_samples,
self.hparams.use_disp,
self.hparams.perturb,
self.hparams.noise_std,
self.hparams.N_importance,
self.hparams.chunk, # chunk size is effective in val mode
self.train_dataset.white_back)
for k, v in rendered_ray_chunks.items():
results[k] += [v]
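# concatenate the per-chunk outputs back into full-length tensors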
for k, v in results.items():
results[k] = torch.cat(v, 0)
return results
def prepare_data(self):
dataset = dataset_dict[self.hparams.dataset_name]
kwargs = {'root_dir': self.hparams.root_dir,
'img_wh': tuple(self.hparams.img_wh)}
if self.hparams.dataset_name == 'llff':
kwargs['spheric_poses'] = self.hparams.spheric_poses
kwargs['val_num'] = self.hparams.num_gpus
self.train_dataset = dataset(
split='train',
stage=self.stage,
**kwargs
)
self.val_dataset = dataset(
split='val',
stage=self.stage,
**kwargs
)
def configure_optimizers(self):
self.optimizer = get_optimizer(self.hparams, self.models)
scheduler = get_scheduler(self.hparams, self.optimizer)
return [self.optimizer], [scheduler]
def train_dataloader(self):
if self.stage == 'density':
return DataLoader(
self.train_dataset,
shuffle=True,
num_workers=4,
batch_size=self.hparams.batch_size,
pin_memory=True
)
elif self.stage == 'style':
return DataLoader(
self.train_dataset,
shuffle=True,
num_workers=4,
batch_size=1, # style one image at a time
pin_memory=True
)
def val_dataloader(self):
return DataLoader(self.val_dataset,
shuffle=False,
num_workers=4, # set back to 4 after debug
batch_size=1, # validate one image (H*W rays) at a time
pin_memory=True)
def training_step(self, batch, batch_nb):
log = {'lr': get_learning_rate(self.optimizer)}
if self.stage == 'density':
rays, rgbs = self.decode_batch(batch)
results = self(rays)
log['train/loss'] = loss = self.loss(results, rgbs)
elif self.stage == 'style':
rays, rgbs = self.decode_batch(batch)
rays = rays.squeeze() # (H*W, 8)
rgbs = rgbs.squeeze() # (H*W, 3)
results = self(rays)
target = self._prepare_for_feature_loss(rgbs) #(1,3,W,H)
course_result = self._prepare_for_feature_loss(results['rgb_coarse']) #(1,3,W,H)
course_loss, _, _ = self.loss(course_result, target)
if 'rgb_fine' in results:
fine_result = self._prepare_for_feature_loss(results['rgb_fine']) #(1,3,W,H)
fine_loss, _, _ = self.loss(fine_result, target)
loss = fine_loss + course_loss
else:
loss = course_loss
log['train/loss'] = loss
typ = 'fine' if 'rgb_fine' in results else 'coarse'
with torch.no_grad():
psnr_ = psnr(results[f'rgb_{typ}'], rgbs)
log['train/psnr'] = psnr_
self.log('train/loss', loss, prog_bar=True)
self.log('train/psnr', psnr_, prog_bar=True)
return loss
def _prepare_for_feature_loss(self, img: torch.Tensor):
'''img of shape (H*W, 3) -> (1, 3, w, h)'''
img = img.permute(1,0) #(3, H*W)
img = img.view(3, self.hparams.img_wh[0], self.hparams.img_wh[1]) #(3,W,H)
img = img.unsqueeze(0) # (1,3,W,H)
return img
def validation_step(self, batch, batch_nb):
rays, rgbs = self.decode_batch(batch)
rays = rays.squeeze() # (H*W, 8)
rgbs = rgbs.squeeze() # (H*W, 3)
results = self(rays)
if self.stage == 'density':
log = {'val_loss': self.loss(results, rgbs)}
elif self.stage == 'style':
target = self._prepare_for_feature_loss(rgbs) #(1,3,W,H)
course_result = self._prepare_for_feature_loss(results['rgb_coarse']) #(1,3,W,H)
course_loss, _, _ = self.loss(course_result, target)
if 'rgb_fine' in results:
fine_result = self._prepare_for_feature_loss(results['rgb_fine']) #(1,3,W,H)
fine_loss, _, _ = self.loss(fine_result, target)
loss = fine_loss + course_loss
else:
loss = course_loss
log = {'val_loss': loss}
typ = 'fine' if 'rgb_fine' in results else 'coarse'
if batch_nb == 0:
W, H = self.hparams.img_wh
img = results[f'rgb_{typ}'].view(H, W, 3).cpu()
img = img.permute(2, 0, 1) # (3, H, W)
img_gt = rgbs.view(H, W, 3).permute(2, 0, 1).cpu() # (3, H, W)
depth = visualize_depth(results[f'depth_{typ}'].view(H, W)) # (3, H, W)
stack = torch.stack([img_gt, img, depth]) # (3, 3, H, W)
self.logger.experiment.add_images('val/GT_pred_depth',
stack, self.global_step)
log['val_psnr'] = psnr(results[f'rgb_{typ}'], rgbs)
return log
def validation_epoch_end(self, outputs):
mean_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
mean_psnr = torch.stack([x['val_psnr'] for x in outputs]).mean()
self.log('val/loss', mean_loss, prog_bar=True)
self.log('val/psnr', mean_psnr, prog_bar=True)
# return {'progress_bar': {'val_loss': mean_loss,
# 'val_psnr': mean_psnr},
# 'log': {'val/loss': mean_loss,
# 'val/psnr': mean_psnr}
# }
if __name__ == '__main__':
hparams = get_opts()
system = NeRFSystem(hparams)
checkpoint_callback = ModelCheckpoint(
dirpath=os.path.join(f'ckpts/{hparams.exp_name}','{epoch:d}'),
monitor='val/loss',
mode='min',
save_top_k=5,)
logger = TestTubeLogger(
save_dir="logs",
name=hparams.exp_name,
debug=False,
create_git_tag=False
)
trainer = Trainer(
max_epochs=hparams.num_epochs,
checkpoint_callback=checkpoint_callback,
# resume_from_checkpoint=hparams.ckpt_path, # causes problems with 2 stage training
logger=logger,
weights_summary=None,
progress_bar_refresh_rate=1,
gpus=hparams.num_gpus,
distributed_backend= None,
num_sanity_val_steps=1,
benchmark=True,
profiler=hparams.num_gpus==1
)
trainer.fit(system)
| [
"torch.cat",
"torch.stack",
"torch.utils.data.DataLoader",
"torch.no_grad"
] | 1.4.0 | Jake-Jay/StyleNeRF_pl | c9cc35bc0453a72f51b63512b3517e5f79da12a6 |
1.6 | from torch.nn import *
import torch
import torch.nn.functional as F
class BCEDiceLoss(Module):
def __init__(self):
super().__init__()
def forward(self, input, target):
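# combined loss: 0.5 * binary cross-entropy on the logits plus a soft Dice loss on the sigmoid probabilities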
bce = F.binary_cross_entropy_with_logits(input, target)
smooth = 1e-10
input = torch.sigmoid(input)
num = target.size(0)
input = input.view(num, -1)
target = target.view(num, -1)
intersection = (input * target)
dice = (2. * intersection.sum(1) + smooth) / (input.sum(1) + target.sum(1) + smooth)
dice = 1 - dice.sum() / num
return 0.5 * bce + dice
| [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.sigmoid"
] | 1.6.0 | Ian-Dx/DxTorchUtils | af1d522f58f1b7baed8f661757dd45c13343ddcd |
1.8 | from __future__ import annotations
from typing import Any, Iterable, Optional, Tuple, Union
import torch, warnings
from .protocols import _DeviceMovable
CPU = torch.device('cpu')
GPU = torch.device('cuda')
def data_parallel(raw_model: torch.nn.Module, *args, **kwargs) -> Tuple[Union[torch.nn.Module, torch.nn.parallel.DataParallel], bool]:
"""
Make a `torch.nn.Module` data parallel
- Parameters:
- raw_model: A target `torch.nn.Module`
- Returns: A `tuple` of either a data-parallel `torch.nn.parallel.DataParallel` model if CUDA is available or the raw model if not, and a `bool` flag indicating whether data parallelism was applied successfully.
"""
if torch.cuda.is_available():
model = torch.nn.parallel.DataParallel(raw_model, *args, **kwargs)
return model, True
else:
warnings.warn(f"[Device Warning]: CUDA is not available, unable to use multi-GPUs.", ResourceWarning)
return raw_model, False
def empty_cache() -> None:
"""Empty CUDA cache"""
if torch.cuda.is_available(): torch.cuda.empty_cache()
def find(specified: Optional[torch.device] = None) -> Tuple[torch.device, torch.device]:
"""
Find available devices
- Parameters:
- specified: An optional `torch.device` to use instead of the automatically detected one
- Returns: A `tuple` of CPU in `torch.device` and available device in `torch.device`
"""
if specified is None:
return (CPU, GPU) if torch.cuda.is_available() else (CPU, CPU)
else:
warnings.warn(f"[Device Warning]: Using specified device {specified}.", ResourceWarning)
return CPU, specified
def move_to_device(target: Any, device: torch.device) -> Any:
"""
Move a target variable to device
- Parameters:
- target: `Any` type of target
- device: A `torch.device` of target device
- Returns: The same type of target but moved to target device
"""
if isinstance(target, _DeviceMovable):
target = target.to(device)
elif isinstance(target, dict):
target = {k: move_to_device(t, device) for k, t in target.items()}
elif isinstance(target, Iterable):
target = [move_to_device(t, device) for t in target]
return target | [
"torch.device",
"torch.nn.parallel.DataParallel",
"torch.cuda.is_available",
"torch.cuda.empty_cache"
] | 1.8.2 | kisonho/torchmanager | ac01c61a132238bc0d39bf2173dfd37f44dbbf30 |
1.1 | import os
from argparse import Namespace
import numpy as np
import torch
# from pl_examples import LightningTemplateModel
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TestTubeLogger, TensorBoardLogger
from tests.base import LightningTestModel
from tests.base.datasets import PATH_DATASETS
# generate a list of random seeds for each test
RANDOM_PORTS = list(np.random.randint(12000, 19000, 1000))
ROOT_SEED = 1234
torch.manual_seed(ROOT_SEED)
np.random.seed(ROOT_SEED)
RANDOM_SEEDS = list(np.random.randint(0, 10000, 1000))
ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
def run_model_test_no_loggers(trainer_options, model, min_acc=0.50):
# save_dir = trainer_options['default_root_dir']
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
# correct result and ok accuracy
assert result == 1, 'amp + ddp model failed to complete'
# test model loading
pretrained_model = load_model(trainer.logger,
trainer.checkpoint_callback.dirpath,
path_expt=trainer_options.get('default_root_dir'))
# test new model accuracy
test_loaders = model.test_dataloader()
if not isinstance(test_loaders, list):
test_loaders = [test_loaders]
for dataloader in test_loaders:
run_prediction(dataloader, pretrained_model, min_acc=min_acc)
if trainer.use_ddp:
# on hpc this would work fine... but need to hack it for the purpose of the test
trainer.model = pretrained_model
trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()
def run_model_test(trainer_options, model, on_gpu=True):
save_dir = trainer_options['default_root_dir']
# logger file to get meta
logger = get_default_testtube_logger(save_dir, False)
# logger file to get weights
checkpoint = init_checkpoint_callback(logger)
# add these to the trainer options
trainer_options['checkpoint_callback'] = checkpoint
trainer_options['logger'] = logger
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
# correct result and ok accuracy
assert result == 1, 'amp + ddp model failed to complete'
# test model loading
pretrained_model = load_model(logger, trainer.checkpoint_callback.dirpath)
# test new model accuracy
test_loaders = model.test_dataloader()
if not isinstance(test_loaders, list):
test_loaders = [test_loaders]
[run_prediction(dataloader, pretrained_model) for dataloader in test_loaders]
if trainer.use_ddp or trainer.use_ddp2:
# on hpc this would work fine... but need to hack it for the purpose of the test
trainer.model = pretrained_model
trainer.optimizers, trainer.lr_schedulers, trainer.optimizer_frequencies = \
trainer.init_optimizers(pretrained_model)
# test HPC loading / saving
trainer.hpc_save(save_dir, logger)
trainer.hpc_load(save_dir, on_gpu=on_gpu)
def get_default_hparams(continue_training=False, hpc_exp_number=0):
_ = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
args = {
'drop_prob': 0.2,
'batch_size': 32,
'in_features': 28 * 28,
'learning_rate': 0.001 * 8,
'optimizer_name': 'adam',
'data_root': PATH_DATASETS,
'out_features': 10,
'hidden_dim': 1000,
}
if continue_training:
args['test_tube_do_checkpoint_load'] = True
args['hpc_exp_number'] = hpc_exp_number
hparams = Namespace(**args)
return hparams
def get_default_model(lbfgs=False):
# set up model with these hyperparams
hparams = get_default_hparams()
if lbfgs:
setattr(hparams, 'optimizer_name', 'lbfgs')
setattr(hparams, 'learning_rate', 0.002)
model = LightningTestModel(hparams)
return model, hparams
def get_default_testtube_logger(save_dir, debug=True, version=None):
# set up logger object without actually saving logs
logger = TestTubeLogger(save_dir, name='lightning_logs', debug=debug, version=version)
return logger
def get_data_path(expt_logger, path_dir=None):
# some calls contain only experiment not complete logger
expt = expt_logger.experiment if hasattr(expt_logger, 'experiment') else expt_logger
# each logger has to have these attributes
name, version = expt_logger.name, expt_logger.version
# only the test-tube experiment has such attribute
if hasattr(expt, 'get_data_path'):
return expt.get_data_path(name, version)
# the other experiments...
if not path_dir:
path_dir = ROOT_PATH
path_expt = os.path.join(path_dir, name, 'version_%s' % version)
# try if the new sub-folder exists, typical case for test-tube
if not os.path.isdir(path_expt):
path_expt = path_dir
return path_expt
def load_model(exp, root_weights_dir, module_class=LightningTestModel, path_expt=None):
# load trained model
path_expt_dir = get_data_path(exp, path_dir=path_expt)
tags_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_CSV_TAGS)
checkpoints = [x for x in os.listdir(root_weights_dir) if '.ckpt' in x]
weights_dir = os.path.join(root_weights_dir, checkpoints[0])
trained_model = module_class.load_from_checkpoint(
checkpoint_path=weights_dir,
tags_csv=tags_path
)
assert trained_model is not None, 'loading model failed'
return trained_model
def load_model_from_checkpoint(root_weights_dir, module_class=LightningTestModel):
# load trained model
checkpoints = [x for x in os.listdir(root_weights_dir) if '.ckpt' in x]
weights_dir = os.path.join(root_weights_dir, checkpoints[0])
trained_model = module_class.load_from_checkpoint(
checkpoint_path=weights_dir,
)
assert trained_model is not None, 'loading model failed'
return trained_model
def run_prediction(dataloader, trained_model, dp=False, min_acc=0.5):
# run prediction on 1 batch
for batch in dataloader:
break
x, y = batch
x = x.view(x.size(0), -1)
if dp:
output = trained_model(batch, 0)
acc = output['val_acc']
acc = torch.mean(acc).item()
else:
y_hat = trained_model(x)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
acc = torch.tensor(acc)
acc = acc.item()
assert acc >= min_acc, f"This model is expected to get > {min_acc} in test set (it got {acc})"
def assert_ok_model_acc(trainer, key='test_acc', thr=0.5):
# this model should exceed the given accuracy threshold
acc = trainer.training_tqdm_dict[key]
assert acc > thr, f"Model failed to get expected {thr} accuracy. {key} = {acc}"
def reset_seed():
seed = RANDOM_SEEDS.pop()
torch.manual_seed(seed)
np.random.seed(seed)
def set_random_master_port():
port = RANDOM_PORTS.pop()
os.environ['MASTER_PORT'] = str(port)
def init_checkpoint_callback(logger, path_dir=None):
exp_path = get_data_path(logger, path_dir=path_dir)
ckpt_dir = os.path.join(exp_path, 'checkpoints')
os.mkdir(ckpt_dir)
checkpoint = ModelCheckpoint(ckpt_dir)
return checkpoint
| [
"torch.manual_seed",
"torch.tensor",
"torch.mean",
"torch.argmax",
"torch.sum"
] | 1.1 | baldassarreFe/pytorch-lightning | 3f1e4b953f84ecdac7dada0c6b57d908efc9c3d3 |
1.0 | # coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Utils to train DistilBERT
adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM)
"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
"""
Log commit info.
"""
repo = git.Repo(folder_path, search_parent_directories=True)
repo_infos = {
"repo_id": str(repo),
"repo_sha": str(repo.head.object.hexsha),
"repo_branch": str(repo.active_branch),
}
with open(os.path.join(folder_path, "git_log.json"), "w") as f:
json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
"""
Handle single and multi-GPU / multi-node.
"""
if params.n_gpu <= 0:
params.local_rank = 0
params.master_port = -1
params.is_master = True
params.multi_gpu = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs")
if params.n_gpu > 1:
assert params.local_rank != -1
params.world_size = int(os.environ["WORLD_SIZE"])
params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
params.global_rank = int(os.environ["RANK"])
# number of nodes / node ID
params.n_nodes = params.world_size // params.n_gpu_per_node
params.node_id = params.global_rank // params.n_gpu_per_node
params.multi_gpu = True
assert params.n_nodes == int(os.environ["N_NODES"])
assert params.node_id == int(os.environ["NODE_RANK"])
# local job (single GPU)
else:
assert params.local_rank == -1
params.n_nodes = 1
params.node_id = 0
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
params.n_gpu_per_node = 1
params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
params.is_master = params.node_id == 0 and params.local_rank == 0
params.multi_node = params.n_nodes > 1
# summary
PREFIX = f"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
logger.info(PREFIX + "Node ID : %i" % params.node_id)
logger.info(PREFIX + "Local rank : %i" % params.local_rank)
logger.info(PREFIX + "World size : %i" % params.world_size)
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
logger.info(PREFIX + "Master : %s" % str(params.is_master))
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
logger.info(PREFIX + "Hostname : %s" % socket.gethostname())
# set GPU device
torch.cuda.set_device(params.local_rank)
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed")
torch.distributed.init_process_group(
init_method="env://",
backend="nccl",
)
def set_seed(args):
"""
Set the random seed.
"""
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
| [
"torch.cuda.manual_seed_all",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.cuda.is_available"
] | 1.0 | sripadh8/transformers | 9f6a0fa573b25c90191d58443661db7d187de511 |
1.2 | #!/usr/bin/env python
""" ImageNet Validation Script
This is intended to be a lean and easily modifiable ImageNet validation script for evaluating pretrained
models or training checkpoints against ImageNet or similarly organized image datasets. It prioritizes
canonical PyTorch, standard Python style, and good performance. Repurpose as you see fit.
Hacked together by Ross Wightman (https://github.com/rwightman)
"""
import argparse
import os
import csv
import glob
import time
import logging
import torch
import torch.nn as nn
import torch.nn.parallel
from collections import OrderedDict
from timm.models import create_model, apply_test_time_pool, load_checkpoint, is_model, list_models
from timm.data import Dataset, DatasetTar, create_loader, resolve_data_config
from timm.utils import accuracy, AverageMeter, natural_key, setup_default_logging
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--model', '-m', metavar='MODEL', default='dpn92',
help='model architecture (default: dpn92)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--crop-pct', default=None, type=float,
metavar='N', help='Input image center crop pct')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
help='Override std deviation of of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--num-classes', type=int, default=1000,
help='Number classes in dataset')
parser.add_argument('--class-map', default='', type=str, metavar='FILENAME',
help='path to class to idx mapping file (default: "")')
parser.add_argument('--log-freq', default=10, type=int,
metavar='N', help='batch logging frequency (default: 10)')
parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--no-test-pool', dest='no_test_pool', action='store_true',
help='disable test time pool')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--fp16', action='store_true', default=False,
help='Use half precision (fp16)')
parser.add_argument('--tf-preprocessing', action='store_true', default=False,
help='Use Tensorflow preprocessing pipeline (requires CPU TF installed)')
parser.add_argument('--use-ema', dest='use_ema', action='store_true',
help='use ema version of weights if present')
parser.add_argument('--torchscript', dest='torchscript', action='store_true',
help='convert model torchscript for inference')
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for validation results (summary)')
def validate(args):
# might as well try to validate something
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
# create model
model = create_model(
args.model,
num_classes=args.num_classes,
in_chans=3,
pretrained=args.pretrained)
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
param_count = sum([m.numel() for m in model.parameters()])
logging.info('Model %s created, param count: %d' % (args.model, param_count))
data_config = resolve_data_config(vars(args), model=model)
model, test_time_pool = apply_test_time_pool(model, data_config, args)
if args.torchscript:
torch.jit.optimized_execution(True)
model = torch.jit.script(model)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
else:
model = model.cuda()
if args.fp16:
model = model.half()
criterion = nn.CrossEntropyLoss().cuda()
#from torchvision.datasets import ImageNet
#dataset = ImageNet(args.data, split='val')
if os.path.splitext(args.data)[1] == '.tar' and os.path.isfile(args.data):
dataset = DatasetTar(args.data, load_bytes=args.tf_preprocessing, class_map=args.class_map)
else:
dataset = Dataset(args.data, load_bytes=args.tf_preprocessing, class_map=args.class_map)
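# when test-time pooling is active, evaluate the full resized image instead of a center crop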
crop_pct = 1.0 if test_time_pool else data_config['crop_pct']
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=crop_pct,
pin_memory=args.pin_mem,
fp16=args.fp16,
tf_preprocessing=args.tf_preprocessing)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.cuda()
input = input.cuda()
if args.fp16:
input = input.half()
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.log_freq == 0:
logging.info(
'Test: [{0:>4d}/{1}] '
'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Prec@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) '
'Prec@5: {top5.val:>7.3f} ({top5.avg:>7.3f})'.format(
i, len(loader), batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses, top1=top1, top5=top5))
results = OrderedDict(
top1=round(top1.avg, 4), top1_err=round(100 - top1.avg, 4),
top5=round(top5.avg, 4), top5_err=round(100 - top5.avg, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config['input_size'][-1],
cropt_pct=crop_pct,
interpolation=data_config['interpolation'])
logging.info(' * Prec@1 {:.3f} ({:.3f}) Prec@5 {:.3f} ({:.3f})'.format(
results['top1'], results['top1_err'], results['top5'], results['top5_err']))
return results
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + '/*.pth.tar')
checkpoints += glob.glob(args.checkpoint + '/*.pth')
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == 'all':
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(pretrained=True)
model_cfgs = [(n, '') for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model)
model_cfgs = [(n, '') for n in model_names]
if len(model_cfgs):
results_file = args.results_file or './results-all.csv'
logging.info('Running bulk validation on these pretrained models: {}'.format(', '.join(model_names)))
results = []
try:
for m, c in model_cfgs:
args.model = m
args.checkpoint = c
result = OrderedDict(model=args.model)
r = validate(args)
result.update(r)
if args.checkpoint:
result['checkpoint'] = args.checkpoint
results.append(result)
except KeyboardInterrupt as e:
pass
results = sorted(results, key=lambda x: x['top1'], reverse=True)
if len(results):
write_results(results_file, results)
else:
validate(args)
def write_results(results_file, results):
with open(results_file, mode='w') as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == '__main__':
main()
| [
"torch.nn.CrossEntropyLoss",
"torch.jit.script",
"torch.no_grad",
"torch.jit.optimized_execution"
] | 1.2.0 | FDSJK/pytorch-image-models | 5eb0e363a63e823f27810ea6bf5b6b8e136c4176 |
1.5 | import os
import gym
import numpy as np
import torch
from gym.spaces.box import Box
from gym.spaces.dict import Dict
from baselines import bench
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.vec_env import VecEnvWrapper
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
# from baselines.common.vec_env.shmem_vec_env import ShmemVecEnv
from pytorchBaselines.a2c_ppo_acktr.shmem_vec_env import ShmemVecEnv
from baselines.common.vec_env.vec_normalize import \
VecNormalize as VecNormalize_
try:
import dm_control2gym
except ImportError:
pass
try:
import roboschool
except ImportError:
pass
try:
import pybullet_envs
except ImportError:
pass
def make_env(env_id, seed, rank, log_dir, allow_early_resets, config=None, envNum=1, ax=None, test_case=-1):
def _thunk():
if env_id.startswith("dm"):
_, domain, task = env_id.split('.')
env = dm_control2gym.make(domain_name=domain, task_name=task)
else:
env = gym.make(env_id)
is_atari = hasattr(gym.envs, 'atari') and isinstance(
env.unwrapped, gym.envs.atari.atari_env.AtariEnv)
if is_atari:
env = make_atari(env_id)
env.configure(config)
envSeed = seed + rank if seed is not None else None
# environment.render_axis = ax
env.thisSeed = envSeed
env.nenv = envNum
if envNum > 1:
env.phase = 'train'
else:
env.phase = 'test'
if ax:
env.render_axis = ax
if test_case >= 0:
env.test_case = test_case
env.seed(seed + rank)
if str(env.__class__.__name__).find('TimeLimit') >= 0:
env = TimeLimitMask(env)
# if log_dir is not None:
env = bench.Monitor(
env,
None,
allow_early_resets=allow_early_resets)
print(env)
if isinstance(env.observation_space, Box):
if is_atari:
if len(env.observation_space.shape) == 3:
env = wrap_deepmind(env)
elif len(env.observation_space.shape) == 3:
raise NotImplementedError(
"CNN models work only for atari,\n"
"please use a custom wrapper for a custom pixel input env.\n"
"See wrap_deepmind for an example.")
# If the input has shape (W,H,3), wrap for PyTorch convolutions
obs_shape = env.observation_space.shape
if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:
env = TransposeImage(env, op=[2, 0, 1])
return env
return _thunk
def make_vec_envs(env_name,
seed,
num_processes,
gamma,
log_dir,
device,
allow_early_resets,
num_frame_stack=None,
config=None,
ax=None, test_case=-1):
envs = [
make_env(env_name, seed, i, log_dir, allow_early_resets, config=config,
envNum=num_processes, ax=ax, test_case=test_case)
for i in range(num_processes)
]
if len(envs) > 1:
envs = ShmemVecEnv(envs, context='fork')
else:
envs = DummyVecEnv(envs)
if isinstance(envs.observation_space, Box):
if len(envs.observation_space.shape) == 1:
if gamma is None:
envs = VecNormalize(envs, ret=False, ob=False)
else:
envs = VecNormalize(envs, gamma=gamma, ob=False, ret=False)
envs = VecPyTorch(envs, device)
if num_frame_stack is not None:
envs = VecPyTorchFrameStack(envs, num_frame_stack, device)
elif isinstance(envs.observation_space, Box):
if len(envs.observation_space.shape) == 3:
envs = VecPyTorchFrameStack(envs, 4, device)
return envs
# Checks whether done was caused by time limits or not
class TimeLimitMask(gym.Wrapper):
def step(self, action):
obs, rew, done, info = self.env.step(action)
if done and self.env._max_episode_steps == self.env._elapsed_steps:
info['bad_transition'] = True
return obs, rew, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
# Can be used to test recurrent policies for Reacher-v2
class MaskGoal(gym.ObservationWrapper):
def observation(self, observation):
if self.env._elapsed_steps > 0:
observation[-2:] = 0
return observation
class TransposeObs(gym.ObservationWrapper):
def __init__(self, env=None):
"""
Transpose observation space (base class)
"""
super(TransposeObs, self).__init__(env)
class TransposeImage(TransposeObs):
def __init__(self, env=None, op=[2, 0, 1]):
"""
Transpose observation space for images
"""
super(TransposeImage, self).__init__(env)
assert len(op) == 3, "Error: Operation, " + str(op) + ", must be dim3"
self.op = op
obs_shape = self.observation_space.shape
self.observation_space = Box(
self.observation_space.low[0, 0, 0],
self.observation_space.high[0, 0, 0], [
obs_shape[self.op[0]], obs_shape[self.op[1]],
obs_shape[self.op[2]]
],
dtype=self.observation_space.dtype)
def observation(self, ob):
return ob.transpose(self.op[0], self.op[1], self.op[2])
class VecPyTorch(VecEnvWrapper):
def __init__(self, venv, device):
"""Return only every `skip`-th frame"""
super(VecPyTorch, self).__init__(venv)
self.device = device
# TODO: Fix data types
def reset(self):
obs = self.venv.reset()
if isinstance(obs, dict):
for key in obs:
obs[key]=torch.from_numpy(obs[key]).to(self.device)
else:
obs = torch.from_numpy(obs).float().to(self.device)
return obs
def step_async(self, actions):
if isinstance(actions, torch.LongTensor):
# Squeeze the dimension for discrete actions
actions = actions.squeeze(1)
actions = actions.cpu().numpy()
self.venv.step_async(actions)
def step_wait(self):
obs, reward, done, info = self.venv.step_wait()
if isinstance(obs, dict):
for key in obs:
obs[key] = torch.from_numpy(obs[key]).to(self.device)
else:
obs = torch.from_numpy(obs).float().to(self.device)
reward = torch.from_numpy(reward).unsqueeze(dim=1).float()
return obs, reward, done, info
def render_traj(self, path, episode_num):
if self.venv.num_envs == 1:
return self.venv.envs[0].env.render_traj(path, episode_num)
else:
for i, curr_env in enumerate(self.venv.envs):
curr_env.env.render_traj(path, str(episode_num) + '.' + str(i))
class VecNormalize(VecNormalize_):
def __init__(self, *args, **kwargs):
super(VecNormalize, self).__init__(*args, **kwargs)
self.training = True
def _obfilt(self, obs, update=True):
if self.ob_rms:
if self.training and update:
self.ob_rms.update(obs)
obs = np.clip((obs - self.ob_rms.mean) /
np.sqrt(self.ob_rms.var + self.epsilon),
-self.clipob, self.clipob)
return obs
else:
return obs
def train(self):
self.training = True
def eval(self):
self.training = False
# Derived from
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py
class VecPyTorchFrameStack(VecEnvWrapper):
def __init__(self, venv, nstack, device=None):
self.venv = venv
self.nstack = nstack
wos = venv.observation_space # wrapped ob space
self.shape_dim0 = wos.shape[0]
low = np.repeat(wos.low, self.nstack, axis=0)
high = np.repeat(wos.high, self.nstack, axis=0)
if device is None:
device = torch.device('cpu')
self.stacked_obs = torch.zeros((venv.num_envs, ) +
low.shape).to(device)
observation_space = gym.spaces.Box(
low=low, high=high, dtype=venv.observation_space.dtype)
VecEnvWrapper.__init__(self, venv, observation_space=observation_space)
def step_wait(self):
obs, rews, news, infos = self.venv.step_wait()
self.stacked_obs[:, :-self.shape_dim0] = \
self.stacked_obs[:, self.shape_dim0:].clone()
for (i, new) in enumerate(news):
if new:
self.stacked_obs[i] = 0
self.stacked_obs[:, -self.shape_dim0:] = obs
return self.stacked_obs, rews, news, infos
def reset(self):
obs = self.venv.reset()
if torch.backends.cudnn.deterministic:
self.stacked_obs = torch.zeros(self.stacked_obs.shape)
else:
self.stacked_obs.zero_()
self.stacked_obs[:, -self.shape_dim0:] = obs
return self.stacked_obs
def close(self):
self.venv.close()
| [
"torch.zeros",
"torch.device",
"torch.from_numpy"
] | 1.5.0 | sriyash421/CrowdNav_DSRNN | 968f54f1f37ae65ee0a13a5a8e96eda1af1916ab |
1.3 | """File for accessing YOLOv5 via PyTorch Hub https://pytorch.org/hub/
Usage:
import torch
model = torch.hub.load('ultralytics/yolov5_', 'yolov5s', pretrained=True, channels=3, classes=80)
"""
dependencies = ['torch', 'yaml']
import torch
from models.yolo import Model
from utils import google_utils
def create(name, pretrained, channels, classes):
"""Creates a specified YOLOv5 model
Arguments:
name (str): name of model, i.e. 'yolov5s'
pretrained (bool): load pretrained weights into the model
channels (int): number of input channels
classes (int): number of model classes
Returns:
pytorch model
"""
model = Model('models/%s.yaml' % name, channels, classes)
if pretrained:
ckpt = '%s.pt' % name # checkpoint filename
google_utils.attempt_download(ckpt) # download if not found locally
state_dict = torch.load(ckpt)['model'].state_dict()
state_dict = {k: v for k, v in state_dict.items() if model.state_dict()[k].numel() == v.numel()} # filter
model.load_state_dict(state_dict, strict=False) # load
return model
def yolov5s(pretrained=False, channels=3, classes=80):
"""YOLOv5-small model from https://github.com/ultralytics/yolov5
Arguments:
pretrained (bool): load pretrained weights into the model, default=False
channels (int): number of input channels, default=3
classes (int): number of model classes, default=80
Returns:
pytorch model
"""
return create('yolov5s', pretrained, channels, classes)
def yolov5m(pretrained=False, channels=3, classes=80):
"""YOLOv5-medium model from https://github.com/ultralytics/yolov5
Arguments:
pretrained (bool): load pretrained weights into the model, default=False
channels (int): number of input channels, default=3
classes (int): number of model classes, default=80
Returns:
pytorch model
"""
return create('yolov5m', pretrained, channels, classes)
def yolov5l(pretrained=False, channels=3, classes=80):
"""YOLOv5-large model from https://github.com/ultralytics/yolov5
Arguments:
pretrained (bool): load pretrained weights into the model, default=False
channels (int): number of input channels, default=3
classes (int): number of model classes, default=80
Returns:
pytorch model
"""
return create('yolov5l', pretrained, channels, classes)
def yolov5x(pretrained=False, channels=3, classes=80):
"""YOLOv5-xlarge model from https://github.com/ultralytics/yolov5
Arguments:
pretrained (bool): load pretrained weights into the model, default=False
channels (int): number of input channels, default=3
classes (int): number of model classes, default=80
Returns:
pytorch model
"""
return create('yolov5x', pretrained, channels, classes)
| [
"torch.load"
] | 1.3.0 | paszti96/RODSIE_yolov5 | 2419802dbd897028f02ad45232342316d4a73233 |
1.8 | """ Test a finetuned model. """
import torch
import torch.nn.functional as F
import numpy as np
import pandas as pd
import wandb
from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, MBartConfig
from datasets import load_dataset
from itertools import combinations
import time
from common.preprocess import pad_sequence, filter_languages
from common.utils import accuracy_fn, to_devices
from common.metrics import BLEU
from common import data_logger as logging
from hyperparams.schedule import WarmupDecay
from finetune import LANG_CODES
from common.preprocess import detokenize
from common.utils import mask_after_stop
def main(params):
""" Evaluates a finetuned model on the test or validation dataset."""
# load model and tokenizer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50")
config = MBartConfig.from_pretrained("facebook/mbart-large-50")
model = MBartForConditionalGeneration(config).to(device)
checkpoint_location = params.location+'/'+params.name+'/checkpoint/checkpoint'
model, _, _, _ = logging.load_checkpoint(checkpoint_location, device, model)
def pipeline(dataset, langs, batch_size, max_len):
cols = ['input_ids_' + l for l in langs]
def tokenize_fn(example):
"""apply tokenization"""
l_tok = []
for lang in langs:
encoded = tokenizer.encode(example[lang])
encoded[0] = tokenizer.lang_code_to_id[LANG_CODES[lang]]
l_tok.append(encoded)
return {'input_ids_' + l: tok for l, tok in zip(langs, l_tok)}
def pad_seqs(examples):
"""Apply padding"""
ex_langs = list(zip(*[tuple(ex[col] for col in cols) for ex in examples]))
ex_langs = tuple(pad_sequence(x, batch_first=True, max_len=max_len) for x in ex_langs)
return ex_langs
dataset = filter_languages(dataset, langs)
dataset = dataset.map(tokenize_fn)
dataset.set_format(type='torch', columns=cols)
num_examples = len(dataset)
print('-'.join(langs) + ' : {} examples.'.format(num_examples))
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
collate_fn=pad_seqs)
return dataloader, num_examples
# load data
if params.split == 'val':
test_dataset = load_dataset('ted_multi', split='validation')
elif params.split == 'test':
test_dataset = load_dataset('ted_multi', split='test')
elif params.split == 'combine':
test_dataset = load_dataset('ted_multi', split='validation+test')
else:
raise NotImplementedError
# preprocess splits for each direction
test_dataloaders = {}
for l1, l2 in combinations(params.langs, 2):
test_dataloaders[l1+'-'+l2], _ = pipeline(test_dataset, [l1, l2], params.batch_size, params.max_len)
# evaluate the model
def evaluate(x, y, y_code, bleu):
y_inp, y_tar = y[:,:-1].contiguous(), y[:,1:].contiguous()
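# y_tar drops the leading language-code token so it lines up with the generated tokens scored by BLEU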
enc_mask = (x != 0)
x, y_inp, y_tar, enc_mask = to_devices(
(x, y_inp, y_tar, enc_mask), device)
model.eval()
y_pred = model.generate(input_ids=x, decoder_start_token_id=y_code,
attention_mask=enc_mask, max_length=x.size(1)+1,
num_beams=params.num_beams, length_penalty=params.length_penalty,
early_stopping=True)
bleu(y_pred[:, 1:], y_tar)
test_results = {}
for direction, loader in test_dataloaders.items():
alt_direction = '-'.join(reversed(direction.split('-')))
bleu1, bleu2 = BLEU(), BLEU()
bleu1.set_excluded_indices([0, 2])
bleu2.set_excluded_indices([0, 2])
x_code = tokenizer.lang_code_to_id[LANG_CODES[direction.split('-')[0]]]
y_code = tokenizer.lang_code_to_id[LANG_CODES[direction.split('-')[-1]]]
start_ = time.time()
for i, (x, y) in enumerate(loader):
if params.test_batches is not None:
if i > params.test_batches:
break
evaluate(x, y, y_code, bleu1)
if not params.single_direction:
evaluate(y, x, x_code, bleu2)
if i % params.verbose == 0:
bl1, bl2 = bleu1.get_metric(), bleu2.get_metric()
print('Batch {} Bleu1 {:.4f} Bleu2 {:.4f} in {:.4f} secs per batch'.format(
i, bl1, bl2, (time.time() - start_)/(i+1)))
bl1, bl2 = bleu1.get_metric(), bleu2.get_metric()
test_results[direction] = [bl1]
test_results[alt_direction] = [bl2]
print(direction, bl1, bl2)
# save test_results
pd.DataFrame(test_results).to_csv(params.location+'/'+params.name+'/test_results.csv', index=False)
class ExamplesLogger:
def __init__(self, params):
self.params = params
self.input_examples = []
self.target_examples = []
self.pred_examples = []
def log_examples(self, input_batch, target_batch, prediction_batch, tokenizer):
prediction_batch = mask_after_stop(prediction_batch, stop_token=2)
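# a two-element list means separate tokenizers were passed for the source and target languages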
if isinstance(tokenizer, list):
inp_tokenizer = tokenizer[0]
out_tokenizer = tokenizer[1]
else:
inp_tokenizer = tokenizer
out_tokenizer = tokenizer
det_input = str(detokenize(input_batch, inp_tokenizer, batch=False)[0])
det_target = str(detokenize(target_batch, out_tokenizer, batch=False)[0])
det_pred = str(detokenize(prediction_batch, out_tokenizer, batch=False)[0])
self.target_examples.append(det_target)
self.pred_examples.append(det_pred)
self.input_examples.append(det_input)
def dump_examples(self):
with open(self.params.location+'/'+self.params.name + '_examples.txt', 'w') as f:
for inp, pred, target in zip(self.input_examples, self.pred_examples, self.target_examples):
f.write("Input: {} \n \n".format(inp))
f.write("Target: {} \n \n".format(target))
f.write("Prediction: {} \n \n".format(pred))
f.write("---------------------------------- \n \n")
if __name__ == '__main__':
from common.finetune_arguments import parser
params = parser.parse_args()
main(params)
| [
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.8.1 | IanYHWu/tied-representation-learning | bda9814dc40cf552f7bdd2ade78f5e2958a7ea83 |
1.3 | import torch
def euclidean_metric(a, b):
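# pairwise L2 (Euclidean) distance matrix between the rows of a and b: (n, d) x (m, d) -> (n, m)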
return torch.cdist(a, b, p=2) | [
"torch.cdist"
] | 1.3.0 | aiyolo/prototypical-network-pytorch-lightning | e6fb9397f98314cd8f4b42282ca3f28e46c51d4a |
1.8 | import torch
import torch.nn as nn
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.l1 = nn.Linear(input_size, hidden_size)
self.l2 = nn.Linear(hidden_size, hidden_size)
self.l3 = nn.Linear(hidden_size, num_classes)
self.relu = nn.ReLU()
def forward(self, x):
out = self.l1(x)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
# no activation and no softmax at the end
return out | [
"torch.nn.Linear",
"torch.nn.ReLU"
] | 1.8.1 | thomasbreydo/mental-health-nlp-chatbot | 62cb6f558f8e3282f24f4699770e838a272cc346 |
1.5 | import torch.utils.data as torchdata
from modules.data.utils import collate_fn
from modules.data.dataset import ICDAR
class ICDARDataLoader:
def __init__(self, config):
self.config = config
self.batch_size = config['data_loader']['batch_size']
self.shuffle = config['data_loader']['shuffle']
self.num_workers = config['data_loader']['workers']
self.val_split_ratio = self.config['validation']['validation_split']
data_root = config['data_loader']['data_dir']
input_size = config['data_loader']['input_size']
icdar_dataset = ICDAR(data_root, input_size)
if self.val_split_ratio > 0:
self.__train_set, self.__val_set = self.__train_val_split(icdar_dataset)
else:
self.__train_set = icdar_dataset
def train(self):
trainLoader = torchdata.DataLoader(self.__train_set, num_workers=self.num_workers, batch_size=self.batch_size,
shuffle=self.shuffle, collate_fn=collate_fn)
return trainLoader
def val(self):
assert self.val_split_ratio > 0, 'Error: validation_split must be > 0 to call val()'
shuffle = self.config['validation']['shuffle']
valLoader = torchdata.DataLoader(self.__val_set, num_workers=self.num_workers, batch_size=self.batch_size,
shuffle=shuffle, collate_fn=collate_fn)
return valLoader
def __train_val_split(self, ds):
"""
:param ds: dataset to split
:return: a tuple (train_subset, val_subset) produced by torch.utils.data.random_split
"""
split = self.val_split_ratio
try:
split = float(split)
except:
raise RuntimeError('Train and val splitting ratio is invalid.')
val_len = int(split * len(ds))
train_len = len(ds) - val_len
train, val = torchdata.random_split(ds, [train_len, val_len])
return train, val
| [
"torch.utils.data.random_split",
"torch.utils.data.DataLoader"
] | 1.5.0 | ishin-pie/e2e-scene-text-spotting | a3f5ba1f486c5d52bb6263aff6663a03ab4effbf |
0.2 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
import numpy as np
import torch
T = TypeVar("T")
V = TypeVar("V")
K = TypeVar("K")
X = TypeVar("X")
Y = TypeVar("Y")
def not_none(val: Optional[T]) -> T:
"""
Unbox an optional type.
Args:
val: the value to cast to a non ``None`` type
Returns:
V: ``val`` when ``val`` is not ``None``
Throws:
ValueError if ``val`` is ``None``
"""
if val is None:
raise ValueError("Argument to `not_none` was None.")
return val
def checked_cast(typ: Type[T], val: V) -> T:
"""
Cast a value to a type (with a runtime safety check).
Returns the value unchanged and checks its type at runtime. This signals to the
typechecker that the value has the designated type.
Like `typing.cast`_ ``check_cast`` performs no runtime conversion on its argument,
but, unlike ``typing.cast``, ``checked_cast`` will throw an error if the value is
not of the expected type. The type passed as an argument should be a python class.
Args:
typ: the type to cast to
val: the value that we are casting
Returns:
the ``val`` argument, unchanged
.. _typing.cast: https://docs.python.org/3/library/typing.html#typing.cast
"""
if not isinstance(val, typ):
raise ValueError(f"Value was not of type {type}:\n{val}")
return val
def checked_cast_optional(typ: Type[T], val: Optional[V]) -> Optional[T]:
"""Calls checked_cast only if value is not None."""
if val is None:
return val
return checked_cast(typ, val)
def checked_cast_list(typ: Type[T], old_l: List[V]) -> List[T]:
"""Calls checked_cast on all items in a list."""
new_l = []
for val in old_l:
val = checked_cast(typ, val)
new_l.append(val)
return new_l
def checked_cast_dict(
key_typ: Type[K], value_typ: Type[V], d: Dict[X, Y]
) -> Dict[K, V]:
"""Calls checked_cast on all keys and values in the dictionary."""
new_dict = {}
for key, val in d.items():
val = checked_cast(value_typ, val)
key = checked_cast(key_typ, key)
new_dict[key] = val
return new_dict
# pyre-fixme[34]: `T` isn't present in the function's parameters.
def checked_cast_to_tuple(typ: Tuple[Type[V], ...], val: V) -> T:
"""
Cast a value to a union of multiple types (with a runtime safety check).
This function is similar to `checked_cast`, but allows for the type to be
defined as a tuple of types, in which case the value is cast as a union of
the types in the tuple.
Args:
typ: the tuple of types to cast to
val: the value that we are casting
Returns:
the ``val`` argument, unchanged
"""
if not isinstance(val, typ):
raise ValueError(f"Value was not of type {type!r}:\n{val!r}")
# pyre-fixme[7]: Expected `T` but got `V`.
return val
def numpy_type_to_python_type(value: Any) -> Any:
"""If `value` is a Numpy int or float, coerce to a Python int or float.
This is necessary because some of our transforms return Numpy values.
"""
if isinstance(value, np.integer):
value = int(value) # pragma: nocover (covered by generator tests)
if isinstance(value, np.floating):
value = float(value) # pragma: nocover (covered by generator tests)
return value
def torch_type_to_str(value: Any) -> str:
"""Converts torch types, commonly used in Ax, to string representations."""
if isinstance(value, torch.dtype):
return str(value)
if isinstance(value, torch.device):
return checked_cast(str, value.type) # pyre-fixme[16]: device has no attr. type
raise ValueError(f"Object {value} was of unexpected torch type.")
def torch_type_from_str(
identifier: str, type_name: str
) -> Union[torch.dtype, torch.device]:
if type_name == "device":
return torch.device(identifier)
if type_name == "dtype":
return getattr(torch, identifier[6:])
raise ValueError(f"Unexpected type: {type_name} for identifier: {identifier}.")
| [
"torch.device"
] | 0.2.2 | EricZLou/Ax | 3f8fc6f4a055e93cb69fda3799be41ee9572ef02 |
1.4 | import numpy as np
import torch
from dg_util.python_utils import misc_util
from dg_util.python_utils import pytorch_util as pt_util
from dg_util.python_utils.tensor_dataset import TensorDataset
class NPZDataset(TensorDataset):
"""
Convenience class for fast reading of saved numpy image arrays without the need for slicing and concating.
"""
def __init__(self, args, path, data_subset, num_data_points=None, contiguous=True):
with torch.no_grad():
self.args = args
self.data_subset = data_subset
npz_dataset = np.load(path.format(data_subset=data_subset))
data = npz_dataset["data"]
labels = pt_util.from_numpy(npz_dataset["labels"])
if num_data_points is None:
num_data_points = len(data)
if num_data_points < len(data):
np.random.seed(0)
rand_inds = np.random.choice(len(data), num_data_points, replace=False)
data = data[rand_inds]
labels = labels[rand_inds]
assert len(data.shape) == 4
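# arrays stored channels-first (N, C, H, W) are converted to channels-last before resizing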
if data.shape[1] == 3:
data = data.transpose(0, 2, 3, 1)
data = misc_util.resize(data, (args.input_width, args.input_height), height_channel=1, width_channel=2)
data = pt_util.from_numpy(data).permute(0, 3, 1, 2)
if contiguous:
data = data.contiguous()
super(NPZDataset, self).__init__(data, labels, self.args.batch_size)
| [
"torch.no_grad"
] | 1.4.0 | gabrielsluz/vince | f4e17a2cf70c080a7e01e46d15537e33224c869b |
1.1 | from typing import List, Dict, Tuple
import pickle
import torch
import numpy as np
from ditk import logging
from copy import deepcopy
from easydict import EasyDict
from torch.utils.data import Dataset
from ding.utils import DATASET_REGISTRY, import_module
from ding.rl_utils import discount_cumsum
@DATASET_REGISTRY.register('naive')
class NaiveRLDataset(Dataset):
def __init__(self, cfg) -> None:
assert type(cfg) in [str, EasyDict], "invalid cfg type: {}".format(type(cfg))
if isinstance(cfg, EasyDict):
self._data_path = cfg.policy.collect.data_path
elif isinstance(cfg, str):
self._data_path = cfg
with open(self._data_path, 'rb') as f:
self._data: List[Dict[str, torch.Tensor]] = pickle.load(f)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
return self._data[idx]
@DATASET_REGISTRY.register('d4rl')
class D4RLDataset(Dataset):
def __init__(self, cfg: dict) -> None:
import gym
try:
import d4rl # register d4rl environments with OpenAI Gym
except ImportError:
logging.warning("not found d4rl env, please install it, refer to https://github.com/rail-berkeley/d4rl")
# Init parameters
data_path = cfg.policy.collect.get('data_path', None)
env_id = cfg.env.env_id
# Create the environment
if data_path:
d4rl.set_dataset_path(data_path)
env = gym.make(env_id)
dataset = d4rl.qlearning_dataset(env)
if cfg.policy.collect.get('normalize_states', None):
dataset = self._normalize_states(dataset)
self._data = []
self._load_d4rl(dataset)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
return self._data[idx]
def _load_d4rl(self, dataset: Dict[str, np.ndarray]) -> None:
for i in range(len(dataset['observations'])):
trans_data = {}
trans_data['obs'] = torch.from_numpy(dataset['observations'][i])
trans_data['next_obs'] = torch.from_numpy(dataset['next_observations'][i])
trans_data['action'] = torch.from_numpy(dataset['actions'][i])
trans_data['reward'] = torch.tensor(dataset['rewards'][i])
trans_data['done'] = dataset['terminals'][i]
self._data.append(trans_data)
def _normalize_states(self, dataset, eps=1e-3):
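# standardize observations; eps keeps the std denominator away from zero for constant dimensions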
self._mean = dataset['observations'].mean(0, keepdims=True)
self._std = dataset['observations'].std(0, keepdims=True) + eps
dataset['observations'] = (dataset['observations'] - self._mean) / self._std
dataset['next_observations'] = (dataset['next_observations'] - self._mean) / self._std
return dataset
@property
def mean(self):
return self._mean
@property
def std(self):
return self._std
@DATASET_REGISTRY.register('hdf5')
class HDF5Dataset(Dataset):
def __init__(self, cfg: dict) -> None:
try:
import h5py
except ImportError:
logging.warning("not found h5py package, please install it trough 'pip install h5py' ")
data_path = cfg.policy.collect.get('data_path', None)
data = h5py.File(data_path, 'r')
self._load_data(data)
if cfg.policy.collect.get('normalize_states', None):
self._normalize_states()
def __len__(self) -> int:
return len(self._data['obs'])
def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
return {k: self._data[k][idx] for k in self._data.keys()}
def _load_data(self, dataset: Dict[str, np.ndarray]) -> None:
self._data = {}
for k in dataset.keys():
logging.info(f'Load {k} data.')
self._data[k] = dataset[k][:]
def _normalize_states(self, eps=1e-3):
self._mean = self._data['obs'].mean(0, keepdims=True)
self._std = self._data['obs'].std(0, keepdims=True) + eps
self._data['obs'] = (self._data['obs'] - self._mean) / self._std
self._data['next_obs'] = (self._data['next_obs'] - self._mean) / self._std
@property
def mean(self):
return self._mean
@property
def std(self):
return self._std
@DATASET_REGISTRY.register('d4rl_trajectory')
class D4RLTrajectoryDataset(Dataset):
# from infos.py from official d4rl github repo
REF_MIN_SCORE = {
'halfcheetah': -280.178953,
'walker2d': 1.629008,
'hopper': -20.272305,
}
REF_MAX_SCORE = {
'halfcheetah': 12135.0,
'walker2d': 4592.3,
'hopper': 3234.3,
}
# calculated from d4rl datasets
D4RL_DATASET_STATS = {
'halfcheetah-medium-v2': {
'state_mean': [
-0.06845773756504059, 0.016414547339081764, -0.18354906141757965, -0.2762460708618164,
-0.34061527252197266, -0.09339715540409088, -0.21321271359920502, -0.0877423882484436,
5.173007488250732, -0.04275195300579071, -0.036108363419771194, 0.14053793251514435,
0.060498327016830444, 0.09550975263118744, 0.06739100068807602, 0.005627387668937445,
0.013382787816226482
],
'state_std': [
0.07472999393939972, 0.3023499846458435, 0.30207309126853943, 0.34417077898979187, 0.17619241774082184,
0.507205605506897, 0.2567007839679718, 0.3294812738895416, 1.2574149370193481, 0.7600541710853577,
1.9800915718078613, 6.565362453460693, 7.466367721557617, 4.472222805023193, 10.566964149475098,
5.671932697296143, 7.4982590675354
]
},
'halfcheetah-medium-replay-v2': {
'state_mean': [
-0.12880703806877136, 0.3738119602203369, -0.14995987713336945, -0.23479078710079193,
-0.2841278612613678, -0.13096535205841064, -0.20157982409000397, -0.06517726927995682,
3.4768247604370117, -0.02785065770149231, -0.015035249292850494, 0.07697279006242752,
0.01266712136566639, 0.027325302362442017, 0.02316424623131752, 0.010438721626996994,
-0.015839405357837677
],
'state_std': [
0.17019015550613403, 1.284424901008606, 0.33442774415016174, 0.3672759234905243, 0.26092398166656494,
0.4784106910228729, 0.3181420564651489, 0.33552637696266174, 2.0931615829467773, 0.8037433624267578,
1.9044333696365356, 6.573209762573242, 7.572863578796387, 5.069749355316162, 9.10555362701416,
6.085654258728027, 7.25300407409668
]
},
'halfcheetah-medium-expert-v2': {
'state_mean': [
-0.05667462572455406, 0.024369969964027405, -0.061670560389757156, -0.22351515293121338,
-0.2675151228904724, -0.07545716315507889, -0.05809682980179787, -0.027675075456500053,
8.110626220703125, -0.06136331334710121, -0.17986927926540375, 0.25175222754478455, 0.24186332523822784,
0.2519369423389435, 0.5879552960395813, -0.24090635776519775, -0.030184272676706314
],
'state_std': [
0.06103534251451492, 0.36054104566574097, 0.45544400811195374, 0.38476887345314026, 0.2218363732099533,
0.5667523741722107, 0.3196682929992676, 0.2852923572063446, 3.443821907043457, 0.6728139519691467,
1.8616976737976074, 9.575807571411133, 10.029894828796387, 5.903450012207031, 12.128185272216797,
6.4811787605285645, 6.378620147705078
]
},
'walker2d-medium-v2': {
'state_mean': [
1.218966007232666, 0.14163373410701752, -0.03704913705587387, -0.13814310729503632, 0.5138224363327026,
-0.04719110205769539, -0.47288352251052856, 0.042254164814949036, 2.3948874473571777,
-0.03143199160695076, 0.04466355964541435, -0.023907244205474854, -0.1013401448726654,
0.09090937674045563, -0.004192637279629707, -0.12120571732521057, -0.5497063994407654
],
'state_std': [
0.12311358004808426, 0.3241879940032959, 0.11456084251403809, 0.2623065710067749, 0.5640279054641724,
0.2271878570318222, 0.3837319612503052, 0.7373676896095276, 1.2387926578521729, 0.798020601272583,
1.5664079189300537, 1.8092705011367798, 3.025604248046875, 4.062486171722412, 1.4586567878723145,
3.7445690631866455, 5.5851287841796875
]
},
'walker2d-medium-replay-v2': {
'state_mean': [
1.209364652633667, 0.13264022767543793, -0.14371201395988464, -0.2046516090631485, 0.5577612519264221,
-0.03231537342071533, -0.2784661054611206, 0.19130706787109375, 1.4701707363128662,
-0.12504704296588898, 0.0564953051507473, -0.09991033375263214, -0.340340256690979, 0.03546293452382088,
-0.08934258669614792, -0.2992438077926636, -0.5984178185462952
],
'state_std': [
0.11929835379123688, 0.3562574088573456, 0.25852200388908386, 0.42075422406196594, 0.5202291011810303,
0.15685082972049713, 0.36770978569984436, 0.7161387801170349, 1.3763766288757324, 0.8632221817970276,
2.6364643573760986, 3.0134117603302, 3.720684051513672, 4.867283821105957, 2.6681625843048096,
3.845186948776245, 5.4768385887146
]
},
'walker2d-medium-expert-v2': {
'state_mean': [
1.2294334173202515, 0.16869689524173737, -0.07089081406593323, -0.16197483241558075,
0.37101927399635315, -0.012209027074277401, -0.42461398243904114, 0.18986578285694122,
3.162475109100342, -0.018092676997184753, 0.03496946766972542, -0.013921679928898811,
-0.05937029421329498, -0.19549426436424255, -0.0019200450042262673, -0.062483321875333786,
-0.27366524934768677
],
'state_std': [
0.09932824969291687, 0.25981399416923523, 0.15062759816646576, 0.24249176681041718, 0.6758718490600586,
0.1650741547346115, 0.38140663504600525, 0.6962361335754395, 1.3501490354537964, 0.7641991376876831,
1.534574270248413, 2.1785972118377686, 3.276582717895508, 4.766193866729736, 1.1716983318328857,
4.039782524108887, 5.891613960266113
]
},
'hopper-medium-v2': {
'state_mean': [
1.311279058456421, -0.08469521254301071, -0.5382719039916992, -0.07201576232910156, 0.04932365566492081,
2.1066856384277344, -0.15017354488372803, 0.008783451281487942, -0.2848185896873474,
-0.18540096282958984, -0.28461286425590515
],
'state_std': [
0.17790751159191132, 0.05444620922207832, 0.21297138929367065, 0.14530418813228607, 0.6124444007873535,
0.8517446517944336, 1.4515252113342285, 0.6751695871353149, 1.5362390279769897, 1.616074562072754,
5.607253551483154
]
},
'hopper-medium-replay-v2': {
'state_mean': [
1.2305138111114502, -0.04371410980820656, -0.44542956352233887, -0.09370097517967224,
0.09094487875699997, 1.3694725036621094, -0.19992674887180328, -0.022861352190375328,
-0.5287045240402222, -0.14465883374214172, -0.19652697443962097
],
'state_std': [
0.1756512075662613, 0.0636928603053093, 0.3438323438167572, 0.19566889107227325, 0.5547984838485718,
1.051029920578003, 1.158307671546936, 0.7963128685951233, 1.4802359342575073, 1.6540331840515137,
5.108601093292236
]
},
'hopper-medium-expert-v2': {
'state_mean': [
1.3293815851211548, -0.09836531430482864, -0.5444297790527344, -0.10201650857925415,
0.02277466468513012, 2.3577215671539307, -0.06349576264619827, -0.00374026270583272,
-0.1766270101070404, -0.11862941086292267, -0.12097819894552231
],
'state_std': [
0.17012375593185425, 0.05159067362546921, 0.18141433596611023, 0.16430604457855225, 0.6023368239402771,
0.7737284898757935, 1.4986555576324463, 0.7483318448066711, 1.7953159809112549, 2.0530025959014893,
5.725032806396484
]
},
}
def __init__(self, dataset_path: str, context_len: int, rtg_scale: float) -> None:
self.context_len = context_len
# load dataset
with open(dataset_path, 'rb') as f:
self.trajectories = pickle.load(f)
if isinstance(self.trajectories[0], list):
# for our collected dataset, e.g. cartpole/lunarlander case
trajectories_tmp = []
original_keys = ['obs', 'next_obs', 'action', 'reward']
keys = ['observations', 'next_observations', 'actions', 'rewards']
# build one dict per episode containing all keys, stacking its transitions into arrays
trajectories_tmp = [
{
key: np.stack(
[
self.trajectories[eps_index][transition_index][o_key]
for transition_index in range(len(self.trajectories[eps_index]))
],
axis=0
)
for key, o_key in zip(keys, original_keys)
} for eps_index in range(len(self.trajectories))
]
self.trajectories = trajectories_tmp
states = []
for traj in self.trajectories:
traj_len = traj['observations'].shape[0]
states.append(traj['observations'])
# calculate returns to go and rescale them
traj['returns_to_go'] = discount_cumsum(traj['rewards'], 1.0) / rtg_scale
# used for input normalization
states = np.concatenate(states, axis=0)
self.state_mean, self.state_std = np.mean(states, axis=0), np.std(states, axis=0) + 1e-6
# normalize states
for traj in self.trajectories:
traj['observations'] = (traj['observations'] - self.state_mean) / self.state_std
def get_state_stats(self) -> Tuple[np.ndarray, np.ndarray]:
return deepcopy(self.state_mean), deepcopy(self.state_std)
def get_d4rl_dataset_stats(self, env_d4rl_name: str) -> Dict[str, list]:
return self.D4RL_DATASET_STATS[env_d4rl_name]
def __len__(self) -> int:
return len(self.trajectories)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
traj = self.trajectories[idx]
traj_len = traj['observations'].shape[0]
if traj_len >= self.context_len:
# sample random index to slice trajectory
si = np.random.randint(0, traj_len - self.context_len)
states = torch.from_numpy(traj['observations'][si:si + self.context_len])
actions = torch.from_numpy(traj['actions'][si:si + self.context_len])
returns_to_go = torch.from_numpy(traj['returns_to_go'][si:si + self.context_len])
timesteps = torch.arange(start=si, end=si + self.context_len, step=1)
# all ones since no padding
traj_mask = torch.ones(self.context_len, dtype=torch.long)
else:
padding_len = self.context_len - traj_len
# padding with zeros
states = torch.from_numpy(traj['observations'])
states = torch.cat(
[states, torch.zeros(([padding_len] + list(states.shape[1:])), dtype=states.dtype)], dim=0
)
actions = torch.from_numpy(traj['actions'])
actions = torch.cat(
[actions, torch.zeros(([padding_len] + list(actions.shape[1:])), dtype=actions.dtype)], dim=0
)
returns_to_go = torch.from_numpy(traj['returns_to_go'])
returns_to_go = torch.cat(
[
returns_to_go,
torch.zeros(([padding_len] + list(returns_to_go.shape[1:])), dtype=returns_to_go.dtype)
],
dim=0
)
timesteps = torch.arange(start=0, end=self.context_len, step=1)
traj_mask = torch.cat(
[torch.ones(traj_len, dtype=torch.long),
torch.zeros(padding_len, dtype=torch.long)], dim=0
)
return timesteps, states, actions, returns_to_go, traj_mask
def hdf5_save(exp_data, expert_data_path):
try:
import h5py
except ImportError:
logging.warning("not found h5py package, please install it trough 'pip install h5py' ")
import numpy as np
dataset = h5py.File('%s_demos.hdf5' % expert_data_path.replace('.pkl', ''), 'w')
dataset.create_dataset('obs', data=np.array([d['obs'].numpy() for d in exp_data]), compression='gzip')
dataset.create_dataset('action', data=np.array([d['action'].numpy() for d in exp_data]), compression='gzip')
dataset.create_dataset('reward', data=np.array([d['reward'].numpy() for d in exp_data]), compression='gzip')
dataset.create_dataset('done', data=np.array([d['done'] for d in exp_data]), compression='gzip')
dataset.create_dataset('next_obs', data=np.array([d['next_obs'].numpy() for d in exp_data]), compression='gzip')
def naive_save(exp_data, expert_data_path):
with open(expert_data_path, 'wb') as f:
pickle.dump(exp_data, f)
def offline_data_save_type(exp_data, expert_data_path, data_type='naive'):
globals()[data_type + '_save'](exp_data, expert_data_path)
def create_dataset(cfg, **kwargs) -> Dataset:
cfg = EasyDict(cfg)
import_module(cfg.get('import_names', []))
return DATASET_REGISTRY.build(cfg.policy.collect.data_type, cfg=cfg, **kwargs)
| [
"torch.zeros",
"torch.arange",
"torch.from_numpy",
"torch.ones",
"torch.tensor"
] | 1.1.0 | kxzxvbk/DI-engine | 268d77db3cb54401b2cfc83e2bc3ec87c31e7b83 |
1.1 | """The code is adapted from https://github.com/nikhilbarhate99/min-decision-transformer
"""
from typing import List, Dict, Any, Tuple, Union
from collections import namedtuple
from torch.distributions import Normal, Independent
from ding.torch_utils import Adam, to_device
from ditk import logging
from ding.rl_utils import v_1step_td_data, v_1step_td_error, get_train_sample, \
qrdqn_nstep_td_data, qrdqn_nstep_td_error, get_nstep_return_data
from ding.model import model_wrap
from ding.utils.data.dataset import D4RLTrajectoryDataset
from ding.utils import POLICY_REGISTRY
from ding.utils.data import default_collate, default_decollate
from datetime import datetime
from ding.torch_utils import one_hot
import numpy as np
import torch.nn.functional as F
import torch
import gym
import copy
import os
import csv
from .dqn import DQNPolicy
@POLICY_REGISTRY.register('dt')
class DTPolicy(DQNPolicy):
r"""
Overview:
Policy class of DT algorithm in discrete environments.
"""
config = dict(
# (str) RL policy register name (refer to function "POLICY_REGISTRY").
type='dt',
# (bool) Whether to use cuda for network.
cuda=False,
# (bool) Whether the RL algorithm is on-policy or off-policy.
on_policy=False,
# (bool) Whether use priority(priority sample, IS weight, update priority)
        # (bool) Whether to use priority (priority sample, IS weight, update priority)
        priority=False,
# (float) Reward's future discount factor, aka. gamma.
discount_factor=0.97,
# (int) N-step reward for target q_value estimation
nstep=1,
obs_shape=4,
action_shape=2,
# encoder_hidden_size_list=[128, 128, 64],
dataset='medium', # medium / medium-replay / medium-expert
rtg_scale=1000, # normalize returns to go
max_eval_ep_len=1000, # max len of one episode
num_eval_ep=10, # num of evaluation episodes
batch_size=64, # training batch size
wt_decay=1e-4,
warmup_steps=10000,
max_train_iters=200,
context_len=20,
n_blocks=3,
embed_dim=128,
dropout_p=0.1,
learn=dict(
# (bool) Whether to use multi gpu
multi_gpu=False,
# batch_size=64,
learning_rate=1e-4,
# ==============================================================
# The following configs are algorithm-specific
# ==============================================================
),
# collect_mode config
collect=dict(),
eval=dict(),
# other config
other=dict(),
)
def _init_learn(self) -> None:
r"""
Overview:
Learn mode init method. Called by ``self.__init__``.
Init the optimizer, algorithm config, main and target models.
"""
self.stop_value = self._cfg.stop_value
self.env_name = self._cfg.env_name
dataset = self._cfg.dataset # medium / medium-replay / medium-expert
# rtg_scale: scale of `return to go`
# rtg_target: max target of `return to go`
        # Our goal is to normalize `return to go` to (0, 1), which favours convergence.
# As a result, we usually set rtg_scale == rtg_target.
self.rtg_scale = self._cfg.rtg_target # normalize returns to go
self.rtg_target = self._cfg.rtg_target # max target reward_to_go
self.max_eval_ep_len = self._cfg.max_eval_ep_len # max len of one episode
self.num_eval_ep = self._cfg.num_eval_ep # num of evaluation episodes
lr = self._cfg.learn.learning_rate # learning rate
wt_decay = self._cfg.wt_decay # weight decay
warmup_steps = self._cfg.warmup_steps # warmup steps for lr scheduler
max_train_iters = self._cfg.max_train_iters
self.context_len = self._cfg.context_len # K in decision transformer
n_blocks = self._cfg.n_blocks # num of transformer blocks
embed_dim = self._cfg.embed_dim # embedding (hidden) dim of transformer
dropout_p = self._cfg.dropout_p # dropout probability
# # load data from this file
# dataset_path = f'{self._cfg.dataset_dir}/{env_d4rl_name}.pkl'
# saves model and csv in this directory
self.log_dir = self._cfg.log_dir
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
# training and evaluation device
self.device = torch.device(self._device)
self.start_time = datetime.now().replace(microsecond=0)
self.start_time_str = self.start_time.strftime("%y-%m-%d-%H-%M-%S")
# prefix = "dt_" + env_d4rl_name
self.prefix = "dt_" + self.env_name
save_model_name = self.prefix + "_model_" + self.start_time_str + ".pt"
self.save_model_path = os.path.join(self.log_dir, save_model_name)
self.save_best_model_path = self.save_model_path[:-3] + "_best.pt"
log_csv_name = self.prefix + "_log_" + self.start_time_str + ".csv"
log_csv_path = os.path.join(self.log_dir, log_csv_name)
self.csv_writer = csv.writer(open(log_csv_path, 'a', 1))
csv_header = (["duration", "num_updates", "eval_avg_reward", "eval_avg_ep_len", "eval_d4rl_score"])
self.csv_writer.writerow(csv_header)
dataset_path = self._cfg.learn.dataset_path
logging.info("=" * 60)
logging.info("start time: " + self.start_time_str)
logging.info("=" * 60)
logging.info("device set to: " + str(self.device))
logging.info("dataset path: " + dataset_path)
logging.info("model save path: " + self.save_model_path)
logging.info("log csv save path: " + log_csv_path)
self._env = gym.make(self.env_name)
self.state_dim = self._cfg.model.state_dim
self.act_dim = self._cfg.model.act_dim
self._learn_model = self._model
self._optimizer = torch.optim.AdamW(self._learn_model.parameters(), lr=lr, weight_decay=wt_decay)
self._scheduler = torch.optim.lr_scheduler.LambdaLR(
self._optimizer, lambda steps: min((steps + 1) / warmup_steps, 1)
)
self.max_env_score = -1.0
def _forward_learn(self, data: list) -> Dict[str, Any]:
r"""
Overview:
Forward and backward function of learn mode.
Arguments:
            - data (:obj:`list`): A batch of trajectory data: (timesteps, states, actions, returns_to_go, traj_mask)
Returns:
- info_dict (:obj:`Dict[str, Any]`): Including current lr and loss.
"""
self._learn_model.train()
timesteps, states, actions, returns_to_go, traj_mask = data
timesteps = timesteps.to(self.device) # B x T
states = states.to(self.device) # B x T x state_dim
actions = actions.to(self.device) # B x T x act_dim
returns_to_go = returns_to_go.to(self.device) # B x T x 1
traj_mask = traj_mask.to(self.device) # B x T
action_target = torch.clone(actions).detach().to(self.device)
        # The shape of `returns_to_go` may differ across datasets (B x T or B x T x 1),
        # and we need a 3-dim tensor
if len(returns_to_go.shape) == 2:
returns_to_go = returns_to_go.unsqueeze(-1)
# if discrete
if not self._cfg.model.continuous:
actions = one_hot(actions.squeeze(-1), num=self.act_dim)
state_preds, action_preds, return_preds = self._learn_model.forward(
timesteps=timesteps, states=states, actions=actions, returns_to_go=returns_to_go
)
traj_mask = traj_mask.view(-1, )
# only consider non padded elements
action_preds = action_preds.view(-1, self.act_dim)[traj_mask > 0]
if self._cfg.model.continuous:
action_target = action_target.view(-1, self.act_dim)[traj_mask > 0]
else:
action_target = action_target.view(-1)[traj_mask > 0]
if self._cfg.model.continuous:
action_loss = F.mse_loss(action_preds, action_target)
else:
action_loss = F.cross_entropy(action_preds, action_target)
self._optimizer.zero_grad()
action_loss.backward()
torch.nn.utils.clip_grad_norm_(self._learn_model.parameters(), 0.25)
self._optimizer.step()
self._scheduler.step()
return {
'cur_lr': self._optimizer.state_dict()['param_groups'][0]['lr'],
'action_loss': action_loss.detach().cpu().item(),
}
def evaluate_on_env(self, state_mean=None, state_std=None, render=False):
eval_batch_size = 1 # required for forward pass
results = {}
total_reward = 0
total_timesteps = 0
# state_dim = env.observation_space.shape[0]
# act_dim = env.action_space.shape[0]
if state_mean is None:
self.state_mean = torch.zeros((self.state_dim, )).to(self.device)
else:
self.state_mean = torch.from_numpy(state_mean).to(self.device)
if state_std is None:
self.state_std = torch.ones((self.state_dim, )).to(self.device)
else:
self.state_std = torch.from_numpy(state_std).to(self.device)
# same as timesteps used for training the transformer
# also, crashes if device is passed to arange()
timesteps = torch.arange(start=0, end=self.max_eval_ep_len, step=1)
timesteps = timesteps.repeat(eval_batch_size, 1).to(self.device)
self._learn_model.eval()
with torch.no_grad():
for _ in range(self.num_eval_ep):
# zeros place holders
# continuous action
actions = torch.zeros(
(eval_batch_size, self.max_eval_ep_len, self.act_dim), dtype=torch.float32, device=self.device
)
# discrete action # TODO
# actions = torch.randint(0,self.act_dim,[eval_batch_size, self.max_eval_ep_len, 1],
# dtype=torch.long, device=self.device)
states = torch.zeros(
(eval_batch_size, self.max_eval_ep_len, self.state_dim), dtype=torch.float32, device=self.device
)
rewards_to_go = torch.zeros(
(eval_batch_size, self.max_eval_ep_len, 1), dtype=torch.float32, device=self.device
)
# init episode
running_state = self._env.reset()
running_reward = 0
running_rtg = self.rtg_target / self.rtg_scale
for t in range(self.max_eval_ep_len):
total_timesteps += 1
# add state in placeholder and normalize
states[0, t] = torch.from_numpy(running_state).to(self.device)
# states[0, t] = (states[0, t].cpu() - self.state_mean.cpu().numpy()) / self.state_std.cpu().numpy()
states[0, t] = (states[0, t] - self.state_mean) / self.state_std
                    # calculate running rtg and add it to the placeholder
running_rtg = running_rtg - (running_reward / self.rtg_scale)
rewards_to_go[0, t] = running_rtg
if t < self.context_len:
_, act_preds, _ = self._learn_model.forward(
timesteps[:, :self.context_len], states[:, :self.context_len],
actions[:, :self.context_len], rewards_to_go[:, :self.context_len]
)
act = act_preds[0, t].detach()
else:
_, act_preds, _ = self._learn_model.forward(
timesteps[:, t - self.context_len + 1:t + 1], states[:, t - self.context_len + 1:t + 1],
actions[:, t - self.context_len + 1:t + 1], rewards_to_go[:, t - self.context_len + 1:t + 1]
)
act = act_preds[0, -1].detach()
# if discrete
if not self._cfg.model.continuous:
act = torch.argmax(act)
running_state, running_reward, done, _ = self._env.step(act.cpu().numpy())
# add action in placeholder
actions[0, t] = act
total_reward += running_reward
if render:
self._env.render()
if done:
break
results['eval/avg_reward'] = total_reward / self.num_eval_ep
results['eval/avg_ep_len'] = total_timesteps / self.num_eval_ep
return results
def evaluate(self, total_update_times, state_mean=None, state_std=None, render=False):
results = self.evaluate_on_env(state_mean, state_std, render)
eval_avg_reward = results['eval/avg_reward']
eval_avg_ep_len = results['eval/avg_ep_len']
eval_d4rl_score = self.get_d4rl_normalized_score(results['eval/avg_reward'], self.env_name) * 100
time_elapsed = str(datetime.now().replace(microsecond=0) - self.start_time)
log_str = (
"=" * 60 + '\n' + "time elapsed: " + time_elapsed + '\n' + "num of updates: " + str(total_update_times) +
'\n' + '\n' + "eval avg reward: " + format(eval_avg_reward, ".5f") + '\n' + "eval avg ep len: " +
format(eval_avg_ep_len, ".5f") + '\n' + "eval d4rl score: " + format(eval_d4rl_score, ".5f")
)
logging.info(log_str)
log_data = [time_elapsed, total_update_times, eval_avg_reward, eval_avg_ep_len, eval_d4rl_score]
log_csv_name = self.prefix + "_log_" + self.start_time_str + ".csv"
log_csv_path = os.path.join(self.log_dir, log_csv_name)
self.csv_writer.writerow(log_data)
# save model
logging.info("eval_avg_reward: " + format(eval_avg_reward, ".5f"))
eval_env_score = eval_avg_reward
if eval_env_score >= self.max_env_score:
logging.info("saving max env score model at: " + self.save_best_model_path)
torch.save(self._learn_model.state_dict(), self.save_best_model_path)
self.max_env_score = eval_env_score
logging.info("saving current model at: " + self.save_model_path)
torch.save(self._learn_model.state_dict(), self.save_model_path)
return self.max_env_score >= self.stop_value
def get_d4rl_normalized_score(self, score, env_name):
env_key = env_name.split('-')[0].lower()
assert env_key in D4RLTrajectoryDataset.REF_MAX_SCORE, \
f'no reference score for {env_key} env to calculate d4rl score'
d4rl_max_score, d4rl_min_score = D4RLTrajectoryDataset.REF_MAX_SCORE, D4RLTrajectoryDataset.REF_MIN_SCORE
return (score - d4rl_min_score[env_key]) / (d4rl_max_score[env_key] - d4rl_min_score[env_key])
def _state_dict_learn(self) -> Dict[str, Any]:
return {
'model': self._learn_model.state_dict(),
# 'target_model': self._target_model.state_dict(),
'optimizer': self._optimizer.state_dict(),
}
def _load_state_dict_learn(self, state_dict: Dict[str, Any]) -> None:
self._learn_model.load_state_dict(state_dict['model'])
# self._target_model.load_state_dict(state_dict['target_model'])
self._optimizer.load_state_dict(state_dict['optimizer'])
def default_model(self) -> Tuple[str, List[str]]:
return 'dt', ['ding.model.template.decision_transformer']
def _monitor_vars_learn(self) -> List[str]:
return ['cur_lr', 'action_loss']
| [
"torch.zeros",
"torch.device",
"torch.arange",
"torch.no_grad",
"torch.from_numpy",
"torch.nn.functional.mse_loss",
"torch.ones",
"torch.nn.functional.cross_entropy",
"torch.clone",
"torch.argmax"
] | 1.1.0 | kxzxvbk/DI-engine | 268d77db3cb54401b2cfc83e2bc3ec87c31e7b83 |
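The `_init_learn` above wraps AdamW in a linear-warmup `LambdaLR`; this toy trace shows how the multiplier ramps. The 4-unit linear model and warmup_steps=5 are made up for illustration, while the base lr of 1e-4 and weight decay of 1e-4 match the config defaults.
import torch

warmup_steps = 5
model = torch.nn.Linear(4, 2)  # stand-in model, for illustration only
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lambda steps: min((steps + 1) / warmup_steps, 1))
for step in range(7):
    print(step, optimizer.param_groups[0]['lr'])  # 2e-05, 4e-05, 6e-05, 8e-05, 1e-04, 1e-04, 1e-04
    optimizer.step()
    scheduler.step()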
0.4 | # CometML needs to be imported first.
try:
import comet_ml
except ImportError:
pass
from model import SampleRNN, Predictor
from model import CNNSeq2SampleRNN
from optim import gradient_clipping
from nn import sequence_nll_loss_bits
from trainer import Trainer
from trainer.plugins import (
TrainingLossMonitor, ValidationPlugin, AbsoluteTimeMonitor, SaverPlugin,
GeneratorPlugin, StatsPlugin
)
from dataset import NpzDataset, DataLoader
import torch
from torch.utils.trainer.plugins import Logger
from natsort import natsorted
from functools import reduce
import os
import shutil
import sys
from glob import glob
import re
import argparse
import utils
import pickle
default_params = {
# model parameters
'n_rnn': 1,
'dim': 1024,
'learn_h0': True,
'q_levels': 256,
'seq_len': 1024,
'weight_norm': True,
'batch_size': 128, # 'batch_size': 128, 64
'val_frac': 0.5, # 0.05,
'test_frac': 0.5, # 0, # Test has already been separated for COGNIMUSE
# training parameters
'keep_old_checkpoints': False,
'datasets_path': 'datasets',
'results_path': 'results',
'epoch_limit': 1000, # default: 1000
'resume': True,
'sample_rate': 16000,
'n_samples': 1,
'sample_length': 48000, # 3s -> 30000, # 8s -> 80000
'loss_smoothing': 0.99,
'cuda': True,
'comet_key': None,
# 'npz_filename': 'video_feats_HSL_10fps_pad_train.npz',
# 'npz_filename_test': 'video_feats_HSL_10fps_pad_test.npz',
'npz_filename': 'video_feats_HSL_10fps_origAudio_pad_train.npz',
'npz_filename_test': 'video_feats_HSL_10fps_origAudio_pad_test.npz',
'cnn_pretrain': 'cnnseq/cnn2_origAudio_res_vanilla_HSL_bin_1D_CrossEntropy_ep_40_bs_30_lr_0.001_we_0.0001_adam_76.78perf/',
'cnn_seq2seq_pretrain': 'cnnseq/cnnseq2seq2_origAudio_HSL_bin_1D_res_stepPred_8_ep_20_bs_30_relu_layers_2_size_128_lr_0.001_we_1e-05_asgd_trainSize_3182_testSize_1139_cost_audio/',
}
tag_params = [
'exp', 'frame_sizes', 'n_rnn', 'dim', 'learn_h0', 'q_levels', 'seq_len',
'batch_size', 'dataset', 'val_frac', 'test_frac'
]
def param_to_string(value):
if isinstance(value, bool):
return 'T' if value else 'F'
elif isinstance(value, list):
return ','.join(map(param_to_string, value))
else:
return str(value)
def make_tag(params):
return '-'.join(
key + ':' + param_to_string(params[key])
for key in tag_params
if key not in default_params or params[key] != default_params[key]
)
def setup_results_dir(params):
def ensure_dir_exists(path):
if not os.path.exists(path):
os.makedirs(path)
tag = make_tag(params)
results_path = os.path.abspath(params['results_path'])
ensure_dir_exists(results_path)
results_path = os.path.join(results_path, tag)
if not os.path.exists(results_path):
os.makedirs(results_path)
elif not params['resume']:
shutil.rmtree(results_path)
os.makedirs(results_path)
for subdir in ['checkpoints', 'samples']:
ensure_dir_exists(os.path.join(results_path, subdir))
return results_path
# Includes load CNNSeq2Sample
def load_last_checkpoint(checkpoints_path, model_type='samplernn'):
if model_type == 'samplernn':
pattern = SaverPlugin.last_pattern
else:
pattern = SaverPlugin.last_pattern_cnnseq2sample
checkpoints_pattern = os.path.join(
checkpoints_path, pattern.format('*', '*')
)
checkpoint_paths = natsorted(glob(checkpoints_pattern))
if len(checkpoint_paths) > 0:
checkpoint_path = checkpoint_paths[-1]
checkpoint_name = os.path.basename(checkpoint_path)
match = re.match(
pattern.format(r'(\d+)', r'(\d+)'),
checkpoint_name
)
epoch = int(match.group(1))
iteration = int(match.group(2))
return (torch.load(checkpoint_path), epoch, iteration)
else:
return None
def tee_stdout(log_path):
log_file = open(log_path, 'a', 1)
stdout = sys.stdout
class Tee:
def write(self, string):
log_file.write(string)
stdout.write(string)
def flush(self):
log_file.flush()
stdout.flush()
sys.stdout = Tee()
def make_data_loader(overlap_len, params, npz_filename=None):
npz_filename = params['npz_filename'] if npz_filename is None else npz_filename
path = os.path.join(params['datasets_path'], params['dataset'], npz_filename)
def data_loader(split_from, split_to, eval):
dataset = NpzDataset(
path, overlap_len, params['q_levels'], split_from, split_to
)
return DataLoader(
dataset,
batch_size=params['batch_size'],
seq_len=params['seq_len'],
overlap_len=overlap_len,
shuffle=(not eval),
drop_last=(not eval)
)
return data_loader
def main(exp, frame_sizes, dataset, **params):
params = dict(
default_params,
exp=exp, frame_sizes=frame_sizes, dataset=dataset,
**params
)
results_path = setup_results_dir(params)
tee_stdout(os.path.join(results_path, 'log'))
# Save samplernn parameters in .json for future audio generation
import json
with open(os.path.join(results_path, 'sample_rnn_params.json'), 'w') as fp:
json.dump(params, fp, sort_keys=True, indent=4)
# Model
model = SampleRNN(
frame_sizes=params['frame_sizes'],
n_rnn=params['n_rnn'],
dim=params['dim'],
learn_h0=params['learn_h0'],
q_levels=params['q_levels'],
weight_norm=params['weight_norm'],
batch_size=params['batch_size']
)
print("CUDA num: {}".format(torch.cuda.device_count()))
predictor = Predictor(model)
if params['cuda']:
model = model.cuda()
predictor = predictor.cuda()
model_cnnseq2sample = CNNSeq2SampleRNN(params).cuda()
optimizer = gradient_clipping(torch.optim.Adam(predictor.parameters()))
data_loader = make_data_loader(model.lookback, params)
data_loader_test = make_data_loader(model.lookback, params, npz_filename=params['npz_filename_test'])
# test_split = 1 - params['test_frac']
# val_split = test_split - params['val_frac']
trainer = Trainer(
predictor, model_cnnseq2sample, sequence_nll_loss_bits, optimizer,
# data_loader(0, val_split, eval=False),
data_loader(0, 1, eval=False),
cuda=params['cuda']
)
checkpoints_path = os.path.join(results_path, 'checkpoints')
checkpoint_data = load_last_checkpoint(checkpoints_path)
checkpoint_data_cnnseq2sample = load_last_checkpoint(checkpoints_path, model_type='cnnseq2sample')
if checkpoint_data is not None:
(state_dict, epoch, iteration) = checkpoint_data
(state_dict_cnnseq2sample, epoch, iteration) = checkpoint_data_cnnseq2sample
trainer.epochs = epoch
trainer.iterations = iteration
predictor.load_state_dict(state_dict)
model_cnnseq2sample.load_state_dict(state_dict_cnnseq2sample)
trainer.register_plugin(TrainingLossMonitor(
smoothing=params['loss_smoothing']
))
trainer.register_plugin(ValidationPlugin(
# data_loader(val_split, test_split, eval=True),
# data_loader_test(0, 1, eval=True)
data_loader_test(0, params['val_frac'], eval=True),
data_loader_test(params['val_frac'], 1, eval=True)
# data_loader(test_split, 1, eval=True)
))
trainer.register_plugin(AbsoluteTimeMonitor())
trainer.register_plugin(SaverPlugin(
checkpoints_path, params['keep_old_checkpoints']
))
trainer.register_plugin(GeneratorPlugin(
os.path.join(results_path, 'samples'), params['n_samples'],
params['sample_length'], params['sample_rate']
))
trainer.register_plugin(
Logger([
'training_loss',
'validation_loss',
'test_loss',
'time'
])
)
trainer.register_plugin(StatsPlugin(
results_path,
iteration_fields=[
'training_loss',
('training_loss', 'running_avg'),
'time'
],
epoch_fields=[
'validation_loss',
'test_loss',
'time'
],
plots={
'loss': {
'x': 'iteration',
'ys': [
'training_loss',
('training_loss', 'running_avg'),
'validation_loss',
'test_loss',
],
'log_y': True
}
}
))
trainer.run(params['epoch_limit'])
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
argument_default=argparse.SUPPRESS
)
def parse_bool(arg):
arg = arg.lower()
if 'true'.startswith(arg):
return True
elif 'false'.startswith(arg):
return False
else:
raise ValueError()
parser.add_argument('--exp', required=True, help='experiment name')
parser.add_argument(
'--frame_sizes', nargs='+', type=int, required=True,
help='frame sizes in terms of the number of lower tier frames, \
starting from the lowest RNN tier'
)
parser.add_argument(
'--dataset', required=True,
help='dataset name - name of a directory in the datasets path \
(settable by --datasets_path)'
)
parser.add_argument(
'--n_rnn', type=int, help='number of RNN layers in each tier'
)
parser.add_argument(
'--dim', type=int, help='number of neurons in every RNN and MLP layer'
)
parser.add_argument(
'--learn_h0', type=parse_bool,
help='whether to learn the initial states of RNNs'
)
parser.add_argument(
'--q_levels', type=int,
help='number of bins in quantization of audio samples'
)
parser.add_argument(
'--seq_len', type=int,
help='how many samples to include in each truncated BPTT pass'
)
parser.add_argument(
'--weight_norm', type=parse_bool,
help='whether to use weight normalization'
)
parser.add_argument('--batch_size', type=int, help='batch size')
parser.add_argument(
'--val_frac', type=float,
help='fraction of data to go into the validation set'
)
parser.add_argument(
'--test_frac', type=float,
help='fraction of data to go into the test set'
)
parser.add_argument(
'--keep_old_checkpoints', type=parse_bool,
help='whether to keep checkpoints from past epochs'
)
parser.add_argument(
'--datasets_path', help='path to the directory containing datasets'
)
parser.add_argument(
'--results_path', help='path to the directory to save the results to'
)
parser.add_argument('--epoch_limit', type=int, help='how many epochs to run')
parser.add_argument(
'--resume', type=parse_bool, default=True,
help='whether to resume training from the last checkpoint'
)
parser.add_argument(
'--sample_rate', type=int,
help='sample rate of the training data and generated sound'
)
parser.add_argument(
'--n_samples', type=int,
help='number of samples to generate in each epoch'
)
parser.add_argument(
'--sample_length', type=int,
help='length of each generated sample (in samples)'
)
parser.add_argument(
'--npz_filename', help='Filename with npz data for train'
)
parser.add_argument(
'--npz_filename_test', help='Filename with npz data for test'
)
parser.add_argument(
'--cnn_pretrain', help='Filename with pretrained CNN'
)
parser.add_argument(
'--cnn_seq2seq_pretrain', help='Filename with pretrained CNN-Seq2Seq'
)
parser.add_argument(
'--loss_smoothing', type=float,
help='smoothing parameter of the exponential moving average over \
training loss, used in the log and in the loss plot'
)
parser.add_argument(
'--cuda', type=parse_bool,
help='whether to use CUDA'
)
parser.add_argument(
'--comet_key', help='comet.ml API key'
)
parser.add_argument(
'--seq2seq_model_type', type=str,
help='Seq2Seq model optoins. Options=[seq2seq (lstm), seq2seq_gru (gru)]'
)
parser.set_defaults(**default_params)
main(**vars(parser.parse_args()))
| [
"torch.cuda.device_count",
"torch.utils.trainer.plugins.Logger",
"torch.load"
] | 0.4.1 | gcunhase/Scene2Wav | 99a3ad6c9f2cea1d58590a0bb834203bc525ced8 |
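`load_last_checkpoint` above recovers the epoch and iteration by re-formatting SaverPlugin's filename pattern with regex groups; a standalone sketch of that round-trip. The pattern string here is an assumption for illustration, the real `SaverPlugin.last_pattern` lives in trainer/plugins.py.
import re
from natsort import natsorted

pattern = 'ep{}-it{}.tar'  # hypothetical stand-in for SaverPlugin.last_pattern
names = ['ep2-it1500.tar', 'ep10-it7500.tar']
last = natsorted(names)[-1]                               # natural sort puts ep10 after ep2
match = re.match(pattern.format(r'(\d+)', r'(\d+)'), last)
epoch, iteration = int(match.group(1)), int(match.group(2))
print(epoch, iteration)                                   # 10 7500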
1.6 | import os
# os.environ["CUDA_VISIBLE_DEVICES"] = '1'
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
import random
import numpy as np
from tqdm import tqdm
from torch_geometric.data import DataLoader, DataListLoader
from torch_geometric.nn.data_parallel import DataParallel
import scipy.sparse as sp
import argparse
import warnings
warnings.simplefilter('ignore', sp.SparseEfficiencyWarning)
from modeling.GATIRec import GATIRec
from utils.logger import load_yaml
from utils.metrics import Evaluator
from dataloaders.datasets.MovieLens import DynamicMovieLens
from dataloaders.datasets.Flixster import Flixster
from dataloaders.datasets.Douban import Douban
from dataloaders.datasets.YahooMusic import YahooMusic
class Transfer(object):
def __init__(self):
self.config = load_yaml('config/transfer.yaml')
self.device = 'cpu'
self.cuda = self._init_gpu() # if cuda, device:'cpu'->'cuda'
self.model = self._init_model()
if self.config['ensemble']:
self.models = self._init_models()
self.evaluator = Evaluator()
def _init_gpu(self):
cuda = torch.cuda.is_available() and self.config['is_cuda']
if cuda:
self.device = 'cuda:0'
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(x) for x in self.config['gpu_ids']])
return cuda
def _init_model(self):
input_embedding = self.config['input_embedding']
edge_classes = self.config['edge_classes']
heads = self.config['heads']
layers = self.config['layers']
input_channels = self.config['input_channels'] if input_embedding else 4
attention = True
model = GATIRec(input_channels=input_channels, EGAT_heads=heads, EGAT_output_channels=32, EGAT_layers=layers,
edge_classes=edge_classes, multiply_by=1, activation='elu', decoder_choice='mlp',
concat_nodes_feature=True, edge_embedding='cumsum', add_self_feature=True,
input_embedding=input_embedding, attention=attention)
checkpoint = torch.load(self.config['checkpoint_path'])
model.load_state_dict(checkpoint['state_dict'])
if self.cuda:
device_ids = [i for i in range(len(self.config['gpu_ids']))]
if len(device_ids) > 1:
model = DataParallel(model, device_ids=device_ids)
model = model.to(self.device)
return model
def _init_models(self):
input_embedding = self.config['input_embedding']
edge_classes = self.config['edge_classes']
heads = self.config['heads']
layers = self.config['layers']
input_channels = self.config['input_channels'] if input_embedding else 4
attention = True
checkpoint_epochs = [50, 60, 70, 80]
models = []
for epoch in checkpoint_epochs:
model = GATIRec(input_channels=input_channels, EGAT_heads=heads, EGAT_output_channels=32, EGAT_layers=layers,
edge_classes=edge_classes, multiply_by=1, activation='elu', decoder_choice='mlp',
concat_nodes_feature=True, edge_embedding='cumsum', add_self_feature=True,
input_embedding=input_embedding, attention=attention)
checkpoint_path = os.path.join(self.config['ensemble_path'],'checkpoint_epoch0{}.pth.tar'.format(epoch))
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['state_dict'])
models.append(model)
models = torch.nn.ModuleList(models)
return models.to(self.device)
def testing(self, dataset_name='ml_100k'):
one_hot_flag = not self.config['input_embedding']
if dataset_name == 'flixster':
dataset = Flixster(root=self.config['dataset_root'],
max_neighbors=self.config['max_neighbors'], split='test',
one_hot_flag=one_hot_flag, transfer=True)
self.model.set_multiply_by(self.config['flixster_multiply_by'])
elif dataset_name == 'douban':
dataset = Douban(root=self.config['dataset_root'], max_neighbors=self.config['max_neighbors'],
split='test', one_hot_flag=one_hot_flag)
self.model.set_multiply_by(self.config['douban_multiply_by'])
elif dataset_name == 'yahoo_music':
dataset = YahooMusic(root=self.config['dataset_root'], max_neighbors=self.config['max_neighbors'],
split='test', one_hot_flag=one_hot_flag, transfer=True)
self.model.set_multiply_by(self.config['yahoo_music_multiply_by'])
else:
dataset = DynamicMovieLens(root=self.config['dataset_root'], dataset=self.config['pretrain_dataset'],
max_neighbors=self.config['max_neighbors'],
split='test', one_hot_flag=one_hot_flag)
self.model.set_multiply_by(1)
dataloader = DataLoader(dataset, batch_size=self.config['batch_size'], shuffle=False,
num_workers=self.config['num_workers'], pin_memory=self.cuda)
self.model.eval()
self.evaluator.reset()
tbar = tqdm(dataloader)
with torch.no_grad():
for i, sample in enumerate(tbar):
data = sample
data.to(self.device, non_blocking=True)
data_y = data.y
preds = self.model(data)
self.evaluator.add_batch(preds.cpu().numpy(), data_y.cpu().numpy())
torch.cuda.empty_cache()
rmse = self.evaluator.rmse()
log_string = '[dataset: %s, numSamples: %d] rmse: %f' % (
dataset_name, len(dataloader.dataset), rmse)
print(log_string)
def ensemble_testing(self, dataset_name='ml_100k'):
one_hot_flag = not self.config['input_embedding']
if dataset_name == 'flixster':
dataset = Flixster(root=self.config['dataset_root'], max_neighbors=self.config['max_neighbors'],
split='test', one_hot_flag=one_hot_flag, transfer=True)
for i in range(4):
self.models[i].set_multiply_by(self.config['flixster_multiply_by'])
elif dataset_name == 'douban':
dataset = Douban(root=self.config['dataset_root'], max_neighbors=self.config['max_neighbors'], split='test',
one_hot_flag=one_hot_flag)
for i in range(4):
self.models[i].set_multiply_by(self.config['douban_multiply_by'])
elif dataset_name == 'yahoo_music':
dataset = YahooMusic(root=self.config['dataset_root'], max_neighbors=self.config['max_neighbors'],
split='test', one_hot_flag=one_hot_flag, transfer=True)
for i in range(4):
self.models[i].set_multiply_by(self.config['yahoo_music_multiply_by'])
else:
dataset = DynamicMovieLens(root=self.config['dataset_root'], dataset=self.config['pretrain_dataset'],
max_neighbors=self.config['max_neighbors'], split='test',
one_hot_flag=one_hot_flag)
for i in range(4):
self.models[i].set_multiply_by(1)
dataloader = DataLoader(dataset, batch_size=self.config['batch_size'], shuffle=False,
num_workers=self.config['num_workers'], pin_memory=self.cuda)
self.models.eval()
self.evaluator.reset()
tbar = tqdm(dataloader)
with torch.no_grad():
for i, sample in enumerate(tbar):
data = sample
data.to(self.device, non_blocking=True)
data_y = data.y
outs = []
for j in range(4):
preds = self.models[j](data).view(1, -1)
outs.append(preds)
outs = torch.cat(outs, 0).mean(0)
self.evaluator.add_batch(outs.cpu().numpy(), data_y.cpu().numpy())
torch.cuda.empty_cache()
rmse = self.evaluator.rmse()
log_string = '[dataset: %s, numSamples: %d] rmse: %f' % (dataset_name, len(dataloader.dataset), rmse)
print(log_string)
def start(self):
if self.config['ensemble']:
self.ensemble_testing('flixster')
self.ensemble_testing('douban')
self.ensemble_testing('yahoo_music')
else:
self.testing('flixster')
self.testing('douban')
self.testing('yahoo_music')
if __name__ == '__main__':
tr = Transfer()
tr.start()
| [
"torch.cat",
"torch.nn.ModuleList",
"torch.no_grad",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.load",
"torch.multiprocessing.set_sharing_strategy"
] | 1.6.0 | kaai520/GATI-Rec | 1cc2efbbeeef2705add0256dfec2262da1df7119 |
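The ensemble evaluation above simply averages per-sample predictions from the four checkpoints; a stripped-down sketch of that reduction with dummy tensors standing in for self.models[j](data) and the targets.
import torch

preds_per_model = [torch.rand(5) for _ in range(4)]               # dummy per-model predictions
outs = torch.cat([p.view(1, -1) for p in preds_per_model], dim=0)  # shape (4, 5)
ensembled = outs.mean(0)                                           # shape (5,)
rmse = torch.sqrt(torch.mean((ensembled - torch.rand(5)) ** 2))    # RMSE against dummy targets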
1.10 | from typing import Dict, Any
import random
import numpy as np
import torch
class Callback:
def __init__(self, update_frequency_type: str = 'batch', update_frequency: int = 100):
self.update_frequency_type = update_frequency_type
self.update_frequency = update_frequency
self._num_batches = 0
self._epoch = 0
self._batch = 0
self._step = 0
# Random states to stash
self._random_state = None
self._numpy_random_state = None
self._torch_random_state = None
def on_training_begin(self, num_batches: int, train: bool):
self._num_batches = num_batches
self._epoch = 0
self._batch = 0
self._step = 0
self._on_training_begin(train)
def on_train_batch_begin(self, batch: int, train: bool):
self._batch = batch
if self.update_frequency_type == 'batch' and batch % self.update_frequency == 0:
self._step = batch / self.update_frequency + self._epoch * (self._num_batches + 1)
self._on_train_batch_begin(batch, train)
def on_train_batch_end(self, batch: int, logs: Dict[str, Any], train: bool):
if self.update_frequency_type == 'batch' and batch % self.update_frequency == 0:
self._on_train_batch_end(batch, logs, train)
def on_train_epoch_begin(self, epoch: int, train: bool):
self._epoch = epoch
if self.update_frequency_type == 'epoch' and epoch % self.update_frequency == 0:
self._step = self._epoch
self._on_train_epoch_begin(epoch, train)
def on_train_epoch_end(self, epoch: int, logs: Dict[str, Any], train: bool):
if self.update_frequency_type == 'epoch' and epoch % self.update_frequency == 0:
self._on_train_epoch_end(epoch, logs, train)
def _stash_random_state(self):
self._random_state = random.getstate()
self._numpy_random_state = np.random.get_state()
self._torch_random_state = torch.get_rng_state()
def _pop_stashed_random_state(self):
if self._random_state is not None:
random.setstate(self._random_state)
np.random.set_state(self._numpy_random_state)
torch.set_rng_state(self._torch_random_state)
self._random_state = None
self._numpy_random_state = None
self._torch_random_state = None
# To be implemented by the subclasses
def _on_training_begin(self, train: bool):
pass
def _on_train_batch_begin(self, batch: int, train: bool):
pass
def _on_train_batch_end(self, batch: int, logs: Dict[str, Any], train: bool):
pass
def _on_train_epoch_begin(self, epoch: int, train: bool):
pass
def _on_train_epoch_end(self, epoch: int, logs: Dict[str, Any], train: bool):
pass
| [
"torch.set_rng_state",
"torch.get_rng_state"
] | 1.10.1 | paulosoaresua/mlbase | 8b60b80fd1745d6565fd38e9bc9d2e203033ae27 |
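A hypothetical subclass of the Callback above, illustrating what the RNG stash helpers are for: extra sampling done inside a hook is wrapped so the training random streams are left untouched. This class is not part of the mlbase package.
import torch

class SampleLogger(Callback):
    # Illustrative only; logs a random draw per epoch without perturbing training RNG.
    def _on_train_epoch_end(self, epoch, logs, train):
        self._stash_random_state()                               # remember the training RNG state
        logs['sample_norm'] = torch.randn(4, 16).norm().item()   # any extra sampling goes here
        self._pop_stashed_random_state()                         # restore, so training is unaffected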
1.10 | import torch
import torch.nn as nn
from mlbase.model.base_model import BaseModel
from typing import List
from mlbase.callback.callback import Callback
from mlbase.callback.validation_check import ValidationCheck
from torch.utils.data import Dataset, DataLoader
import os
import random
import numpy as np
class ModelRunner:
def __init__(self, model: BaseModel, optimizer: torch.optim):
self._model = model
self._optimizer = optimizer
self._initial_epoch = 0
self._initial_batch = 0
self._random_state_initializers = None
def reset(self):
self._initial_epoch = 0
self._initial_batch = 0
self._random_state_initializers = None
    def load(self, in_dir: str, save_point: int = None):
        if save_point is None:
            # Retrieve the last saved model in the folder (filenames follow model.<save_point>.pt)
            save_points = [int(f.split('.')[1]) for f in os.listdir(in_dir)
                           if f.startswith('model.') and f.endswith('.pt')]
            if not save_points:
                raise FileNotFoundError(f"No saved checkpoints found in {in_dir}")
            save_point = max(save_points)
        filename = f"{in_dir}/model.{save_point}.pt"
data_package = torch.load(filename)
self._model.load_state_dict(data_package['model_state_dict'])
self._optimizer.load_state_dict(data_package['optimizer_state_dict'])
random.setstate(data_package['random_state'])
np.random.set_state(data_package['numpy_random_state'])
torch.set_rng_state(data_package['torch_random_state'])
self._initial_epoch = data_package['epoch'] + 1
def train(self, training_set: Dataset, epochs: int, batch_size: int, callbacks: List[Callback], shuffle: bool):
self._model.train()
self._model.stop_training = False
training_data_loader = DataLoader(training_set, batch_size=batch_size, shuffle=shuffle)
        # ValidationCheck callbacks must come first so that other callbacks can access
        # the validation measures they compute.
for i in range(len(callbacks)):
if isinstance(callbacks[i], ValidationCheck):
callback = callbacks.pop(i)
callbacks.insert(0, callback)
for callback in callbacks:
callback.on_training_begin(len(training_data_loader), True)
for epoch in range(self._initial_epoch, epochs):
for callback in callbacks:
callback.on_train_epoch_begin(epoch, True)
for batch, data in enumerate(training_data_loader):
self._model.log_keys.clear()
for callback in callbacks:
callback.on_train_batch_begin(batch, True)
loss = self._model.calculate_loss(data)
self._optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self._model.parameters(), 5.0)
self._optimizer.step()
for callback in callbacks:
callback.on_train_batch_end(batch, self._model.log_keys, True)
# Clear the log because if it's preserved and there's a callback per epoch to save the log,
# it will save the result from the last batch. Callbacks per epoch should be combined with a
            # ValidationCheck or an EpochSummary callback so results can be computed over a full dataset.
self._model.log_keys.clear()
for callback in callbacks:
callback.on_train_epoch_end(epoch, self._model.log_keys, True)
if self._model.stop_training:
break
self._model.eval()
| [
"torch.set_rng_state",
"torch.load",
"torch.utils.data.DataLoader"
] | 1.10.1 | paulosoaresua/mlbase | 8b60b80fd1745d6565fd38e9bc9d2e203033ae27 |
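`load` above implies a specific checkpoint layout; a sketch of the matching save side that writes the package `load` expects. The function name and its standalone placement are assumptions, not part of the original class.
import random
import numpy as np
import torch

def save_checkpoint(model, optimizer, epoch, out_dir):
    data_package = {
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'random_state': random.getstate(),
        'numpy_random_state': np.random.get_state(),
        'torch_random_state': torch.get_rng_state(),
        'epoch': epoch,
    }
    torch.save(data_package, f"{out_dir}/model.{epoch}.pt")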
1.1 | import argparse
import experiment_buddy
import torch
import torch.nn as nn
from altmin import get_mods, get_codes, update_codes, update_last_layer_, update_hidden_weights_adam_
from altmin import scheduler_step
from models import LeNet
from models import test
from utils import get_devices, load_dataset
# Training settings
parser = argparse.ArgumentParser(description='Online Alternating-Minimization with SGD')
parser.add_argument('--dataset', default='mnist', metavar='D',
help='name of dataset')
parser.add_argument('--data-augmentation', action='store_true', default=False,
help='enables data augmentation')
parser.add_argument('--batch-size', type=int, default=200, metavar='B',
help='input batch size for training')
parser.add_argument('--epochs', type=int, default=50, metavar='E',
help='number of epochs to train (default: 50)')
parser.add_argument('--n-iter-codes', type=int, default=5, metavar='N',
help='number of internal iterations for codes optimization')
parser.add_argument('--n-iter-weights', type=int, default=1, metavar='N',
help='number of internal iterations in learning weights')
parser.add_argument('--lr-codes', type=float, default=0.3, metavar='LR',
help='learning rate for codes updates')
parser.add_argument('--lr-out', type=float, default=0.008, metavar='LR',
help='learning rate for last layer weights updates')
parser.add_argument('--lr-weights', type=float, default=0.008, metavar='LR',
help='learning rate for hidden weights updates')
parser.add_argument('--lr-half-epochs', type=int, default=8, metavar='LH',
                    help='number of epochs after which the learning rate is halved')
parser.add_argument('--no-batchnorm', action='store_true', default=True,
help='disables batchnormalization')
parser.add_argument('--lambda_c', type=float, default=0.0, metavar='L',
help='codes sparsity')
parser.add_argument('--lambda_w', type=float, default=0.0, metavar='L',
help='weight sparsity')
parser.add_argument('--mu', type=float, default=0.003, metavar='M',
help='initial mu parameter')
parser.add_argument('--d-mu', type=float, default=0.0 / 300, metavar='M',
help='increase in mu after every mini-batch')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-interval', type=int, default=1000, metavar='N',
help='how many batches to wait before saving test performance (if set to zero, it does not save)')
parser.add_argument('--log-first-epoch', action='store_true', default=False,
help='whether or not it should test and log after every mini-batch in first epoch')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
args = parser.parse_args()
experiment_buddy.register_defaults(vars(args))
tb = experiment_buddy.deploy("")
# Check cuda
device, num_gpus = get_devices("cuda:0" if not args.no_cuda and torch.cuda.is_available() else "cpu", seed=args.seed)
# Load data and model
model_name = "lenet"
if model_name == 'feedforward' or model_name == 'binary':
model_name += '_' + str(args.n_hidden_layers) + 'x' + str(args.n_hiddens)
print('\nOnline alternating-minimization with sgd')
print('* Loading dataset {}'.format(args.dataset))
print('* Loading model {}'.format(model_name))
train_loader, test_loader, n_inputs = load_dataset(args.dataset, batch_size=args.batch_size, conv_net=True, num_workers=0)
window_size = train_loader.dataset.data[0].shape[0]
if len(train_loader.dataset.data[0].shape) == 3:
num_input_channels = train_loader.dataset.data[0].shape[2]
else:
num_input_channels = 1
model = LeNet(num_input_channels=num_input_channels, window_size=window_size, bias=True).to(device)
criterion = nn.CrossEntropyLoss()
if __name__ == "__main__":
# Save everything in a `ddict`
# SAV = ddict(args=args.__dict__)
# Store training and test performance after each training epoch
# SAV.perf = ddict(tr=[], te=[])
# Store test performance after each iteration in first epoch
# SAV.perf.first_epoch = []
# Store test performance after each args.save_interval iterations
# SAV.perf.te_vs_iterations = []
# Expose model modules that has_codes
model = get_mods(model, optimizer='Adam', optimizer_params={'lr': args.lr_weights}, scheduler=lambda epoch: 1 / 2 ** (epoch // args.lr_half_epochs))
model[-1].optimizer.param_groups[0]['lr'] = args.lr_out
# Initial mu and increment after every mini-batch
mu = args.mu
mu_max = 10 * args.mu
step = 0
for epoch in range(1, args.epochs + 1):
print('\nEpoch {} of {}. mu = {:.4f}, lr_out = {}'.format(epoch, args.epochs, mu, model[-1].scheduler.get_lr()))
for batch_idx, (data, targets) in enumerate(train_loader):
data, targets = data.to(device), targets.to(device)
# (1) Forward
model.train()
with torch.no_grad():
outputs, codes = get_codes(model, data)
# (2) Update codes
codes, num_gradients = update_codes(codes, model, targets, criterion, mu, lambda_c=args.lambda_c, n_iter=args.n_iter_codes, lr=args.lr_codes)
step += num_gradients
# (3) Update weights
num_gradients = update_last_layer_(model[-1], codes[-1], targets, criterion, n_iter=args.n_iter_weights)
step += num_gradients
num_gradients = update_hidden_weights_adam_(model, data, codes, lambda_w=args.lambda_w, n_iter=args.n_iter_weights)
step += num_gradients
# Store all iterations of first epoch
test_accuracy, test_loss = test(model, data_loader=test_loader, label=" - Test")
tb.add_scalar("test/accuracy", test_accuracy, step)
tb.add_scalar("test/loss", test_loss, step)
# Outputs to terminal
loss = criterion(outputs, targets)
tb.add_scalar("train/epoch", epoch, step)
tb.add_scalar("train/loss", loss, step)
# Increment mu
if mu < mu_max:
mu = mu + args.d_mu
scheduler_step(model)
# Print performances
# SAV.perf.tr += [test(model, data_loader=train_loader, label="Training")]
# SAV.perf.te += [test(model, data_loader=test_loader, label="Test")]
train_accuracy, train_loss = test(model, data_loader=train_loader)
tb.add_scalar("train/accuracy", train_accuracy, step)
tb.add_scalar("train/loss", train_loss, step)
test_accuracy, test_loss = test(model, data_loader=test_loader)
tb.add_scalar("test/accuracy", test_accuracy, step)
tb.add_scalar("test/loss", test_loss, step)
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss"
] | 1.1.0 | manuel-delverme/online-alt-min | 83f2c7d8bf9d6c8de8a8812e4fee73f9b58e05ad |
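The per-epoch halving scheduler passed to `get_mods` above reduces to a simple multiplicative factor; a toy trace using the script's defaults (lr_weights=0.008, lr_half_epochs=8).
lr_weights, lr_half_epochs = 0.008, 8
schedule = lambda epoch: 1 / 2 ** (epoch // lr_half_epochs)
for epoch in (0, 7, 8, 15, 16, 24):
    print(epoch, lr_weights * schedule(epoch))
# epochs 0-7 -> 0.008, 8-15 -> 0.004, 16 -> 0.002, 24 -> 0.001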
1.0 | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
"""
import collections
import inspect
import math
import os
import random
import re
import shutil
import sys
import time
import warnings
from logging import StreamHandler
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
from .integrations import ( # isort: split
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
run_hp_search_optuna,
run_hp_search_ray,
)
import numpy as np
import torch
from packaging import version
from torch import nn
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from . import __version__
from .configuration_utils import PretrainedConfig
from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from .debug_utils import DebugOption, DebugUnderflowOverflow
from .deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
from .dependency_versions_check import dep_version_check
from .file_utils import (
CONFIG_NAME,
WEIGHTS_NAME,
PushToHubMixin,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_tpu_available,
is_training_run_on_sagemaker,
)
from .modelcard import TrainingSummary
from .modeling_utils import PreTrainedModel, unwrap_model
from .optimization import Adafactor, AdamW, get_scheduler
from .tokenization_utils_base import PreTrainedTokenizerBase
from .trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from .trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from .trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
HPSearchBackend,
PredictionOutput,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
get_last_checkpoint,
set_seed,
speed_metrics,
)
from .training_args import ParallelMode, TrainingArguments
from .utils import logging
from .utils.modeling_auto_mapping import MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
_is_torch_generator_available = False
_is_native_amp_available = False
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from .utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if version.parse(torch.__version__) >= version.parse("1.6"):
_is_torch_generator_available = True
_is_native_amp_available = True
from torch.cuda.amp import autocast
if is_datasets_available():
import datasets
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
dep_version_check("fairscale")
import fairscale
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.nn.wrap import auto_wrap
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.distributed as dist
from smdistributed.dataparallel.torch.parallel.distributed import DistributedDataParallel as DDP
else:
import torch.distributed as dist
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from .trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
if is_training_run_on_sagemaker():
logging.add_handler(StreamHandler(sys.stdout))
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class Trainer:
"""
Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):
The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.
.. note::
:class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`
provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as
they work the same way as the 🤗 Transformers models.
args (:class:`~transformers.TrainingArguments`, `optional`):
The arguments to tweak for training. Will default to a basic instance of
:class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in
the current directory if not provided.
data_collator (:obj:`DataCollator`, `optional`):
The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.
Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of
:func:`~transformers.DataCollatorWithPadding` otherwise.
train_dataset (:obj:`torch.utils.data.dataset.Dataset` or :obj:`torch.utils.data.dataset.IterableDataset`, `optional`):
The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
Note that if it's a :obj:`torch.utils.data.dataset.IterableDataset` with some randomization and you are
            training in a distributed fashion, your iterable dataset should either use an internal attribute
:obj:`generator` that is a :obj:`torch.Generator` for the randomization that must be identical on all
processes (and the Trainer will manually set the seed of this :obj:`generator` at each epoch) or have a
:obj:`set_epoch()` method that internally sets the seed of the RNGs used.
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed.
tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):
            The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
            maximum length when batching inputs, and it will be saved alongside the model to make it easier to rerun an
interrupted training or reuse the fine-tuned model.
model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):
A function that instantiates the model to be used. If provided, each call to
:meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.
The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be
able to choose different architectures according to hyper parameters (such as layer count, sizes of inner
layers, dropout probabilities etc).
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
detailed in :doc:`here <callback>`.
If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.
optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple
containing the optimizer and the scheduler to use. Will default to an instance of
:class:`~transformers.AdamW` on your model and a scheduler given by
:func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.
Important attributes:
- **model** -- Always points to the core model. If using a transformers model, it will be a
:class:`~transformers.PreTrainedModel` subclass.
- **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,
the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the
inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.
- **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
data parallelism, this means some of the model layers are split on different GPUs).
- **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
to :obj:`False` if model parallel or deepspeed is used, or if the default
``TrainingArguments.place_model_on_device`` is overridden to return :obj:`False` .
- **is_in_train** -- Whether or not a model is currently running ``train`` (e.g. when ``evaluate`` is called
while in ``train``)
"""
from .trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Dataset] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Callable[[], PreTrainedModel] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
):
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model
set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# set the correct log level depending on the node
log_level = args.get_process_log_level()
logging.set_verbosity(log_level)
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. "
"`model_init` will overwrite your model when calling the `train` method. This will become a fatal error in the next release.",
FutureWarning,
)
self.model_init = model_init
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
# 3. full fp16 eval - since the model needs to be half'ed first
# 4. Sharded DDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or args.deepspeed
or (args.fp16_full_eval and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device:
model = model.to(args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create clone of distant repo and output directory if needed
if self.args.push_to_hub:
self.init_git_repo()
if self.is_world_process_zero():
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
self._signature_columns = None
# Mixed precision setup
self.use_apex = False
self.use_amp = False
self.fp16_backend = None
if args.fp16:
if args.fp16_backend == "auto":
self.fp16_backend = "amp" if _is_native_amp_available else "apex"
else:
self.fp16_backend = args.fp16_backend
logger.info(f"Using {self.fp16_backend} fp16 backend")
if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16
if self.fp16_backend == "amp":
self.use_amp = True
if is_sagemaker_mp_enabled():
self.scaler = smp.amp.GradScaler()
elif self.sharded_ddp is not None:
self.scaler = ShardedGradScaler()
else:
self.scaler = torch.cuda.amp.GradScaler()
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex."
)
self.use_apex = True
# FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
if is_sagemaker_mp_enabled() and self.use_amp and args.max_grad_norm is not None and args.max_grad_norm > 0:
raise ValueError(
"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
"along 'max_grad_norm': 0 in your hyperparameters."
)
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState()
self.control = TrainerControl()
        # Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
        # reset to 0 every time flos need to be logged
self.current_flos = 0
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = (
["start_positions", "end_positions"]
if type(self.model).__name__ in MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES.values()
else ["labels"]
)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# very last
self._memory_tracker.stop_and_update_metrics()
def add_callback(self, callback):
"""
        Add a callback to the current list of :class:`~transformers.TrainerCallback`.
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
                In the first case, will instantiate a member of that class.
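        Example (illustrative sketch; ``MyCallback`` is a hypothetical user-defined callback)::

            from transformers import TrainerCallback

            class MyCallback(TrainerCallback):
                def on_epoch_end(self, args, state, control, **kwargs):
                    print(f"finished epoch {state.epoch}")

            trainer.add_callback(MyCallback)    # pass the class (instantiated internally) ...
            trainer.add_callback(MyCallback())  # ... or an already-built instance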
"""
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
"""
        Remove a callback from the current list of :class:`~transformers.TrainerCallback` and return it.
        If the callback is not found, returns :obj:`None` (and no error is raised).
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
                In the first case, will pop the first member of that class found in the list of callbacks.
        Returns:
            :class:`~transformers.TrainerCallback`: The callback removed, if found.
"""
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
"""
        Remove a callback from the current list of :class:`~transformers.TrainerCallback`.
        Args:
            callback (:obj:`type` or :class:`~transformers.TrainerCallback`):
                A :class:`~transformers.TrainerCallback` class or an instance of a :class:`~transformers.TrainerCallback`.
                In the first case, will remove the first member of that class found in the list of callbacks.
"""
self.callback_handler.remove_callback(callback)
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
if not self.args.remove_unused_columns:
return dataset
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += ["label", "label_ids"]
columns = [k for k in self._signature_columns if k in dataset.column_names]
ignored_columns = list(set(dataset.column_names) - set(self._signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set "
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
)
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if not isinstance(self.train_dataset, collections.abc.Sized):
return None
generator = None
if self.args.world_size <= 1 and _is_torch_generator_available:
generator = torch.Generator()
generator.manual_seed(int(torch.empty((), dtype=torch.int64).random_().item()))
# Build the sampler.
if self.args.group_by_length:
if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
lengths = (
self.train_dataset[self.args.length_column_name]
if self.args.length_column_name in self.train_dataset.column_names
else None
)
else:
lengths = None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
lengths=lengths,
model_input_name=model_input_name,
generator=generator,
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
seed=self.args.seed,
)
else:
if self.args.world_size <= 1:
if _is_torch_generator_available:
return RandomSampler(self.train_dataset, generator=generator)
return RandomSampler(self.train_dataset)
elif (
self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
and not self.args.dataloader_drop_last
):
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=self.args.seed,
)
else:
return DistributedSampler(
self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=self.args.seed,
)
def get_train_dataloader(self) -> DataLoader:
"""
Returns the training :class:`~torch.utils.data.DataLoader`.
Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted
to distributed training if necessary) otherwise.
Subclass and override this method if you want to inject some custom behavior.
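        Example (illustrative sketch of a subclass override; ``MyTrainer`` and ``my_sampler`` are hypothetical)::

            from torch.utils.data import DataLoader

            class MyTrainer(Trainer):
                def get_train_dataloader(self):
                    return DataLoader(
                        self.train_dataset,
                        batch_size=self.args.train_batch_size,
                        sampler=my_sampler(self.train_dataset),
                        collate_fn=self.data_collator,
                        drop_last=self.args.dataloader_drop_last,
                    )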
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_dataset = self.train_dataset
if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description="training")
if isinstance(train_dataset, torch.utils.data.dataset.IterableDataset):
if self.args.world_size > 1:
train_dataset = IterableDatasetShard(
train_dataset,
batch_size=self.args.train_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
train_dataset,
batch_size=self.args.train_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
train_sampler = self._get_train_sampler()
return DataLoader(
train_dataset,
batch_size=self.args.train_batch_size,
sampler=train_sampler,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:
# Deprecated code
if self.args.use_legacy_prediction_loop:
if is_torch_tpu_available():
return SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
if self.args.world_size <= 1:
return SequentialSampler(eval_dataset)
else:
return ShardSampler(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
"""
Returns the evaluation :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):
                If provided, will override :obj:`self.eval_dataset`. If it is a :obj:`datasets.Dataset`, columns not
accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
if isinstance(eval_dataset, torch.utils.data.dataset.IterableDataset):
if self.args.world_size > 1:
eval_dataset = IterableDatasetShard(
eval_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
eval_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
"""
Returns the test :class:`~torch.utils.data.DataLoader`.
Subclass and override this method if you want to inject some custom behavior.
Args:
            test_dataset (:obj:`torch.utils.data.dataset.Dataset`):
                The test dataset to use. If it is a :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.
"""
if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description="test")
if isinstance(test_dataset, torch.utils.data.dataset.IterableDataset):
if self.args.world_size > 1:
test_dataset = IterableDatasetShard(
test_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=self.data_collator,
drop_last=self.args.dataloader_drop_last,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method (or :obj:`create_optimizer`
and/or :obj:`create_scheduler`) in a subclass.
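        Example (illustrative sketch of a subclass override; the SGD choice is purely hypothetical)::

            import torch

            class SGDTrainer(Trainer):
                def create_optimizer(self):
                    if self.optimizer is None:
                        self.optimizer = torch.optim.SGD(
                            self.model.parameters(), lr=self.args.learning_rate, momentum=0.9
                        )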
"""
self.create_optimizer()
self.create_scheduler(num_training_steps)
def create_optimizer(self):
"""
Setup the optimizer.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
if self.optimizer is None:
decay_parameters = get_parameter_names(self.model, [nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
},
]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
optimizer_kwargs = {
"betas": (self.args.adam_beta1, self.args.adam_beta2),
"eps": self.args.adam_epsilon,
}
optimizer_kwargs["lr"] = self.args.learning_rate
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
def create_scheduler(self, num_training_steps: int):
"""
Setup the scheduler. The optimizer of the trainer must have been set up before this method is called.
Args:
num_training_steps (int): The number of training steps to do.
"""
if self.lr_scheduler is None:
warmup_steps = (
self.args.warmup_steps
if self.args.warmup_steps > 0
else math.ceil(num_training_steps * self.args.warmup_ratio)
)
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
self.optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=num_training_steps,
)
def num_examples(self, dataloader: DataLoader) -> int:
"""
Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.
Will raise an exception if the underlying dataset does not implement method :obj:`__len__`
"""
return len(dataloader.dataset)
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
"""HP search setup code"""
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
for key, value in params.items():
if not hasattr(self.args, key):
raise AttributeError(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`."
)
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info("Trial:", trial.params)
if self.args.deepspeed:
# Rebuild the deepspeed config to reflect the updated training parameters
from transformers.deepspeed import HfDeepSpeedConfig
self.args.hf_deepspeed_config = HfDeepSpeedConfig(self.args)
def _report_to_hp_search(
self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float]
):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, epoch)
if trial.should_prune():
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir)
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
def call_model_init(self, trial=None):
model_init_argcount = len(inspect.signature(self.model_init).parameters)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def _wrap_model(self, model, training=True):
if is_sagemaker_mp_enabled():
# Wrapping the base model twice in a DistributedModel will raise an error.
if isinstance(self.model_wrapped, smp.model.DistributedModel):
return self.model_wrapped
return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
        # train/eval could be run multiple times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = nn.DataParallel(model)
# Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyway.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
elif is_sagemaker_dp_enabled():
model = DDP(model, device_ids=[dist.get_local_rank()], broadcast_buffers=False)
elif self.args.local_rank != -1:
if self.args.ddp_find_unused_parameters is not None:
find_unused_parameters = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
find_unused_parameters = not getattr(model.config, "gradient_checkpointing", False)
else:
find_unused_parameters = True
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank],
output_device=self.args.local_rank,
find_unused_parameters=find_unused_parameters,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
**kwargs,
):
"""
Main training entry point.
Args:
resume_from_checkpoint (:obj:`str` or :obj:`bool`, `optional`):
If a :obj:`str`, local path to a saved checkpoint as saved by a previous instance of
:class:`~transformers.Trainer`. If a :obj:`bool` and equals `True`, load the last checkpoint in
`args.output_dir` as saved by a previous instance of :class:`~transformers.Trainer`. If present,
training will resume from the model/optimizer/scheduler states loaded here.
trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):
The trial run or the hyperparameter dictionary for hyperparameter search.
kwargs:
Additional keyword arguments used to hide deprecated arguments
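        Example (illustrative sketch; the checkpoint path is hypothetical)::

            trainer.train()                                              # train from scratch
            trainer.train(resume_from_checkpoint=True)                   # resume from the last checkpoint in output_dir
            trainer.train(resume_from_checkpoint="out/checkpoint-500")   # resume from a specific checkpoint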
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
args = self.args
self.is_in_train = True
# do_train is not a reliable argument, as it might not be set and .train() still called, so
# the following is a workaround:
if args.fp16_full_eval and not args.do_train:
self.model = self.model.to(args.device)
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
set_seed(args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
if resume_from_checkpoint is not None:
if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
logger.info(f"Loading model from {resume_from_checkpoint}).")
if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
checkpoint_version = config.transformers_version
if checkpoint_version is not None and checkpoint_version != __version__:
                    logger.warning(
                        f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                        f"Transformers but your current version is {__version__}. This is not recommended and could "
                        "lead to errors or unwanted behaviors."
                    )
if args.deepspeed:
# will be resumed in deepspeed_init
pass
else:
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self.model = self.model.to(args.device)
self.model_wrapped = self.model
        # Keep track of whether we can call len() on the dataset or not
train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
if train_dataset_is_sized:
num_update_steps_per_epoch = len(train_dataloader) // args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
if args.max_steps > 0:
max_steps = args.max_steps
num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
args.max_steps % num_update_steps_per_epoch > 0
)
                # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
# the best we can do.
num_train_samples = args.max_steps * total_train_batch_size
else:
max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(args.num_train_epochs)
num_train_samples = len(self.train_dataset) * args.num_train_epochs
else:
# see __init__. max_steps is set when the dataset has no __len__
max_steps = args.max_steps
num_train_epochs = int(args.num_train_epochs)
num_update_steps_per_epoch = max_steps
num_train_samples = args.max_steps * total_train_batch_size
if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
debug_overflow = DebugUnderflowOverflow(self.model) # noqa
delay_optimizer_creation = self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE
if args.deepspeed:
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
model = self._wrap_model(self.model_wrapped)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
num_examples = (
self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * args.max_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
steps_trained_progress_bar = None
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, "trainer_state.json")
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, "trainer_state.json"))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not args.ignore_data_skip:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} "
"batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` "
"flag to your launch command, but you will resume the training on data already seen by your model."
)
if self.is_local_process_zero() and not args.disable_tqdm:
steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
steps_trained_progress_bar.set_description("Skipping the first batches")
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None
self.state.trial_params = hp_params(trial) if trial is not None else None
# This should be the same if the state has been saved but in case the training arguments changed, it's safer
# to set this after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(args.device)
        # _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
model.zero_grad()
self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not args.ignore_data_skip:
for epoch in range(epochs_trained):
# We just need to begin an iteration to create the randomization of the sampler.
for _ in train_dataloader:
break
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
elif isinstance(train_dataloader.dataset, IterableDatasetShard):
train_dataloader.dataset.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator) if train_dataset_is_sized else args.max_steps * args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
for step, inputs in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
if steps_trained_progress_bar is not None:
steps_trained_progress_bar.update(1)
if steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
continue
elif steps_trained_progress_bar is not None:
steps_trained_progress_bar.close()
steps_trained_progress_bar = None
if step % args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
if (
((step + 1) % args.gradient_accumulation_steps != 0)
and args.local_rank != -1
and args._no_sync_in_gradient_accumulation
):
                    # Avoid unnecessary DDP gradient synchronization since no optimizer step will be taken on this batch.
with model.no_sync():
tr_loss += self.training_step(model, inputs)
else:
tr_loss += self.training_step(model, inputs)
self.current_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if (step + 1) % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.use_amp:
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
args.max_grad_norm,
)
# Optimizer step
optimizer_was_run = True
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
xm.optimizer_step(self.optimizer)
elif self.use_amp:
scale_before = self.scaler.get_scale()
self.scaler.step(self.optimizer)
self.scaler.update()
scale_after = self.scaler.get_scale()
optimizer_was_run = scale_before <= scale_after
else:
self.optimizer.step()
if optimizer_was_run and not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1) / steps_in_epoch
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif args.local_rank != -1:
dist.barrier()
logger.info(
f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})."
)
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME), map_location="cpu")
# If the model is on the GPU, it still works!
self._load_state_dict_in_model(state_dict)
if self.deepspeed:
self.deepspeed.load_checkpoint(
self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False
)
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
train_loss = self._total_loss_scalar / self.state.global_step
metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
self.store_flos()
metrics["total_flos"] = self.state.total_flos
metrics["train_loss"] = train_loss
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
self.log(metrics)
self.control = self.callback_handler.on_train_end(args, self.state, self.control)
return TrainOutput(self.state.global_step, train_loss, metrics)
def _load_state_dict_in_model(self, state_dict):
load_result = self.model.load_state_dict(state_dict, strict=False)
if len(load_result.missing_keys) != 0:
if set(load_result.missing_keys) == set(self.model._keys_to_ignore_on_save):
self.model.tie_weights()
else:
logger.warn(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warn(f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}.")
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):
if self.control.should_log:
logs: Dict[str, float] = {}
tr_loss_scalar = tr_loss.item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.store_flos()
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self.evaluate()
self._report_to_hp_search(trial, epoch, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
def _load_rng_state(self, checkpoint):
# Load RNG states from `checkpoint`
if checkpoint is None:
return
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank != -1:
rng_file = os.path.join(checkpoint, f"rng_state_{local_rank}.pth")
            if not os.path.isfile(rng_file):
logger.info(
f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that "
"wasn't launched in a distributed fashion, reproducibility is not guaranteed."
)
return
else:
rng_file = os.path.join(checkpoint, "rng_state.pth")
            if not os.path.isfile(rng_file):
logger.info(
"Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
"fashion, reproducibility is not guaranteed."
)
return
checkpoint_rng_state = torch.load(rng_file)
random.setstate(checkpoint_rng_state["python"])
np.random.set_state(checkpoint_rng_state["numpy"])
torch.random.set_rng_state(checkpoint_rng_state["cpu"])
if torch.cuda.is_available():
if self.args.local_rank != -1:
torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
else:
torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
if is_torch_tpu_available():
xm.set_rng_state(checkpoint_rng_state["xla"])
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
else:
from ray import tune
run_id = tune.get_trial_id()
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
self.store_flos()
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir)
if self.deepspeed:
            # under zero3 the saved model file contains bogus (placeholder) weights, unless the deepspeed
            # config `stage3_gather_fp16_weights_on_model_save` is True
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
if smp.dp_rank() == 0:
                # Consolidate the state dict on all processes of dp_rank 0
opt_state_dict = self.optimizer.state_dict()
# Save it and the scheduler on the main process
if self.is_world_process_zero():
torch.save(opt_state_dict, os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
if self.use_amp:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
elif self.is_world_process_zero() and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
reissue_pt_warnings(caught_warnings)
if self.use_amp:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, "scaler.pt"))
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.is_world_process_zero():
self.state.save_to_json(os.path.join(output_dir, "trainer_state.json"))
# Save RNG state in non-distributed training
rng_states = {
"python": random.getstate(),
"numpy": np.random.get_state(),
"cpu": torch.random.get_rng_state(),
}
if torch.cuda.is_available():
if self.args.local_rank == -1:
# In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
else:
rng_states["cuda"] = torch.cuda.random.get_rng_state()
if is_torch_tpu_available():
rng_states["xla"] = xm.get_rng_state()
# A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
# not yet exist.
os.makedirs(output_dir, exist_ok=True)
local_rank = xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank
if local_rank == -1:
torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
else:
torch.save(rng_states, os.path.join(output_dir, f"rng_state_{local_rank}.pth"))
# Maybe delete some older checkpoints.
if self.is_world_process_zero():
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
"""If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
if self.deepspeed:
# deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
return
if os.path.isfile(os.path.join(checkpoint, "optimizer.pt")) and os.path.isfile(
os.path.join(checkpoint, "scheduler.pt")
):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, "scheduler.pt"), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, "optimizer.pt"), map_location=map_location)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, "scheduler.pt")))
reissue_pt_warnings(caught_warnings)
if self.use_amp and os.path.isfile(os.path.join(checkpoint, "scaler.pt")):
self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, "scaler.pt")))
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
"""
        Launch a hyperparameter search using ``optuna`` or ``Ray Tune``. The optimized quantity is determined by
:obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is
provided, the sum of all metrics otherwise.
.. warning::
To use this method, you need to have provided a ``model_init`` when initializing your
:class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible
with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the
method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.
Args:
hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`):
A function that defines the hyperparameter search space. Will default to
:func:`~transformers.trainer_utils.default_hp_space_optuna` or
:func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.
compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):
A function computing the objective to minimize or maximize from the metrics returned by the
:obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.
            n_trials (:obj:`int`, `optional`, defaults to 20):
                The number of trial runs to test.
            direction (:obj:`str`, `optional`, defaults to :obj:`"minimize"`):
                Whether to optimize for greater or lower values of the objective. Can be :obj:`"minimize"` or
                :obj:`"maximize"`; you should pick :obj:`"minimize"` when optimizing the validation loss and
                :obj:`"maximize"` when optimizing one or several metrics.
            backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):
                The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which
                one is installed. If both are installed, will default to optuna.
kwargs:
Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For
more information see:
- the documentation of `optuna.create_study
<https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html>`__
- the documentation of `tune.run
<https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__
Returns:
:class:`transformers.trainer_utils.BestRun`: All the information about the best run.
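        Example (illustrative sketch using the optuna backend; the search space shown is a hypothetical choice)::

            def my_hp_space(trial):
                return {
                    "learning_rate": trial.suggest_float("learning_rate", 1e-5, 5e-4, log=True),
                    "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
                }

            best_run = trainer.hyperparameter_search(
                hp_space=my_hp_space, n_trials=10, direction="minimize", backend="optuna"
            )
            print(best_run.hyperparameters)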
"""
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`."
"To install ray run `pip install ray[tune]`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray
best_run = run_hp_search(self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
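        Example (illustrative sketch of a subclass override; dropping the learning-rate key is a hypothetical choice)::

            class QuietTrainer(Trainer):
                def log(self, logs):
                    logs.pop("learning_rate", None)
                    super().log(logs)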
"""
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
"""
Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and
handling potential state.
"""
for k, v in inputs.items():
if isinstance(v, torch.Tensor):
kwargs = dict(device=self.args.device)
if self.deepspeed and inputs[k].dtype != torch.int64:
# NLP models inputs are int64 and those get adjusted to the right dtype of the
# embedding. Other models such as wav2vec2's inputs are already float and thus
# may need special handling to match the dtypes of the model
kwargs.update(dict(dtype=self.args.hf_deepspeed_config.dtype()))
inputs[k] = v.to(**kwargs)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
scaler = self.scaler if self.use_amp else None
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps, scaler=scaler)
return loss_mb.reduce_mean().detach().to(self.args.device)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
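        Example (illustrative sketch of a subclass override; the two-class weighting is a hypothetical choice)::

            import torch
            from torch import nn

            class WeightedLossTrainer(Trainer):
                def compute_loss(self, model, inputs, return_outputs=False):
                    labels = inputs.pop("labels")
                    outputs = model(**inputs)
                    logits = outputs.logits
                    weight = torch.tensor([1.0, 2.0], device=logits.device)
                    loss = nn.functional.cross_entropy(
                        logits.view(-1, model.config.num_labels), labels.view(-1), weight=weight
                    )
                    return (loss, outputs) if return_outputs else loss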
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
"""
Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
machines) main process.
"""
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on several
machines, this is only going to be :obj:`True` for one process).
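        Example (illustrative sketch)::

            if trainer.is_world_process_zero():
                print("only the main process runs this, e.g. writing extra files or logging")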
"""
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will only save from the main process.
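        Example (illustrative sketch; the output path and model class are hypothetical)::

            trainer.save_model("my_finetuned_model")
            reloaded = AutoModelForSequenceClassification.from_pretrained("my_finetuned_model")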
"""
if output_dir is None:
output_dir = self.args.output_dir
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif is_sagemaker_mp_enabled():
# Calling the state_dict needs to be done on the wrapped model and on all processes.
state_dict = self.model_wrapped.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
):
state_dict = self.model.state_dict()
if self.is_world_process_zero():
self._save(output_dir, state_dict=state_dict)
elif self.deepspeed:
# this takes care of everything as long as we aren't under zero3
if self.is_world_process_zero():
self._save(output_dir)
if is_deepspeed_zero3_enabled():
# It's too complicated to try to override different places where the weights dump gets
                # saved, so since under zero3 the file is bogus, simply delete it. The user should
                # either use the deepspeed checkpoint to resume, or recover the full weights with
                # zero_to_fp32.py, which is stored in the checkpoint.
if self.is_world_process_zero():
file = os.path.join(output_dir, WEIGHTS_NAME)
if os.path.isfile(file):
# logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
os.remove(file)
# now save the real model if stage3_gather_fp16_weights_on_model_save=True
# if false it will not be saved.
# This must be called on all ranks
self.deepspeed.save_fp16_model(output_dir, WEIGHTS_NAME)
elif self.is_world_process_zero():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info(f"Saving model checkpoint to {output_dir}")
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
save_config=self.is_world_process_zero(),
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, save_config=self.is_world_process_zero(), save_function=xm.save)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += distributed_broadcast_scalars([self.current_flos]).sum().item()
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*")]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
for i in range(best_model_index, len(checkpoints_sorted) - 2):
checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
        # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
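    # Worked example (illustrative assumption, not from the original source) of the
    # `save_total_limit=1` special case above: if the sorted checkpoints are
    # ["checkpoint-500", "checkpoint-1000"] and "checkpoint-500" is the best model, the limit is
    # silently raised to 2 so that both the best checkpoint and the most recent one (needed to
    # resume training) are kept, and nothing is deleted.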
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).
You can also subclass and override this method to inject custom behavior.
Args:
eval_dataset (:obj:`Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,
columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the
:obj:`__len__` method.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"eval_bleu" if the prefix is "eval" (default)
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
dictionary also contains the epoch number which comes from the training state.
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.log(output.metrics)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
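    # Hedged usage sketch (illustrative, not part of the original source): with the default
    # `metric_key_prefix="eval"`, returned keys are prefixed, e.g. a compute_metrics function that
    # returns {"accuracy": 0.9} yields {"eval_loss": ..., "eval_accuracy": 0.9, "eval_runtime": ...}.
    #
    #     metrics = trainer.evaluate()
    #     print(metrics["eval_loss"])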
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
"""
Run prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:obj:`Dataset`):
Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the
``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"test"`):
An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
"test_bleu" if the prefix is "test" (default)
.. note::
If your predictions or labels have different sequence length (for instance because you're doing dynamic
padding in a token classification task) the predictions will be padded (on the right) to allow for
concatenation into one array. The padding index is -100.
Returns: `NamedTuple` A namedtuple with the following keys:
- predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
- label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
- metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset
contained labels).
"""
# memory metrics - must set up as early as possible
self._memory_tracker.start()
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
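    # Hedged usage sketch (illustrative, not part of the original source): `predict` returns a
    # PredictionOutput namedtuple; `test_dataset` below is an assumed, user-provided dataset.
    #
    #     output = trainer.predict(test_dataset)
    #     preds = output.predictions.argmax(-1)   # class predictions for a classifier
    #     print(output.metrics)                   # e.g. {"test_loss": ..., "test_runtime": ...}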
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
# if eval is called w/o train init deepspeed here
if self.args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, halve it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
logger.info(f"***** Running {description} *****")
if isinstance(dataloader.dataset, collections.abc.Sized):
logger.info(f" Num examples = {self.num_examples(dataloader)}")
else:
logger.info(" Num examples: Unknown")
logger.info(f" Batch size = {batch_size}")
model.eval()
self.callback_handler.eval_dataloader = dataloader
# Do this before wrapping.
eval_dataset = dataloader.dataset
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
# Initialize containers
# losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
losses_host = None
preds_host = None
labels_host = None
# losses/preds/labels on CPU (final containers)
all_losses = None
all_preds = None
all_labels = None
        # Will be useful when we have an iterable dataset whose length we don't know in advance.
observed_num_examples = 0
# Main evaluation loop
for step, inputs in enumerate(dataloader):
# Update the observed num examples
observed_batch_size = find_batch_size(inputs)
if observed_batch_size is not None:
observed_num_examples += observed_batch_size
# Prediction step
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
# Update containers on host
if loss is not None:
losses = self._nested_gather(loss.repeat(batch_size))
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
logits = self._pad_across_processes(logits)
logits = self._nested_gather(logits)
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels = self._pad_across_processes(labels)
labels = self._nested_gather(labels)
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = (
labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
)
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
# Number of samples
if not isinstance(eval_dataset, IterableDataset):
num_samples = len(eval_dataset)
elif isinstance(eval_dataset, IterableDatasetShard):
num_samples = eval_dataset.num_examples
else:
num_samples = observed_num_examples
        # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samples has been rounded to a multiple of batch_size, so we truncate.
if all_losses is not None:
all_losses = all_losses[:num_samples]
if all_preds is not None:
all_preds = nested_truncate(all_preds, num_samples)
if all_labels is not None:
all_labels = nested_truncate(all_labels, num_samples)
# Metrics!
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if all_losses is not None:
metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
"""
        Gather the value of `tensors` (a tensor or a nested list/tuple of tensors) from all
        processes so that they can later be concatenated.
"""
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
"""
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.
"""
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
if tensor.shape[1] == max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
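    # Worked example (illustrative, not from the original source) of the padding above: if process 0
    # holds logits of shape (8, 12) and process 1 holds (8, 15), both are padded along dim 1 to
    # (8, 15) with `pad_index` (-100) before gathering, so the all-gather produces tensors of
    # identical shape that can safely be concatenated.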
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""
Perform an evaluation step on :obj:`model` using obj:`inputs`.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to evaluate.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
prediction_loss_only (:obj:`bool`):
Whether or not to return the loss only.
            ignore_keys (:obj:`List[str]`, `optional`):
A list of keys in the output of your model (if it is a dictionary) that should be ignored when
gathering predictions.
Return:
Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
logits and labels (each being optional).
"""
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels:
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
if self.use_amp:
with autocast():
outputs = model(**inputs)
else:
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
"""
For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of
floating point operations for every backward + forward pass. If using another model, either implement such a
method in the model or subclass and override this method.
Args:
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
Returns:
:obj:`int`: The number of floating-point operations.
"""
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def init_git_repo(self):
"""
Initializes a git repo in :obj:`self.args.push_to_hub_model_id`.
"""
if not self.is_world_process_zero():
return
use_auth_token = True if self.args.push_to_hub_token is None else self.args.push_to_hub_token
repo_url = PushToHubMixin._get_repo_url_from_name(
self.args.push_to_hub_model_id,
organization=self.args.push_to_hub_organization,
use_auth_token=use_auth_token,
)
self.repo = PushToHubMixin._create_or_get_repo(
self.args.output_dir, repo_url=repo_url, use_auth_token=use_auth_token
)
# By default, ignore the checkpoint folders
if not os.path.exists(os.path.join(self.args.output_dir, ".gitignore")):
with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
writer.writelines(["checkpoint-*/"])
def create_model_card(
self,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Optional[str] = None,
model_name: Optional[str] = None,
finetuned_from: Optional[str] = None,
tasks: Optional[str] = None,
dataset_tags: Optional[Union[str, List[str]]] = None,
dataset: Optional[Union[str, List[str]]] = None,
dataset_args: Optional[Union[str, List[str]]] = None,
):
training_summary = TrainingSummary.from_trainer(
self,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
f.write(model_card)
def push_to_hub(self, commit_message: Optional[str] = "add model", **kwargs) -> str:
"""
Upload `self.model` and `self.tokenizer` to the 🤗 model hub on the repo `self.args.push_to_hub_model_id`.
Parameters:
commit_message (:obj:`str`, `optional`, defaults to :obj:`"add model"`):
Message to commit while pushing.
kwargs:
Additional keyword arguments passed along to :meth:`~transformers.Trainer.create_model_card`.
Returns:
The url of the commit of your model in the given repository.
"""
if not self.is_world_process_zero():
return
self.create_model_card(model_name=self.args.push_to_hub_model_id, **kwargs)
self.save_model()
return self.repo.push_to_hub(commit_message=commit_message)
#
# Deprecated code
#
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.
Works both with or without labels.
"""
if not isinstance(dataloader.dataset, collections.abc.Sized):
raise ValueError("dataset must implement __len__")
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
# if eval is called w/o train init deepspeed here
if self.args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False)
# if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while
# ``train`` is running, halve it first and then put on device
if not self.is_in_train and self.args.fp16_full_eval:
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info(f"***** Running {description} *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Batch size = {batch_size}")
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, self.args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
            # The actual number of eval samples can be greater than num_examples in distributed settings (when we
            # pass a batch size to the sampler).
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
if self.args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host = None, None, None
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def _gather_and_numpify(self, tensors, name):
"""
        Gather the value of `tensors` (a tensor or a nested list/tuple of tensors) from all
        processes and convert them to numpy arrays.
"""
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
| [
"torch.cat",
"torch.cuda.amp.autocast",
"torch.Generator",
"torch.cuda.random.set_rng_state",
"torch.random.get_rng_state",
"torch.cuda.random.set_rng_state_all",
"torch.cuda.is_available",
"torch.load",
"torch.cuda.random.get_rng_state_all",
"torch.nn.DataParallel",
"torch.utils.data.sampler.RandomSampler",
"torch.tensor",
"torch.empty",
"torch.random.set_rng_state",
"torch.nn.parallel.DistributedDataParallel",
"torch.utils.data.sampler.SequentialSampler",
"torch.cuda.amp.GradScaler",
"torch.distributed.barrier",
"torch.cuda.random.get_rng_state",
"torch.distributed.get_local_rank",
"torch.utils.data.dataloader.DataLoader",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 1.0 | rejinjoy18/transformers | 71346c4a9099d84685c91fab626d0b8b1704ef08 |
1.8 | import torch
from colossalai.gemini.tensor import stateful_op_impl
from ..stateful_tensor import StatefulTensorV2
from packaging import version
@stateful_op_impl(torch.nn.functional.linear)
def stateful_linear(types, args, kwargs, pg):
"""Handles ``__torch_function__`` dispatch for ``torch.nn.functional.linear``.
    This method computes a linear transformation.
"""
input_tensor = args[0]
weight = args[1]
if version.parse(torch.__version__) > version.parse("1.11.0"):
if len(args) == 3:
bias = args[2]
else:
bias = None
else:
bias = kwargs.get('bias', None)
if isinstance(bias, StatefulTensorV2):
bias = bias.torch_tensor()
# Add communication logic before and after linear call.
if isinstance(weight, StatefulTensorV2):
return torch.nn.functional.linear(input_tensor, weight.torch_tensor(), bias)
else:
return torch.nn.functional.linear(input_tensor, weight, bias)
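# Hedged usage sketch (illustrative, not part of the original source): the handler above is reached
# through `__torch_function__` dispatch when a StatefulTensorV2 weight is passed to F.linear. The
# constructor call and shapes below are assumptions for illustration only.
#
#     weight = StatefulTensorV2(torch.randn(4, 8))          # assumed wrapping of a plain tensor
#     out = torch.nn.functional.linear(torch.randn(2, 8), weight)   # dispatches to stateful_linear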
| [
"torch.nn.functional.linear"
] | 1.8 | weiplanet/ColossalAI | ab962b9735ea323eb84c5bc4bce534bf2376960e |
1.5 | import torch
import torch.nn as nn
from torch.autograd import Variable
from abc import ABCMeta, abstractmethod
class AbstractPrimitive(nn.Module, metaclass=ABCMeta):
"""
Use this class when creating new operations for edges.
This is required because we are agnostic to operations
    at the edges. As a consequence, they can contain subgraphs,
    which naslib must detect and process properly.
"""
def __init__(self, kwargs):
super().__init__()
self.init_params = {k: v for k, v in kwargs.items() if k != 'self' and not k.startswith('_') and k != 'kwargs'}
@abstractmethod
def forward(self, x, edge_data):
"""
The forward processing of the operation.
"""
raise NotImplementedError()
@abstractmethod
def get_embedded_ops(self):
"""
Return any embedded ops so that they can be
analysed whether they contain a child graph, e.g.
        a 'motif' in the hierarchical search space.
If there are no embedded ops, then simply return
`None`. Should return a list otherwise.
"""
raise NotImplementedError()
@property
def get_op_name(self):
return type(self).__name__
class Identity(AbstractPrimitive):
"""
An implementation of the Identity operation.
"""
def __init__(self, **kwargs):
super().__init__(locals())
def forward(self, x, edge_data):
return x
def get_embedded_ops(self):
return None
class Zero(AbstractPrimitive):
"""
Implementation of the zero operation. It removes
the connection by multiplying its input with zero.
"""
def __init__(self, stride, **kwargs):
"""
        When stride > 1, it is assumed that the number of
        channels must be doubled.
"""
super().__init__(locals())
self.stride = stride
def forward(self, x, edge_data):
if self.stride == 1:
return x.mul(0.)
else:
return x[:, :, ::self.stride, ::self.stride].mul(0.)
def get_embedded_ops(self):
return None
def __repr__(self):
return "Zero (stride={})".format(self.stride)
class Zero1x1(AbstractPrimitive):
"""
Implementation of the zero operation. It removes
the connection by multiplying its input with zero.
"""
def __init__(self, stride, **kwargs):
"""
        When stride > 1, it is assumed that the number of
        channels must be doubled.
"""
super().__init__(locals())
self.stride = stride
def forward(self, x, edge_data):
if self.stride == 1:
return x.mul(0.)
else:
x = x[:, :, ::self.stride, ::self.stride].mul(0.)
return torch.cat([x, x], dim=1) # double the channels TODO: ugly as hell
def get_embedded_ops(self):
return None
def __repr__(self):
return "Zero1x1 (stride={})".format(self.stride)
class SepConv(AbstractPrimitive):
"""
Implementation of Separable convolution operation as
in the DARTS paper, i.e. 2 sepconv directly after another.
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True, **kwargs):
super().__init__(locals())
self.kernel_size = kernel_size
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=C_in, bias=False),
nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_in, affine=affine),
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding, groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x, edge_data=None):
return self.op(x)
def get_embedded_ops(self):
return None
@property
def get_op_name(self):
op_name = super().get_op_name
op_name += '{}x{}'.format(self.kernel_size, self.kernel_size)
return op_name
class DilConv(AbstractPrimitive):
"""
Implementation of a dilated separable convolution as
used in the DARTS paper.
"""
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True, **kwargs):
super().__init__(locals())
self.kernel_size = kernel_size
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=C_in, bias=False),
nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(C_out, affine=affine),
)
def forward(self, x, edge_data):
return self.op(x)
def get_embedded_ops(self):
return None
@property
def get_op_name(self):
op_name = super().get_op_name
op_name += '{}x{}'.format(self.kernel_size, self.kernel_size)
return op_name
class Stem(AbstractPrimitive):
"""
This is used as an initial layer directly after the
image input.
"""
def __init__(self, C_out, **kwargs):
super().__init__(locals())
self.seq = nn.Sequential(
nn.Conv2d(3, C_out, 3, padding=1, bias=False),
nn.BatchNorm2d(C_out))
def forward(self, x, edge_data):
return self.seq(x)
def get_embedded_ops(self):
return None
class Sequential(AbstractPrimitive):
"""
Implementation of `torch.nn.Sequential` to be used
as op on edges.
"""
def __init__(self, *args, **kwargs):
super().__init__(locals())
self.primitives = args
self.op = nn.Sequential(*args)
def forward(self, x, edge_data):
return self.op(x)
def get_embedded_ops(self):
return list(self.primitives)
class MaxPool(AbstractPrimitive):
def __init__(self, kernel_size, stride, **kwargs):
super().__init__(locals())
self.maxpool = nn.MaxPool2d(kernel_size, stride=stride, padding=1)
def forward(self, x, edge_data):
x = self.maxpool(x)
return x
def get_embedded_ops(self):
return None
class MaxPool1x1(AbstractPrimitive):
"""
Implementation of MaxPool with an optional 1x1 convolution
in case stride > 1. The 1x1 convolution is required to increase
the number of channels.
"""
def __init__(self, kernel_size, stride, C_in=None, C_out=None, affine=True, **kwargs):
super().__init__(locals())
self.stride = stride
self.maxpool = nn.MaxPool2d(kernel_size, stride=stride, padding=1)
if stride > 1:
assert C_in is not None and C_out is not None
self.conv = nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
def forward(self, x, edge_data):
x = self.maxpool(x)
if self.stride > 1:
x = self.conv(x)
x = self.bn(x)
return x
def get_embedded_ops(self):
return None
class AvgPool(AbstractPrimitive):
"""
    Implementation of Average Pooling.
"""
def __init__(self, kernel_size, stride, **kwargs):
super().__init__(locals())
self.avgpool = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
def forward(self, x, edge_data):
x = self.avgpool(x)
return x
def get_embedded_ops(self):
return None
class AvgPool1x1(AbstractPrimitive):
"""
    Implementation of Average Pooling with an optional
1x1 convolution afterwards. The convolution is required
to increase the number of channels if stride > 1.
"""
def __init__(self, kernel_size, stride, C_in=None, C_out=None, affine=True, **kwargs):
super().__init__(locals())
self.stride = stride
self.avgpool = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
if stride > 1:
assert C_in is not None and C_out is not None
self.conv = nn.Conv2d(C_in, C_out, 1, stride=1, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
def forward(self, x, edge_data):
x = self.avgpool(x)
if self.stride > 1:
x = self.conv(x)
x = self.bn(x)
return x
def get_embedded_ops(self):
return None
class ReLUConvBN(AbstractPrimitive):
def __init__(self, C_in, C_out, kernel_size, stride=1, affine=True, **kwargs):
super().__init__(locals())
self.kernel_size = kernel_size
pad = 0 if stride == 1 and kernel_size == 1 else 1
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=pad, bias=False),
nn.BatchNorm2d(C_out, affine=affine)
)
def forward(self, x, edge_data):
return self.op(x)
def get_embedded_ops(self):
return None
@property
def get_op_name(self):
op_name = super().get_op_name
op_name += '{}x{}'.format(self.kernel_size, self.kernel_size)
return op_name
class Concat1x1(nn.Module):
"""
    Implementation of the channel-wise concatenation followed by a 1x1 convolution
to retain the channel dimension.
"""
def __init__(self, num_in_edges, C_out, affine=True, **kwargs):
super().__init__()
self.conv = nn.Conv2d(num_in_edges * C_out, C_out, kernel_size=1, stride=1, padding=0, bias=False)
self.bn = nn.BatchNorm2d(C_out, affine=affine)
def forward(self, x):
"""
        Expects a list of input tensors; concatenates them channel-wise
        and applies a 1x1 convolution.
"""
x = torch.cat(x, dim=1)
x = self.conv(x)
x = self.bn(x)
return x
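    # Shape sketch (illustrative, not part of the original source): with num_in_edges=3 and
    # C_out=16, three inputs of shape (N, 16, H, W) are concatenated to (N, 48, H, W) and the
    # 1x1 convolution maps them back to (N, 16, H, W).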
| [
"torch.cat",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
] | 1.5.0 | deepdad/NASLib-1 | 6c93788f145187fe8cda446531f5b9f98e4ab48b |
1.5 | from typing import List, Optional
import torch
from torch import nn
import numpy as np
import logging
from src.utils.pytorch_linear_reg_utils import fit_linear, linear_reg_pred, outer_prod, add_const_col
from src.data.data_class import TrainDataSet, TestDataSet, TrainDataSetTorch, TestDataSetTorch
logger = logging.getLogger()
class DeepGMMModel:
def __init__(self,
primal_net: nn.Module,
dual_net: nn.Module
):
self.primal_net = primal_net
self.dual_net = dual_net
def predict_t(self, treatment: torch.Tensor):
self.primal_net.train(False)
return self.primal_net(treatment)
def predict(self, treatment: np.ndarray):
treatment_t = torch.tensor(treatment, dtype=torch.float32)
return self.predict_t(treatment_t).data.numpy()
def evaluate_t(self, test_data: TestDataSetTorch):
target = test_data.structural
with torch.no_grad():
pred = self.predict_t(test_data.treatment)
return (torch.norm((target - pred)) ** 2) / target.size()[0]
def evaluate(self, test_data: TestDataSet):
return self.evaluate_t(TestDataSetTorch.from_numpy(test_data)).data.item()
| [
"torch.norm",
"torch.no_grad",
"torch.tensor"
] | 1.5.0 | liyuan9988/DeepFeatureIV | 54b04e9e9e4c88d4859ea65d34ceb69dd1b58bc2 |
0.4 | """
Loss functions for recommender models.
The pointwise, BPR, and hinge losses are a good fit for
implicit feedback models trained through negative sampling.
The regression and Poisson losses are used for explicit feedback
models.
"""
import torch
import torch.nn.functional as F
from spotlight.torch_utils import assert_no_grad
def my_own_loss(positive_predictions, negative_predictions, mask=None):
"""
    Modified copy of the hinge pairwise loss function.
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Tensor containing predictions for sampled negative items.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
"""
loss = torch.clamp(negative_predictions -
positive_predictions +
1.0, 0.0)
    if mask is not None and mask.sum() > 0:
mask = mask.float()
loss = loss * mask
ret_loss = loss.sum() / mask.sum()
else:
ret_loss = loss.mean()
return ret_loss
def pointwise_loss(positive_predictions, negative_predictions, mask=None):
"""
Logistic loss function.
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Tensor containing predictions for sampled negative items.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
"""
positives_loss = (1.0 - F.sigmoid(positive_predictions))
negatives_loss = F.sigmoid(negative_predictions)
loss = (positives_loss + negatives_loss)
if mask is not None:
mask = mask.float()
loss = loss * mask
return loss.sum() / mask.sum()
return loss.mean()
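# Hedged usage sketch (illustrative, not part of the original source) of the masking logic shared
# by the losses in this module: entries where the mask is 0 contribute nothing, and the sum is
# normalised by the number of unmasked entries rather than by the batch size.
#
#     pos = torch.randn(4)
#     neg = torch.randn(4)
#     mask = torch.tensor([1, 1, 0, 0])
#     loss = pointwise_loss(pos, neg, mask=mask)   # averaged over the 2 unmasked entries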
def bpr_loss(positive_predictions, negative_predictions, mask=None):
"""
Bayesian Personalised Ranking [1]_ pairwise loss function.
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Tensor containing predictions for sampled negative items.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
References
----------
.. [1] Rendle, Steffen, et al. "BPR: Bayesian personalized ranking from
implicit feedback." Proceedings of the twenty-fifth conference on
uncertainty in artificial intelligence. AUAI Press, 2009.
"""
loss = (1.0 - F.sigmoid(positive_predictions -
negative_predictions))
if mask is not None:
mask = mask.float()
loss = loss * mask
return loss.sum() / mask.sum()
return loss.mean()
def hinge_loss(positive_predictions, negative_predictions, mask=None):
"""
Hinge pairwise loss function.
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Tensor containing predictions for sampled negative items.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
"""
loss = torch.clamp(negative_predictions -
positive_predictions +
1.0, 0.0)
if mask is not None:
mask = mask.float()
loss = loss * mask
return loss.sum() / mask.sum()
return loss.mean()
def adaptive_hinge_loss(positive_predictions, negative_predictions, mask=None):
"""
Adaptive hinge pairwise loss function. Takes a set of predictions
for implicitly negative items, and selects those that are highest,
    thus sampling those negatives that are closest to violating the
ranking implicit in the pattern of user interactions.
Approximates the idea of weighted approximate-rank pairwise loss
introduced in [2]_
Parameters
----------
positive_predictions: tensor
Tensor containing predictions for known positive items.
negative_predictions: tensor
Iterable of tensors containing predictions for sampled negative items.
More tensors increase the likelihood of finding ranking-violating
pairs, but risk overfitting.
mask: tensor, optional
A binary tensor used to zero the loss from some entries
of the loss tensor.
Returns
-------
loss, float
The mean value of the loss function.
References
----------
.. [2] Weston, Jason, Samy Bengio, and Nicolas Usunier. "Wsabie:
Scaling up to large vocabulary image annotation." IJCAI.
Vol. 11. 2011.
"""
highest_negative_predictions, _ = torch.max(negative_predictions, 0)
return hinge_loss(positive_predictions, highest_negative_predictions.squeeze(), mask=mask)
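# Hedged usage sketch (illustrative, not part of the original source): with several sampled
# negatives per positive, the adaptive variant keeps only the hardest (highest-scoring) negative
# before applying the hinge loss; the shapes below are arbitrary assumptions.
#
#     positive = torch.randn(128)
#     negatives = torch.randn(5, 128)              # 5 negative samples per positive
#     loss = adaptive_hinge_loss(positive, negatives)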
def regression_loss(observed_ratings, predicted_ratings):
"""
Regression loss.
Parameters
----------
observed_ratings: tensor
Tensor containing observed ratings.
predicted_ratings: tensor
Tensor containing rating predictions.
Returns
-------
loss, float
The mean value of the loss function.
"""
assert_no_grad(observed_ratings)
return ((observed_ratings - predicted_ratings) ** 2).mean()
def poisson_loss(observed_ratings, predicted_ratings):
"""
Poisson loss.
Parameters
----------
observed_ratings: tensor
Tensor containing observed ratings.
predicted_ratings: tensor
Tensor containing rating predictions.
Returns
-------
loss, float
The mean value of the loss function.
"""
assert_no_grad(observed_ratings)
return (predicted_ratings - observed_ratings * torch.log(predicted_ratings)).mean()
def logistic_loss(observed_ratings, predicted_ratings):
"""
Logistic loss for explicit data.
Parameters
----------
observed_ratings: tensor
Tensor containing observed ratings which
should be +1 or -1 for this loss function.
predicted_ratings: tensor
Tensor containing rating predictions.
Returns
-------
loss, float
The mean value of the loss function.
"""
assert_no_grad(observed_ratings)
# Convert target classes from (-1, 1) to (0, 1)
observed_ratings = torch.clamp(observed_ratings, 0, 1)
return F.binary_cross_entropy_with_logits(predicted_ratings,
observed_ratings,
size_average=True)
| [
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.functional.sigmoid",
"torch.max",
"torch.clamp",
"torch.log"
] | 0.4.0 | paprocki-r/spotlight | a7dd31bf5e225b9e8ec8dc6ffcd0f2093d43336c |