version (stringclasses 21) | code (stringlengths 225-174k) | apis (sequence) | full_version (stringlengths 1-6) | repo_name (stringlengths 10-107) | hexsha (stringlengths 40) |
---|---|---|---|---|---|
1.3 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import argparse
import time
import os.path as osp
import os
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.backends import cudnn
from torch.utils.data import DataLoader
from reid import datasets
from reid import models
from reid.dist_metric import DistanceMetric
from reid.loss import TripletLoss
from reid.trainers import CoTeaching
from reid.evaluators import Evaluator, extract_features
from reid.utils.data import transforms as T
import torch.nn.functional as F
from reid.utils.data.preprocessor import Preprocessor
from reid.utils.data.sampler import RandomIdentitySampler
from reid.utils.serialization import load_checkpoint, save_checkpoint
from sklearn.cluster import DBSCAN
from reid.rerank import re_ranking
def calScores(clusters, labels):
"""
Compute pair-wise precision, recall and F-score for a clustering.
"""
from scipy.special import comb
if len(clusters) == 0:
return 0, 0, 0
else:
curCluster = []
for curClus in clusters.values():
curCluster.append(labels[curClus])
TPandFP = sum([comb(len(val), 2) for val in curCluster])
TP = 0
for clusterVal in curCluster:
for setMember in set(clusterVal):
if sum(clusterVal == setMember) < 2: continue
TP += comb(sum(clusterVal == setMember), 2)
FP = TPandFP - TP
# FN and TN
TPandFN = sum([comb(labels.tolist().count(val), 2) for val in set(labels)])
FN = TPandFN - TP
# cal precision and recall
precision, recall = TP / (TP + FP), TP / (TP + FN)
fScore = 2 * precision * recall / (precision + recall)
return precision, recall, fScore
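# Hedged usage sketch (illustrative values only, not from the original script):
# with two real IDs {0: images [0, 1], 1: image [2]} and predicted cluster
# labels [5, 5, 7], every same-ID pair shares a cluster, so all scores are 1.
# >>> realIDs = {0: [0, 1], 1: [2]}
# >>> calScores(realIDs, np.asarray([5, 5, 7]))
# (1.0, 1.0, 1.0)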
def get_data(name, data_dir, height, width, batch_size,
workers):
root = osp.join(data_dir, name)
dataset = datasets.create(name, root, num_val=0.1)
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# use all training and validation images in target dataset
train_set = dataset.trainval
num_classes = dataset.num_trainval_ids
transformer = T.Compose([
T.Resize((height, width)),
T.ToTensor(),
normalizer,
])
extfeat_loader = DataLoader(
Preprocessor(train_set, root=dataset.images_dir,
transform=transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
test_loader = DataLoader(
Preprocessor(list(set(dataset.query) | set(dataset.gallery)),
root=dataset.images_dir, transform=transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return dataset, num_classes, extfeat_loader, test_loader
def get_source_data(name, data_dir, height, width, batch_size,
workers):
root = osp.join(data_dir, name)
dataset = datasets.create(name, root, num_val=0.1)
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
# use all training images on source dataset
train_set = dataset.train
num_classes = dataset.num_train_ids
transformer = T.Compose([
T.Resize((height, width)),
T.ToTensor(),
normalizer,
])
extfeat_loader = DataLoader(
Preprocessor(train_set, root=dataset.images_dir,
transform=transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return dataset, extfeat_loader
def calDis(qFeature, gFeature): # 246s
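# Pairwise squared Euclidean distance between L2-normalized query and gallery
# features: d(i, j) = ||x_i||^2 + ||y_j||^2 - 2 * x_i . y_j, which on unit
# vectors equals 2 - 2 * cos(x_i, y_j); clamped away from zero for stability.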
x, y = F.normalize(qFeature), F.normalize(gFeature)
# x, y = qFeature, gFeature
m, n = x.shape[0], y.shape[0]
disMat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(m, n) + \
torch.pow(y, 2).sum(dim=1, keepdim=True).expand(n, m).t()
disMat.addmm_(1, -2, x, y.t())
return disMat.clamp_(min=1e-5)
def labelUnknown(knownFeat, allLab, unknownFeat):
# allLab: cluster labels of the known (clustered) features
disMat = calDis(knownFeat, unknownFeat)
labLoc = disMat.argmin(dim=0)
return allLab[labLoc]
def labelNoise(feature, labels):
# split features into DBSCAN outliers (label == -1) and clustered (pure) samples
noiseFeat, pureFeat = feature[labels == -1, :], feature[labels != -1, :]
pureLabs = labels[labels != -1] # no outliers
unLab = labelUnknown(pureFeat, pureLabs, noiseFeat)
labels[labels == -1] = unLab
return labels.numpy()
def getCenter(features, labels):
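# Mean feature vector (center) of every non-outlier cluster (label != -1);
# returns a (num_clusters x feature_dim) tensor of stacked centers.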
allCenter = {}
features = features[labels != -1, :]
labels = labels[labels != -1]
for pid in set(labels):
allCenter[pid] = torch.from_numpy(features[labels == pid, :].mean(axis=0)).unsqueeze(0)
return torch.cat(list(allCenter.values()))
def main(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.benchmark = True
# Create data loaders
assert args.num_instances > 1, "num_instances should be greater than 1"
assert args.batch_size % args.num_instances == 0, \
'num_instances should divide batch_size'
if args.height is None or args.width is None:
args.height, args.width = (144, 56) if args.arch == 'inception' else \
(256, 128)
# get source data
src_dataset, src_extfeat_loader = \
get_source_data(args.src_dataset, args.data_dir, args.height,
args.width, args.batch_size, args.workers)
# get target data
tgt_dataset, num_classes, tgt_extfeat_loader, test_loader = \
get_data(args.tgt_dataset, args.data_dir, args.height,
args.width, args.batch_size, args.workers)
# Create model
# Hacking here to let the classifier be the number of source ids
if args.src_dataset == 'dukemtmc':
model = models.create(args.arch, num_classes=632, pretrained=False)
coModel = models.create(args.arch, num_classes=632, pretrained=False)
elif args.src_dataset == 'market1501':
model = models.create(args.arch, num_classes=676, pretrained=False)
coModel = models.create(args.arch, num_classes=676, pretrained=False)
elif args.src_dataset == 'msmt17':
model = models.create(args.arch, num_classes=1041, pretrained=False)
coModel = models.create(args.arch, num_classes=1041, pretrained=False)
elif args.src_dataset == 'cuhk03':
model = models.create(args.arch, num_classes=1230, pretrained=False)
coModel = models.create(args.arch, num_classes=1230, pretrained=False)
else:
raise RuntimeError('Please specify the number of classes (ids) of the network.')
# Load from checkpoint
if args.resume:
print('Resuming checkpoints from finetuned model on another dataset...\n')
checkpoint = load_checkpoint(args.resume)
model.load_state_dict(checkpoint['state_dict'], strict=False)
coModel.load_state_dict(checkpoint['state_dict'], strict=False)
else:
raise RuntimeWarning('Not using a pre-trained model.')
model = nn.DataParallel(model).cuda()
coModel = nn.DataParallel(coModel).cuda()
# Criterion
criterion = [
TripletLoss(args.margin, args.num_instances, isAvg=False, use_semi=False).cuda(),
TripletLoss(args.margin, args.num_instances, isAvg=False, use_semi=False).cuda()
]
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr
)
coOptimizer = torch.optim.Adam(
coModel.parameters(), lr=args.lr
)
optims = [optimizer, coOptimizer]
# training stage transformer on input images
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
train_transformer = T.Compose([
T.Resize((args.height, args.width)),
T.RandomHorizontalFlip(),
T.ToTensor(), normalizer,
T.RandomErasing(probability=0.5, sh=0.2, r1=0.3)
])
# # Start training
for iter_n in range(args.iteration):
if args.lambda_value == 0:
source_features = 0
else:
# get features of the source data
source_features, _ = extract_features(model, src_extfeat_loader, print_freq=args.print_freq, numStripe=None)
# synchronize feature order with src_dataset.train
source_features = torch.cat([source_features[f].unsqueeze(0) for f, _, _ in src_dataset.train], 0)
# extract training images' features
print('Iteration {}: Extracting Target Dataset Features...'.format(iter_n + 1))
target_features, _ = extract_features(model, tgt_extfeat_loader, print_freq=args.print_freq, numStripe=None)
# synchronize feature order with tgt_dataset.trainval
target_features = torch.cat([target_features[f].unsqueeze(0) for f, _, _ in tgt_dataset.trainval], 0)
# calculate distance and rerank result
print('Calculating feature distances...')
target_features = target_features.numpy()
rerank_dist = re_ranking(source_features, target_features, lambda_value=args.lambda_value)
if iter_n == 0:
# DBSCAN cluster
tri_mat = np.triu(rerank_dist, 1) # tri_mat.dim=2
tri_mat = tri_mat[np.nonzero(tri_mat)] # tri_mat.dim=1
tri_mat = np.sort(tri_mat, axis=None)
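# eps is chosen adaptively: take the smallest rho-fraction of all pairwise
# re-ranked distances and use their mean as the DBSCAN neighborhood radius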
top_num = np.round(args.rho * tri_mat.size).astype(int)
eps = tri_mat[:top_num].mean()
print('eps in cluster: {:.3f}'.format(eps))
cluster = DBSCAN(eps=eps, min_samples=4, metric='precomputed', n_jobs=8)
# select & cluster images as training set of this epochs
print('Clustering and labeling...')
labels = cluster.fit_predict(rerank_dist)
num_ids = len(set(labels)) - 1
print('Iteration {} has {} training ids'.format(iter_n + 1, num_ids))
# generate new dataset
new_dataset = []
# assign label for target ones
newLab = labelNoise(torch.from_numpy(target_features), torch.from_numpy(labels))
# unknownFeats = target_features[labels==-1,:]
counter = 0
from collections import defaultdict
realIDs, fakeIDs = defaultdict(list), []
for (fname, realID, cam), label in zip(tgt_dataset.trainval, newLab):
# no need to change _parsing_input in trainer.py or the sampler function after this relabeling
new_dataset.append((fname, label, cam))
realIDs[realID].append(counter)
fakeIDs.append(label)
counter += 1
precision, recall, fscore = calScores(realIDs, np.asarray(fakeIDs))
print('Iteration {} has {} training images'.format(iter_n + 1, len(new_dataset)))
print(f'precision:{precision * 100}, recall:{100 * recall}, fscore:{100 * fscore}')
train_loader = DataLoader(
Preprocessor(new_dataset, root=tgt_dataset.images_dir, transform=train_transformer),
batch_size=args.batch_size, num_workers=4,
sampler=RandomIdentitySampler(new_dataset, args.num_instances),
pin_memory=True, drop_last=True
)
trainer = CoTeaching(
model, coModel, train_loader, criterion, optims
)
# Start training
for epoch in range(args.epochs):
trainer.train(epoch, remRate=0.2 + (0.8 / args.iteration) * (1 + iter_n)) # to at most 80%
# test only
evaluator = Evaluator(model, print_freq=args.print_freq)
rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
# Evaluate
rank_score = evaluator.evaluate(test_loader, tgt_dataset.query, tgt_dataset.gallery)
save_checkpoint({
'state_dict': model.module.state_dict(),
'epoch': epoch + 1, 'best_top1': rank_score.market1501[0],
}, True, fpath=osp.join(args.logs_dir, 'adapted.pth.tar'))
return rank_score.map, rank_score.market1501[0]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Triplet loss classification")
# data
parser.add_argument('--src_dataset', type=str, default='dukemtmc',
choices=datasets.names())
parser.add_argument('--tgt_dataset', type=str, default='market1501',
choices=datasets.names())
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--split', type=int, default=0)
parser.add_argument('--noiseLam', type=float, default=0.5)
parser.add_argument('--height', type=int,
help="input height, default: 256 for resnet*, "
"144 for inception")
parser.add_argument('--width', type=int,
help="input width, default: 128 for resnet*, "
"56 for inception")
parser.add_argument('--combine-trainval', action='store_true',
help="train and val sets together for training, "
"val set alone for validation")
parser.add_argument('--num_instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 4")
# model
parser.add_argument('--arch', type=str, default='resnet50',
choices=models.names())
# loss
parser.add_argument('--margin', type=float, default=0.5,
help="margin of the triplet loss, default: 0.5")
parser.add_argument('--lambda_value', type=float, default=0.1,
help="balancing parameter, default: 0.1")
parser.add_argument('--rho', type=float, default=1.6e-3,
help="rho percentage, default: 1.6e-3")
# optimizer
parser.add_argument('--lr', type=float, default=6e-5,
help="learning rate of all parameters")
# training configs
parser.add_argument('--resume', type=str, metavar='PATH',
default='')
parser.add_argument('--evaluate', type=int, default=0,
help="evaluation only")
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--print_freq', type=int, default=1)
parser.add_argument('--iteration', type=int, default=10)
parser.add_argument('--epochs', type=int, default=30)
# metric learning
parser.add_argument('--dist_metric', type=str, default='euclidean',
choices=['euclidean', 'kissme'])
# misc
parser.add_argument('--data_dir', type=str, metavar='PATH',
default='')
parser.add_argument('--logs_dir', type=str, metavar='PATH',
default='')
args = parser.parse_args()
mean_ap, rank1 = main(args)
| [
"torch.nn.functional.normalize",
"torch.from_numpy",
"torch.manual_seed",
"torch.nn.DataParallel",
"torch.pow"
] | 1.3.1 | TencentYoutuResearch/PersonReID-ACT | 264b1b43f9424c297638ebf6f8f8ace09512ed29 |
1.4 | # -*- coding: utf-8 -*-
from paths import ROOT_PATH # isort:skip
from videoanalyst.config.config import cfg
from videoanalyst.config.config import specify_task
from videoanalyst.model import builder as model_builder
from videoanalyst.pipeline import builder as pipeline_builder
from videoanalyst.utils import complete_path_wt_root_in_cfg
from videoanalyst.pipeline.utils.bbox import xywh2xyxy, xyxy2xywh
import argparse
from loguru import logger
import cv2
import numpy as np
import time
import torch
font_size = 0.5
font_width = 1
def make_parser():
parser = argparse.ArgumentParser(
description="press s to select the target box,\n \
then press enter or space to confirm it or press c to cancel it,\n \
press c to stop tracking and press q to exit the program")
parser.add_argument(
"-cfg",
"--config",
default="experiments/siamfcpp/test/got10k/siamfcpp_alexnet-got.yaml",
type=str,
help='experiment configuration')
parser.add_argument("-d",
"--device",
default="cpu",
type=str,
help="torch.device, cuda or cpu")
parser.add_argument("-v",
"--video",
type=str,
default="webcam",
help="path to input video file, default is webcam")
parser.add_argument("-o",
"--output",
type=str,
default="",
help="path to dump the track video")
return parser
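# Hedged usage sketch (script name and paths are illustrative, not from the original repo):
#   python demo.py -cfg experiments/siamfcpp/test/got10k/siamfcpp_alexnet-got.yaml \
#       -d cuda -v /path/to/input.mp4 -o /path/to/output.avi
# Leaving -v at its default ("webcam") tracks from the local webcam stream instead.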
def main(args):
root_cfg = cfg
root_cfg.merge_from_file(args.config)
logger.info("Load experiment configuration at: %s" % args.config)
# resolve config
root_cfg = complete_path_wt_root_in_cfg(root_cfg, ROOT_PATH)
root_cfg = root_cfg.test
task, task_cfg = specify_task(root_cfg)
task_cfg.freeze()
window_name = task_cfg.exp_name
# build model
model = model_builder.build(task, task_cfg.model)
# build pipeline
pipeline = pipeline_builder.build(task, task_cfg.pipeline, model)
dev = torch.device(args.device)
pipeline.set_device(dev)
init_box = None
template = None
vw = None
if args.video == "webcam":
logger.info("[INFO] starting video stream...")
vs = cv2.VideoCapture(0)
vs.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
else:
vs = cv2.VideoCapture(args.video)
if args.output:
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
width, height = vs.get(3), vs.get(4)
vw = cv2.VideoWriter(args.output, fourcc, 25, (int(width), int(height)))
while vs.isOpened():
ret, frame = vs.read()
if ret:
if init_box is not None:
time_a = time.time()
rect_pred = pipeline.update(frame)
show_frame = frame.copy()
time_cost = time.time() - time_a
bbox_pred = xywh2xyxy(rect_pred)
bbox_pred = tuple(map(int, bbox_pred))
cv2.putText(show_frame,
"track cost: {:.4f} s".format(time_cost), (128, 20),
cv2.FONT_HERSHEY_COMPLEX, font_size, (0, 0, 255),
font_width)
cv2.rectangle(show_frame, bbox_pred[:2], bbox_pred[2:],
(0, 255, 0))
if template is not None:
show_frame[:128, :128] = template
else:
show_frame = frame
cv2.imshow(window_name, show_frame)
if vw is not None:
vw.write(show_frame)
key = cv2.waitKey(30) & 0xFF
if key == ord("q"):
break
# if the 's' key is selected, we are going to "select" a bounding
# box to track
elif key == ord("s"):
# select the bounding box of the object we want to track (make
# sure you press ENTER or SPACE after selecting the ROI)
box = cv2.selectROI(window_name,
frame,
fromCenter=False,
showCrosshair=True)
if box[2] > 0 and box[3] > 0:
init_box = box
template = cv2.resize(
frame[box[1]:box[1] + box[3], box[0]:box[0] + box[2]],
(128, 128))
pipeline.init(frame, init_box)
elif key == ord("c"):
init_box = None
template = None
vs.release()
if vw is not None:
vw.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args()
main(args)
| [
"torch.device"
] | 1.4.0 | GZHermit/video_analyst | 6233b19320e3d07b95fb1f782efd89b052a8cf4e |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
import torch
import pytorch_lightning as pl
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from tests.helpers import BoringModel
def test_finetuning_with_resume_from_checkpoint(tmpdir):
"""
This test validates that the generated ModelCheckpoint points to the correct best_model_path during test
"""
seed_everything(4)
checkpoint_callback = ModelCheckpoint(monitor="val_loss", dirpath=tmpdir, filename="{epoch:02d}", save_top_k=-1)
class ExtendedBoringModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.001)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("val_loss", loss, on_epoch=True, prog_bar=True)
model = ExtendedBoringModel()
model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=12,
limit_val_batches=6,
limit_test_batches=12,
callbacks=[checkpoint_callback],
logger=False,
)
trainer.fit(model)
assert os.listdir(tmpdir) == ["epoch=00.ckpt"]
best_model_paths = [checkpoint_callback.best_model_path]
results = []
for idx in range(3, 6):
# load from checkpoint
trainer = pl.Trainer(
default_root_dir=tmpdir,
max_epochs=idx,
limit_train_batches=12,
limit_val_batches=12,
limit_test_batches=12,
resume_from_checkpoint=best_model_paths[-1],
progress_bar_refresh_rate=0,
)
trainer.fit(model)
trainer.test()
results.append(deepcopy(trainer.callback_metrics))
best_model_paths.append(trainer.checkpoint_callback.best_model_path)
for idx in range(len(results) - 1):
assert results[idx]["val_loss"] > results[idx + 1]["val_loss"]
for idx, best_model_path in enumerate(best_model_paths):
if idx == 0:
assert best_model_path.endswith(f"epoch=0{idx}.ckpt")
else:
assert f"epoch={idx + 1}" in best_model_path
def test_accumulated_gradient_batches_with_resume_from_checkpoint(tmpdir):
"""
This test validates that accumulated gradient is properly recomputed and reset on the trainer.
"""
ckpt = ModelCheckpoint(dirpath=tmpdir, save_last=True)
model = BoringModel()
trainer_kwargs = dict(
max_epochs=1, accumulate_grad_batches={0: 2}, callbacks=ckpt, limit_train_batches=1, limit_val_batches=0
)
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
trainer_kwargs["max_epochs"] = 2
trainer_kwargs["resume_from_checkpoint"] = ckpt.last_model_path
trainer = Trainer(**trainer_kwargs)
trainer.fit(model)
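# Note (hedged, illustrative): passing accumulate_grad_batches as a dict maps an epoch to an
# accumulation factor that applies from that epoch onward, e.g. {0: 2, 4: 1} accumulates
# 2 batches per optimizer step from epoch 0 and reverts to 1 starting at epoch 4.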
| [
"torch.optim.lr_scheduler.StepLR"
] | 1.6 | PeppeSaccardi/pytorch-lightning | 046110797227c352126c779c207e076ce9682eae |
1.7 | """Attention networks."""
import logging
import torch
import torch.nn as nn
import bootleg.utils.model_utils
from bootleg.layers.helper_modules import MLP, AttnBlock, NormAndSum, SelfAttnBlock
from bootleg.symbols.constants import (
BERT_WORD_DIM,
DISAMBIG,
KG_BIAS_LOAD_CLASS,
MAIN_CONTEXT_MATRIX,
)
from bootleg.utils import model_utils
from bootleg.utils.embedding_utils import get_max_candidates
logger = logging.getLogger(__name__)
class AttnNetwork(nn.Module):
"""Base attention network.
Args:
args: args
entity_symbols: entity symbols
"""
def __init__(self, args, entity_symbols):
super(AttnNetwork, self).__init__()
self.num_entities_with_pad_and_nocand = (
entity_symbols.num_entities_with_pad_and_nocand
)
# Number of candidates
self.K = get_max_candidates(entity_symbols, args.data_config)
# Number of aliases
self.M = args.data_config.max_aliases
self.hidden_size = args.model_config.hidden_size
self.num_heads = args.model_config.num_heads
self.num_model_stages = args.model_config.num_model_stages
assert (
self.num_model_stages > 0
), f"You must have > 0 model stages. You have {self.num_model_stages}"
self.num_fc_layers = args.model_config.num_fc_layers
self.ff_inner_size = args.model_config.ff_inner_size
def forward(
self,
sent_embedding,
sent_embedding_mask,
entity_embedding,
entity_embedding_mask,
start_span_idx,
end_span_idx,
batch_on_the_fly_data,
):
"""Model forward.
Args:
sent_embedding: sentence embedding (B x N x L)
sent_embedding_mask: sentence embedding mask (B x N)
entity_embedding: entity embedding (B x M x K x H)
entity_embedding_mask: entity embedding mask (B x M x K)
start_span_idx: start mention index into sentence (B x M)
end_span_idx: end mention index into sentence (B x M)
batch_on_the_fly_data: batch on the fly dictionary with values (B x (M*K) x (M*K)) of KG adjacency matrices
Returns: Dict of Dict of intermediate layer candidate scores (B x M x K),
Dict of all output entity embeddings from each KG matrix (B x M x K x H)
"""
raise NotImplementedError
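# Shape walkthrough (hedged, hypothetical sizes): with batch B=2, sentence length N=100,
# M=10 aliases, K=30 candidates and hidden size H=512, the contract above consumes
# sent_embedding of shape (2 x 100 x L) and entity_embedding of shape (2 x 10 x 30 x 512),
# and each intermediate prediction layer emits candidate scores of shape (2 x 10 x 30).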
class Bootleg(AttnNetwork):
"""Bootleg attention network V1.
Args:
args: args
entity_symbols: entity symbols
"""
def __init__(self, args, entity_symbols):
super(Bootleg, self).__init__(args, entity_symbols)
self.dropout = args.train_config.dropout
# For each stage, create a transformer block for phrase (entity_word) and co-occurrence (self_entity) modules
self.attention_modules = nn.ModuleDict()
self.combine_modules = nn.ModuleDict()
for i in range(self.num_model_stages):
self.attention_modules[f"stage_{i}_entity_word"] = AttnBlock(
size=self.hidden_size,
ff_inner_size=args.model_config.ff_inner_size,
dropout=self.dropout,
num_heads=self.num_heads,
)
self.attention_modules[f"stage_{i}_self_entity"] = SelfAttnBlock(
size=self.hidden_size,
ff_inner_size=args.model_config.ff_inner_size,
dropout=self.dropout,
num_heads=self.num_heads,
)
self.combine_modules[f"stage_{i}_combine"] = NormAndSum(self.hidden_size)
# For the KG bias module
self.kg_bias_list = []
self.kg_bias_keys = []
for emb in args.data_config.ent_embeddings:
if emb.load_class == KG_BIAS_LOAD_CLASS:
# self.kg_bias_weights[emb.key] = torch.nn.Parameter(torch.tensor(2.0))
setattr(self, emb.key, torch.nn.Parameter(torch.tensor(2.0)))
self.kg_bias_list.append(getattr(self, emb.key))
self.kg_bias_keys.append(emb.key)
self.kg_bias_keys = sorted(self.kg_bias_keys)
# If we have kg bias terms, we want to take the average of those context matrices when generating the final
# context matrix to be returned. The no_kg_key is used for the context matrix without kg_bias terms added.
# If we use the key ending in _nokg, it will not be averaged in the final result. If we do not have kg bias
# terms, we want the nokg context matrix to be the final matrix. MAIN_CONTEXT_MATRIX key allows for this.
if len(self.kg_bias_keys) > 0:
self.no_kg_key = "context_matrix_nokg"
else:
self.no_kg_key = MAIN_CONTEXT_MATRIX
self.kg_softmax = nn.Softmax(dim=2)
# Two things to note. First, the attn mask is a block-diagonal matrix that prevents an alias from paying
# attention to its own K candidates in the attention layer. This works because the original input is added
# to the output of this attention, meaning an alias becomes its original embedding plus the contributions
# of the other aliases in the sentence. Second, the attn mask is added to the attention scores before the
# softmax (added to Q dot K^T) -- the softmax makes e^(-1e9+old_value) become zero. If it were set to -inf,
# you can get NaNs in the loss when all entities end up being masked out (e.g. only one alias in the sentence).
self.e2e_entity_mask = torch.zeros((self.K * self.M, self.K * self.M))
for i in range(self.M):
self.e2e_entity_mask[
i * self.K : (i + 1) * self.K, i * self.K : (i + 1) * self.K
] = 1.0
# Must manually move this to the device as it's not part of a module...we can probably fix this
self.e2e_entity_mask = self.e2e_entity_mask.masked_fill(
(self.e2e_entity_mask == 1), float(-1e9)
)
# Track attention weights
self.attention_weights = {}
# Prediction layers: each stage except the last gets a prediction layer
# Last layer's prediction head is added in slice heads
disambig_task = nn.ModuleDict()
for i in range(self.num_model_stages - 1):
disambig_task[bootleg.utils.model_utils.get_stage_head_name(i)] = MLP(
self.hidden_size, self.hidden_size, 1, self.num_fc_layers, self.dropout
)
self.predict_layers = {DISAMBIG: disambig_task}
self.predict_layers = nn.ModuleDict(self.predict_layers)
def forward(
self,
sent_embedding,
sent_embedding_mask,
entity_embedding,
entity_embedding_mask,
start_span_idx,
end_span_idx,
batch_on_the_fly_data,
):
"""Model forward.
Args:
sent_embedding: sentence embedding (B x N x L)
sent_embedding_mask: sentence embedding mask (B x N)
entity_embedding: entity embedding (B x M x K x H)
entity_embedding_mask: entity embedding mask (B x M x K)
start_span_idx: start mention index into sentence (B x M)
end_span_idx: end mention index into sentence (B x M)
batch_on_the_fly_data: batch on the fly dictionary with values (B x (M*K) x (M*K)) of KG adjacency matrices
Returns: Dict of Dict of intermediate layer candidate scores (B x M x K),
Dict of all output entity embeddings from each KG matrix (B x M x K x H)
"""
batch_size = sent_embedding.shape[0]
out = {DISAMBIG: {}}
# Create KG bias matrices for each kg bias key
kg_bias_norms = {}
for key in self.kg_bias_keys:
bias_weight = getattr(self, key) # self.kg_bias_weights[key]
kg_bias = (
batch_on_the_fly_data[key]
.float()
.to(sent_embedding.device)
.reshape(batch_size, self.M * self.K, self.M * self.K)
)
kg_bias_diag = kg_bias + bias_weight * torch.eye(self.M * self.K).repeat(
batch_size, 1, 1
).view(batch_size, self.M * self.K, self.M * self.K).to(kg_bias.device)
kg_bias_norm = self.kg_softmax(
kg_bias_diag.masked_fill((kg_bias_diag == 0), float(-1e9))
)
kg_bias_norms[key] = kg_bias_norm
sent_tensor = sent_embedding.transpose(0, 1)
# Resize the alias embeddings and the entity mask from B x M x K x D -> B x (M*K) x D
entity_embedding = entity_embedding.contiguous().view(
batch_size, self.M * self.K, self.hidden_size
)
entity_embedding = entity_embedding.transpose(0, 1) # reshape for attention
key_padding_mask_entities = entity_embedding_mask.contiguous().view(
batch_size, self.M * self.K
)
# Iterate through stages
query_tensor = entity_embedding
for stage_index in range(self.num_model_stages):
# As we are adding a residual in the attention modules, we can make embs empty
embs = []
context_mat_dict = {}
# ============================================================================
# Phrase module: compute attention between entities and words
# ============================================================================
word_entity_attn_context, word_entity_attn_weights = self.attention_modules[
f"stage_{stage_index}_entity_word"
](
q=query_tensor,
x=sent_tensor,
key_mask=sent_embedding_mask,
attn_mask=None,
)
# Add embeddings to be merged in the output
embs.append(word_entity_attn_context)
# Save the attention weights
self.attention_weights[
f"stage_{stage_index}_entity_word"
] = word_entity_attn_weights
# ============================================================================
# Co-occurrence module: compute self attention over entities
# ============================================================================
# Move entity mask to device
# TODO: move to device in init?
self.e2e_entity_mask = self.e2e_entity_mask.to(
key_padding_mask_entities.device
)
entity_attn_context, entity_attn_weights = self.attention_modules[
f"stage_{stage_index}_self_entity"
](
x=query_tensor,
key_mask=key_padding_mask_entities,
attn_mask=self.e2e_entity_mask,
)
# Mask out MxK of single aliases, alias_indices is batch x M, mask is true when single alias
non_null_aliases = (
self.K
- key_padding_mask_entities.reshape(batch_size, self.M, self.K).sum(-1)
) != 0
entity_attn_post_mask = (
(non_null_aliases.sum(1) == 1)
.unsqueeze(1)
.expand(batch_size, self.K * self.M)
.transpose(0, 1)
)
entity_attn_post_mask = entity_attn_post_mask.unsqueeze(-1).expand_as(
entity_attn_context
)
entity_attn_context = torch.where(
entity_attn_post_mask,
torch.zeros_like(entity_attn_context),
entity_attn_context,
)
# Add embeddings to be merged in the output
embs.append(entity_attn_context)
# Save the attention weights
self.attention_weights[
f"stage_{stage_index}_self_entity"
] = entity_attn_weights
# Combine module output
context_matrix_nokg = self.combine_modules[f"stage_{stage_index}_combine"](
embs
)
context_mat_dict[self.no_kg_key] = context_matrix_nokg.transpose(
0, 1
).reshape(batch_size, self.M, self.K, self.hidden_size)
# ============================================================================
# KG module: add in KG connectivity bias
# ============================================================================
for key in self.kg_bias_keys:
context_matrix_kg = torch.bmm(
kg_bias_norms[key], context_matrix_nokg.transpose(0, 1)
).transpose(0, 1)
context_matrix_kg = (context_matrix_nokg + context_matrix_kg) / 2
context_mat_dict[f"context_matrix_{key}"] = context_matrix_kg.transpose(
0, 1
).reshape(batch_size, self.M, self.K, self.hidden_size)
if stage_index < self.num_model_stages - 1:
score = model_utils.max_score_context_matrix(
context_mat_dict,
self.predict_layers[DISAMBIG][
bootleg.utils.model_utils.get_stage_head_name(stage_index)
],
)
out[DISAMBIG][
f"{bootleg.utils.model_utils.get_stage_head_name(stage_index)}"
] = score
# This will take the average of the context matrices whose keys do not end in "_nokg";
# if there are no kg bias terms, it will select context_matrix_nokg
# (as its key, in this setting, will not end in _nokg)
query_tensor = (
model_utils.generate_final_context_matrix(
context_mat_dict, ending_key_to_exclude="_nokg"
)
.reshape(batch_size, self.M * self.K, self.hidden_size)
.transpose(0, 1)
)
return {
"intermed_scores": out,
"ent_embs": context_mat_dict,
"final_scores": None,
}
class BootlegM2E(AttnNetwork):
"""Bootleg attention network with a mention to entity canidate tensformer
layer.
Args:
args: args
entity_symbols: entity symbols
"""
def __init__(self, args, entity_symbols):
super(BootlegM2E, self).__init__(args, entity_symbols)
self.dropout = args.train_config.dropout
# For each stage, create a transformer block for phrase (entity_word) and co-occurrence (self_entity) modules
self.attention_modules = nn.ModuleDict()
self.combine_modules = nn.ModuleDict()
for i in range(self.num_model_stages):
self.attention_modules[f"stage_{i}_entity_word"] = AttnBlock(
size=self.hidden_size,
ff_inner_size=args.model_config.ff_inner_size,
dropout=self.dropout,
num_heads=self.num_heads,
)
self.attention_modules[f"stage_{i}_self_entity"] = SelfAttnBlock(
size=self.hidden_size,
ff_inner_size=args.model_config.ff_inner_size,
dropout=self.dropout,
num_heads=self.num_heads,
)
self.attention_modules[f"stage_{i}_mention_entity"] = AttnBlock(
size=self.hidden_size,
ff_inner_size=args.model_config.ff_inner_size,
dropout=self.dropout,
num_heads=self.num_heads,
)
self.combine_modules[f"stage_{i}_combine"] = NormAndSum(self.hidden_size)
# For the KG bias module
# self.kg_bias_weights = nn.ParameterDict() # ParameterDicts are buggy in DataParallel
# self.kg_bias_list = []
self.kg_bias_keys = []
for emb in args.data_config.ent_embeddings:
if emb.load_class == KG_BIAS_LOAD_CLASS:
# self.kg_bias_weights[emb.key] = torch.nn.Parameter(torch.tensor(2.0))
# setattr(self, emb.key, torch.nn.Parameter(torch.tensor(2.0)))
# self.kg_bias_list.append(getattr(self, emb.key))
self.kg_bias_keys.append(emb.key)
self.kg_bias_keys = sorted(self.kg_bias_keys)
# If we have kg bias terms, we want to take the average of those context matrices when generating the final
# context matrix to be returned. The no_kg_key is used for the context matrix without kg_bias terms added.
# If we use the key ending in _nokg, it will not be averaged in the final result. If we do not have kg bias
# terms, we want the nokg context matrix to be the final matrix. MAIN_CONTEXT_MATRIX key allows for this.
if len(self.kg_bias_keys) > 0:
self.no_kg_key = "context_matrix_nokg"
else:
self.no_kg_key = MAIN_CONTEXT_MATRIX
self.kg_softmax = nn.Softmax(dim=2)
# Two things to note. First, the attn mask is a block-diagonal matrix that prevents an alias from paying
# attention to its own K candidates in the attention layer. This works because the original input is added
# to the output of this attention, meaning an alias becomes its original embedding plus the contributions
# of the other aliases in the sentence. Second, the attn mask is added to the attention scores before the
# softmax (added to Q dot K^T) -- the softmax makes e^(-1e9+old_value) become zero. If it were set to -inf,
# you can get NaNs in the loss when all entities end up being masked out (e.g. only one alias in the sentence).
self.e2e_entity_mask = torch.zeros((self.K * self.M, self.K * self.M))
for i in range(self.M):
self.e2e_entity_mask[
i * self.K : (i + 1) * self.K, i * self.K : (i + 1) * self.K
] = 1.0
# Must manually move this to the device as it's not part of a module...we can probably fix this
self.e2e_entity_mask = self.e2e_entity_mask.masked_fill(
(self.e2e_entity_mask == 1), float(-1e9)
)
# Track attention weights
self.attention_weights = {}
# Prediction layers: each stage except the last gets a prediction layer
# Last layer's prediction head is added in slice heads
disambig_task = nn.ModuleDict()
for i in range(self.num_model_stages - 1):
disambig_task[bootleg.utils.model_utils.get_stage_head_name(i)] = MLP(
self.hidden_size, self.hidden_size, 1, self.num_fc_layers, self.dropout
)
self.predict_layers = {DISAMBIG: disambig_task}
self.predict_layers = nn.ModuleDict(self.predict_layers)
def forward(
self,
sent_embedding,
sent_embedding_mask,
entity_embedding,
entity_embedding_mask,
start_span_idx,
end_span_idx,
batch_on_the_fly_data,
):
"""Model forward.
Args:
sent_embedding: sentence embedding (B x N x L)
sent_embedding_mask: sentence embedding mask (B x N)
entity_embedding: entity embedding (B x M x K x H)
entity_embedding_mask: entity embedding mask (B x M x K)
start_span_idx: start mention index into sentence (B x M)
end_span_idx: end mention index into sentence (B x M)
batch_on_the_fly_data: batch on the fly dictionary with values (B x (M*K) x (M*K)) of KG adjacency matrices
Returns: Dict of Dict of intermediate layer candidate scores (B x M x K),
Dict of all output entity embeddings from each KG matrix (B x M x K x H)
"""
batch_size = sent_embedding.shape[0]
out = {DISAMBIG: {}}
# Create KG bias matrices for each kg bias key
kg_bias_norms = {}
for key in self.kg_bias_keys:
kg_bias_norms[key] = (
batch_on_the_fly_data[key]
.float()
.reshape(batch_size, self.M * self.K, self.M * self.K)
)
# get mention embedding
# average words in mention; batch x M x dim
mention_tensor_start = model_utils.select_alias_word_sent(
start_span_idx, sent_embedding
)
mention_tensor_end = model_utils.select_alias_word_sent(
end_span_idx, sent_embedding
)
mention_tensor = (mention_tensor_start + mention_tensor_end) / 2
# reshape for alias attention where each mention attends to its K candidates
# query = batch*M x 1 x dim, key = value = batch*M x K x dim
# softmax(QK^T) -> batch*M x 1 x K
# softmax(QK^T)V -> batch*M x 1 x dim
mention_tensor = mention_tensor.reshape(
batch_size * self.M, 1, self.hidden_size
).transpose(0, 1)
# get sentence embedding; move batch to middle
sent_tensor = sent_embedding.transpose(0, 1)
# Resize the alias embeddings and the entity mask from B x M x K x D -> B x (M*K) x D
entity_embedding = entity_embedding.contiguous().view(
batch_size, self.M * self.K, self.hidden_size
)
entity_embedding = entity_embedding.transpose(0, 1) # reshape for attention
key_padding_mask_entities = entity_embedding_mask.contiguous().view(
batch_size, self.M * self.K
)
key_padding_mask_entities_mention = entity_embedding_mask.contiguous().view(
batch_size * self.M, self.K
)
# Mask of aliases; key_padding_mask_entities_mention of True means mask.
# We want to find aliases with all masked entities
key_padding_mask_mentions = (
torch.sum(~key_padding_mask_entities_mention, dim=-1) == 0
)
# Unmask these aliases to avoid nan in attention
key_padding_mask_entities_mention[key_padding_mask_mentions] = False
# Iterate through stages
query_tensor = entity_embedding
for stage_index in range(self.num_model_stages):
# As we are adding a residual in the attention modules, we can make embs empty
embs = []
context_mat_dict = {}
key_tensor_mention = (
query_tensor.transpose(0, 1)
.contiguous()
.reshape(batch_size, self.M, self.K, self.hidden_size)
.reshape(batch_size * self.M, self.K, self.hidden_size)
.transpose(0, 1)
)
# ============================================================================
# Phrase module: compute attention between entities and words
# ============================================================================
word_entity_attn_context, word_entity_attn_weights = self.attention_modules[
f"stage_{stage_index}_entity_word"
](
q=query_tensor,
x=sent_tensor,
key_mask=sent_embedding_mask,
attn_mask=None,
)
# Add embeddings to be merged in the output
embs.append(word_entity_attn_context)
# Save the attention weights
self.attention_weights[
f"stage_{stage_index}_entity_word"
] = word_entity_attn_weights
# ============================================================================
# Co-occurrence module: compute self attention over entities
# ============================================================================
# Move entity mask to device
# TODO: move to device in init?
self.e2e_entity_mask = self.e2e_entity_mask.to(
key_padding_mask_entities.device
)
entity_attn_context, entity_attn_weights = self.attention_modules[
f"stage_{stage_index}_self_entity"
](
x=query_tensor,
key_mask=key_padding_mask_entities,
attn_mask=self.e2e_entity_mask,
)
# Mask out MxK of single aliases, alias_indices is batch x M, mask is true when single alias
non_null_aliases = (
self.K
- key_padding_mask_entities.reshape(batch_size, self.M, self.K).sum(-1)
) != 0
entity_attn_post_mask = (
(non_null_aliases.sum(1) == 1)
.unsqueeze(1)
.expand(batch_size, self.K * self.M)
.transpose(0, 1)
)
entity_attn_post_mask = entity_attn_post_mask.unsqueeze(-1).expand_as(
entity_attn_context
)
entity_attn_context = torch.where(
entity_attn_post_mask,
torch.zeros_like(entity_attn_context),
entity_attn_context,
)
# Add embeddings to be merged in the output
embs.append(entity_attn_context)
# Save the attention weights
self.attention_weights[
f"stage_{stage_index}_self_entity"
] = entity_attn_weights
# ============================================================================
# Mention module: compute attention between entities and mentions
# ============================================================================
# output is 1 x batch*M x dim
(
mention_entity_attn_context,
mention_entity_attn_weights,
) = self.attention_modules[f"stage_{stage_index}_mention_entity"](
q=mention_tensor,
x=key_tensor_mention,
key_mask=key_padding_mask_entities_mention,
attn_mask=None,
)
# key_padding_mask_mentions mentions have all padded candidates,
# meaning their rows in the context matrix are all NaN
mention_entity_attn_context[key_padding_mask_mentions.unsqueeze(0)] = 0
mention_entity_attn_context = (
mention_entity_attn_context.expand(
self.K, batch_size * self.M, self.hidden_size
)
.transpose(0, 1)
.reshape(batch_size, self.M * self.K, self.hidden_size)
.transpose(0, 1)
)
# Add embeddings to be merged in the output
embs.append(mention_entity_attn_context)
# Save the attention weights
self.attention_weights[
f"stage_{stage_index}_mention_entity"
] = mention_entity_attn_weights
# Combine module output
context_matrix_nokg = self.combine_modules[f"stage_{stage_index}_combine"](
embs
)
context_mat_dict[self.no_kg_key] = context_matrix_nokg.transpose(
0, 1
).reshape(batch_size, self.M, self.K, self.hidden_size)
# ============================================================================
# KG module: add in KG connectivity bias
# ============================================================================
for key in self.kg_bias_keys:
context_matrix_kg = torch.bmm(
kg_bias_norms[key], context_matrix_nokg.transpose(0, 1)
).transpose(0, 1)
context_matrix_kg = (context_matrix_nokg + context_matrix_kg) / 2
context_mat_dict[f"context_matrix_{key}"] = context_matrix_kg.transpose(
0, 1
).reshape(batch_size, self.M, self.K, self.hidden_size)
if stage_index < self.num_model_stages - 1:
score = model_utils.max_score_context_matrix(
context_mat_dict,
self.predict_layers[DISAMBIG][
bootleg.utils.model_utils.get_stage_head_name(stage_index)
],
)
out[DISAMBIG][
f"{bootleg.utils.model_utils.get_stage_head_name(stage_index)}"
] = score
# This will take the average of the context matrices whose keys do not end in "_nokg";
# if there are no kg bias terms, it will select context_matrix_nokg
# (as its key, in this setting, will not end in _nokg)
query_tensor = (
model_utils.generate_final_context_matrix(
context_mat_dict, ending_key_to_exclude="_nokg"
)
.reshape(batch_size, self.M * self.K, self.hidden_size)
.transpose(0, 1)
)
return {
"intermed_scores": out,
"ent_embs": context_mat_dict,
"final_scores": None,
}
class BERTNED(AttnNetwork):
"""NED Baseline model using BERT.
Args:
args: args
entity_symbols: entity symbols
"""
def __init__(self, args, entity_symbols):
super(BERTNED, self).__init__(args, entity_symbols)
self.dropout = args.train_config.dropout
self.span_proj = MLP(
input_size=2 * BERT_WORD_DIM,
num_hidden_units=None,
output_size=self.hidden_size,
num_layers=1,
)
# Prediction layers
disambig_task = nn.ModuleDict()
disambig_task["final"] = MLP(
self.hidden_size, self.hidden_size, 1, self.num_fc_layers, self.dropout
)
self.predict_layers = {DISAMBIG: disambig_task}
self.predict_layers = nn.ModuleDict(self.predict_layers)
def forward(
self,
sent_embedding,
sent_embedding_mask,
entity_embedding,
entity_embedding_mask,
start_span_idx,
end_span_idx,
batch_on_the_fly_data,
):
"""Model forward.
Args:
sent_embedding: sentence embedding (B x N x L)
sent_embedding_mask: sentence embedding mask (B x N)
entity_embedding: entity embedding (B x M x K x H)
entity_embedding_mask: entity embedding mask (B x M x K)
start_span_idx: start mention index into sentence (B x M)
end_span_idx: end mention index into sentence (B x M)
batch_on_the_fly_data: batch on the fly dictionary with values (B x (M*K) x (M*K)) of KG adjacency matrices
Returns: Dict of Dict of intermediate output layer scores (will be empty for this model),
Output entity embeddings (B x M x K x H),
Candidate scores (B x M x K)
"""
out = {DISAMBIG: {}}
context_mat_dict = {}
batch_size, M, K, emb_dim = entity_embedding.shape
alias_start_idx_sent = start_span_idx
alias_end_idx_sent = end_span_idx
assert (
emb_dim == self.hidden_size
), f"BERT NED requires the learned entity embedding dim be the same as the hidden size"
assert alias_start_idx_sent.shape == alias_end_idx_sent.shape
# Get alias words from sent embedding then cat and proj
alias_start_word_tensor = model_utils.select_alias_word_sent(
alias_start_idx_sent, sent_embedding
)
alias_end_word_tensor = model_utils.select_alias_word_sent(
alias_end_idx_sent, sent_embedding
)
alias_pair_word_tensor = torch.cat(
[alias_start_word_tensor, alias_end_word_tensor], dim=-1
)
alias_emb = (
self.span_proj(alias_pair_word_tensor)
.unsqueeze(2)
.expand(batch_size, M, self.K, self.hidden_size)
)
alias_emb = (
alias_emb.contiguous()
.reshape((batch_size * M * self.K), self.hidden_size)
.unsqueeze(1)
)
# entity_embedding_mask: if I don't have 30 candidates, use a mask to fill the rest of the
# matrix for empty candidates
entity_embedding_zeroed = torch.where(
entity_embedding_mask.unsqueeze(-1),
torch.zeros_like(entity_embedding),
entity_embedding,
)
entity_embedding_tensor = (
entity_embedding_zeroed.contiguous()
.reshape((batch_size * M * self.K), self.hidden_size)
.unsqueeze(-1)
)
# Performs a batch-wise dot product across the dim=0 dimension
score = (
torch.bmm(alias_emb, entity_embedding_tensor)
.unsqueeze(-1)
.reshape(batch_size, M, self.K)
)
context_mat_dict[DISAMBIG] = entity_embedding_tensor.reshape(
batch_size, M, self.K, self.hidden_size
)
return {
"intermed_scores": out,
"ent_embs": context_mat_dict,
"final_scores": score,
}
| [
"torch.zeros",
"torch.cat",
"torch.nn.Softmax",
"torch.nn.ModuleDict",
"torch.bmm",
"torch.tensor",
"torch.eye",
"torch.zeros_like",
"torch.sum"
] | 1.7.0 | Mehrad0711/bootleg | f812b6200eb84b0163d353f0d4f73308a921fcfa |
1.1 | """
@author: Patrik Purgai
@copyright: Copyright 2019, supervised-translation
@license: MIT
@email: [email protected]
@date: 2019.04.04.
"""
# pylint: disable=no-member
# pylint: disable=not-callable
import torch
import random
from torch.nn.modules import (
Module, ModuleList)
from torch.nn.functional import (
log_softmax, softmax, linear,
embedding)
from torch.nn import (
Linear, Softmax, Parameter,
GRU, Dropout, Embedding)
def setup_model_args(parser):
"""
Sets up the model arguments.
"""
parser.add_argument(
'--hidden_size',
type=int,
default=256,
help='Hidden size of the model.')
parser.add_argument(
'--embedding_size',
type=int,
default=128,
help='Embedding dimension for the tokens.')
def create_model(args, tokenizers, device):
"""
Creates the sequence to sequence model.
"""
source_tokenizer, target_tokenizer = tokenizers
special_ids = target_tokenizer.bos_id(), \
target_tokenizer.eos_id(), source_tokenizer.pad_id(), \
target_tokenizer.pad_id(), source_tokenizer.unk_id()
tensor_indices = [
torch.tensor(i).to(device) for i in special_ids]
model = Seq2Seq(
source_vocab_size=len(source_tokenizer),
target_vocab_size=len(target_tokenizer),
indices=tensor_indices,
**vars(args)).to(device)
return model
def neginf(dtype):
"""
Return a representable finite
number near -inf for a dtype.
"""
if dtype is torch.float16:
return -65504
else:
return -1e20
# NOTE currently unused function
def embeddeding_dropout(embed, inputs, training, mask=None, p=0.1):
"""
Applies dropout to the embedding layer based on
https://arxiv.org/pdf/1512.05287.pdf. The code is
based on salesforce/awd-lstm-lm.
"""
if not training:
masked_embed_weight = embed.weight
if mask is not None:
# masks might be provided, which is useful for shared
# dropout masks over the whole sequence of inputs
masked_embed_weight = mask * embed.weight
elif p:
mask = embed.weight.new_empty((embed.weight.size(0), 1))
mask = mask.bernoulli_(1 - p).expand_as(embed.weight) / (1 - p)
masked_embed_weight = mask * embed.weight
else:
masked_embed_weight = embed.weight
return embedding(
inputs, masked_embed_weight, embed.padding_idx,
embed.max_norm, embed.norm_type,
embed.scale_grad_by_freq, embed.sparse)
class Seq2Seq(Module):
"""
The sequence-to-sequence model.
"""
def __init__(self, embedding_size, hidden_size, indices,
source_vocab_size, target_vocab_size, **kwargs):
super().__init__()
self.start_idx, self.end_idx, \
self.pad_idx, _, self.unk_idx = indices
self.encoder = Encoder(
input_size=embedding_size,
hidden_size=hidden_size,
pad_idx=self.pad_idx,
vocab_size=source_vocab_size)
self.decoder = Decoder(
input_size=embedding_size,
hidden_size=hidden_size,
vocab_size=target_vocab_size)
def forward(self, inputs, attn_mask=None, targets=None,
max_len=50):
"""
Runs the inputs through the encoder-decoder model.
"""
# inputs are expected in batch-first format
batch_size = inputs.size(0)
max_len = targets.size(1) if targets is not None \
else max_len
if attn_mask is None:
attn_mask = inputs.eq(self.pad_idx)
# the number of layers in the decoder must be equal
# to the number of layers in the encoder because of
# the initial hidden states from the encoder
encoder_outputs, hidden_states = self.encoder(inputs)
scores = []
preds = self.start_idx.detach().expand(batch_size, 1)
for idx in range(max_len):
# if targets are provided and training then apply
# teacher forcing 50% of the time
if targets is not None and self.training and \
random.random() > 0.5:
prev_output = targets[:, idx].unsqueeze(1)
else:
prev_output = preds[:, -1:]
step_scores, hidden_states = self.decoder(
inputs=prev_output,
encoder_outputs=encoder_outputs,
prev_hiddens=hidden_states,
attn_mask=attn_mask)
_, step_preds = step_scores.max(dim=-1)
preds = torch.cat([preds, step_preds], dim=-1)
scores.append(step_scores)
scores = torch.cat(scores, dim=1)
preds = preds.narrow(1, 1, preds.size(1) - 1)
return scores, preds
class Encoder(Module):
"""
Encoder module for the seq2seq model.
"""
def __init__(self, input_size, hidden_size, pad_idx,
vocab_size):
super().__init__()
self.embedding = Embedding(
num_embeddings=vocab_size,
embedding_dim=input_size,
padding_idx=pad_idx)
self.dropout = Dropout(p=0.1)
self.merge = Linear(
in_features=hidden_size * 2,
out_features=hidden_size,
bias=False)
# creating rnn layer as module list so locked
# dropout can be applied between each layer
# NOTE: currently not using weight drop, because
# it is incompatible with apex
self.rnn = ModuleList([
GRU(input_size=input_size,
hidden_size=hidden_size,
bidirectional=True,
batch_first=True)] + [
GRU(input_size=hidden_size,
hidden_size=hidden_size,
batch_first=True)
for _ in range(2)
])
def forward(self, inputs):
"""
Computes the embeddings and runs them through an RNN.
"""
embedded = self.embedding(inputs)
embedded = self.dropout(embedded)
outputs, hidden_state = self.rnn[0](embedded)
# merging the two directions of bidirectional layer
# by summing along the first axis
hidden_states = [hidden_state.sum(0, keepdim=True)]
outputs = self.merge(outputs)
for layer in self.rnn[1:]:
outputs, hidden_state = layer(outputs)
outputs = self.dropout(outputs)
hidden_states.append(hidden_state)
return outputs, hidden_states
class Decoder(Module):
"""
Decoder module for the seq2seq.
"""
def __init__(self, input_size, hidden_size, vocab_size):
super().__init__()
self.embedding = Embedding(
num_embeddings=vocab_size,
embedding_dim=input_size)
self.dropout = Dropout(p=0.1)
self.rnn = ModuleList([
GRU(input_size=input_size,
hidden_size=hidden_size,
batch_first=True)] + [
GRU(input_size=hidden_size,
hidden_size=hidden_size,
batch_first=True)
for _ in range(2)
])
self.attn = Attention(hidden_size=hidden_size)
self.out_bias = Parameter(torch.zeros((vocab_size, )))
self.out_weight = self.embedding.weight
def forward(self, inputs, encoder_outputs, prev_hiddens,
attn_mask=None, embed_mask=None):
"""
Applies decoding with attention mechanism, mixture
of softmaxes and multi dropout during training.
MoS implementation is taken from
"""
embedded = self.embedding(inputs)
output = self.dropout(embedded)
hidden_states = []
for idx, layer in enumerate(self.rnn):
output, hidden_state = layer(
output, prev_hiddens[idx])
output = self.dropout(output)
hidden_states.append(hidden_state)
# NOTE attention weights are not used currently
# (they could be exported for visualization)
output, _ = self.attn(
decoder_output=output,
hidden_state=hidden_state,
encoder_outputs=encoder_outputs,
attn_mask=attn_mask)
logits = linear(
output, self.out_weight, self.out_bias)
log_probs = log_softmax(logits, dim=-1)
return log_probs, hidden_states
class Attention(Module):
"""
Luong style general attention from
https://arxiv.org/pdf/1508.04025.pdf.
"""
def __init__(self, hidden_size):
super().__init__()
self.project = Linear(
in_features=hidden_size,
out_features=hidden_size,
bias=False)
self.combine = Linear(
in_features=hidden_size * 2,
out_features=hidden_size,
bias=False)
def forward(self, decoder_output, hidden_state,
encoder_outputs, attn_mask=None):
"""
Applies attention by creating the weighted
context vector. Implementation is based on
`IBM/pytorch-seq2seq`.
"""
hidden_state = self.project(hidden_state)
hidden_state = hidden_state.transpose(0, 1)
encoder_outputs_t = encoder_outputs.transpose(1, 2)
attn_scores = torch.bmm(
hidden_state, encoder_outputs_t)
# applying mask on padded values of the input
# NOTE during beam search mask might not be provided
if attn_mask is not None:
attn_scores = attn_scores.squeeze(1)
attn_scores.masked_fill_(
attn_mask, neginf(attn_scores.dtype))
attn_scores = attn_scores.unsqueeze(1)
attn_weights = softmax(attn_scores, dim=-1)
attn_applied = torch.bmm(
attn_weights, encoder_outputs)
stacked = torch.cat(
[decoder_output, attn_applied], dim=-1)
outputs = self.combine(stacked)
return outputs, attn_weights
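# Hedged usage sketch (dummy tensors, not part of the original module):
# attn = Attention(hidden_size=256)
# dec_out = torch.randn(8, 1, 256)    # batch x 1 x hidden (current decoder step)
# hidden = torch.randn(1, 8, 256)     # num_layers x batch x hidden (top GRU layer)
# enc_out = torch.randn(8, 20, 256)   # batch x src_len x hidden
# ctx, weights = attn(dec_out, hidden, enc_out)
# ctx.shape == (8, 1, 256); weights.shape == (8, 1, 20)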
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.GRU",
"torch.bmm",
"torch.nn.functional.embedding",
"torch.nn.functional.log_softmax",
"torch.nn.functional.linear",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.nn.Embedding"
] | 1.1.0 | Mrpatekful/supervised-translation | d03db6a0fc25900fd42b8057a12adad0b8d025f8 |
1.5 | from typing import Iterable, Callable
import torch
from torch.optim import Optimizer
def compute_sam(group: dict, closure: Callable):
grads = []
params_with_grads = []
rho = group['rho']
# collect gradients and the parameters that own them
for p in group['params']:
if p.grad is not None:
# without clone().detach(), p.grad will be zeroed by closure()
grads.append(p.grad.clone().detach())
params_with_grads.append(p)
device = grads[0].device
# compute \hat{\epsilon} = \rho * g / \|g\|_2
grad_norm = torch.stack(
[g.detach().norm(2).to(device) for g in grads]).norm(2)
epsilon = grads # alias for readability
torch._foreach_mul_(epsilon, rho / grad_norm)
# virtual step toward \epsilon
torch._foreach_add_(params_with_grads, epsilon)
# compute g=\nabla_w L_B(w)|_{w+\hat{\epsilon}}
closure()
# virtual step back to the original point
torch._foreach_sub_(params_with_grads, epsilon)
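# Hedged usage sketch (outer optimizer and closure are illustrative, not part of this file):
# def closure():
#     optimizer.zero_grad()
#     loss = loss_fn(model(inputs), targets)
#     loss.backward()
#     return loss
# closure()                              # populate .grad at the current point w
# for group in optimizer.param_groups:   # each group must carry a 'rho' entry
#     compute_sam(group, closure)        # grads now hold grad L(w + epsilon), params back at w
# optimizer.step()                       # SAM update using the perturbed gradients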
| [
"torch._foreach_sub_",
"torch._foreach_add_",
"torch._foreach_mul_"
] | 1.5.0 | tourdeml/SAM | 08cb3cccb39157859a1c77ef1e1852120df4a790 |
1.9 | # -*- coding: utf-8 -*-
# pylint: disable=E1101
"""
Deprecated. Use named_interface.BVBQMixMVN.
Won't be documented for that reason.
"""
import torch
from . import utils
from . import bvbq
from . import distributions
from . import gp
from . import acquisition
from . import metrics
class BVBQMixMVN(object):
def __init__(self, eval_function, ndim):
self.set_eval_function(eval_function)
self.ndim = ndim
self.logprobgp = None
self.mixmeans = None
self.mixvars = None
self.mixweights = None
self.nmixtures = 0
def initialize_data(self, xdata, ydata, kind='smatern52',
noise=0.0, mean=-30.0, empirical_params=False,
**kwargs):
# TODO : Assertions, customizations and new policies
logprobgp = gp.SimpleGP(self.ndim, kind=kind,
noise=noise, zeromax=True)
logprobgp.mean = mean
logprobgp.fix_mean()
logprobgp.fix_noise()
logprobgp.set_data(xdata, ydata, empirical_params=empirical_params)
self.logprobgp = logprobgp
def initialize_components(self, init_policy='manual', **kwargs):
# TODO : Assertions, customization and new policies
assert init_policy in ['manual', 'manual_mix']
if init_policy == 'manual':
mean = kwargs.get('mean')
var = kwargs.get('var')
mixmeans = torch.atleast_2d(utils.tensor_convert(mean))
mixvars = torch.atleast_2d(utils.tensor_convert(var))
mixweights = torch.ones(1)
nmixtures = 1
elif init_policy == 'manual_mix':
# NOTE: assumed kwargs keys, mirroring the 'manual' branch above
mixmeans = torch.atleast_2d(utils.tensor_convert(kwargs.get('mixmeans')))
mixvars = torch.atleast_2d(utils.tensor_convert(kwargs.get('mixvars')))
mixweights = utils.tensor_convert(kwargs.get('mixweights'))
nmixtures = mixmeans.shape[0]
self.mixmeans = mixmeans
self.mixvars = mixvars
self.mixweights = mixweights
self.nmixtures = nmixtures
def update_distribution(self):
#TODO : Customization
mean, var = bvbq.propose_component_mvn_mixmvn_relbo(
self.logprobgp,
self.mixmeans,
self.mixvars,
self.mixweights)
mixmeans, mixvars, mixweights = bvbq.update_distribution_mvn_mixmvn(
self.logprobgp,
mean, var,
self.mixmeans,
self.mixvars,
self.mixweights)
self.mixmeans = mixmeans
self.mixvars = mixvars
self.mixweights = mixweights
def update_evaluations(self, name='PP'):
x0 = self.distribution.sample(1)[0, :]
x = acquisition.acquire_next_point_mixmvn(x0,
self.logprobgp,
self.distribution,
name=name)
y = self.evaluate_single(x)
# FIXME: Fix this function
# self.logprobgp.update(x,y)
# FIXME : Substitute below lines for actual (fixed) efficient update above
X = torch.vstack([self.eval_points, x])
y = torch.vstack([self.eval_values, y])
self.logprobgp.set_data(X, y)
def evaluate_single(self, x):
return torch.squeeze(self.eval_function(x))
def fit_all_parameters(self):
#TODO : Customization
mixmeans, mixvars, mixweights = bvbq.fit_mixmvn_elbo(
self.logprobgp, self.mixmeans, self.mixvars, self.mixweights)
self.mixmeans = mixmeans
self.mixvars = mixvars
self.mixweights = mixweights
def fit_all_weights(self):
#TODO : Customization
mixmeans, mixvars, mixweights = bvbq.reweight_mixmvn_elbo(
self.logprobgp, self.mixmeans, self.mixvars, self.mixweights)
self.mixmeans = mixmeans
self.mixvars = mixvars
self.mixweights = mixweights
def set_eval_function(self, eval_function):
self._eval_function = eval_function
self.eval_function = utils.numpy_to_torch_wrapper(eval_function)
def elbo_metric(self, nsamples=1000):
return metrics.bq_mixmvn_elbo_with_var(self.logprobgp,
self.mixmeans,
self.mixvars,
self.mixweights,
nsamples=nsamples)
def optimize_gp_params(self, *args, **kwargs):
baseopt = kwargs.get('baseopt', 'QN')
kwargs.pop('baseopt', None)
assert baseopt in ['QN', 'SGD']
if baseopt == 'QN':
return self.optimize_gp_params_qn(*args, **kwargs)
elif baseopt == 'SGD':
return self.optimize_gp_params_sgd(*args, **kwargs)
def suggest_initialization_points(self, n):
raise NotImplementedError
#return xdata
@property
def distribution(self):
return distributions.MixtureDiagonalNormalDistribution(
self.mixmeans, self.mixvars, self.mixweights)
# XXX: This actually performs computation
@property
def optimize_gp_params_qn(self):
return self.logprobgp.optimize_params_qn
@property
def optimize_gp_params_sgd(self):
return self.logprobgp.optimize_params_sgd
@property
def eval_points(self):
return self.logprobgp.X
@property
def eval_values(self):
return self.logprobgp.y
| [
"torch.vstack",
"torch.ones"
] | 1.9.0 | DFNaiff/BVBQ | 48f0eb624483f67b748d791efc0c06ddfb6e0646 |
1.2 | import torch
import torch.nn as nn
class LabelSmoothingLoss(nn.Module):
"""
Provides Label-Smoothing loss.
Args:
        class_num (int): the number of classes in the classification task
        ignore_index (int): index that is ignored when calculating the loss
        smoothing (float): ratio of smoothing (confidence = 1.0 - smoothing)
        dim (int): dimension along which the loss is calculated
        logit (torch.Tensor): log-probability distribution output by the model
        target (torch.Tensor): ground-truth labels encoded as integers that directly index words in the label
Returns: label_smoothed
- **label_smoothed** (float): sum of loss
Reference:
https://github.com/pytorch/pytorch/issues/7455
"""
def __init__(self, class_num, ignore_index, smoothing=0.1, dim=-1):
super(LabelSmoothingLoss, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.class_num = class_num
self.dim = dim
self.ignore_index = ignore_index
def forward(self, logit, target):
with torch.no_grad():
label_smoothed = torch.zeros_like(logit)
label_smoothed.fill_(self.smoothing / (self.class_num - 1))
label_smoothed.scatter_(1, target.data.unsqueeze(1), self.confidence)
label_smoothed[target == self.ignore_index, :] = 0
return torch.sum(-label_smoothed * logit) | [
"torch.zeros_like",
"torch.no_grad",
"torch.sum"
] | 1.2.0 | ngbsLab/Korean-Speech-Recognition | 3867bf7d23222da6812c9b98a93d3c6f7b3c80fc |
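A minimal usage sketch for the LabelSmoothingLoss above; the vocabulary size, padding index, and tensor shapes are placeholder assumptions, and the loss is fed log-probabilities as its docstring requires.
import torch
import torch.nn.functional as F
vocab_size, pad_id = 10, 0                               # illustrative values
criterion = LabelSmoothingLoss(class_num=vocab_size, ignore_index=pad_id, smoothing=0.1)
logits = torch.randn(4, vocab_size, requires_grad=True)  # raw model outputs for 4 tokens
log_probs = F.log_softmax(logits, dim=-1)                # the loss expects log-probabilities
target = torch.tensor([3, 7, pad_id, 1])                 # the padded position contributes nothing
loss = criterion(log_probs, target)
loss.backward()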
1.9 | import models.SingleLayer_net as single_layer
import loss_functions.rank_based_loss as rbl
# import wandb
import torch
import utils.data_functions as df
import os
import json
import pandas as pd
import csv
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
# wandb.init(project='example')
exp_name = 'example'
# wandb.run.name = exp_name
standardized_data = True
save_training_embeddings_to_plot = True
shuffle = False
drop_last = False
experiments_folder ="./example_data"
initial_embeddings_path = os.path.join(experiments_folder, 'Normalized_VGGish_embeddings_based_on_Training_Set')
train_initial_embeddings_path = os.path.join(initial_embeddings_path, 'train')
val_initial_embeddings_path = os.path.join(initial_embeddings_path, 'val')
test_initial_embeddings_path = os.path.join(initial_embeddings_path, 'test')
results_folder = os.path.join(experiments_folder, "results_"+exp_name)
checkpoints_folder = os.path.join(results_folder, "checkpoints")
if not os.path.exists(checkpoints_folder):
os.makedirs(checkpoints_folder)
if save_training_embeddings_to_plot:
if not os.path.exists(os.path.join(checkpoints_folder, "Embeddings_plot")):
os.mkdir(os.path.join(checkpoints_folder, "Embeddings_plot"))
train_df = pd.read_csv(os.path.join(experiments_folder, 'train.csv'), dtype = str)
val_df = pd.read_csv(os.path.join(experiments_folder, 'val.csv'), dtype = str)
test_df = pd.read_csv(os.path.join(experiments_folder, 'test.csv'), dtype = str)
configs = {"EMBEDDINGS_SIZE" : 128,
"output_EMBEDDINGS_SIZE" :3,
"EARLY_STOPPING_PTC" : 20,
"LR" : 1e-5,
"BATCH_SIZE" : 12,
"n_epochs" : 100,
}
params = {'batch_size': configs["BATCH_SIZE"],'shuffle': shuffle, 'drop_last': drop_last}
training_set = df.RankBasedLossHierarchicalLabelsEmbeddings(train_df, train_initial_embeddings_path, target_labels='hierarchical_labels')#,'species','taxon'])
training_generator = torch.utils.data.DataLoader(training_set, **params)
len_train = len(training_set)
validation_set = df.RankBasedLossHierarchicalLabelsEmbeddings(val_df , val_initial_embeddings_path, target_labels='hierarchical_labels')#,'species','taxon'])
params_val = {'batch_size': configs["BATCH_SIZE"],'shuffle': False, 'drop_last': False}
validation_generator = torch.utils.data.DataLoader(validation_set, **params_val)
len_val = len(validation_set)
model =single_layer.SingleLayerHypersphereConstraint(configs)
# wandb.watch(model)
# wandb.config = configs
# wandb.config["architecture"] = "LinLayer_cosinedist"
# wandb.config["dataset"] = "TuT"
with open(os.path.join(results_folder, 'configs_dict'), "w") as c:
json.dump(configs, c)
checkpoint_name = rbl.train_RbL(model, training_generator, validation_generator,
checkpoints_folder, configs['EARLY_STOPPING_PTC'], save_training_embeddings_to_plot,
configs['n_epochs'], configs, distance='cosine',
number_of_ranks = 4)
print( "\nFinished training, will now use the checkpoint to generate embeddings for the test set:")
# Predict with checkpoint:
# if save_embeddings_to_plot:
if not os.path.exists(os.path.join(results_folder, "test_Embeddings_plot")):
os.mkdir(os.path.join(results_folder, "test_Embeddings_plot"))
test_set = df.RankBasedLossHierarchicalLabelsEmbeddings(test_df, test_initial_embeddings_path, target_labels = 'hierarchical_labels')
test_generator = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False)
len_test = len(test_set)
# load the checkpoint, configs and model
with open(os.path.join(results_folder, "configs_dict") )as c:
configs = json.load(c)
model=single_layer.SingleLayerHypersphereConstraint(configs)
model.load_state_dict(torch.load(checkpoint_name)["net_dict"])
sil_id, sil_species =rbl.predict(model, test_generator, configs, results_folder)
print("sil_fine level", sil_id)
print('sil_coarse level', sil_species)
with open(os.path.join(results_folder, 'silhouettes_on_test_set.csv'), 'w') as fout:
writer = csv.writer(fout)
writer.writerow(['sil_fine_level', str(sil_id)])
writer.writerow(['sil_coarse_level', str(sil_species)]) | [
"torch.utils.data.DataLoader",
"torch.load"
] | 1.9.1 | inesnolas/Rank-based-loss_ICASSP22 | 3ebe7345dc26b8fa74543725a51b43b7170c58cc |
3 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This module implements loading meshes from glTF 2 assets stored in a
GLB container file or a glTF JSON file with embedded binary data.
It is experimental.
The module provides a MeshFormatInterpreter called
MeshGlbFormat which must be used explicitly.
e.g.
.. code-block:: python
from pytorch3d.io import IO
from pytorch3d.io.experimental_gltf_io import MeshGlbFormat
io = IO()
io.register_meshes_format(MeshGlbFormat())
io.load_mesh(...)
This implementation is quite restricted in what it supports.
- It does not try to validate the input against the standard.
- It loads the default scene only.
- Only triangulated geometry is supported.
- The geometry of all meshes of the entire scene is aggregated into a single mesh.
Use `load_meshes()` instead to get un-aggregated (but transformed) ones.
- All material properties are ignored except for either vertex color, baseColorTexture
or baseColorFactor. If available, one of these (in this order) is exclusively
used which does not match the semantics of the standard.
"""
import json
import struct
import warnings
from base64 import b64decode
from collections import deque
from enum import IntEnum
from io import BytesIO
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union, cast
import numpy as np
import torch
from iopath.common.file_io import PathManager
from PIL import Image
from pytorch3d.io.utils import PathOrStr, _open_file
from pytorch3d.renderer.mesh import TexturesBase, TexturesUV, TexturesVertex
from pytorch3d.structures import Meshes, join_meshes_as_scene
from pytorch3d.transforms import Transform3d, quaternion_to_matrix
from .pluggable_formats import MeshFormatInterpreter, endswith
_GLTF_MAGIC = 0x46546C67
_JSON_CHUNK_TYPE = 0x4E4F534A
_BINARY_CHUNK_TYPE = 0x004E4942
_DATA_URI_PREFIX = "data:application/octet-stream;base64,"
class _PrimitiveMode(IntEnum):
POINTS = 0
LINES = 1
LINE_LOOP = 2
LINE_STRIP = 3
TRIANGLES = 4
TRIANGLE_STRIP = 5
TRIANGLE_FAN = 6
class _ComponentType(IntEnum):
BYTE = 5120
UNSIGNED_BYTE = 5121
SHORT = 5122
UNSIGNED_SHORT = 5123
UNSIGNED_INT = 5125
FLOAT = 5126
_ITEM_TYPES: Dict[int, Any] = {
5120: np.int8,
5121: np.uint8,
5122: np.int16,
5123: np.uint16,
5125: np.uint32,
5126: np.float32,
}
_ElementShape = Union[Tuple[int], Tuple[int, int]]
_ELEMENT_SHAPES: Dict[str, _ElementShape] = {
"SCALAR": (1,),
"VEC2": (2,),
"VEC3": (3,),
"VEC4": (4,),
"MAT2": (2, 2),
"MAT3": (3, 3),
"MAT4": (4, 4),
}
def _read_header(stream: BinaryIO) -> Optional[Tuple[int, int]]:
header = stream.read(12)
magic, version, length = struct.unpack("<III", header)
if magic != _GLTF_MAGIC:
return None
return version, length
def _read_chunks(
stream: BinaryIO, length: int
) -> Optional[Tuple[Dict[str, Any], np.ndarray]]:
"""
Get the json header and the binary data from a
GLB file.
"""
json_data = None
binary_data = None
while stream.tell() < length:
chunk_header = stream.read(8)
chunk_length, chunk_type = struct.unpack("<II", chunk_header)
chunk_data = stream.read(chunk_length)
if chunk_type == _JSON_CHUNK_TYPE:
json_data = json.loads(chunk_data)
elif chunk_type == _BINARY_CHUNK_TYPE:
binary_data = chunk_data
else:
warnings.warn("Unsupported chunk type")
return None
if json_data is None:
raise ValueError("Missing json header")
if binary_data is not None:
binary_data = np.frombuffer(binary_data, dtype=np.uint8)
return json_data, binary_data
def _make_node_transform(node: Dict[str, Any]) -> Transform3d:
"""
    Convert a transform from the json data into a PyTorch3D
Transform3d format.
"""
array = node.get("matrix")
if array is not None: # Stored in column-major order
M = np.array(array, dtype=np.float32).reshape(4, 4, order="F")
return Transform3d(matrix=torch.from_numpy(M))
out = Transform3d()
# Given some of (scale/rotation/translation), we do them in that order to
# get points in to the world space.
# See https://github.com/KhronosGroup/glTF/issues/743 .
array = node.get("scale", None)
if array is not None:
scale_vector = torch.FloatTensor(array)
out = out.scale(scale_vector[None])
# Rotation quaternion (x, y, z, w) where w is the scalar
array = node.get("rotation", None)
if array is not None:
x, y, z, w = array
# We negate w. This is equivalent to inverting the rotation.
# This is needed as quaternion_to_matrix makes a matrix which
# operates on column vectors, whereas Transform3d wants a
# matrix which operates on row vectors.
rotation_quaternion = torch.FloatTensor([-w, x, y, z])
rotation_matrix = quaternion_to_matrix(rotation_quaternion)
out = out.rotate(R=rotation_matrix)
array = node.get("translation", None)
if array is not None:
translation_vector = torch.FloatTensor(array)
out = out.translate(x=translation_vector[None])
return out
class _GLTFLoader:
def __init__(self, stream: BinaryIO) -> None:
self._json_data = None
# Map from buffer index to (decoded) binary data
self._binary_data = {}
version_and_length = _read_header(stream)
if version_and_length is None: # GLTF
stream.seek(0)
json_data = json.load(stream)
else: # GLB
version, length = version_and_length
if version != 2:
warnings.warn("Unsupported version")
return
json_and_binary_data = _read_chunks(stream, length)
if json_and_binary_data is None:
raise ValueError("Data not found")
json_data, binary_data = json_and_binary_data
self._binary_data[0] = binary_data
self._json_data = json_data
self._accessors = json_data.get("accessors", [])
self._buffer_views = json_data.get("bufferViews", [])
self._buffers = json_data.get("buffers", [])
self._texture_map_images = {}
def _access_image(self, image_index: int) -> np.ndarray:
"""
Get the data for an image from the file. This is only called
by _get_texture_map_image which caches it.
"""
image_json = self._json_data["images"][image_index]
buffer_view = self._buffer_views[image_json["bufferView"]]
if "byteStride" in buffer_view:
raise NotImplementedError("strided buffer views")
length = buffer_view["byteLength"]
offset = buffer_view.get("byteOffset", 0)
binary_data = self.get_binary_data(buffer_view["buffer"])
bytesio = BytesIO(binary_data[offset : offset + length].tobytes())
# pyre-fixme[16]: `Image.Image` has no attribute `__enter__`.
with Image.open(bytesio) as f:
array = np.array(f)
if array.dtype == np.uint8:
return array.astype(np.float32) / 255.0
else:
return array
def _get_texture_map_image(self, image_index: int) -> torch.Tensor:
"""
Return a texture map image as a torch tensor.
Calling this function repeatedly with the same arguments returns
the very same tensor, this allows a memory optimization to happen
later in TexturesUV.join_scene.
Any alpha channel is ignored.
"""
im = self._texture_map_images.get(image_index)
if im is not None:
return im
im = torch.from_numpy(self._access_image(image_index))[:, :, :3]
self._texture_map_images[image_index] = im
return im
def _access_data(self, accessor_index: int) -> np.ndarray:
"""
Get the raw data from an accessor as a numpy array.
"""
accessor = self._accessors[accessor_index]
buffer_view_index = accessor.get("bufferView")
        # Undefined buffer views (all zeros) are not (yet) supported
if buffer_view_index is None:
raise NotImplementedError("Undefined buffer view")
accessor_byte_offset = accessor.get("byteOffset", 0)
component_type = accessor["componentType"]
element_count = accessor["count"]
element_type = accessor["type"]
# Sparse accessors are not (yet) supported
if accessor.get("sparse") is not None:
raise NotImplementedError("Sparse Accessors")
buffer_view = self._buffer_views[buffer_view_index]
buffer_index = buffer_view["buffer"]
buffer_byte_length = buffer_view["byteLength"]
element_byte_offset = buffer_view.get("byteOffset", 0)
element_byte_stride = buffer_view.get("byteStride", 0)
if element_byte_stride != 0 and element_byte_stride < 4:
raise ValueError("Stride is too small.")
if element_byte_stride > 252:
raise ValueError("Stride is too big.")
element_shape = _ELEMENT_SHAPES[element_type]
item_type = _ITEM_TYPES[component_type]
item_dtype = np.dtype(item_type)
item_count = np.prod(element_shape)
item_size = item_dtype.itemsize
size = element_count * item_count * item_size
if size > buffer_byte_length:
raise ValueError("Buffer did not have enough data for the accessor")
buffer_ = self._buffers[buffer_index]
binary_data = self.get_binary_data(buffer_index)
if len(binary_data) < buffer_["byteLength"]:
raise ValueError("Not enough binary data for the buffer")
if element_byte_stride == 0:
element_byte_stride = item_size * item_count
# The same buffer can store interleaved elements
if element_byte_stride < item_size * item_count:
raise ValueError("Items should not overlap")
dtype = np.dtype(
{
"names": ["element"],
"formats": [str(element_shape) + item_dtype.str],
"offsets": [0],
"itemsize": element_byte_stride,
}
)
byte_offset = accessor_byte_offset + element_byte_offset
if byte_offset % item_size != 0:
raise ValueError("Misaligned data")
byte_length = element_count * element_byte_stride
buffer_view = binary_data[byte_offset : byte_offset + byte_length].view(dtype)[
"element"
]
# Convert matrix data from column-major (OpenGL) to row-major order
if element_type in ("MAT2", "MAT3", "MAT4"):
buffer_view = np.transpose(buffer_view, (0, 2, 1))
return buffer_view
def _get_primitive_attribute(
self, primitive_attributes: Dict[str, Any], key: str, dtype
) -> Optional[np.ndarray]:
accessor_index = primitive_attributes.get(key)
if accessor_index is None:
return None
primitive_attribute = self._access_data(accessor_index)
if key == "JOINTS_0":
pass
elif dtype == np.uint8:
primitive_attribute /= 255.0
elif dtype == np.uint16:
primitive_attribute /= 65535.0
else:
if dtype != np.float32:
raise ValueError("Unexpected data type")
primitive_attribute = primitive_attribute.astype(dtype)
return primitive_attribute
def get_binary_data(self, buffer_index: int):
"""
Get the binary data from a buffer as a 1D numpy array of bytes.
This is implemented for explicit uri data buffers or the main GLB data
segment.
"""
buffer_ = self._buffers[buffer_index]
binary_data = self._binary_data.get(buffer_index)
if binary_data is None: # Lazily decode binary data
uri = buffer_.get("uri")
if not uri.startswith(_DATA_URI_PREFIX):
raise NotImplementedError("Unexpected URI type")
binary_data = b64decode(uri[len(_DATA_URI_PREFIX) :])
binary_data = np.frombuffer(binary_data, dtype=np.uint8)
self._binary_data[buffer_index] = binary_data
return binary_data
def get_texture_for_mesh(
self, primitive: Dict[str, Any], indices: torch.Tensor
) -> Optional[TexturesBase]:
"""
Get the texture object representing the given mesh primitive.
Args:
primitive: the mesh primitive being loaded.
indices: the face indices of the mesh
"""
attributes = primitive["attributes"]
vertex_colors = self._get_primitive_attribute(attributes, "COLOR_0", np.float32)
if vertex_colors is not None:
return TexturesVertex(torch.from_numpy(vertex_colors))
vertex_texcoords_0 = self._get_primitive_attribute(
attributes, "TEXCOORD_0", np.float32
)
if vertex_texcoords_0 is not None:
verts_uvs = torch.from_numpy(vertex_texcoords_0)
verts_uvs[:, 1] = 1 - verts_uvs[:, -1]
faces_uvs = indices
material_index = primitive.get("material", 0)
material = self._json_data["materials"][material_index]
material_roughness = material["pbrMetallicRoughness"]
if "baseColorTexture" in material_roughness:
texture_index = material_roughness["baseColorTexture"]["index"]
texture_json = self._json_data["textures"][texture_index]
# Todo - include baseColorFactor when also given
# Todo - look at the sampler
image_index = texture_json["source"]
map = self._get_texture_map_image(image_index)
elif "baseColorFactor" in material_roughness:
# Constant color?
map = torch.FloatTensor(material_roughness["baseColorFactor"])[
None, None, :3
]
texture = TexturesUV(
# pyre-fixme[61]: `map` may not be initialized here.
maps=[map], # alpha channel ignored
faces_uvs=[faces_uvs],
verts_uvs=[verts_uvs],
)
return texture
return None
def load(self, include_textures: bool) -> List[Tuple[Optional[str], Meshes]]:
"""
Attempt to load all the meshes making up the default scene from
the file as a list of possibly-named Meshes objects.
Args:
include_textures: Whether to try loading textures.
Returns:
Meshes object containing one mesh.
"""
if self._json_data is None:
raise ValueError("Initialization problem")
# This loads the default scene from the file.
# This is usually the only one.
# It is possible to have multiple scenes, in which case
# you could choose another here instead of taking the default.
scene_index = self._json_data.get("scene")
if scene_index is None:
raise ValueError("Default scene is not specified.")
scene = self._json_data["scenes"][scene_index]
nodes = self._json_data.get("nodes", [])
meshes = self._json_data.get("meshes", [])
root_node_indices = scene["nodes"]
mesh_transform = Transform3d()
names_meshes_list: List[Tuple[Optional[str], Meshes]] = []
# Keep track and apply the transform of the scene node to mesh vertices
Q = deque([(Transform3d(), node_index) for node_index in root_node_indices])
while Q:
parent_transform, current_node_index = Q.popleft()
current_node = nodes[current_node_index]
transform = _make_node_transform(current_node)
current_transform = transform.compose(parent_transform)
if "mesh" in current_node:
mesh_index = current_node["mesh"]
mesh = meshes[mesh_index]
mesh_name = mesh.get("name", None)
mesh_transform = current_transform
for primitive in mesh["primitives"]:
attributes = primitive["attributes"]
accessor_index = attributes["POSITION"]
positions = torch.from_numpy(
self._access_data(accessor_index).copy()
)
positions = mesh_transform.transform_points(positions)
mode = primitive.get("mode", _PrimitiveMode.TRIANGLES)
if mode != _PrimitiveMode.TRIANGLES:
raise NotImplementedError("Non triangular meshes")
if "indices" in primitive:
accessor_index = primitive["indices"]
indices = self._access_data(accessor_index).astype(np.int64)
else:
indices = np.arange(0, len(positions), dtype=np.int64)
indices = torch.from_numpy(indices.reshape(-1, 3))
texture = None
if include_textures:
texture = self.get_texture_for_mesh(primitive, indices)
mesh_obj = Meshes(
verts=[positions], faces=[indices], textures=texture
)
names_meshes_list.append((mesh_name, mesh_obj))
if "children" in current_node:
children_node_indices = current_node["children"]
Q.extend(
[
(current_transform, node_index)
for node_index in children_node_indices
]
)
return names_meshes_list
def load_meshes(
path: PathOrStr,
path_manager: PathManager,
include_textures: bool = True,
) -> List[Tuple[Optional[str], Meshes]]:
"""
Loads all the meshes from the default scene in the given GLB file.
and returns them separately.
Args:
path: path to read from
path_manager: PathManager object for interpreting the path
include_textures: whether to load textures
Returns:
List of (name, mesh) pairs, where the name is the optional name property
from the GLB file, or None if it is absent, and the mesh is a Meshes
object containing one mesh.
"""
with _open_file(path, path_manager, "rb") as f:
loader = _GLTFLoader(cast(BinaryIO, f))
names_meshes_list = loader.load(include_textures=include_textures)
return names_meshes_list
class MeshGlbFormat(MeshFormatInterpreter):
"""
Implements loading meshes from glTF 2 assets stored in a
GLB container file or a glTF JSON file with embedded binary data.
This implementation is quite restricted in what it supports.
- It does not try to validate the input against the standard.
- It loads the default scene only.
- Only triangulated geometry is supported.
- The geometry of all meshes of the entire scene is aggregated into a single mesh.
Use `load_meshes()` instead to get un-aggregated (but transformed) ones.
- All material properties are ignored except for either vertex color, baseColorTexture
or baseColorFactor. If available, one of these (in this order) is exclusively
used which does not match the semantics of the standard.
"""
def __init__(self) -> None:
self.known_suffixes = (".glb",)
def read(
self,
path: PathOrStr,
include_textures: bool,
device,
path_manager: PathManager,
**kwargs,
) -> Optional[Meshes]:
if not endswith(path, self.known_suffixes):
return None
names_meshes_list = load_meshes(
path=path,
path_manager=path_manager,
include_textures=include_textures,
)
meshes_list = [mesh for name, mesh in names_meshes_list]
mesh = join_meshes_as_scene(meshes_list)
return mesh.to(device)
def save(
self,
data: Meshes,
path: PathOrStr,
path_manager: PathManager,
binary: Optional[bool],
**kwargs,
) -> bool:
return False
| [
"torch.FloatTensor",
"torch.from_numpy"
] | 3 | fabibo3/pytorch3d | 36b7656753ae759aed2eb7ffb432b6eca4d42fe2 |
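As the docstrings above note, load_meshes() returns the individual (already transformed) meshes rather than one joined scene. A minimal sketch follows, assuming a local scene.glb file; the path is a placeholder.
from iopath.common.file_io import PathManager
path_manager = PathManager()
names_and_meshes = load_meshes("scene.glb", path_manager, include_textures=True)  # "scene.glb" is a placeholder path
for name, mesh in names_and_meshes:
    print(name, mesh.verts_packed().shape, mesh.faces_packed().shape)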
1.7 | import torch
from torch import Tensor
from kge import Config, Dataset
from kge.model.kge_model import KgeModel
import json
import os
import numpy as np
import time
class hmcn_model(KgeModel):
"""
    Implements the Hierarchical Multi-label Classification Network (HMCN) as defined in Wehrmann et al. (2018).
    Code adapted from:
https://github.com/Tencent/NeuralNLP-NeuralClassifier/blob/master/model/classification/hmcn.py
"""
def __init__(
self,
config: Config,
dataset: Dataset,
configuration_key=None,
init_for_load_only=False,
):
self._init_configuration(config, configuration_key)
# Initialize embedding model
embedding_model = KgeModel.create(
config=config,
dataset=dataset,
configuration_key=self.configuration_key + ".embedding_model",
init_for_load_only=init_for_load_only,
)
# Initialize this model
super().__init__(
config=config,
dataset=dataset,
scorer=embedding_model.get_scorer(),
create_embedders=False,
init_for_load_only=init_for_load_only,
)
self._embedding_model = embedding_model
types_path = self.config.get('hmcn_model.types_path')
y, idx, pos_weights, hier_tuple_ids, hier, hierarchical_depth, global2local, hierarchy_classes\
= self.load_types(types_dataset_path=types_path, num_entities=dataset.num_entities())
self.types = y
self.type_ids = idx
self.hier_tuple_ids = hier_tuple_ids
self.hier = hier
self.pos_weights = pos_weights
#HMCN setup
self.hierarchical_depth = hierarchical_depth
self.hierarchical_class = hierarchy_classes
self.global2local = global2local
hidden_dimension = self._embedding_model.get_s_embedder().dim
#predictions
self.beta = self.config.get("hmcn_model.beta")
self.p = self.config.get("hmcn_model.hiddenlayer_dropout")
self.lamb = torch.Tensor([self.config.get("hmcn_model.lambda")])
# Setup HMCN model according to Wehrmann et al. (2018)
# Code adapted from
# https://github.com/Tencent/NeuralNLP-NeuralClassifier/blob/master/model/classification/hmcn.py
self.local_layers = torch.nn.ModuleList()
self.global_layers = torch.nn.ModuleList()
for i in range(1, len(self.hierarchical_depth)):
self.global_layers.append(
torch.nn.Sequential(
torch.nn.Linear(hidden_dimension + self.hierarchical_depth[i - 1], self.hierarchical_depth[i]),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(self.hierarchical_depth[i]),
torch.nn.Dropout(p=0.5)
))
self.local_layers.append(
torch.nn.Sequential(
torch.nn.Linear(self.hierarchical_depth[i], self.global2local[i]),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(self.global2local[i]),
torch.nn.Linear(self.global2local[i], self.hierarchical_class[i])
))
self.global_layers.apply(self._init_weight)
self.local_layers.apply(self._init_weight)
self.linear = torch.nn.Linear(self.hierarchical_depth[-1], len(hier_tuple_ids))
self.linear.apply(self._init_weight)
self.dropout = torch.nn.Dropout(p=self.p)
def prepare_job(self, job, **kwargs):
self._embedding_model.prepare_job(job, **kwargs)
def penalty(self, **kwargs):
''' penalty calculated in training as it depends on confidence estimates '''
penalties = self._embedding_model.penalty(**kwargs)
return penalties
def get_lambda(self):
return self.lamb
def get_tuple_ids(self):
return self.hier_tuple_ids
def get_train_mask(self, idx):
return self.train_mask[idx]
# pass embedding methods down to wrapped embedder
def get_s_embedder(self):
return self._embedding_model.get_s_embedder()
def get_o_embedder(self):
return self._embedding_model.get_o_embedder()
def get_p_embedder(self):
return self._embedding_model.get_p_embedder()
def get_scorer(self):
return self._embedding_model.get_scorer()
def score_spo(self, s, p, o, direction=None):
return self._embedding_model.score_spo(s, p, o, direction)
def score_po(self, p, o, s=None):
return self._embedding_model.score_po(p, o, s)
def score_so(self, s, o, p=None):
return self._embedding_model.score_so(s, o, p)
def score_sp_po(self, s, p, o, entity_subset=None):
return self._embedding_model.score_sp_po(s, p, o, entity_subset)
# mimics forward
def predict_all(self, idx, device):
entity_embeddings = self._embedding_model.get_s_embedder().to(device).embed(indexes=idx)
local_layer_outputs = []
global_layer_activation = entity_embeddings
#batch_size = len(idx)
for i, (local_layer, global_layer) in enumerate(zip(self.local_layers, self.global_layers)):
local_layer_activation = global_layer(global_layer_activation)
local_layer_outputs.append(local_layer(local_layer_activation))
if i < len(self.global_layers) - 1:
global_layer_activation = torch.cat((local_layer_activation, entity_embeddings), 1)
else:
global_layer_activation = local_layer_activation
global_layer_output = self.linear(global_layer_activation)
local_layer_output = torch.cat(local_layer_outputs, 1)
probits = self.beta * torch.sigmoid(local_layer_output) + (1 - self.beta) * torch.sigmoid(global_layer_output)
return global_layer_output, local_layer_output, probits
    # Builds a mask that zeroes all child-class predictions whose respective parent type is not assigned.
    # type = 'proba': used for violation calculation by propagating parent confidence down to the child.
    # type = 'binary': used to ensure hierarchy-consistent predictions.
def build_mask(self, y, type='binary', device=None):
mask = []
y_parent = {}
# Assume root type is predicted for all instances
for root_type in self.hier[1].keys():
if type == 'binary':
y_parent[(1, root_type)] = torch.ones(len(y), dtype=torch.int).to(device)
else:
y_parent[(1, root_type)] = torch.ones(len(y), dtype=torch.float).to(device)
for hier_tuple, tuple_id in self.hier_tuple_ids.items():
mask.append(y_parent[hier_tuple])
type_level, type_id = hier_tuple
for child in self.hier[type_level][type_id]:
child_tuple = (type_level + 1, child)
if child_tuple not in y_parent:
y_parent[child_tuple] = y[:, tuple_id]
# DAG!
else:
if type == 'binary':
                        # Tie handling when both parents cast predictions: logical or
y_parent[child_tuple] = torch.logical_or(y_parent[child_tuple], y[:, tuple_id]).int()
else:
# Tie Handling use maximum confidence of parent predictions
y_parent[child_tuple] = torch.max(y_parent[child_tuple], y[:, tuple_id]).float()
return torch.stack(mask).transpose(0, 1).float()
def load_types(self, types_dataset_path, num_entities):
"""
@param types_dataset_path: Path to type dataset. Requires hier.json, train.del, valid.del and test.del.
@param num_entities: Number of unique entities in the KG.
@return:
y: Binary map of types with shape (num_entities, num_types-1). Root type not considered.
idx: dict with keys ['train', 'valid', 'test'] containing respective entity ids.
pos_weights: positive weights of class computed from training split for weighted bce_with_logits_loss.
hier_tuple_ids: dict with keys [(level, type_id)] for mapping y to type id
hierarchical_depth: number of ReLU neurons per hierarchy level: 384.
global2local: local ReLU neurons. same as hierarchy_classes.
hierarchy_classes: Number of classes per hierarchy level. root class excluded.
"""
# load the hierarchy to receive type information
hier_path = os.path.join(types_dataset_path, 'hier.json')
with open(hier_path, 'r') as json_file:
hier = json.load(json_file)
        # reshape the hierarchy and build a binary type map (useful to map predictions to type_ids)
# build required shapes for HMCN
# ReLU neurons set to 384 per level see Wehrmann et al (2018)
hier_t, train_freq, hier_tuple_ids = {}, [], {}
for hier_level, parents in hier.items():
hier_t[int(hier_level)] = {}
if int(hier_level) == 0:
#no prediction for level 0
hierarchical_depth = [0] # Global ReLU neurons
global2local = [0] # Local transfer neurons
hierarchy_classes = [0] #number of classes per level
continue
else:
hierarchical_depth.append(384) # Global ReLU neurons
global2local.append(len(parents)) # Local transfer neurons
hierarchy_classes.append(len(parents)) # number of classes per level
for level_type in parents:
hier_t[int(hier_level)][int(level_type)] = hier[hier_level][level_type].copy()
if (int(hier_level), int(level_type)) not in hier_tuple_ids:
hier_tuple_ids[(int(hier_level), int(level_type))] = len(hier_tuple_ids)
train_freq.append(0)
hier = hier_t
# build type maps keeping track of ids of respective split
type_idx = {}
y = np.zeros((num_entities, len(hier_tuple_ids)))
# load types
for split in ['train', 'valid', 'test']:
idx = []
types_path = os.path.join(types_dataset_path, split + '.del')
with open(types_path, 'r') as file:
for line in file:
entity_id, type_list = line.split("\t", maxsplit=1)
type_list = type_list.rstrip("\n")
# iterate through hierarchical type structure
for level, level_types in enumerate(json.loads(type_list)):
if level == 0:
continue
for type_id in level_types:
bin_type_id = hier_tuple_ids[(level, int(type_id))]
y[int(entity_id), bin_type_id] = 1
if split == 'train':
train_freq[bin_type_id] += 1
idx.append(int(entity_id))
type_idx[split] = idx.copy()
# compute weights for loss function
pos_weights = []
for class_count in train_freq:
if class_count == 0:
pos_weight = len(type_idx['train'])
else:
neg_count = len(type_idx['train']) - class_count
pos_weight = neg_count / class_count
pos_weights.append(pos_weight)
# create output numpy arrays and tensors
y = torch.from_numpy(y)
pos_weights = torch.from_numpy(np.array(pos_weights))
idx = {split: torch.from_numpy(np.array(entity_ids)) for split, entity_ids in type_idx.items()}
return y, idx, pos_weights, hier_tuple_ids, hier, hierarchical_depth, global2local, hierarchy_classes
def _init_weight(self, m):
if isinstance(m, torch.nn.Linear):
torch.nn.init.normal_(m.weight, std=0.1) | [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.Dropout",
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.max",
"torch.from_numpy",
"torch.nn.ReLU",
"torch.nn.init.normal_",
"torch.nn.BatchNorm1d",
"torch.logical_or"
] | 1.7.1 | LukasDegitz/kge | 0ff3d2623d520e0634374e81d4184d525c189a25 |
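The predict_all method above follows Wehrmann et al. (2018) in mixing the local and global heads through beta * sigmoid(local) + (1 - beta) * sigmoid(global). A tiny sketch of that combination; the shapes, the mixing weight, and the 0.5 cut-off are illustrative only.
import torch
beta = 0.5                                     # illustrative mixing weight
global_logits = torch.randn(2, 7)              # one logit per (level, type) tuple, per entity
local_logits = torch.randn(2, 7)               # concatenated outputs of the per-level local heads
probits = beta * torch.sigmoid(local_logits) + (1 - beta) * torch.sigmoid(global_logits)
predictions = (probits > 0.5).int()            # build_mask(..., type='binary') would still be needed for hierarchy consistency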
1.4 | # -*- coding: utf-8 -*
# --------------------------------------------------------
# SNNformer Feature Extractor (SFE) - SNN branch
# --------------------------------------------------------
import torch.nn as nn
import torch
from videoanalyst.model.backbone.backbone_base import (TRACK_BACKBONES,
VOS_BACKBONES)
from videoanalyst.model.common_opr.common_block import conv_bn_relu
from videoanalyst.model.module_base import ModuleBase
thresh_bais = 0.3
# thresh = 0.3 # neuronal threshold
lens = 0.5 # hyper-parameters of approximate function
decay = 0.2 # decay constants
global thresh
class SpatialGroupEnhance(nn.Module):
""" Dynamic Spiking Threshold from spatial features"""
def __init__(self):
super(SpatialGroupEnhance, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.weight = nn.Parameter(torch.zeros(1, 1, 1, 1))
self.bias = nn.Parameter(torch.ones(1, 1, 1, 1))
self.sig = nn.Sigmoid()
def forward(self, x): # (b, c, h, w)
b, c, h, w = x.size()
xn = x * self.avg_pool(x)
xn = xn.mean(dim=1, keepdim=True)
entro = torch.mean(xn, dim=0).squeeze()
h,w = entro.size()
entro = entro.view(-1)
max = torch.max(entro)
min = torch.min(entro)
entro = (entro - min) / (max-min) * 255
his = torch.histc(entro, bins=256, min=0, max=255) / (h*w)
entro_final = torch.sum(his * -torch.log(his + 0.00000001))
entro_final = entro_final / torch.count_nonzero(his)
x = self.sig(xn)
x = torch.mean(x)
return x + entro_final*10
class ActFun(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.gt(thresh).float()
@staticmethod
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad_input = grad_output.clone()
temp = abs(input - thresh) < lens
return grad_input * temp.float()
act_fun = ActFun.apply
# membrane potential update
def mem_update(ops, x, mem, spike):
mem = mem * decay * (1. - spike) + ops(x)
    spike = act_fun(mem)  # act_fun: approximate firing function
return mem, spike
cfg_cnn = [(6, 64, 2, 0, 11),
(64, 128, 2, 0, 9),
(128, 256, 2, 0, 5),
(64, 128, 1, 1, 3),
(128, 256, 1, 1, 3)]
# kernel size
cfg_kernel = [147, 70, 33, 31, 31]
cfg_kernel_first = [59, 26, 11, 15, 15]
# fc layer
cfg_fc = [128, 10]
@VOS_BACKBONES.register
@TRACK_BACKBONES.register
class SNN3(ModuleBase):
r"""
SNN branch
Hyper-parameters
----------------
pretrain_model_path: string
Path to pretrained backbone parameter file,
Parameter to be loaded in _update_params_
"""
default_hyper_params = {"pretrain_model_path": ""}
def __init__(self):
super(SNN3, self).__init__()
cfg_cnn = [(3, 64, 2, 0, 11),
(64, 128, 2, 0, 9),
(128, 256, 2, 0, 5),
(64, 128, 1, 1, 3),
(128, 256, 1, 1, 3)]
# kernel size
cfg_kernel = [147, 70, 33, 31, 31]
cfg_kernel_first = [59, 26, 11, 15, 15]
in_planes, out_planes, stride, padding, kernel_size = cfg_cnn[0]
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding)
in_planes, out_planes, stride, padding, kernel_size = cfg_cnn[1]
self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding)
in_planes, out_planes, stride, padding, kernel_size = cfg_cnn[2]
self.conv3 = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding)
self.bn_tem = nn.BatchNorm2d(256)
self.relu_tem = nn.ReLU()
self.fuse_snn_transfor = nn.Conv2d(out_planes*2, out_planes, kernel_size=1, stride=1, padding=0)
self.thre_w = SpatialGroupEnhance()
self.conv33_11 = nn.Conv2d(256, 256, kernel_size=13, stride=2, padding=0)
self.bn_spa = nn.BatchNorm2d(256)
self.relu_spa = nn.ReLU()
def forward(self, input_pos, input_neg, trans_snn, transformer_sig, transformer_fea, first_seq):
global thresh
if transformer_fea is None:
thresh = 0.3
else:
thresh = self.thre_w(transformer_fea) * thresh_bais
if first_seq:
time_window = len(input_pos)
tem_c3m = 0
for step in range(time_window):
x_pos = input_pos[step]
x_neg = input_neg[step]
x = torch.where(x_pos > x_neg, x_pos, x_neg)
c1_mem, c1_spike = mem_update(self.conv1, x.float(), trans_snn[0], trans_snn[1])
c2_mem, c2_spike = mem_update(self.conv2, c1_spike, trans_snn[2], trans_snn[3])
c3_mem, c3_spike = mem_update(self.conv3, c2_spike, trans_snn[4], trans_snn[5])
trans_snn = [c1_mem, c1_spike, c2_mem, c2_spike, c3_mem, c3_spike]
tem_c3m = tem_c3m + c3_mem
tem_fea = tem_c3m / time_window
tem_fea = self.relu_tem(self.bn_tem(tem_fea))
spa_fea = self.relu_spa(self.bn_spa(self.conv33_11(transformer_fea)))
return tem_fea, spa_fea, trans_snn
else:
time_window = len(input_pos)
tem_c3m = 0
for step in range(time_window):
x_pos = input_pos[step]
x_neg = input_neg[step]
x = torch.where(x_pos > x_neg, x_pos, x_neg)
c1_mem, c1_spike = mem_update(self.conv1, x.float(), trans_snn[0], trans_snn[1])
c2_mem, c2_spike = mem_update(self.conv2, c1_spike, trans_snn[2], trans_snn[3])
c3_mem, c3_spike = mem_update(self.conv3, c2_spike, trans_snn[4], trans_snn[5])
trans_snn = [c1_mem, c1_spike, c2_mem, c2_spike, c3_mem, c3_spike]
tem_c3m = tem_c3m + c3_mem
tem_fea = tem_c3m / time_window
tem_fea = self.relu_tem(self.bn_tem(tem_fea))
spa_fea = transformer_fea
return tem_fea, spa_fea, trans_snn
| [
"torch.zeros",
"torch.min",
"torch.max",
"torch.nn.Sigmoid",
"torch.histc",
"torch.nn.BatchNorm2d",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.count_nonzero",
"torch.nn.AdaptiveAvgPool2d",
"torch.log",
"torch.mean",
"torch.where"
] | 1.4.0 | Jee-King/STNet | 221ab60c4fccfce5a03e8878fb168e0baa7152f4 |
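mem_update above implements a leaky integrate-and-fire neuron whose spike function, ActFun, applies a hard threshold in the forward pass and a rectangular surrogate gradient in the backward pass. Below is a self-contained numeric sketch of the same update rule, with illustrative constants and a plain hard threshold standing in for ActFun.
import torch
threshold, decay_const = 0.3, 0.2              # illustrative values
mem = torch.zeros(4)
spike = torch.zeros(4)
for step in range(3):
    inp = torch.rand(4)                        # stand-in for ops(x), e.g. a conv output
    mem = mem * decay_const * (1. - spike) + inp   # leak, reset where a spike fired, integrate the input
    spike = (mem > threshold).float()          # hard threshold; ActFun adds the surrogate gradient for training
    print(step, spike.tolist())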
1.4 | # -*- coding: utf-8 -*
import torch.nn as nn
from videoanalyst.model.backbone.backbone_base import (TRACK_BACKBONES,
VOS_BACKBONES)
from videoanalyst.model.common_opr.common_block import conv_bn_relu
from videoanalyst.model.module_base import ModuleBase
@VOS_BACKBONES.register
@TRACK_BACKBONES.register
class AlexNet(ModuleBase):
r"""
AlexNet
Hyper-parameters
----------------
pretrain_model_path: string
Path to pretrained backbone parameter file,
Parameter to be loaded in _update_params_
"""
default_hyper_params = {"pretrain_model_path": ""}
def __init__(self):
super(AlexNet, self).__init__()
self.conv1 = conv_bn_relu(3, 96, stride=2, kszie=11, pad=0)
self.pool1 = nn.MaxPool2d(3, 2, 0, ceil_mode=True)
self.conv2 = conv_bn_relu(96, 256, 1, 5, 0)
self.pool2 = nn.MaxPool2d(3, 2, 0, ceil_mode=True)
self.conv3 = conv_bn_relu(256, 384, 1, 3, 0)
self.conv4 = conv_bn_relu(384, 384, 1, 3, 0)
self.conv5 = conv_bn_relu(384, 256, 1, 3, 0, has_relu=False)
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
return x
| [
"torch.nn.MaxPool2d"
] | 1.4.0 | Jee-King/STNet | 221ab60c4fccfce5a03e8878fb168e0baa7152f4 |
1.2 | import math
import os
import signal
import sys
from typing import Callable, Any, Tuple, List, Union, Optional
import ptan
import torch
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from torch import multiprocessing
from torch.optim import Adam, SGD
import autograph.lib.envs.mazeenv
from autograph.lib.automata import AutomatonSet
from autograph.lib.envs.mazeenv import FuelMazeEnv, FuelMazeObservation
from autograph.lib.envs.mazeenv import transform_coordinate
from autograph.lib.envs.mineworldenv_adv import MineWorldEnv
from autograph.lib.loss_functions import TakeSimilarActionsLossFunction, PPOLossFunction, \
AdvantageActorCriticLossFunction
from autograph.lib.mcts_aut_adv import MCTSAut_adv, AutStats, ExponentialAnnealedAutStats, UCBAnnealedAutStats
from autograph.lib.running import get_parallel_queue, RandomReplayTrainingLoop, run_episode_generic
from autograph.lib.shaping import AutShapingWrapperAdv
from autograph.lib.util import element_add
from autograph.lib.util.checkpoint_manager import CheckpointManager, StateDictLoadHandler, CombinedLoadHandler, \
InitZeroLoadHandler, PickleLoadHandler, TransplantCheckpointManager
from autograph.lib.util.trace_return_step import TraceStep, TraceReturnStep
from autograph.net.curiosity.curiosity_optimizer import ModuleCuriosityOptimizer, NoopCuriosityOptimizer
from autograph.net.maze_constructors import mazenet_v1, mazernd_v1, maze_obs_rewrite_creator
from autograph.net.mine_constructors import minenet_v1, mine_obs_rewriter_creator, minernd_v1, mine_mazenet_v1
from autograph.net.misc_constructors import gym_make, no_op_cur_make, basic_net, no_op_make
import random
math.sqrt(1) # So that the import isn't optimized away (very useful when setting conditional debug breakpoints)
sys.modules["autograph.lib.mazeenv"] = autograph.lib.envs.mazeenv # Fix broken pickle loading
def throwKeyInterr():
raise KeyboardInterrupt()
def full_fuel(action, obs: FuelMazeObservation, rew, done, info):
return obs.fuel_level == info["max_fuel"]
def key(action, obs: FuelMazeObservation, rew, done, info):
return len(obs.keys) == 0
def goal(action, obs: FuelMazeObservation, rew, done, info):
corner = element_add(info["maze_shape"], (-1, -1))
trans_corner = transform_coordinate(corner)
return obs.position == trans_corner
class MineInfoAutAP:
def __init__(self, apname: str = None, ap_name: str = None):
if not (apname or ap_name):
raise ValueError("Did not provide ap_name to info aut")
self.name = apname or ap_name
def __call__(self, action, obs, rew, done, info):
return self.name in info["atomic_propositions"]
class MineInventoryAP:
def __init__(self, inventory_item, quantity):
self.item = inventory_item
self.quantity = quantity
def __call__(self, action, obs, rew, done, info):
return info["inventory"][self.item] == self.quantity
class MineLocationAP:
def __init__(self, location):
self.location = tuple(location)
def __call__(self, action, obs, rew, done, info):
position, *_ = obs
return position == self.location
optimizers = {
"Adam": Adam,
"SGD": SGD
}
aut_funcs = {
"full_fuel": full_fuel,
"key": key,
"goal": goal,
"info_aut": MineInfoAutAP,
"mine_inventory": MineInventoryAP,
"mine_location": MineLocationAP
}
env_constructors = {
"minecraft": MineWorldEnv.from_dict,
"maze": FuelMazeEnv.from_dict,
"gym": gym_make
}
def no_op_rewriter(x):
return torch.Tensor([0.0])
training_nets = {
"mazenet_v1": (mazenet_v1, maze_obs_rewrite_creator),
"minenet_v1": (minenet_v1, mine_obs_rewriter_creator),
"mine_mazenet_v1": (mine_mazenet_v1, mine_obs_rewriter_creator),
"basicnet": (basic_net, lambda e: torch.Tensor),
"no-op": (no_op_make, lambda e: no_op_rewriter)
}
curiosity_nets = {
"mazernd_v1": (mazernd_v1, maze_obs_rewrite_creator),
"minernd_v1": (minernd_v1, mine_obs_rewriter_creator),
"no-op": (no_op_cur_make, no_op_rewriter)
}
loss_funcs = {
"MCTS": TakeSimilarActionsLossFunction,
"PPO": PPOLossFunction,
"A2C": AdvantageActorCriticLossFunction
}
aut_transplant_anneals = {
"Exponential": ExponentialAnnealedAutStats,
"UCB": UCBAnnealedAutStats
}
if __name__ == '__main__':
import argparse
import json5 as json
p = argparse.ArgumentParser()
p.add_argument("config")
p.add_argument("--device", default=("cuda:0" if torch.cuda.is_available() else "cpu"))
p.add_argument("--log")
p.add_argument("--checkpoint")
p.add_argument("--run-name")
p.add_argument("--do-not-load-from-checkpoint", dest="load_checkpoint", action="store_false")
p.add_argument("--do-not-save-checkpoint", dest="save_checkpoint", action="store_false")
p.add_argument("--checkpoint-every", default=1)
p.add_argument("--workers", default=8)
p.add_argument("--post", help="Add a postfix to the checkpoint and tensorboard names")
p.add_argument("--stop-after", dest="stop_after",
help="Stop after roughly a certain number of steps have been reached")
args = vars(p.parse_args())
run_name = args.get("run_name")
STOP_AFTER = args.get("stop_after")
if STOP_AFTER:
STOP_AFTER = int(STOP_AFTER)
def interpolate(text):
if not text:
return text
if run_name and "%s" in text:
return text % (run_name,)
else:
return text
config_file = interpolate(args["config"])
postfix = ""
if args.get("post"):
postfix = "_" + args["post"]
with open(config_file) as f:
config = json.load(f)
aut: dict = config["automaton"]
LTLF_SPEC = aut["spec"]
AUT_PARAM_NAMES = [param["name"] for param in aut["params"]]
def get_func(param: dict):
func_or_generator = aut_funcs[param["func"]]
func_params = param.get("params")
if func_params is None:
return func_or_generator
else:
return func_or_generator(**func_params)
AUT_PARAM_FUNCS = [get_func(p) for p in aut["params"]]
AUT_OTHER_PARAMS = {
"terminate_on_fail": aut.get("terminate_on_fail", True),
"termination_fail_reward": aut.get("termination_fail_reward", 0),
"terminate_on_accept": aut.get("terminate_on_accept", False),
"termination_accept_reward": aut.get("termination_accept_reward", 1)
}
AUT_STATS_PARAMS = aut.get("aut_stats_params", dict())
DISCOUNT = config["discount"]
if "maze" in config:
maze = config["maze"]
config["env"] = dict()
config["env"]["type"] = "maze"
config["env"]["max_episode_len"] = maze["max_episode_len"]
del maze["max_episode_len"]
config["env"]["params"] = maze
del config["maze"]
env = config["env"]
MAX_EPISODE_LEN = env["max_episode_len"]
MAX_LEN_REWARD = env.get("max_len_reward")
ENV_CONFIG = env["params"]
ENV_TYPE = env["type"]
# Policy training hyperparameters
training: dict = config["training"]
LEARNING_RATE = training["learning_rate"]
REPLAY_BUFFER = training["replay_buffer"]
MIN_TRACE_TO_TRAIN = training["min_trace_to_train"]
PPO_TRAIN_ROUNDS = training["train_rounds"]
NETWORK = training.get("network", "mazenet_v1")
NETWORK_PARAMS = training.get("params", dict())
OPTIMIZER = optimizers[training.get("optimizer")]
OPTIMIZER_PARAMS = training.get("opt_params", {})
# Loss function
loss: dict = config.get("loss")
if loss:
LOSS_FUNC = loss["type"]
LOSS_PARAMS = loss.get("params", dict())
else:
LOSS_FUNC = "MCTS"
LOSS_PARAMS = dict()
if config.get("mcts"):
config["episode_runner"] = {
"type": "mcts_aut_episode",
"params": config.pop("mcts")
}
# Policy runner parameters
episode_runner = config["episode_runner"]
EPISODE_RUNNER_TYPE = episode_runner["type"]
EPISODE_RUNNER_PARAMS = episode_runner.get("params", dict())
# Curiosity Parameters
curiosity: dict = config.get("curiosity")
if curiosity:
if "feature_space" in curiosity:
curiosity["type"] = "mazernd_v1"
curiosity["params"] = {"feature_space": curiosity["feature_space"]}
del curiosity["feature_space"]
CURIOSITY_LEARNING_RATE = curiosity["learning_rate"]
CURIOSITY_NET = curiosity["type"]
CURIOSITY_PARAMS = curiosity.get("params", dict())
else:
CURIOSITY_NET = None
# Logging and checkpointing
LOG_FOLDER = interpolate(args.get("log")) + postfix
CHECKPOINT_EVERY = int(args["checkpoint_every"])
CHECKPOINT_PATH = interpolate(args.get("checkpoint")) + postfix
"""
There are two types of "transplants":
1. "Old" transplant, this just literally loads the state from the "from" checkpoint instead of creating the state
from scratch
2. "Regular" transplant, this is only for the automaton statistics, and it anneals between the imported values and
the values created during this run."""
transplant_config = config.get("transplant")
TRANSPLANT = False
OLD_TRANSPLANT: Union[bool, List[str]] = False
if transplant_config:
TRANSPLANT_FROM = transplant_config["from"]
if transplant_config.get("fields"):
OLD_TRANSPLANT = transplant_config["fields"]
else:
TRANSPLANT = True
aut_transplant = transplant_config["automaton"]
ANNEAL_AUT_TRANSPLANT = aut_transplant["type"]
ANNEAL_AUT_TRANSPLANT_PARAMS = aut_transplant.get("params", {})
if CHECKPOINT_PATH:
LOAD_FROM_CHECKPOINT = args["load_checkpoint"]
if not os.path.isfile(CHECKPOINT_PATH):
LOAD_FROM_CHECKPOINT = False
print("NOTE: no existing checkpoint found, will create new one if checkpoint saving is enabled.")
else:
if OLD_TRANSPLANT:
OLD_TRANSPLANT = False
print("NOTE: Loading from checkpoint, so transplant disabled")
SAVE_CHECKPOINTS = args["save_checkpoint"]
else:
CHECKPOINT_PATH = None
LOAD_FROM_CHECKPOINT = False
SAVE_CHECKPOINTS = False
if not args["save_checkpoint"]:
print("WARNING: This run is not being checkpointed! Use --do-not-save-checkpoint to suppress.")
NUM_PROCESSES = int(args["workers"])
DEVICE = torch.device(args["device"])
def run_mcts_aut_episode(net: torch.nn.Module, env: AutShapingWrapperAdv, max_length: int,
max_len_reward: Union[int, None],
curiosity: ModuleCuriosityOptimizer,
device, c_puct, c_aut,
num_batches, batch_size, stats: AutStats, train_state_rewriter: Callable[[Any], torch.Tensor],
state_observer: Callable[[Any], None] = None, c_sigma=1, c_intrins=1, **kwargs) \
-> Tuple[List[TraceStep], float]:
"""
Run an episode using MCTS with curiosity as the action selection
:param net: The policy/value network
:param env: The environment to run the simulation in
:param max_length: When to cut off the simulation
:param curiosity: Something to calculate the relative "newness" of a state
:param device: The device to run the simulation on
:param c_puct: Puct constant of MCTS
:param num_batches: How many groups of MCTS sims to run
:param batch_size: How many MCTS sims per group
:param state_observer: Function to call for every state seen
:return: A trace and final value estimate
"""
def curiosity_evaluator(sars):
states, actions, rewards, next_states, _ = zip(*sars)
rewards = curiosity.get_curiosity(states, actions, next_states)
return rewards.tolist()
def curiosity_trainer(sars):
states, actions, rewards, next_states, _ = zip(*sars)
curiosity.train(states, actions, next_states, train_rounds=1)
def state_evaluator(states):
states_transformed = torch.stack(tuple(train_state_rewriter(s) for s in states))
pols, vals = net(states_transformed.to(device))
pollist = F.softmax(pols, dim=-1).tolist()
vallist = vals.squeeze(-1).tolist()
return list(zip(pollist, vallist))
stats.synchronize()
mcts = MCTSAut_adv(env.action_space.n, curiosity_evaluator, state_evaluator, curiosity_trainer, c_puct=c_puct,
aut_stats=stats, c_aut=c_aut, c_sigma=c_sigma, c_intrins=c_intrins, **kwargs)
def action_value_generator(state, step):
mcts.mcts_batch(env, state, num_batches, batch_size)
probs, values = mcts.get_policy_value(state, 1)
return probs, max(values)
return run_episode_generic(env, action_value_generator, max_length, max_len_reward,
ptan.actions.ProbabilityActionSelector(),
state_observer)
def run_aut_episode(net: torch.nn.Module, env: AutShapingWrapperAdv, max_length: int, max_len_reward: Optional[int],
curiosity: ModuleCuriosityOptimizer, device,
train_state_rewriter: Callable[[Any], torch.Tensor], stats: AutStats,
state_observer: Callable[[Any], None] = None, render_every_frame=False) -> Tuple[
List[TraceStep], float]:
stats.synchronize()
def action_value_generator(state, step):
obs_tensor = train_state_rewriter(state).to(device)
obs_batch = obs_tensor.unsqueeze(dim=0)
probs, values = net(obs_batch)
pols_soft = F.softmax(probs.double(), dim=-1).squeeze(0)
pols_soft /= pols_soft.sum()
pols_soft = pols_soft.tolist()
val = values.squeeze(0).tolist()[0]
if render_every_frame:
env.render()
return pols_soft, val
# TODO curiosity and automaton bonuses
return run_episode_generic(env, action_value_generator, max_length, max_len_reward,
ptan.actions.EpsilonGreedyActionSelector(
selector=ptan.actions.ProbabilityActionSelector(),
epsilon=.1),
state_observer)
episode_runners = {
"mcts_aut_episode": run_mcts_aut_episode,
"aut_episode": run_aut_episode
}
def run():
torch.multiprocessing.set_start_method("spawn", force=True)
signal.signal(signal.SIGHUP, throwKeyInterr)
try:
cman = CheckpointManager(CHECKPOINT_PATH, LOAD_FROM_CHECKPOINT, SAVE_CHECKPOINTS, device=DEVICE)
except EOFError:
cman = CheckpointManager(CHECKPOINT_PATH + "_copy", LOAD_FROM_CHECKPOINT, SAVE_CHECKPOINTS, device=DEVICE)
if TRANSPLANT:
cman = TransplantCheckpointManager(cman, TRANSPLANT_FROM)
cman.transplant("aut") # Generating the automaton may not be completely deterministic, we want the same states
elif OLD_TRANSPLANT:
cman = TransplantCheckpointManager(cman, TRANSPLANT_FROM)
for field in OLD_TRANSPLANT:
cman.transplant(field)
aut = cman.load("aut", AutomatonSet.from_ltlf(LTLF_SPEC, AUT_PARAM_NAMES), PickleLoadHandler())
orig_env = env_constructors[ENV_TYPE](ENV_CONFIG)
env = AutShapingWrapperAdv(orig_env, AUT_PARAM_FUNCS, aut, use_potential=False, **AUT_OTHER_PARAMS)
action_space = env.action_space.n
writer = SummaryWriter(LOG_FOLDER)
train_net_creator, train_rewriter_creator = training_nets[NETWORK]
net = cman.load("net", train_net_creator(orig_env, **NETWORK_PARAMS),
CombinedLoadHandler(StateDictLoadHandler(), InitZeroLoadHandler())).to(DEVICE)
net.share_memory()
if CURIOSITY_NET:
curiosity_net_creator, curiosity_rewriter_creator = curiosity_nets[CURIOSITY_NET]
icm = cman.load("icm", curiosity_net_creator(orig_env, **CURIOSITY_PARAMS), StateDictLoadHandler()).to(
DEVICE)
icm.share_memory()
icm_opt = cman.load("icm_opt", ModuleCuriosityOptimizer(icm, curiosity_rewriter_creator(orig_env), action_space,
CURIOSITY_LEARNING_RATE,
DEVICE), StateDictLoadHandler())
else:
icm_opt = NoopCuriosityOptimizer()
loss_func = loss_funcs[LOSS_FUNC](net=net, device=DEVICE, discount=DISCOUNT, **LOSS_PARAMS)
optimizer = cman.load("opt", OPTIMIZER(net.parameters(), lr=LEARNING_RATE, **OPTIMIZER_PARAMS),
StateDictLoadHandler())
train_rewriter = train_rewriter_creator(orig_env)
train_loop = cman.load("train_loop",
RandomReplayTrainingLoop(DISCOUNT, REPLAY_BUFFER, MIN_TRACE_TO_TRAIN, PPO_TRAIN_ROUNDS,
train_rewriter, writer, DEVICE),
StateDictLoadHandler())
aut_stats = cman.load("aut_stats", AutStats(len(aut.graph.network), **AUT_STATS_PARAMS), StateDictLoadHandler())
if TRANSPLANT:
orig_alt_stats = cman.load_from_alt("aut_stats", AutStats(len(aut.graph.network)), StateDictLoadHandler())
wrapped_aut_stats = aut_transplant_anneals[ANNEAL_AUT_TRANSPLANT](orig_alt_stats, aut_stats,
**ANNEAL_AUT_TRANSPLANT_PARAMS)
wrapped_aut_stats.set_step(train_loop.num_rounds)
train_loop.add_round_hook(wrapped_aut_stats.set_step)
else:
wrapped_aut_stats = aut_stats
def aut_hook(trace: List[TraceReturnStep], final_value):
# Only count each state once per run
prev_edges = set()
last_state = None
for trst in trace: # TODO does this need to be reversed?
this_state = frozenset(trst.info["automaton_states"])
if len(this_state) > 0:
this_state = set(this_state).pop()
else:
this_state = None
edge = (last_state, this_state)
last_state = this_state
if edge[0] is not None and edge[1] is not None:
if edge not in prev_edges:
aut_stats.visit(edge, trst.discounted_return)
prev_edges.add(edge)
train_loop.add_trace_hook(aut_hook)
with get_parallel_queue(num_processes=NUM_PROCESSES, episode_runner=episode_runners[EPISODE_RUNNER_TYPE],
net=net, env=env, max_length=MAX_EPISODE_LEN, max_len_reward=MAX_LEN_REWARD,
curiosity=icm_opt, state_observer=None, device=DEVICE,
stats=wrapped_aut_stats, train_state_rewriter=train_rewriter,
**EPISODE_RUNNER_PARAMS) as sim_round_queue:
# random.seed(798)
while True:
train_loop(sim_round_queue, loss_func, optimizer)
if train_loop.num_rounds % CHECKPOINT_EVERY == 0:
print("num_rounds=", train_loop.num_rounds)
save_dict = {
"net": net,
"opt": optimizer,
"train_loop": train_loop,
"aut_stats": aut_stats,
"aut": aut,
}
if CURIOSITY_NET:
save_dict.update({
"icm": icm,
"icm_opt": icm_opt
})
cman.save(save_dict)
if STOP_AFTER and train_loop.global_step > STOP_AFTER:
print("STOPPING: step limit " + str(train_loop.global_step) + "/" + str(STOP_AFTER))
break
if __name__ == '__main__':
multiprocessing.freeze_support()
run()
| [
"torch.device",
"torch.multiprocessing.freeze_support",
"torch.multiprocessing.set_start_method",
"torch.cuda.is_available",
"torch.nn.functional.softmax",
"torch.Tensor"
] | 1.2.0 | bb912/MATS-DRS | 6e1ae9ba3b865e321d6a2d100d29693b776e1d36 |
1.4 | import argparse
import logging
import os
import string
from datetime import datetime
from time import time
import torch
import torchaudio
from torch.optim import SGD, Adadelta, Adam, AdamW
from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau
from torch.utils.data import DataLoader
from torchaudio.datasets.utils import bg_iterator
from torchaudio.models.wav2letter import Wav2Letter
from ctc_decoders import GreedyDecoder
from datasets import collate_factory, split_process_librispeech
from languagemodels import LanguageModel
from metrics import levenshtein_distance
from transforms import Normalize, UnsqueezeFirst
from utils import MetricLogger, count_parameters, save_checkpoint
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--type",
metavar="T",
default="mfcc",
choices=["waveform", "mfcc"],
help="input type for model",
)
parser.add_argument(
"--freq-mask",
default=0,
type=int,
metavar="N",
help="maximal width of frequency mask",
)
parser.add_argument(
"--win-length",
default=400,
type=int,
metavar="N",
help="width of spectrogram window",
)
parser.add_argument(
"--hop-length",
default=160,
type=int,
metavar="N",
help="width of spectrogram window",
)
parser.add_argument(
"--time-mask",
default=0,
type=int,
metavar="N",
help="maximal width of time mask",
)
parser.add_argument(
"--workers",
default=0,
type=int,
metavar="N",
help="number of data loading workers",
)
parser.add_argument(
"--checkpoint",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint",
)
parser.add_argument(
"--epochs",
default=200,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch", default=0, type=int, metavar="N", help="manual epoch number"
)
parser.add_argument(
"--reduce-lr-valid",
action="store_true",
help="reduce learning rate based on validation loss",
)
parser.add_argument(
"--normalize", action="store_true", help="normalize model input"
)
parser.add_argument(
"--progress-bar", action="store_true", help="use progress bar while training"
)
parser.add_argument(
"--decoder",
metavar="D",
default="greedy",
choices=["greedy"],
help="decoder to use",
)
parser.add_argument(
"--batch-size", default=128, type=int, metavar="N", help="mini-batch size"
)
parser.add_argument(
"--n-bins",
default=13,
type=int,
metavar="N",
help="number of bins in transforms",
)
parser.add_argument(
"--optimizer",
metavar="OPT",
default="adadelta",
choices=["sgd", "adadelta", "adam", "adamw"],
help="optimizer to use",
)
parser.add_argument(
"--scheduler",
metavar="S",
default="reduceonplateau",
choices=["exponential", "reduceonplateau"],
help="optimizer to use",
)
parser.add_argument(
"--learning-rate",
default=0.6,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument(
"--gamma",
default=0.99,
type=float,
metavar="GAMMA",
help="learning rate exponential decay constant",
)
parser.add_argument(
"--momentum", default=0.8, type=float, metavar="M", help="momentum"
)
parser.add_argument(
"--weight-decay", default=1e-5, type=float, metavar="W", help="weight decay"
)
parser.add_argument("--eps", metavar="EPS", type=float, default=1e-8)
parser.add_argument("--rho", metavar="RHO", type=float, default=0.95)
parser.add_argument("--clip-grad", metavar="NORM", type=float, default=0.0)
parser.add_argument(
"--dataset-root",
type=str,
help="specify dataset root folder",
)
parser.add_argument(
"--dataset-folder-in-archive",
type=str,
help="specify dataset folder in archive",
)
parser.add_argument(
"--dataset-train",
default=["train-clean-100"],
nargs="+",
type=str,
help="select which part of librispeech to train with",
)
parser.add_argument(
"--dataset-valid",
default=["dev-clean"],
nargs="+",
type=str,
help="select which part of librispeech to validate with",
)
parser.add_argument(
"--distributed", action="store_true", help="enable DistributedDataParallel"
)
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument(
"--world-size", type=int, default=8, help="the world size to initiate DPP"
)
parser.add_argument("--jit", action="store_true", help="if used, model is jitted")
args = parser.parse_args()
logging.info(args)
return args
def setup_distributed(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# initialize the process group
torch.distributed.init_process_group("nccl", rank=rank, world_size=world_size)
def model_length_function(tensor):
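    # Rough output-length estimate for CTC: the waveform front-end strides by 160
    # samples (matching the default --hop-length above) and the acoustic model halves
    # the time axis once, hence // 160 // 2 + 1 for waveforms and // 2 + 1 for
    # precomputed features. (Assumption: this mirrors the defaults; a different hop
    # length would need a matching change here.)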
if tensor.shape[1] == 1:
# waveform mode
return int(tensor.shape[0]) // 160 // 2 + 1
return int(tensor.shape[0]) // 2 + 1
def compute_error_rates(outputs, targets, decoder, language_model, metric):
output = outputs.transpose(0, 1).to("cpu")
output = decoder(output)
# Compute CER
output = language_model.decode(output.tolist())
target = language_model.decode(targets.tolist())
print_length = 20
for i in range(2):
# Print a few examples
output_print = output[i].ljust(print_length)[:print_length]
target_print = target[i].ljust(print_length)[:print_length]
logging.info("Target: %s Output: %s", target_print, output_print)
cers = [levenshtein_distance(t, o) for t, o in zip(target, output)]
cers = sum(cers)
n = sum(len(t) for t in target)
metric["batch char error"] = cers
metric["batch char total"] = n
metric["batch char error rate"] = cers / n
metric["epoch char error"] += cers
metric["epoch char total"] += n
metric["epoch char error rate"] = metric["epoch char error"] / metric["epoch char total"]
# Compute WER
output = [o.split(language_model.char_space) for o in output]
target = [t.split(language_model.char_space) for t in target]
wers = [levenshtein_distance(t, o) for t, o in zip(target, output)]
wers = sum(wers)
n = sum(len(t) for t in target)
metric["batch word error"] = wers
metric["batch word total"] = n
metric["batch word error rate"] = wers / n
metric["epoch word error"] += wers
metric["epoch word total"] += n
metric["epoch word error rate"] = metric["epoch word error"] / metric["epoch word total"]
def train_one_epoch(
model,
criterion,
optimizer,
scheduler,
data_loader,
decoder,
language_model,
device,
epoch,
clip_grad,
disable_logger=False,
reduce_lr_on_plateau=False,
):
model.train()
metric = MetricLogger("train", disable=disable_logger)
metric["epoch"] = epoch
for inputs, targets, tensors_lengths, target_lengths in bg_iterator(
data_loader, maxsize=2
):
start = time()
inputs = inputs.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
# keep batch first for data parallel
outputs = model(inputs).transpose(-1, -2).transpose(0, 1)
# CTC
# outputs: input length, batch size, number of classes (including blank)
# targets: batch size, max target length
# input_lengths: batch size
# target_lengths: batch size
loss = criterion(outputs, targets, tensors_lengths, target_lengths)
optimizer.zero_grad()
loss.backward()
if clip_grad > 0:
metric["gradient"] = torch.nn.utils.clip_grad_norm_(
model.parameters(), clip_grad
)
optimizer.step()
compute_error_rates(outputs, targets, decoder, language_model, metric)
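        # ReduceLROnPlateau has no get_last_lr(), so the AttributeError branch below
        # falls back to reading the learning rate from the optimizer's param groups.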
try:
metric["lr"] = scheduler.get_last_lr()[0]
except AttributeError:
metric["lr"] = optimizer.param_groups[0]["lr"]
metric["batch size"] = len(inputs)
metric["n_channel"] = inputs.shape[1]
metric["n_time"] = inputs.shape[-1]
metric["dataset length"] += metric["batch size"]
metric["iteration"] += 1
metric["loss"] = loss.item()
metric["cumulative loss"] += metric["loss"]
metric["average loss"] = metric["cumulative loss"] / metric["iteration"]
metric["iteration time"] = time() - start
metric["epoch time"] += metric["iteration time"]
metric()
if reduce_lr_on_plateau and isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(metric["average loss"])
elif not isinstance(scheduler, ReduceLROnPlateau):
scheduler.step()
def evaluate(
model,
criterion,
data_loader,
decoder,
language_model,
device,
epoch,
disable_logger=False,
):
with torch.no_grad():
model.eval()
start = time()
metric = MetricLogger("validation", disable=disable_logger)
metric["epoch"] = epoch
for inputs, targets, tensors_lengths, target_lengths in bg_iterator(
data_loader, maxsize=2
):
inputs = inputs.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
# keep batch first for data parallel
outputs = model(inputs).transpose(-1, -2).transpose(0, 1)
# CTC
# outputs: input length, batch size, number of classes (including blank)
# targets: batch size, max target length
# input_lengths: batch size
# target_lengths: batch size
metric["cumulative loss"] += criterion(
outputs, targets, tensors_lengths, target_lengths
).item()
metric["dataset length"] += len(inputs)
metric["iteration"] += 1
compute_error_rates(outputs, targets, decoder, language_model, metric)
metric["average loss"] = metric["cumulative loss"] / metric["iteration"]
metric["validation time"] = time() - start
metric()
return metric["average loss"]
def main(rank, args):
# Distributed setup
if args.distributed:
setup_distributed(rank, args.world_size)
not_main_rank = args.distributed and rank != 0
logging.info("Start time: %s", datetime.now())
# Explicitly set seed to make sure models created in separate processes
# start from same random weights and biases
torch.manual_seed(args.seed)
# Empty CUDA cache
torch.cuda.empty_cache()
# Change backend for flac files
torchaudio.set_audio_backend("soundfile")
# Transforms
melkwargs = {
"n_fft": args.win_length,
"n_mels": args.n_bins,
"hop_length": args.hop_length,
}
sample_rate_original = 16000
if args.type == "mfcc":
transforms = torch.nn.Sequential(
torchaudio.transforms.MFCC(
sample_rate=sample_rate_original,
n_mfcc=args.n_bins,
melkwargs=melkwargs,
),
)
num_features = args.n_bins
elif args.type == "waveform":
transforms = torch.nn.Sequential(UnsqueezeFirst())
num_features = 1
else:
raise ValueError("Model type not supported")
if args.normalize:
transforms = torch.nn.Sequential(transforms, Normalize())
augmentations = torch.nn.Sequential()
if args.freq_mask:
augmentations = torch.nn.Sequential(
augmentations,
torchaudio.transforms.FrequencyMasking(freq_mask_param=args.freq_mask),
)
if args.time_mask:
augmentations = torch.nn.Sequential(
augmentations,
torchaudio.transforms.TimeMasking(time_mask_param=args.time_mask),
)
# Text preprocessing
char_blank = "*"
char_space = " "
char_apostrophe = "'"
labels = char_blank + char_space + char_apostrophe + string.ascii_lowercase
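    # char_blank is the first symbol of the label set; its index (looked up through
    # language_model.mapping) is what gets passed as the blank index to
    # torch.nn.CTCLoss further below.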
language_model = LanguageModel(labels, char_blank, char_space)
# Dataset
training, validation = split_process_librispeech(
[args.dataset_train, args.dataset_valid],
[transforms, transforms],
language_model,
root=args.dataset_root,
folder_in_archive=args.dataset_folder_in_archive,
)
# Decoder
if args.decoder == "greedy":
decoder = GreedyDecoder()
else:
raise ValueError("Selected decoder not supported")
# Model
model = Wav2Letter(
num_classes=language_model.length,
input_type=args.type,
num_features=num_features,
)
if args.jit:
model = torch.jit.script(model)
if args.distributed:
n = torch.cuda.device_count() // args.world_size
devices = list(range(rank * n, (rank + 1) * n))
model = model.to(devices[0])
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=devices)
else:
devices = ["cuda" if torch.cuda.is_available() else "cpu"]
model = model.to(devices[0], non_blocking=True)
model = torch.nn.DataParallel(model)
n = count_parameters(model)
logging.info("Number of parameters: %s", n)
# Optimizer
if args.optimizer == "adadelta":
optimizer = Adadelta(
model.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay,
eps=args.eps,
rho=args.rho,
)
elif args.optimizer == "sgd":
optimizer = SGD(
model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optimizer == "adam":
optimizer = Adam(
model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optimizer == "adamw":
optimizer = AdamW(
model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
else:
raise ValueError("Selected optimizer not supported")
if args.scheduler == "exponential":
scheduler = ExponentialLR(optimizer, gamma=args.gamma)
elif args.scheduler == "reduceonplateau":
scheduler = ReduceLROnPlateau(optimizer, patience=10, threshold=1e-3)
else:
raise ValueError("Selected scheduler not supported")
criterion = torch.nn.CTCLoss(
blank=language_model.mapping[char_blank], zero_infinity=False
)
# Data Loader
collate_fn_train = collate_factory(model_length_function, augmentations)
collate_fn_valid = collate_factory(model_length_function)
loader_training_params = {
"num_workers": args.workers,
"pin_memory": True,
"shuffle": True,
"drop_last": True,
}
loader_validation_params = loader_training_params.copy()
loader_validation_params["shuffle"] = False
loader_training = DataLoader(
training,
batch_size=args.batch_size,
collate_fn=collate_fn_train,
**loader_training_params,
)
loader_validation = DataLoader(
validation,
batch_size=args.batch_size,
collate_fn=collate_fn_valid,
**loader_validation_params,
)
# Setup checkpoint
best_loss = 1.0
load_checkpoint = args.checkpoint and os.path.isfile(args.checkpoint)
if args.distributed:
torch.distributed.barrier()
if load_checkpoint:
logging.info("Checkpoint: loading %s", args.checkpoint)
checkpoint = torch.load(args.checkpoint)
args.start_epoch = checkpoint["epoch"]
best_loss = checkpoint["best_loss"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
logging.info(
"Checkpoint: loaded '%s' at epoch %s", args.checkpoint, checkpoint["epoch"]
)
else:
logging.info("Checkpoint: not found")
save_checkpoint(
{
"epoch": args.start_epoch,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
False,
args.checkpoint,
not_main_rank,
)
if args.distributed:
torch.distributed.barrier()
torch.autograd.set_detect_anomaly(False)
for epoch in range(args.start_epoch, args.epochs):
logging.info("Epoch: %s", epoch)
train_one_epoch(
model,
criterion,
optimizer,
scheduler,
loader_training,
decoder,
language_model,
devices[0],
epoch,
args.clip_grad,
not_main_rank,
not args.reduce_lr_valid,
)
loss = evaluate(
model,
criterion,
loader_validation,
decoder,
language_model,
devices[0],
epoch,
not_main_rank,
)
if args.reduce_lr_valid and isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
is_best = loss < best_loss
best_loss = min(loss, best_loss)
save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_best,
args.checkpoint,
not_main_rank,
)
logging.info("End time: %s", datetime.now())
if args.distributed:
torch.distributed.destroy_process_group()
def spawn_main(main, args):
if args.distributed:
torch.multiprocessing.spawn(
main, args=(args,), nprocs=args.world_size, join=True
)
else:
main(0, args)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = parse_args()
spawn_main(main, args)
| [
"torch.optim.lr_scheduler.ExponentialLR",
"torch.multiprocessing.spawn",
"torch.cuda.is_available",
"torch.load",
"torch.nn.DataParallel",
"torch.distributed.init_process_group",
"torch.nn.CTCLoss",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.jit.script",
"torch.nn.Sequential",
"torch.autograd.set_detect_anomaly",
"torch.nn.parallel.DistributedDataParallel",
"torch.cuda.device_count",
"torch.cuda.empty_cache",
"torch.distributed.barrier",
"torch.distributed.destroy_process_group",
"torch.no_grad",
"torch.optim.lr_scheduler.ReduceLROnPlateau"
] | 1.4.0 | zkneupper/audio | 1f136671b84071a2fe1d5b762df64f3a76310c31 |
1.4 | import torch
from torchaudio.datasets import LIBRISPEECH
class MapMemoryCache(torch.utils.data.Dataset):
"""
Wrap a dataset so that, whenever a new item is returned, it is saved to memory.
"""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms, encode):
self.dataset = dataset
self.transforms = transforms
self.encode = encode
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
transformed = item[0]
target = item[2].lower()
transformed = self.transforms(transformed)
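        # For the MFCC pipeline the transform output is (channel, n_mfcc, time); the
        # indexing/transpose below keeps the first channel and moves time to the
        # front, giving a (time, features) tensor (waveform inputs stay 1-D).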
transformed = transformed[0, ...].transpose(0, -1)
target = self.encode(target)
target = torch.tensor(target, dtype=torch.long, device=transformed.device)
return transformed, target
def split_process_librispeech(
datasets, transforms, language_model, root, folder_in_archive,
):
def create(tags, cache=True):
if isinstance(tags, str):
tags = [tags]
if isinstance(transforms, list):
transform_list = transforms
else:
transform_list = [transforms]
data = torch.utils.data.ConcatDataset(
[
Processed(
LIBRISPEECH(
root, tag, folder_in_archive=folder_in_archive, download=False,
),
transform,
language_model.encode,
)
for tag, transform in zip(tags, transform_list)
]
)
data = MapMemoryCache(data)
return data
# For performance, we cache all datasets
return tuple(create(dataset) for dataset in datasets)
def collate_factory(model_length_function, transforms=None):
if transforms is None:
transforms = torch.nn.Sequential()
def collate_fn(batch):
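        # Apply the (optional) augmentations, pad the variable-length feature tensors
        # and targets to a common length, and keep the original per-item lengths that
        # CTC needs; features end up as (batch, features, time) after the transpose below.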
tensors = [transforms(b[0]) for b in batch if b]
tensors_lengths = torch.tensor(
[model_length_function(t) for t in tensors],
dtype=torch.long,
device=tensors[0].device,
)
tensors = torch.nn.utils.rnn.pad_sequence(tensors, batch_first=True)
tensors = tensors.transpose(1, -1)
targets = [b[1] for b in batch if b]
target_lengths = torch.tensor(
[target.shape[0] for target in targets],
dtype=torch.long,
device=tensors.device,
)
targets = torch.nn.utils.rnn.pad_sequence(targets, batch_first=True)
return tensors, targets, tensors_lengths, target_lengths
return collate_fn
| [
"torch.nn.Sequential",
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor"
] | 1.4.0 | zkneupper/audio | 1f136671b84071a2fe1d5b762df64f3a76310c31 |
1.6 | from unittest import mock
import pytest
import torch
from tests import DATASETS_PATH
@pytest.mark.parametrize('cli_args', [
f"--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2"
])
def test_cli_run_self_supervised_amdim(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.self_supervised.amdim.amdim_module import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
# TODO: this test is hanging (runs for more than 10 min), so we need to use a GPU machine or optimize it...
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
@pytest.mark.parametrize('cli_args', [
f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --encoder resnet18'
])
def test_cli_run_self_supervised_cpc(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.self_supervised.cpc.cpc_module import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', [
f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2'
])
def test_cli_run_self_supervised_moco(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.self_supervised.moco.moco2_module import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', [
f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --online_ft'
])
def test_cli_run_self_supervised_simclr(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.self_supervised.simclr.simclr_module import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize('cli_args', [
f'--data_dir {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2 --online_ft'
])
def test_cli_run_self_supervised_byol(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.self_supervised.byol.byol_module import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
@pytest.mark.parametrize(
'cli_args', [
f'--dataset cifar10 --data_path {DATASETS_PATH} --max_epochs 1 --max_steps 3 --fast_dev_run --batch_size 2'
' --gpus 0 --arch resnet18 --hidden_mlp 512 --fp32 --sinkhorn_iterations 1 --nmb_prototypes 2'
]
)
def test_cli_run_self_supervised_swav(cli_args):
"""Test running CLI for an example with default params."""
from pl_bolts.models.self_supervised.swav.swav_module import cli_main
cli_args = cli_args.split(' ') if cli_args else []
with mock.patch("argparse._sys.argv", ["any.py"] + cli_args):
cli_main()
| [
"torch.cuda.is_available"
] | 1.6 | hecoding/pytorch-lightning-bolts | 4d254fde6112b21436003028d553a726bf7ea6ef |
1.5 |
from awsio.python.lib.io.s3.s3dataset import S3Dataset
from torch.utils.data import DataLoader
url_list = ['s3://image-data-bucket/train/n01440764/n01440764_10026.JPEG',
's3://image-data-bucket/train/n01440764/n01440764_10027.JPEG',
's3://image-data-bucket/train/n01440764/n01440764_10029.JPEG']
dataset = S3Dataset(url_list)
dataloader = DataLoader(dataset,
batch_size=2,
num_workers=64)
for i, (image, label) in enumerate(dataloader):
print(type(image), len(image))
| [
"torch.utils.data.DataLoader"
] | 1.5.1 | judyheflin/amazon-s3-plugin-for-pytorch | 38284c8a5e92be3bbf47b08e8c90d94be0cb79e7 |
1.1 | from __future__ import print_function, absolute_import
import time
from time import gmtime, strftime
from datetime import datetime
from collections import OrderedDict
import torch
import numpy as np
from random import randint
from PIL import Image
import sys
from . import metric
from .metric import Accuracy, EditDistance, RecPostProcess
from tqdm import tqdm
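# NOTE: the original project is assumed to provide an AverageMeter utility and a
# module-level `global_args` config object; a minimal stand-in meter is defined
# here so the timing code below runs as written.
class AverageMeter(object):
    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / max(self.count, 1)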
class BaseEvaluator(object):
def __init__(self, model, metric, use_cuda = True):
super(BaseEvaluator, self).__init__()
self.model = model
self.metric = metric
self.use_cuda = use_cuda
self.device = torch.device('cuda' if use_cuda else 'cpu')
self.cos_sim = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
def evaluate(self, data_loader, step = 1, print_freq =1, tfLogger = None, dataset = None, vis_dir = None):
self.model.eval()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        # forward the network
        images, outputs, targets, embeds, losses = [], {}, [], [], []
file_names = []
end = time.time()
for i, input in enumerate(tqdm(data_loader)):
data_time.update(time.time()-end)
input_dict = self._parse_data(input)
output_dict = self._forward(input_dict)
batch_size = input_dict['images'].size(0)
total_loss_batch = 0
            for k, loss in output_dict['losses'].items():
loss = loss.mean(dim = 0, keepdim = True)
total_loss_batch += loss.item() * batch_size
            images.append(input_dict['images'])
            targets.append(input_dict['rec_targets'])
embeds.append(input_dict['rec_embeds'])
losses.append(total_loss_batch)
## the normal situation is without lexicon, especially for IIIT5k, IC03, IC13
if global_args.evaluate_with_lexicon:
file_names = input_dict['file_name']
            for k, v in output_dict['output'].items():
if k not in outputs:
outputs[k] = []
outputs[k].append(v.cpu())
            batch_time.update(time.time()-end)
            end = time.time()
if (i+1) % print_freq == 0:
print('[{}]\t'
'Evaluation : {}/{}\t'
'Time: {:.3f} ({:.3f})\t'
'Data: {:.3f} ({:.3f})\t'
.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
i+1, len(data_loader),
batch_time.val, batch_time.avg,
data_time.val, data_time.avg
))
        if not global_args.keep_ratio():
            images = torch.cat(images)
            num_sample = images.size(0)
        else:
            num_sample = sum([subimage.size(0) for subimage in images])
targets = torch.cat(targets)
losses = np.sum(losses)/(1.0*num_sample)
for k , v in outputs.items():
outputs[k] = torch.cat(outputs[k])
# save info for recognition
if 'pred_rec' in outputs:
# evaluation with metric
if global_args.evaluate_with_lexicon:
eval_res = metrics_factory[self.metric + '_with_lexicon'](outputs['pred_rec'], targets, dataset,
file_names)
print('lexicon0: {0}, {1:.3f}'.format(self.metric, eval_res[0]))
print('lexicon50: {0}, {1:.3f}'.format(self.metric, eval_res[1]))
print('lexicon1k: {0}, {1:.3f}'.format(self.metric, eval_res[2]))
print('lexiconfull: {0}, {1:.3f}'.format(self.metric, eval_res[3]))
eval_res = eval_res[0]
else:
eval_res = metrics_factory[self.metric](outputs['pred_rec'], targets, dataset)
print('lexicon0: {0}: {1:.3f}'.format(self.metric, eval_res))
pred_list, targ_list, score_list = RecPostProcess(outputs['pred_rec'], targets, outputs['pred_rec_score'],
dataset)
with open("embed_v1_results.txt", "w", encoding="utf-8") as f:
for pred, targ in zip(pred_list, targ_list):
f.write("{} {}\n".format(pred, targ))
if 'pred_embed' in outputs:
output_cos_sim = self.cos_sim(outputs['pred_embed'], torch.cat(embeds).cpu())
output_cos_sim = torch.mean(torch.abs(output_cos_sim))
print("Emebedding vector cos similarity: {:3f}".format(output_cos_sim.item()))
if tfLogger is not None:
# (1) Log the scalar values
info = {
'loss': losses,
self.metric: eval_res,
}
for tag, value in info.items():
tfLogger.scalar_summary(tag, value, step)
# ====== Visualization ======#
if vis_dir is not None:
# recognition_vis(images, outputs['pred_rec'], targets, score_list, dataset, vis_dir)
stn_vis(images, outputs['rectified_images'], outputs['ctrl_points'], outputs['pred_rec'],
targets, score_list, outputs['pred_score'] if 'pred_score' in outputs else None, dataset, vis_dir)
return eval_res
# NotImplementedError, ValueError will represent what , the framework of python
def _parse_data(self, input):
raise NotImplementedError
def _forward(self, input):
raise NotImplementedError
class Evaluator(BaseEvaluator):
    def _parse_data(self, inputs):
        input_dict = {}
        if global_args.evaluate_with_lexicon:
            imgs, label_encs, lengths, file_name = inputs
            embeds_ = None
        else:
            imgs, label_encs, lengths, embeds_ = inputs
        with torch.no_grad():
            images = imgs.to(self.device)
            labels = label_encs.to(self.device) if label_encs is not None else None
            if embeds_ is not None:
                embeds_ = embeds_.to(self.device)
        input_dict['images'] = images
        input_dict['rec_targets'] = labels
        input_dict['rec_lengths'] = lengths
        input_dict['rec_embeds'] = embeds_
if global_args.evaluate_with_lexicon:
input_dict['file_name'] = file_name
return input_dict
def _forward(self, input_dict):
self.model.eval()
        with torch.no_grad():
output_dict = self.model(input_dict)
return output_dict | [
"torch.device",
"torch.cat",
"torch.no_grad",
"torch.abs",
"torch.nn.CosineSimilarity"
] | 1.1.0 | adeline-cs/GTR | 889b0cda8a3c2b061371c4a63ea871821ddcd3d7 |
1.9 | from data_loader import load_data, tokenizer
from models import BertForMultipleSequenceClassification
from transformers import AutoConfig
import torch
from tqdm.auto import tqdm
from transformers import get_scheduler
from transformers import AdamW
from sklearn.metrics import accuracy_score, f1_score
# Topic labels (Korean): confirmed cases, recoveries, deaths, cluster infections,
# vaccines, quarantine guidelines, economic support, masks, international
# organizations, hospitals.
label_list = ['확진자수','완치자수','사망여부','집단감염','백신관련','방역지침','경제지원','마스크','국제기구','병원관련']
def train(model, optimizer, lr_scheduler, train_dataloader, num_epochs, num_training_steps, device):
progress_bar = tqdm(range(num_training_steps))
model.train()
for epoch in range(num_epochs):
for batch in train_dataloader:
batch = {k: v.to(device) for k, v in batch.items()}
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
def eval(model, eval_dataloader, metric, device):
model.eval()
preds = []
targets = []
probs = []
for batch in eval_dataloader:
batch = {k: v.to(device) for k, v in batch.items()}
with torch.no_grad():
outputs = model(**batch)
logits = outputs.logits
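        # The model exposes one logits tensor per label head (ten binary heads, see
        # config.num_classes in main()); take each head's argmax and stack them into
        # a (batch_size, num_labels) prediction matrix.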
predictions = torch.stack([torch.argmax(logit, dim=-1) for logit in logits], dim=1)
preds.append(predictions)
targets.append(batch["labels"])
preds = torch.cat(preds, dim=0).cpu().numpy()
targets = torch.cat(targets, dim=0).cpu().numpy()
N, M = preds.shape
for i in range(M):
print("%s results" % label_list[i])
acc = accuracy_score(targets[:,i], preds[:,i])
f1 = f1_score(targets[:,i], preds[:,i], average='binary')
print('accuracy', acc * 100)
print('f1 score', f1 * 100)
def main():
checkpoint = "klue/bert-base"
train_dataloader, eval_dataloader = load_data()
config = AutoConfig.from_pretrained(checkpoint)
config.num_classes=[2] * 10
model = BertForMultipleSequenceClassification.from_pretrained(checkpoint, config=config)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model.to(device)
optimizer = AdamW(model.parameters(), lr=5e-5)
num_epochs = 3
num_training_steps = num_epochs * len(train_dataloader)
lr_scheduler = get_scheduler(
"linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=num_training_steps
)
train(model, optimizer, lr_scheduler, train_dataloader, num_epochs, num_training_steps, device)
print()
eval(model, eval_dataloader, 'metric', device)
if __name__ == '__main__':
main() | [
"torch.device",
"torch.cat",
"torch.no_grad",
"torch.cuda.is_available",
"torch.argmax"
] | 1.9.1 | Ckst123/KoBERT-events | 68eb22845b179bcaf13771fea776be3d9772306f |
1.2 | from typing import Optional
from overrides import overrides
import torch
from allennlp.training.metrics.metric import Metric
@Metric.register("entropy")
class Entropy(Metric):
def __init__(self) -> None:
self._entropy = 0.0
self._count = 0
@overrides
def __call__(
self, # type: ignore
logits: torch.Tensor,
mask: Optional[torch.Tensor] = None,
):
"""
Parameters
----------
logits : ``torch.Tensor``, required.
A tensor of unnormalized log probabilities of shape (batch_size, ..., num_classes).
mask: ``torch.Tensor``, optional (default = None).
A masking tensor of shape (batch_size, ...).
"""
logits, mask = self.unwrap_to_tensors(logits, mask)
if mask is None:
mask = torch.ones(logits.size()[:-1])
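        # Per-position entropy H(p) = -sum_c p_c * log p_c, masked and then averaged
        # over the unmasked positions of the batch.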
log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
probabilities = torch.exp(log_probs) * mask.unsqueeze(-1)
weighted_negative_likelihood = -log_probs * probabilities
entropy = weighted_negative_likelihood.sum(-1)
self._entropy += entropy.sum() / mask.sum()
self._count += 1
@overrides
def get_metric(self, reset: bool = False):
"""
Returns
-------
The scalar average entropy.
"""
average_value = self._entropy / self._count if self._count > 0 else 0
if reset:
self.reset()
return average_value
@overrides
def reset(self):
self._entropy = 0.0
self._count = 0
| [
"torch.exp",
"torch.nn.functional.log_softmax"
] | 1.2.0 | tianjianjiang/allennlp | 35b285585e0677b1025eac1c19b5eefe7e2a70db |
1.2 | import math
from overrides import overrides
import torch
from torch.nn.parameter import Parameter
from allennlp.modules.similarity_functions.similarity_function import SimilarityFunction
from allennlp.nn import Activation, util
@SimilarityFunction.register("linear")
class LinearSimilarity(SimilarityFunction):
"""
This similarity function performs a dot product between a vector of weights and some
combination of the two input vectors, followed by an (optional) activation function. The
combination used is configurable.
If the two vectors are ``x`` and ``y``, we allow the following kinds of combinations: ``x``,
``y``, ``x*y``, ``x+y``, ``x-y``, ``x/y``, where each of those binary operations is performed
elementwise. You can list as many combinations as you want, comma separated. For example, you
might give ``x,y,x*y`` as the ``combination`` parameter to this class. The computed similarity
function would then be ``w^T [x; y; x*y] + b``, where ``w`` is a vector of weights, ``b`` is a
bias parameter, and ``[;]`` is vector concatenation.
Note that if you want a bilinear similarity function with a diagonal weight matrix W, where the
similarity function is computed as `x * w * y + b` (with `w` the diagonal of `W`), you can
accomplish that with this class by using "x*y" for `combination`.
Parameters
----------
tensor_1_dim : ``int``
The dimension of the first tensor, ``x``, described above. This is ``x.size()[-1]`` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
tensor_2_dim : ``int``
The dimension of the second tensor, ``y``, described above. This is ``y.size()[-1]`` - the
length of the vector that will go into the similarity computation. We need this so we can
build weight vectors correctly.
combination : ``str``, optional (default="x,y")
Described above.
activation : ``Activation``, optional (default=linear (i.e. no activation))
An activation function applied after the ``w^T * [x;y] + b`` calculation. Default is no
activation.
"""
def __init__(
self,
tensor_1_dim: int,
tensor_2_dim: int,
combination: str = "x,y",
activation: Activation = None,
) -> None:
super().__init__()
self._combination = combination
combined_dim = util.get_combined_dim(combination, [tensor_1_dim, tensor_2_dim])
self._weight_vector = Parameter(torch.Tensor(combined_dim))
self._bias = Parameter(torch.Tensor(1))
self._activation = activation or Activation.by_name("linear")()
self.reset_parameters()
def reset_parameters(self):
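        # Glorot/Xavier-style uniform initialization: bound sqrt(6 / (fan_in + fan_out))
        # with fan_in = combined input dimension and fan_out = 1.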
std = math.sqrt(6 / (self._weight_vector.size(0) + 1))
self._weight_vector.data.uniform_(-std, std)
self._bias.data.fill_(0)
@overrides
def forward(self, tensor_1: torch.Tensor, tensor_2: torch.Tensor) -> torch.Tensor:
combined_tensors = util.combine_tensors(self._combination, [tensor_1, tensor_2])
dot_product = torch.matmul(combined_tensors, self._weight_vector)
return self._activation(dot_product + self._bias)
| [
"torch.Tensor",
"torch.matmul"
] | 1.2.0 | tianjianjiang/allennlp | 35b285585e0677b1025eac1c19b5eefe7e2a70db |
1.7 | import torch
import torch.nn as nn
from torch.nn import functional as F
import torchvision
import torchvision.transforms as transforms
from torchvision.transforms import ToTensor
from torchvision.transforms import ToPILImage
import os
import cv2
import wget
import imutils
from tqdm import tqdm, tqdm_notebook
from PIL import Image
import numpy as np
from collections import deque
import pandas as pd
import matplotlib.pyplot as plt
import segmentation_models_pytorch as smp
import warnings
warnings.filterwarnings("ignore")
from ..base_inference_engine import InferenceEngine
"""
Cell membrane segmentation model for the C. elegans embryo
"""
def generate_centroid_image(thresh):
"""Used when centroid_mode is set to True
Args:
thresh (np.array): 2d numpy array that is returned from the segmentation model
Returns:
np.array : image containing the contours and their respective centroids
list : list of all centroids for the given image as [(x1,y1), (x2,y2)...]
"""
thresh = cv2.blur(thresh, (5,5))
thresh = thresh.astype(np.uint8)
centroid_image = np.zeros(thresh.shape)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
centroids = []
for c in cnts:
try:
# compute the center of the contour
M = cv2.moments(c)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# draw the contour and center of the shape on the image
cv2.drawContours(centroid_image, [c], -1, (255, 255, 255), 2)
cv2.circle(centroid_image, (cX, cY), 2, (255, 255, 255), -1)
centroids.append((cX, cY))
except:
pass
return centroid_image, centroids
class cell_membrane_segmentor(InferenceEngine):
def __init__(self, device = "cpu"):
"""Segments the c. elegans embryo from images/videos,
depends on segmentation-models-pytorch for the model backbone
Args:
device (str, optional): set to "cuda", runs operations on gpu and set to "cpu", runs operations on cpu. Defaults to "cpu".
"""
self.device = device
self.ENCODER = 'resnet18'
self.ENCODER_WEIGHTS = 'imagenet'
self.CLASSES = ["nucleus"]
self.ACTIVATION = 'sigmoid'
self.in_channels = 1
self.model_url = "https://github.com/DevoLearn/devolearn/raw/master/devolearn/cell_membrane_segmentor/cell_membrane_segmentation_model.pth"
self.model_name = "cell_membrane_segmentation_model.pth"
self.model_dir = os.path.dirname(__file__)
# print("at : ", os.path.dirname(__file__))
self.model = smp.FPN(
encoder_name= self.ENCODER,
encoder_weights= self.ENCODER_WEIGHTS,
classes=len(self.CLASSES),
activation= self.ACTIVATION,
in_channels = self.in_channels
)
self.download_checkpoint()
self.model.to(self.device)
self.model.eval()
self.mini_transform = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((256,256), interpolation = Image.NEAREST),
transforms.ToTensor(),
])
def download_checkpoint(self):
try:
# print("model already downloaded, loading model...")
self.model = torch.load(self.model_dir + "/" + self.model_name, map_location= self.device)
except:
print("model not found, downloading from:", self.model_url)
if os.path.isdir(self.model_dir) == False:
os.mkdir(self.model_dir)
filename = wget.download(self.model_url, out= self.model_dir)
# print(filename)
self.model = torch.load(self.model_dir + "/" + self.model_name, map_location= self.device)
def preprocess(self, image_grayscale_numpy):
tensor = self.mini_transform(image_grayscale_numpy).unsqueeze(0).to(self.device)
return tensor
def predict(self, image_path, pred_size = (350,250), centroid_mode = False):
"""Loads an image from image_path and converts it to grayscale,
then passes it through the model and returns centroids of the segmented features.
reference{
https://github.com/DevoLearn/devolearn#segmenting-the-c-elegans-embryo
}
Args:
image_path (str): path to image
pred_size (tuple, optional): size of output image,(width,height). Defaults to (350,250).
centroid_mode (bool, optional): set to true to return both the segmented image and the list of centroids. Defaults to False.
Returns:
centroid_mode set to False:
np.array : 1 channel image.
centroid_mode set to True:
np.array : 1 channel image,
list : list of centroids.
"""
im = cv2.imread(image_path,0)
tensor = self.preprocess(im)
res = self.model(tensor).detach().cpu().numpy()[0][0]
res = cv2.resize(res,pred_size)
if centroid_mode == False:
return res
else:
centroid_image, centroids = generate_centroid_image(res)
return centroid_image, centroids
def predict_from_video(self, video_path, pred_size = (350,250), save_folder = "preds", centroid_mode = False, notebook_mode = False):
"""Splits a video from video_path into frames and passes the
frames through the model for predictions. Saves predicted images in save_folder.
And optionally saves all the centroid predictions into a pandas.DataFrame.
Args:
video_path (str): path to the video file.
pred_size (tuple, optional): size of output image,(width,height). Defaults to (350,250).
save_folder (str, optional): path to folder to be saved in. Defaults to "preds".
centroid_mode (bool, optional): set to true to return both the segmented image and the list of centroids. Defaults to False.
            notebook_mode (bool, optional): toggle between script (False) and notebook (True), for a better user interface. Defaults to False.
Returns:
centroid_mode set to True:
                pd.DataFrame : containing the file names and their centroids
centroid_mode set to False:
list : list containing the names of the entries in the save_folder directory
"""
vidObj = cv2.VideoCapture(video_path)
success = 1
images = deque()
count = 0
if centroid_mode == True:
filenames_centroids = []
while success:
success, image = vidObj.read()
try:
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
images.append(image)
except:
# print("skipped possible corrupt frame number : ", count)
pass
count += 1
if os.path.isdir(save_folder) == False:
os.mkdir(save_folder)
if notebook_mode == True:
for i in tqdm_notebook(range(len(images)), desc = "saving predictions: "):
save_name = save_folder + "/" + str(i) + ".jpg"
tensor = self.mini_transform(images[i]).unsqueeze(0).to(self.device)
res = self.model(tensor).detach().cpu().numpy()[0][0]
if centroid_mode == True:
res, centroids = generate_centroid_image(res)
filenames_centroids.append([save_name, centroids])
res = cv2.resize(res,pred_size)
cv2.imwrite(save_name, res*255)
else :
for i in tqdm(range(len(images)), desc = "saving predictions: "):
save_name = save_folder + "/" + str(i) + ".jpg"
tensor = self.mini_transform(images[i]).unsqueeze(0).to(self.device)
res = self.model(tensor).detach().cpu().numpy()[0][0]
if centroid_mode == True:
res, centroids = generate_centroid_image(res)
filenames_centroids.append([save_name, centroids])
res = cv2.resize(res,pred_size)
cv2.imwrite(save_name, res*255)
if centroid_mode == True:
df = pd.DataFrame(filenames_centroids, columns = ["filenames", "centroids"])
return df
else:
return os.listdir(save_folder)
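# Example usage (sketch; the image path below is a placeholder):
# segmentor = cell_membrane_segmentor(device="cpu")
# mask, centroids = segmentor.predict("embryo_frame.png", centroid_mode=True)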
| [
"torch.load"
] | 1.7.0 | kingjuno/devolearn | 555c8c55441a4f0b9ed8801c37d07c45b03ec774 |
1.4 | import os
import os.path as osp
import numpy as np
from summit.benchmarks.experiment_emulator.emulator import Emulator
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from blitz.modules import BayesianLinear
from blitz.utils import variational_estimator
from sklearn.metrics import r2_score
# =======================================================================
class BNNEmulator(Emulator):
"""BNN Emulator
A Bayesian Neural Network (BNN) emulator.
Parameters
---------
domain: summit.domain.Domain
The domain of the experiment
dataset: class:~summit.utils.dataset.DataSet, optional
A DataSet with data for training where the data columns correspond to the domain and the data rows correspond to the training points.
By default: None
model_name: string, optional
Name of the model that is used for saving model parameters. Should be unique.
By default: "dataset_emulator_model_name"
"""
# =======================================================================
def __init__(self, domain, dataset, model_name, kwargs={}):
super().__init__(domain, dataset, model_name, kwargs)
self._model = self._setup_model()
# Set model name for saving
self.save_path = kwargs.get(
"save_path",
osp.join(osp.dirname(osp.realpath(__file__)), "trained_models/BNN"),
)
# Set up training hyperparameters
self.set_training_hyperparameters()
# =======================================================================
def _setup_model(self, **kwargs):
""" Setup the BNN model """
@variational_estimator
class BayesianRegressor(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.blinear1 = BayesianLinear(input_dim, 24)
self.blinear2 = BayesianLinear(24, 24)
self.blinear3 = BayesianLinear(24, 24)
self.blinear4 = BayesianLinear(24, 1)
def forward(self, x):
x = F.leaky_relu(self.blinear1(x))
x = F.leaky_relu(self.blinear2(x))
x = F.dropout(x, p=0.1, training=self.training)
x = F.leaky_relu(self.blinear3(x))
x = F.dropout(x, p=0.1, training=self.training)
x = F.relu(self.blinear4(x))
y = x
return y.view(-1)
# Training of model on given dataloader
def _train(self, regressor, device, optimizer, criterion, X_train, loader):
regressor.train()
for i, (datapoints, labels) in enumerate(loader):
optimizer.zero_grad()
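                    # sample_elbo draws sample_nbr weight samples, averages the data
                    # loss (criterion) over them and adds the KL term scaled by
                    # complexity_cost_weight = 1 / len(training set), i.e. a
                    # per-datapoint ELBO (description of how the blitz API is used here).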
loss = regressor.sample_elbo(
inputs=datapoints.to(device),
labels=labels.to(device),
criterion=criterion,
sample_nbr=3,
complexity_cost_weight=1 / X_train.shape[0],
)
loss.backward()
optimizer.step()
# Evaluate model for given dataloader
def _evaluate_regression(
self,
regressor,
device,
loader,
fun_untransform_data,
out_transform,
get_predictions=False,
):
regressor.eval()
regressor.freeze_()
mae = 0
pred_data = []
real_data = []
for i, (datapoints, labels) in enumerate(loader):
data = datapoints.to(device)
pred = regressor(data)
tmp_pred_data = fun_untransform_data(
data=pred, reduce=out_transform[0], divide=out_transform[1]
)
tmp_real_data = fun_untransform_data(
data=labels, reduce=out_transform[0], divide=out_transform[1]
)
mae += (tmp_pred_data - tmp_real_data).abs().sum(0).item()
if get_predictions:
pred_data.extend(tmp_pred_data.tolist())
real_data.extend(tmp_real_data.tolist())
if get_predictions:
return pred_data, real_data
regressor.unfreeze_()
return mae / len(loader.dataset)
regression_model = BayesianRegressor(self.input_dim)
return regression_model
# =======================================================================
def set_training_hyperparameters(self, kwargs={}):
# Setter method for hyperparameters of training
self.epochs = kwargs.get(
"epochs", 300
) # number of max epochs the model is trained
self.initial_lr = kwargs.get("initial_lr", 0.001) # initial learning rate
self.min_lr = kwargs.get("min_lr", 0.00001)
self.lr_decay = kwargs.get("lr_decay", 0.7) # learning rate decay
self.lr_decay_patience = kwargs.get(
"lr_decay_patience", 3
) # number of epochs before learning rate is reduced by lr_decay
self.early_stopping_epochs = kwargs.get(
"early_stopping_epochs", 30
) # number of epochs before early stopping
self.batch_size_train = kwargs.get("batch_size_train", 4)
self.transform_input = kwargs.get("transform_input", "standardize")
self.transform_output = kwargs.get("transform_output", "standardize")
self.test_size = kwargs.get("test_size", 0.1)
self.shuffle = kwargs.get("shuffle", False)
# =======================================================================
def train_model(self, dataset=None, verbose=True, kwargs={}):
# Manual call of training -> overwrite dataset with new dataset for training
if dataset is not None:
self._dataset = dataset
# #<cv_fold>-fold cross-validation
cv_fold = kwargs.get("cv_fold", 10)
# Data preprocess
train_dataset, test_dataset = self._data_preprocess(
transform_input=self.transform_input,
transform_output=self.transform_output,
test_size=self.test_size,
shuffle=self.shuffle,
)
X_train_init, y_train_init = (
torch.tensor(train_dataset[0]).float(),
torch.tensor(train_dataset[1]).float(),
)
X_test, y_test = (
torch.tensor(test_dataset[0]).float(),
torch.tensor(test_dataset[1]).float(),
)
shuffle_train = kwargs.get("shuffle_train", False)
if shuffle_train:
perm = torch.randperm(len(y_train_init))
train_data = torch.cat([X_train_init, y_train_init], axis=1)[perm]
X_train, y_train = (
train_data[:, : -self.output_dim],
train_data[:, -self.output_dim :],
)
else:
X_train, y_train = X_train_init, y_train_init
if verbose:
print("\n<---- Start training of BNN model ---->")
print(" --- Length of train dataset: {} ---".format(X_train.shape[0]))
print(" --- Length of test dataset: {} ---".format(X_test.shape[0]))
for i, k in enumerate(self.output_models):
if verbose:
print(
"\n <-- Start {}-fold cross-validation training of BNN regressor on objective: {} -->\n".format(
cv_fold, k
)
)
train_acc, val_acc, test_acc = [], [], []
y_train_pred_l, y_train_real_l, y_test_pred_l, y_test_real_l = (
[],
[],
[],
[],
)
for j in range(cv_fold):
if verbose:
print(" ---------------- Split {} ----------------".format(j + 1))
# Set training details
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
regressor = self._setup_model().to(device)
optimizer = optim.Adam(regressor.parameters(), lr=self.initial_lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
factor=self.lr_decay,
patience=self.lr_decay_patience,
min_lr=self.min_lr,
)
criterion = torch.nn.MSELoss()
model_save_name = (
self.model_name + "_" + str(k) + "_" + str(j + 1) + "_BNN_model.pt"
)
model_save_dir = osp.join(self.save_path, model_save_name)
storable = self._check_file_path(model_save_dir)
if not storable:
self.output_models[k] = self._load_model(self.model_name)[k]
continue
# Setup train and val dataset for cross-validation
if cv_fold <= 1:
raise ValueError(
"{}-fold Cross-Validation not possible. Increase cv_fold.".format(
cv_fold
)
)
if len(X_train) < cv_fold:
raise ValueError(
"Too few data points ({}) for training provided. Decrease cv_fold.".format(
len(X_train)
)
)
n = len(X_train) // cv_fold
r = len(X_train) % cv_fold
val_mask = torch.zeros(len(X_train), dtype=torch.uint8)
# make sure every data point is included in the validation set once
if j < r:
val_mask[j * (n + 1) : (j + 1) * (n + 1)] = 1
else:
val_mask[j * n + r : (j + 1) * n + r] = 1
X_val_cv, y_val_cv = X_train[val_mask], y_train[val_mask]
X_train_cv, y_train_cv = X_train[1 - val_mask], y_train[1 - val_mask]
out_transform = self.data_transformation_dict[k]
y_train_obj, y_val_obj, y_test_obj = (
y_train_cv[:, i],
y_val_cv[:, i],
y_test[:, i],
)
ds_train = torch.utils.data.TensorDataset(X_train_cv, y_train_obj)
dataloader_train = torch.utils.data.DataLoader(
ds_train, batch_size=self.batch_size_train, shuffle=True
)
ds_val = torch.utils.data.TensorDataset(X_val_cv, y_val_obj)
dataloader_val = torch.utils.data.DataLoader(
ds_val, batch_size=16, shuffle=False
)
ds_test = torch.utils.data.TensorDataset(X_test, y_test_obj)
dataloader_test = torch.utils.data.DataLoader(
ds_test, batch_size=16, shuffle=False
)
max_iter_stop = (
self.early_stopping_epochs
) # maximum number of consecutive iteration w/o improvement after which training is stopped
tmp_iter_stop = 0
best_train_mae, best_val_mae, best_test_mae = (
float("inf"),
float("inf"),
float("inf"),
)
for epoch in range(self.epochs):
lr = scheduler.optimizer.param_groups[0]["lr"]
# train model
self._model._train(
regressor,
device,
optimizer,
criterion,
X_train_cv,
dataloader_train,
)
train_mae = self._model._evaluate_regression(
regressor,
device,
dataloader_train,
self._untransform_data,
out_transform,
)
val_mae = self._model._evaluate_regression(
regressor,
device,
dataloader_val,
self._untransform_data,
out_transform,
)
scheduler.step(val_mae)
if verbose:
print(
" -- Epoch: {:03d}, LR: {:7f}, Train MAE: {:4f}, Val MAE: {:4f}".format(
epoch, lr, train_mae, val_mae
)
)
# if prediction accuracy was improved in current epoch, reset <tmp_iter_stop> and save model
if best_val_mae > val_mae:
best_val_mae = val_mae
tmp_iter_stop = 0
torch.save(regressor.state_dict(), model_save_dir)
test_mae = self._model._evaluate_regression(
regressor,
device,
dataloader_test,
self._untransform_data,
out_transform,
)
best_train_mae, best_test_mae = train_mae, test_mae
if verbose:
print(
" -> Val MAE improved, current Test MAE: {:4f}".format(
test_mae
)
)
                    # if prediction accuracy was not improved in the current epoch, increase <tmp_iter_stop> and stop training once <max_iter_stop> is reached
else:
tmp_iter_stop += 1
if tmp_iter_stop >= max_iter_stop:
break
train_acc.append(best_train_mae)
val_acc.append(best_val_mae)
test_acc.append(best_test_mae)
y_train_obj = y_train_init[:, i]
ds_train_all = torch.utils.data.TensorDataset(X_train_init, y_train_obj)
# load final model from epoch with lowest prediction accuracy
regressor.load_state_dict(torch.load(model_save_dir))
# get final model predictions for training and test data
y_train_pred, y_train_real = self._model._evaluate_regression(
regressor=regressor,
device=device,
loader=torch.utils.data.DataLoader(ds_train_all, shuffle=False),
fun_untransform_data=self._untransform_data,
out_transform=out_transform,
get_predictions=True,
)
y_test_pred, y_test_real = self._model._evaluate_regression(
regressor=regressor,
device=device,
loader=torch.utils.data.DataLoader(ds_test, shuffle=False),
fun_untransform_data=self._untransform_data,
out_transform=out_transform,
get_predictions=True,
)
y_train_pred_l.append(y_train_pred), y_train_real_l.append(y_train_real)
y_test_pred_l.append(y_test_pred), y_test_real_l.append(y_test_real)
train_acc, val_acc, test_acc = (
torch.tensor(train_acc),
torch.tensor(val_acc),
torch.tensor(test_acc),
)
y_train_pred_l, y_train_real_l, y_test_pred_l, y_test_real_l = (
torch.tensor(y_train_pred_l),
torch.tensor(y_train_real_l),
torch.tensor(y_test_pred_l),
torch.tensor(y_test_real_l),
)
X_train_final = np.asarray(X_train_init.tolist())
X_test_final = np.asarray(X_test.tolist())
for ind, inp_var in enumerate(self.input_names_transformable):
tmp_inp_transform = self.data_transformation_dict[inp_var]
X_train_final[:, ind] = self._untransform_data(
data=X_train_final[:, ind],
reduce=tmp_inp_transform[0],
divide=tmp_inp_transform[1],
)
X_test_final[:, ind] = self._untransform_data(
data=X_test_final[:, ind],
reduce=tmp_inp_transform[0],
divide=tmp_inp_transform[1],
)
self.output_models[k] = {
"model_save_dirs": [
self.model_name + "_" + str(k) + "_" + str(j + 1)
for j in range(cv_fold)
],
"Final train MAE": train_acc.mean().tolist(),
"Final validation MAE": val_acc.mean().tolist(),
"Final test MAE": test_acc.mean().tolist(),
"data_transformation_dict": self.data_transformation_dict,
"X variable names": self.input_names,
"X_train": X_train_final.tolist(),
"y_train_real": y_train_real_l.mean(axis=0).tolist(),
"y_train_pred_average": y_train_pred_l.mean(axis=0).tolist(),
"X_test": X_test_final.tolist(),
"y_test_real": y_test_real_l.mean(axis=0).tolist(),
"y_test_pred_average": y_test_pred_l.mean(axis=0).tolist(),
}
if verbose:
print(
"\n <-- Finished training of BNN model on objective: {} -->\n"
" -- Final Train MAE: {:4f}, Final Val MAE: {:4f}, Final Test MAE: {:4f} --\n"
" -- Model saved at: {} --\n".format(
k,
train_acc.mean(),
val_acc.mean(),
test_acc.mean(),
model_save_dir,
)
)
self._save_model()
if verbose:
print("<---- End training of BNN regressor ---->\n")
# =======================================================================
def validate_model(
self, dataset=None, parity_plots=False, get_pred=False, kwargs={}
):
self.output_models = self._load_model(self.model_name)
self._model.freeze_() # freeze the model, in order to predict using only their weight distribution means
self._model.eval() # set to evaluation mode (may be redundant)
val_dict = {}
lst_parity_plots = None
if parity_plots:
lst_parity_plots = []
if dataset is not None:
for i, (k, v) in enumerate(self.output_models.items()):
model_load_dirs = v["model_save_dirs"]
self.data_transformation_dict = v["data_transformation_dict"]
out_transform = self.data_transformation_dict[k]
X_val = self._data_preprocess(
inference=True, infer_dataset=dataset, validate=True
)
X_val = torch.tensor(X_val).float()
y_val = torch.tensor(dataset[(k, "DATA")].to_numpy()).float()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
prediction_l = []
for m in model_load_dirs:
model_load_dir = osp.join(self.save_path, m + "_BNN_model.pt")
self._model.load_state_dict(
torch.load(model_load_dir, map_location=torch.device(device))
)
data = X_val.to(device)
predictions = self._model(data).detach()
predictions = self._untransform_data(
data=predictions,
reduce=out_transform[0],
divide=out_transform[1],
)
prediction_l.append(predictions)
prediction_l = torch.tensor(prediction_l)
predictions = prediction_l.mean(axis=0)
val_dict[k] = {
"MAE": (predictions - y_val).abs().mean().item(),
"RMSE": ((((predictions - y_val) ** 2).mean()) ** (1 / 2)).item(),
"r2": r2_score(y_val, predictions)
if y_val.shape[0] > 1
else "Too few data points to calculate r2.",
}
if parity_plots:
parity_plot = self.create_parity_plot(
datasets_pred=[predictions],
datasets_real=[y_val],
kwargs=kwargs,
)
lst_parity_plots.append(parity_plot)
else:
for i, (k, v) in enumerate(self.output_models.items()):
y_train_real, y_train_pred, y_test_real, y_test_pred = (
torch.tensor(v["y_train_real"]).float(),
torch.tensor(v["y_train_pred_average"]).float(),
torch.tensor(v["y_test_real"]).float(),
torch.tensor(v["y_test_pred_average"]).float(),
)
val_dict[k] = {
"Train": {
"MAE": (y_train_real - y_train_pred).abs().mean().item(),
"RMSE": (
(((y_train_real - y_train_pred) ** 2).mean()) ** (1 / 2)
).item(),
"r2": r2_score(y_train_real, y_train_pred)
if y_train_pred.shape[0] > 1
else "Too few data points to calculate r2.",
},
"Test": {
"MAE": (y_test_real - y_test_pred).abs().mean().item(),
"RMSE": (
(((y_test_real - y_test_pred) ** 2).mean()) ** (1 / 2)
).item(),
"r2": r2_score(y_test_real, y_test_pred)
if y_test_pred.shape[0] > 1
else "Too few data points to calculate r2.",
},
}
if parity_plots:
parity_plot = self.create_parity_plot(
datasets_pred=[y_train_pred, y_test_pred],
datasets_real=[y_train_real, y_test_real],
kwargs=kwargs,
)
lst_parity_plots.append(parity_plot)
if get_pred:
return predictions
return val_dict, lst_parity_plots
# =======================================================================
def infer_model(self, dataset):
self.output_models = self._load_model(self.model_name)
self._model.eval() # set to evaluation mode (may be redundant)
self._model.freeze_() # freeze the model, in order to predict using only their weight distribution means
infer_dict = {}
for i, (k, v) in enumerate(self.output_models.items()):
model_load_dirs = v["model_save_dirs"]
self.data_transformation_dict = v["data_transformation_dict"]
out_transform = self.data_transformation_dict[k]
X_infer = self._data_preprocess(inference=True, infer_dataset=dataset)
X_infer = torch.tensor(X_infer).float()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
prediction_l = []
for m in model_load_dirs:
model_load_dir = osp.join(self.save_path, m + "_BNN_model.pt")
self._model.load_state_dict(
torch.load(model_load_dir, map_location=torch.device(device))
)
data = X_infer.to(device)
predictions = self._model(data).item()
predictions = self._untransform_data(
data=predictions, reduce=out_transform[0], divide=out_transform[1]
)
prediction_l.append(predictions)
prediction_l = torch.tensor(prediction_l)
predictions = prediction_l.mean(axis=0).item()
infer_dict[k] = predictions
return infer_dict
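# Minimal illustrative sketch of the ensemble averaging and error metrics used in
# validate_model/infer_model above; the helper name and the toy numbers are
# assumptions for illustration, not part of the original class.
def _ensemble_metrics_sketch():
    import torch  # already imported by this module; repeated so the sketch stands alone
    # hypothetical predictions from three saved ensemble members for two validation points
    member_preds = torch.tensor([[1.0, 2.0], [1.2, 1.8], [0.8, 2.2]])
    y_val = torch.tensor([1.1, 2.1])
    predictions = member_preds.mean(axis=0)  # average over the ensemble, as above
    mae = (predictions - y_val).abs().mean().item()
    rmse = ((((predictions - y_val) ** 2).mean()) ** (1 / 2)).item()
    return {"MAE": mae, "RMSE": rmse}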
| [
"torch.device",
"torch.cat",
"torch.nn.MSELoss",
"torch.nn.functional.dropout",
"torch.cuda.is_available",
"torch.tensor",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.utils.data.DataLoader",
"torch.load",
"torch.utils.data.TensorDataset"
] | 1.4.0 | jezsadler/summit | 982de7f6424bb94da2084d4d84396b4b2673eeca |
1.4 | from __future__ import division
import argparse
import numpy as np
import torch
from dim_red.triplet import train_triplet
from dim_red.angular import train_angular
from dim_red.support_func import sanitize
from dim_red.data import load_dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('dataset options')
aa("--database", default="sift")
aa("--method", type=str, default="triplet")
group = parser.add_argument_group('Model hyperparameters')
aa("--dout", type=int, default=16,
help="output dimension")
aa("--dint", type=int, default=1024)
group = parser.add_argument_group('Computation params')
aa("--seed", type=int, default=1234)
aa("--device", choices=["cuda", "cpu", "auto"], default="auto")
aa("--val_freq", type=int, default=10,
help="frequency of validation calls")
aa("--optim", type=str, default="sgd")
aa("--print_results", type=int, default=0)
aa("--save", type=int, default=0)
aa("--full", type=int, default=0)
aa("--val_freq_search", type=int, default=5,
help="frequency of validation calls")
aa("--save_knn_1k", type=int, default=0)
aa("--save_optimal", type=int, default=0)
aa("--batch_size", type=int, default=64)
aa("--epochs", type=int, default=40)
aa("--lr_schedule", type=str, default="0.1,0.1,0.05,0.01")
aa("--momentum", type=float, default=0.9)
args = parser.parse_args()
if args.device == "auto":
args.device = "cuda" if torch.cuda.is_available() else "cpu"
np.random.seed(args.seed)
torch.manual_seed(args.seed)
print(args)
results_file_name = "/home/shekhale/results/dim_red/" + args.database + "/train_results_" + args.method + ".txt"
if args.print_results > 0:
with open(results_file_name, "a") as rfile:
rfile.write("\n\n")
rfile.write("START TRAINING \n")
print ("load dataset %s" % args.database)
(_, xb, xq, _) = load_dataset(args.database, args.device, calc_gt=False, mnt=True)
base_size = xb.shape[0]
threshold = int(base_size * 0.01)
perm = np.random.permutation(base_size)
xv = xb[perm[:threshold]]
if args.full:
xt = xb
else:
xt = xb[perm[threshold:]]
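    # Note: `threshold` is 1% of the base set; `xv` holds that 1% out for validation,
    # while `xt` is either the remaining 99% or (with --full) the entire base set,
    # in which case the training and validation vectors overlap.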
print(xb.shape, xt.shape, xv.shape, xq.shape)
xt = sanitize(xt)
xv = sanitize(xv)
xb = sanitize(xb)
xq = sanitize(xq)
if args.method == "triplet":
train_triplet(xb, xt, xv, xq, args, results_file_name)
elif args.method == "angular":
train_angular(xb, xt, xv, xq, args, results_file_name, perm)
else:
        print("Select an available method")
| [
"torch.manual_seed",
"torch.cuda.is_available"
] | 1.4.0 | symphony233/gbnns_dim_red | 2403411600a60ad4365aba3d78a81da144a456b7 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics.functional.regression.psnr import _psnr_compute, _psnr_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
class PSNR(Metric):
r"""
Computes `peak signal-to-noise ratio <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_ (PSNR):
.. math:: \text{PSNR}(I, J) = 10 * \log_{10} \left(\frac{\max(I)^2}{\text{MSE}(I, J)}\right)
Where :math:`\text{MSE}` denotes the `mean-squared-error
<https://en.wikipedia.org/wiki/Mean_squared_error>`_ function.
Args:
data_range:
the range of the data. If None, it is determined from the data (max - min).
The ``data_range`` must be given when ``dim`` is not None.
base: a base of a logarithm to use (default: 10)
reduction: a method to reduce metric score over labels.
- ``'elementwise_mean'``: takes the mean (default)
- ``'sum'``: takes the sum
- ``'none'``: no reduction will be applied
dim:
Dimensions to reduce PSNR scores over, provided as either an integer or a list of integers. Default is
None meaning scores will be reduced across all dimensions and all batches.
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Raises:
ValueError:
If ``dim`` is not ``None`` and ``data_range`` is not given.
Example:
>>> from torchmetrics import PSNR
>>> psnr = PSNR()
>>> preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
>>> psnr(preds, target)
tensor(2.5527)
.. note::
        Half precision is only supported on GPU for this metric
"""
def __init__(
self,
data_range: Optional[float] = None,
base: float = 10.0,
reduction: str = 'elementwise_mean',
dim: Optional[Union[int, Tuple[int, ...]]] = None,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
if dim is None and reduction != 'elementwise_mean':
rank_zero_warn(f'The `reduction={reduction}` will not have any effect when `dim` is None.')
if dim is None:
self.add_state("sum_squared_error", default=tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=tensor(0), dist_reduce_fx="sum")
else:
self.add_state("sum_squared_error", default=[])
self.add_state("total", default=[])
if data_range is None:
if dim is not None:
# Maybe we could use `torch.amax(target, dim=dim) - torch.amin(target, dim=dim)` in PyTorch 1.7 to
# calculate `data_range` in the future.
raise ValueError("The `data_range` must be given when `dim` is not None.")
self.data_range = None
self.add_state("min_target", default=tensor(0.0), dist_reduce_fx=torch.min)
self.add_state("max_target", default=tensor(0.0), dist_reduce_fx=torch.max)
else:
self.add_state("data_range", default=tensor(float(data_range)), dist_reduce_fx='mean')
self.base = base
self.reduction = reduction
self.dim = tuple(dim) if isinstance(dim, Sequence) else dim
def update(self, preds: Tensor, target: Tensor):
"""
Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
sum_squared_error, n_obs = _psnr_update(preds, target, dim=self.dim)
if self.dim is None:
if self.data_range is None:
# keep track of min and max target values
self.min_target = min(target.min(), self.min_target)
self.max_target = max(target.max(), self.max_target)
self.sum_squared_error += sum_squared_error
self.total += n_obs
else:
self.sum_squared_error.append(sum_squared_error)
self.total.append(n_obs)
def compute(self):
"""
Compute peak signal-to-noise ratio over state.
"""
if self.data_range is not None:
data_range = self.data_range
else:
data_range = self.max_target - self.min_target
if self.dim is None:
sum_squared_error = self.sum_squared_error
total = self.total
else:
sum_squared_error = torch.cat([values.flatten() for values in self.sum_squared_error])
total = torch.cat([values.flatten() for values in self.total])
return _psnr_compute(sum_squared_error, total, data_range, base=self.base, reduction=self.reduction)
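# Minimal usage sketch for the per-dimension path (illustrative; the toy tensors and
# the chosen data_range are assumptions, not taken from the documentation above).
def _psnr_dim_sketch():
    preds = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
    target = torch.tensor([[3.0, 2.0], [1.0, 0.0]])
    # data_range is mandatory whenever dim is set, since min/max tracking is skipped
    psnr = PSNR(data_range=3.0, dim=0, reduction='none')
    return psnr(preds, target)  # one score per reduced slice, no reduction applied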
| [
"torch.tensor"
] | 1.3.1 | IgorHoholko/metrics | 5510ccd99eaec5ab8175bbd5e2ad9e66e82d10e4 |
1.8 | """
taken from: https://github.com/karpathy/minGPT/
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""
import math
import logging
import torch
import torch.nn as nn
from torch.nn import functional as F
logger = logging.getLogger(__name__)
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.1
resid_pdrop = 0.1
attn_pdrop = 0.1
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k,v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class GPT2Config(GPTConfig):
""" GPT-2 like network roughly 1.5B params """
# TODO
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here, but I am including an
    explicit implementation to show that there is nothing too scary here.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
# causal mask to ensure that attention is only applied to the left in the input sequence
mask = torch.tril(torch.ones(config.block_size,
config.block_size))
if hasattr(config, "n_unmasked"):
mask[:config.n_unmasked, :config.n_unmasked] = 1
self.register_buffer("mask", mask.view(1, 1, config.block_size, config.block_size))
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(), # nice
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x):
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, vocab_size, block_size, n_layer=12, n_head=8, n_embd=256,
embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):
super().__init__()
config = GPTConfig(vocab_size=vocab_size, block_size=block_size,
embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,
n_layer=n_layer, n_head=n_head, n_embd=n_embd,
n_unmasked=n_unmasked)
# input embedding stem
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.block_size = config.block_size
self.apply(self._init_weights)
self.config = config
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, idx, embeddings=None, targets=None):
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
if embeddings is not None: # prepend explicit embeddings
token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
t = token_embeddings.shape[1]
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
return logits, loss
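# Minimal shape-check sketch for the GPT class above (illustrative; the toy sizes
# are arbitrary assumptions).
def _gpt_shape_sketch():
    model = GPT(vocab_size=100, block_size=16, n_layer=1, n_head=2, n_embd=32)
    idx = torch.zeros(2, 16, dtype=torch.long)  # a batch of 2 sequences of 16 token ids
    logits, loss = model(idx)                   # loss is None when no targets are given
    return logits.shape                         # expected: (2, 16, vocab_size)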
class DummyGPT(nn.Module):
# for debugging
def __init__(self, add_value=1):
super().__init__()
self.add_value = add_value
def forward(self, idx):
return idx + self.add_value, None
class CodeGPT(nn.Module):
"""Takes in semi-embeddings"""
def __init__(self, vocab_size, block_size, in_channels, n_layer=12, n_head=8, n_embd=256,
embd_pdrop=0., resid_pdrop=0., attn_pdrop=0., n_unmasked=0):
super().__init__()
config = GPTConfig(vocab_size=vocab_size, block_size=block_size,
embd_pdrop=embd_pdrop, resid_pdrop=resid_pdrop, attn_pdrop=attn_pdrop,
n_layer=n_layer, n_head=n_head, n_embd=n_embd,
n_unmasked=n_unmasked)
# input embedding stem
self.tok_emb = nn.Linear(in_channels, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.block_size = config.block_size
self.apply(self._init_weights)
self.config = config
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, idx, embeddings=None, targets=None):
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
if embeddings is not None: # prepend explicit embeddings
token_embeddings = torch.cat((embeddings, token_embeddings), dim=1)
t = token_embeddings.shape[1]
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
return logits, loss
#### sampling utils
def top_k_logits(logits, k):
v, ix = torch.topk(logits, k)
out = logits.clone()
out[out < v[:, [-1]]] = -float('Inf')
return out
@torch.no_grad()
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
for k in range(steps):
x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
logits, _ = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
return x
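# Minimal usage sketch for sample() (illustrative; assumes a small toy GPT like the
# one in the shape sketch after the GPT class).
def _sample_sketch():
    model = GPT(vocab_size=100, block_size=16, n_layer=1, n_head=2, n_embd=32)
    prompt = torch.zeros(1, 4, dtype=torch.long)  # a dummy 4-token prompt
    out = sample(model, prompt, steps=8, temperature=1.0, sample=True, top_k=10)
    return out.shape                              # expected: (1, 4 + 8)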
#### clustering utils
class KMeans(nn.Module):
def __init__(self, ncluster=512, nc=3, niter=10):
super().__init__()
self.ncluster = ncluster
self.nc = nc
self.niter = niter
self.shape = (3,32,32)
self.register_buffer("C", torch.zeros(self.ncluster,nc))
self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
def is_initialized(self):
return self.initialized.item() == 1
@torch.no_grad()
def initialize(self, x):
N, D = x.shape
assert D == self.nc, D
c = x[torch.randperm(N)[:self.ncluster]] # init clusters at random
for i in range(self.niter):
# assign all pixels to the closest codebook element
a = ((x[:, None, :] - c[None, :, :])**2).sum(-1).argmin(1)
# move each codebook element to be the mean of the pixels that assigned to it
c = torch.stack([x[a==k].mean(0) for k in range(self.ncluster)])
# re-assign any poorly positioned codebook elements
nanix = torch.any(torch.isnan(c), dim=1)
ndead = nanix.sum().item()
print('done step %d/%d, re-initialized %d dead clusters' % (i+1, self.niter, ndead))
c[nanix] = x[torch.randperm(N)[:ndead]] # re-init dead clusters
self.C.copy_(c)
self.initialized.fill_(1)
def forward(self, x, reverse=False, shape=None):
if not reverse:
# flatten
bs,c,h,w = x.shape
assert c == self.nc
x = x.reshape(bs,c,h*w,1)
C = self.C.permute(1,0)
C = C.reshape(1,c,1,self.ncluster)
a = ((x-C)**2).sum(1).argmin(-1) # bs, h*w indices
return a
else:
# flatten
bs, HW = x.shape
"""
c = self.C.reshape( 1, self.nc, 1, self.ncluster)
c = c[bs*[0],:,:,:]
c = c[:,:,HW*[0],:]
x = x.reshape(bs, 1, HW, 1)
x = x[:,3*[0],:,:]
x = torch.gather(c, dim=3, index=x)
"""
x = self.C[x]
x = x.permute(0,2,1)
shape = shape if shape is not None else self.shape
x = x.reshape(bs, *shape)
return x
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.LayerNorm",
"torch.cat",
"torch.isnan",
"torch.no_grad",
"torch.randperm",
"torch.ones",
"torch.multinomial",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.nn.GELU",
"torch.nn.Embedding",
"torch.topk"
] | 1.8.1 | adeptflax/image2image | 8c7c531176d261789f90a27125b31d6241bc9c27 |
1.2 | """
Example template for defining a system
"""
import os
from argparse import ArgumentParser
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision.datasets import MNIST
import pytorch_lightning as pl
from pytorch_lightning.root_module.root_module import LightningModule
class LightningTemplateModel(LightningModule):
"""
Sample model to show how to define a template
"""
def __init__(self, hparams):
"""
Pass in parsed HyperOptArgumentParser to the model
:param hparams:
"""
# init superclass
super(LightningTemplateModel, self).__init__()
self.hparams = hparams
self.batch_size = hparams.batch_size
# if you specify an example input, the summary will show input/output for each layer
self.example_input_array = torch.rand(5, 28 * 28)
# build model
self.__build_model()
# ---------------------
# MODEL SETUP
# ---------------------
def __build_model(self):
"""
Layout model
:return:
"""
self.c_d1 = nn.Linear(in_features=self.hparams.in_features,
out_features=self.hparams.hidden_dim)
self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)
self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)
self.c_d2 = nn.Linear(in_features=self.hparams.hidden_dim,
out_features=self.hparams.out_features)
# ---------------------
# TRAINING
# ---------------------
def forward(self, x):
"""
No special modification required for lightning, define as you normally would
:param x:
:return:
"""
x = self.c_d1(x)
x = torch.tanh(x)
x = self.c_d1_bn(x)
x = self.c_d1_drop(x)
x = self.c_d2(x)
logits = F.log_softmax(x, dim=1)
return logits
def loss(self, labels, logits):
nll = F.nll_loss(logits, labels)
return nll
def training_step(self, batch, batch_idx):
"""
Lightning calls this inside the training loop
:param batch:
:return:
"""
# forward pass
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
# calculate loss
loss_val = self.loss(y, y_hat)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp or self.trainer.use_ddp2:
loss_val = loss_val.unsqueeze(0)
tqdm_dict = {'train_loss': loss_val}
output = OrderedDict({
'loss': loss_val,
'progress_bar': tqdm_dict,
'log': tqdm_dict
})
# can also return just a scalar instead of a dict (return loss_val)
return output
def validation_step(self, batch, batch_idx):
"""
Lightning calls this inside the validation loop
:param batch:
:return:
"""
x, y = batch
x = x.view(x.size(0), -1)
y_hat = self.forward(x)
loss_val = self.loss(y, y_hat)
# acc
labels_hat = torch.argmax(y_hat, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
val_acc = torch.tensor(val_acc)
if self.on_gpu:
val_acc = val_acc.cuda(loss_val.device.index)
# in DP mode (default) make sure if result is scalar, there's another dim in the beginning
if self.trainer.use_dp or self.trainer.use_ddp2:
loss_val = loss_val.unsqueeze(0)
val_acc = val_acc.unsqueeze(0)
output = OrderedDict({
'val_loss': loss_val,
'val_acc': val_acc,
})
# can also return just a scalar instead of a dict (return loss_val)
return output
def validation_end(self, outputs):
"""
Called at the end of validation to aggregate outputs
:param outputs: list of individual outputs of each validation step
:return:
"""
# if returned a scalar from validation_step, outputs is a list of tensor scalars
# we return just the average in this case (if we want)
# return torch.stack(outputs).mean()
val_loss_mean = 0
val_acc_mean = 0
for output in outputs:
val_loss = output['val_loss']
# reduce manually when using dp
if self.trainer.use_dp:
val_loss = torch.mean(val_loss)
val_loss_mean += val_loss
# reduce manually when using dp
val_acc = output['val_acc']
if self.trainer.use_dp or self.trainer.use_ddp2:
val_acc = torch.mean(val_acc)
val_acc_mean += val_acc
val_loss_mean /= len(outputs)
val_acc_mean /= len(outputs)
tqdm_dict = {'val_loss': val_loss_mean, 'val_acc': val_acc_mean}
result = {'progress_bar': tqdm_dict, 'log': tqdm_dict, 'val_loss': val_loss_mean}
return result
# ---------------------
# TRAINING SETUP
# ---------------------
def configure_optimizers(self):
"""
return whatever optimizers we want here
:return: list of optimizers
"""
optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
return [optimizer], [scheduler]
def __dataloader(self, train):
# init data generators
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
dataset = MNIST(root=self.hparams.data_root, train=train,
transform=transform, download=True)
# when using multi-node (ddp) we need to add the datasampler
train_sampler = None
batch_size = self.hparams.batch_size
if self.use_ddp:
train_sampler = DistributedSampler(dataset)
should_shuffle = train_sampler is None
loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=should_shuffle,
sampler=train_sampler,
num_workers=0
)
return loader
@pl.data_loader
def train_dataloader(self):
print('training data loader called')
return self.__dataloader(train=True)
@pl.data_loader
def val_dataloader(self):
print('val data loader called')
return self.__dataloader(train=False)
@pl.data_loader
def test_dataloader(self):
print('test data loader called')
return self.__dataloader(train=False)
@staticmethod
def add_model_specific_args(parent_parser, root_dir): # pragma: no cover
"""
Parameters you define here will be available to your model through self.hparams
:param parent_parser:
:param root_dir:
:return:
"""
parser = ArgumentParser(parents=[parent_parser])
# param overwrites
# parser.set_defaults(gradient_clip_val=5.0)
# network params
parser.add_argument('--in_features', default=28 * 28, type=int)
parser.add_argument('--out_features', default=10, type=int)
# use 500 for CPU, 50000 for GPU to see speed difference
parser.add_argument('--hidden_dim', default=50000, type=int)
parser.add_argument('--drop_prob', default=0.2, type=float)
parser.add_argument('--learning_rate', default=0.001, type=float)
# data
parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)
# training params (opt)
parser.add_argument('--optimizer_name', default='adam', type=str)
parser.add_argument('--batch_size', default=64, type=int)
return parser
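# Minimal end-to-end sketch (illustrative; it assumes Trainer is importable from
# pytorch_lightning in this version and only uses the argparse defaults defined above).
def _run_template_sketch():
    from pytorch_lightning import Trainer
    root_dir = os.path.dirname(os.path.realpath(__file__))
    parent_parser = ArgumentParser(add_help=False)
    parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir)
    hparams = parser.parse_args([])  # fall back to the defaults declared above
    model = LightningTemplateModel(hparams)
    trainer = Trainer()              # default settings; flags vary across lightning versions
    trainer.fit(model)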
| [
"torch.nn.Linear",
"torch.rand",
"torch.nn.Dropout",
"torch.nn.functional.nll_loss",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.nn.functional.log_softmax",
"torch.mean",
"torch.nn.BatchNorm1d",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.tanh",
"torch.argmax",
"torch.sum"
] | 1.2.0 | ryosukehata/pytorch-lightning | a5bd2edefbafa6e03acffd4ba1a8816bbc1682a3 |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
from functools import partial
__all__ = [
'ResNet', 'resnet10', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet200'
]
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = Variable(torch.cat([out.data, zero_pads], dim=1))
return out
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = nn.Conv3d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
sample_size,
sample_duration,
shortcut_type='B',
num_classes=400):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv3d(
3,
64,
kernel_size=7,
stride=(1, 2, 2),
padding=(3, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0], shortcut_type)
self.layer2 = self._make_layer(
block, 128, layers[1], shortcut_type, stride=2)
self.layer3 = self._make_layer(
block, 256, layers[2], shortcut_type, stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], shortcut_type, stride=2)
last_duration = int(math.ceil(sample_duration / 16))
last_size = int(math.ceil(sample_size / 32))
self.avgpool = nn.AvgPool3d(
(last_duration, last_size, last_size), stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
m.weight = nn.init.kaiming_normal(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def get_fine_tuning_parameters(model, ft_portion):
if ft_portion == "complete":
return model.parameters()
elif ft_portion == "last_layer":
ft_module_names = []
ft_module_names.append('classifier')
parameters = []
for k, v in model.named_parameters():
for ft_module in ft_module_names:
if ft_module in k:
parameters.append({'params': v})
break
else:
parameters.append({'params': v, 'lr': 0.0})
return parameters
else:
raise ValueError("Unsupported ft_portion: 'complete' or 'last_layer' expected")
def resnet10(**kwargs):
"""Constructs a ResNet-10 model.
"""
model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
return model
def resnet18(**kwargs):
"""Constructs a ResNet-18 model.
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(**kwargs):
"""Constructs a ResNet-34 model.
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(**kwargs):
"""Constructs a ResNet-50 model.
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101(**kwargs):
"""Constructs a ResNet-101 model.
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(**kwargs):
    """Constructs a ResNet-152 model.
    """
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
def resnet200(**kwargs):
    """Constructs a ResNet-200 model.
    """
model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
return model
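# Minimal construction sketch (illustrative; the clip resolution, duration and class
# count below are arbitrary assumptions).
def _build_resnet_sketch():
    # expects clips of shape (batch, 3, sample_duration, sample_size, sample_size)
    model = resnet10(sample_size=112, sample_duration=16,
                     shortcut_type='B', num_classes=27)
    params = get_fine_tuning_parameters(model, ft_portion="complete")  # train everything
    return model, params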
| [
"torch.nn.Linear",
"torch.nn.functional.avg_pool3d",
"torch.cat",
"torch.nn.Sequential",
"torch.nn.MaxPool3d",
"torch.nn.init.kaiming_normal",
"torch.nn.ReLU",
"torch.nn.Conv3d",
"torch.nn.AvgPool3d",
"torch.nn.BatchNorm3d"
] | 1.7.1 | Faiz99khan/ISL_hand_gesture_recognition_in_real-time | dade99478e9b37440ebe7fb7842d451582132f0a |
1.6 | import configparser
from adapter_entity_typing.network_classes.classifiers import EarlyStoppingWithColdStart
from torch.utils.data.dataloader import DataLoader
from adapter_entity_typing.network import load_model
from collections import defaultdict
import torch
import json
import numpy as np
from tqdm import tqdm
import sys
# parameter_tags = ['bert_ft_2_figer']
parameter_tags = [sys.argv[1]]
config = configparser.ConfigParser()
training_config_file = "result_scripts/generate_predictions_parameters.ini"
config.read("result_scripts/generate_predictions_parameters.ini")
print(list(config.keys()))
config = config[parameter_tags[0]]
sig = torch.nn.Sigmoid()
# model_path = config['ModelRootPath'] + config['ModelName']
# classifier = get_model(model_path)
# max_context_side_size = classifier.configuration('MaxContextSideSize')
# max_entity_size = classifier.configuration('MaxEntitySize')
# train_dataset, dev_dataset, test_dataset, label2id = prepare_entity_typing_datasets(classifier)
# vocab_len = len(id2label)
# add_classifier(model = classifier, labels = label2id)
# model = adapterPLWrapper.load_from_checkpoint(model_path,
# adapterClassifier = classifier,
# id2label = id2label,
# lr = 1e-4)
# model.cuda()
# model.eval()
micros = {
"p": [],
"r": [],
"f1": []}
macros = {
"p": [],
"r": [],
"f1": []}
macro_examples = {
"p": [],
"r": [],
"f1": []}
experiment_name = config['fileName']
performance_file = config['performanceFile'] + experiment_name
prediction_file = config['predictionFile'] + experiment_name
average_std_file = config['AvgStdFile'] + experiment_name
dev_or_test = config['dev_or_test']
if dev_or_test == 'both':
keys = ['dev', 'test']
elif dev_or_test == 'dev':
keys = ['dev']
elif dev_or_test == 'test':
keys = ['test']
else:
    raise Exception('please provide a meaningful value for "dev_or_test"')
macros = {k: {subk: [] for subk in keys} for k, v in macros.items()}
micros = {k: {subk: [] for subk in keys} for k, v in macros.items()}
macro_examples= {k: {subk: [] for subk in keys} for k, v in macros.items()}
for model, _, dev_dataset, test_dataset, label2id in load_model(parameter_tags[0]): # , "results_scripts/generate_preditcions_parameters.ini"):
dev_loader = DataLoader(dev_dataset, batch_size = 100, num_workers=20)
test_loader = DataLoader(test_dataset, batch_size = 100, num_workers=20)
id2label = {v: k for k,v in label2id.items()}
if dev_or_test == 'both':
data_to_pred = ['dev', 'test']
datasets = [dev_loader, test_loader]
dataset_paths = [model.configuration('PathInputDev'), model.configuration('PathInputTest')]
elif dev_or_test == 'dev':
data_to_pred = ['dev']
datasets = [dev_loader]
dataset_paths = [model.configuration('PathInputDev')]
elif dev_or_test == 'test':
data_to_pred = ['test']
datasets = [test_loader]
dataset_paths = [model.configuration('PathInputTest')]
else:
        raise Exception('please provide a meaningful value for "dev_or_test"')
for dataset_id, d in enumerate(data_to_pred):
all_preds = []
all_preds_and_logits = []
all_labels = []
top_k_labels = []
loader = datasets[dataset_id]
for mention, attn, labels in loader:
mention = mention.cuda()
attn = attn.cuda()
preds = sig(model(mention, attn))
batch_preds = []
batch_preds_and_logits = []
batch_top_k_labels = []
for i, pred in enumerate(preds):
mask = pred > .5
ex_preds = []
ex_preds_and_logits = []
pred_ids = mask.nonzero()
no_pred = True
for p in pred_ids:
ex_preds.append(id2label[p.item()])
ex_preds_and_logits.append((id2label[p.item()], round(preds[i][p].item(), 3)))
no_pred = False
# sort logits by pred
topk_values, topk_indexes = torch.topk(pred, k = 5)
top_k_l = []
for val, index in zip(topk_values, topk_indexes):
val = round(val.item(), 3)
lab = id2label[index.item()]
top_k_l.append((lab, val))
if no_pred:
ex_preds.append(top_k_l[0][0])
ex_preds_and_logits.append(top_k_l[0])
sorted_ex_preds_and_logits = sorted(ex_preds_and_logits, key=lambda tup: tup[1], reverse = True)
batch_preds.append(ex_preds)
batch_preds_and_logits.append(sorted_ex_preds_and_logits)
batch_top_k_labels.append(top_k_l)
all_preds.extend(batch_preds)
all_preds_and_logits.extend(batch_preds_and_logits)
top_k_labels.extend(batch_top_k_labels)
mask = labels == 1
batch_labels = []
for m in mask:
ex_labels = []
labels_ids = m.nonzero()
for l in labels_ids:
ex_labels.append(id2label[l.item()])
batch_labels.append(ex_labels)
all_labels.extend(batch_labels)
correct_count = defaultdict(int)
actual_count = defaultdict(int)
predict_count = defaultdict(int)
# compute singular class performances and macro performances
bar = tqdm(desc="computing macro performances", total=len(all_preds))
for labels, preds in zip(all_labels, all_preds):
for pred in preds:
predict_count[pred] += 1
if pred in labels:
correct_count[pred] += 1
for label in labels:
actual_count[label] += 1
bar.update(1)
bar.close()
def compute_f1(p, r):
return (2*p*r)/(p + r) if p + r else 0
precisions = {k: correct_count[k]/predict_count[k] if predict_count[k] else 0 for k in label2id.keys()}
recalls = {k: correct_count[k]/actual_count[k] if actual_count[k] else 0 for k in label2id.keys()}
f1s = {k: compute_f1(precisions[k], recalls[k]) for k in label2id.keys()}
macro_p = np.mean(list(precisions.values()))
macro_r = np.mean(list(recalls.values()))
macro_f1 = compute_f1(macro_p, macro_r)
macros['p'][d].append(macro_p)
macros['r'][d].append(macro_r)
macros['f1'][d].append(macro_f1)
#compute macro_example performances
ma_e_precisions = []
ma_e_recalls = []
n = len(all_labels)
bar = tqdm(desc="computing macro examples performances", total=len(all_preds))
for labels, preds in zip(all_labels, all_preds):
correct_preds = len(set(labels).intersection(set(preds)))
ma_e_precisions.append(correct_preds/len(preds))
ma_e_recalls.append(correct_preds / len(labels))
bar.update(1)
bar.close()
macro_example_p = np.mean(ma_e_precisions)
macro_example_r = np.mean(ma_e_recalls)
macro_example_f1 = compute_f1(macro_example_p, macro_example_r)
macro_examples['p'][d].append(macro_example_p)
macro_examples['r'][d].append(macro_example_r)
macro_examples['f1'][d].append(macro_example_f1)
#compute micro performances
micro_correct_counter = 0
micro_true_counter = 0
micro_pred_counter = 0
bar = tqdm(desc="computing micro performances", total=len(all_preds))
for labels, preds in zip(all_labels, all_preds):
micro_true_counter += len(labels)
micro_pred_counter += len(preds)
correct_preds = len(set(labels).intersection(set(preds)))
micro_correct_counter += correct_preds
bar.update(1)
bar.close()
micro_p = micro_correct_counter/micro_pred_counter
micro_r = micro_correct_counter/micro_true_counter
micro_f1 = compute_f1(micro_p, micro_r)
micros['p'][d].append(micro_p)
micros['r'][d].append(micro_r)
micros['f1'][d].append(micro_f1)
with open(dataset_paths[dataset_id], 'r') as inp:
lines = [json.loads(l) for l in inp.readlines()]
label_sentences = defaultdict(list)
bar = tqdm(desc="generating sentences", total=len(lines))
for l, preds_and_logits, top_k in zip(lines, all_preds_and_logits, top_k_labels):
sentence = ' '.join(l['left_context_token'])
sentence += ' ' + l['mention_span'] + ' '
sentence += ' '.join(l['right_context_token'])
labels = l['y_str']
for lab in labels:
label_sentences[lab].append((sentence, l['mention_span'], preds_and_logits, top_k, labels))
bar.update(1)
bar.close()
ordered_labels = list(sorted(label2id.keys()))
with open(prediction_file + '_' + d + '.txt', 'a') as out:
out.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format('label_#', 'precision',
'recall', 'f1', 'sentence', 'mention',
'preds_and_logits', 'top_k_labels_and_logits', 'true_labels'))
for label in ordered_labels:
i = 0
for sentence, mention, preds_and_logits, top_k, true_label in label_sentences[label]:
out_string = '{}\t{:.4f}\t{:.4f}\t{:.4f}\t{}\t{}\t{}\t{}\t{}\n'.format(label + '_' + str(i + 1),
precisions[label],
recalls[label],
f1s[label],
sentence,
mention,
preds_and_logits,
top_k,
true_label)
out.write(out_string)
i += 1
with open(performance_file + '_' + d + '.txt', 'a') as out:
out.write('{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format('macro_examples_p', 'macro_examples_r', 'macro_examples_f1',
'macro_p','macro_r', 'macro_f1',
'micro_p', 'micro_r', 'micro_f1'))
out.write('{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\t{:.4f}\n'.format(macro_example_p,
macro_example_r,
macro_example_f1,
macro_p,
macro_r,
macro_f1,
micro_p,
micro_r,
micro_f1))
name = {
"p": "precision",
"r": "recall",
"f1": "f1"
}
for d in keys:
results = {}
for result_name, result in zip(["micro", "macro", "example"],
[ micros, macros, macro_examples]):
print(result_name)
print(result)
print()
for k, v in result.items():
v = np.array(v[d])
mu = np.mean(v)
sd = np.std(v)
results["{}_{}".format(result_name, k)] = (mu, sd)
with open(average_std_file + '_'+ d + '.txt', 'a') as out:
# out.write('{:^40}\n'.format('-'))
out.write("model,mu,sd\n")
for k, (m, s) in results.items():
out.write('{},{:.4f},{:.4f}\n'.format(k, m, s))
out.write('\n')
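# Minimal toy sketch of the micro vs. macro-example aggregation computed above
# (illustrative; the label sets are made up).
def _f1_aggregation_sketch():
    def compute_f1(p, r):
        return (2 * p * r) / (p + r) if p + r else 0
    gold = [{"person"}, {"person", "artist"}]
    pred = [{"person"}, {"artist", "politician"}]
    # micro: pool every predicted/true label before computing precision and recall
    correct = sum(len(g & p) for g, p in zip(gold, pred))
    micro_p = correct / sum(len(p) for p in pred)
    micro_r = correct / sum(len(g) for g in gold)
    # macro-example: average per-example precision and recall first
    ma_p = sum(len(g & p) / len(p) for g, p in zip(gold, pred)) / len(gold)
    ma_r = sum(len(g & p) / len(g) for g, p in zip(gold, pred)) / len(gold)
    return compute_f1(micro_p, micro_r), compute_f1(ma_p, ma_r)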
| [
"torch.utils.data.dataloader.DataLoader",
"torch.nn.Sigmoid",
"torch.topk"
] | 1.6.0 | NooneBug/adapter_on_entity_typing | b8d2850dbed47adbf21c9a8021cef69a9b5d60dd |
1.7 | # Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import tests.test_utils as test_utils
import torch
from VisualBERT.mmf.common.sample import Sample, SampleList
from VisualBERT.mmf.models.mmbt import MMBT
from VisualBERT.mmf.modules.encoders import (
ImageEncoderFactory,
ImageEncoderTypes,
ResNet152ImageEncoder,
TextEncoderFactory,
TextEncoderTypes,
)
from VisualBERT.mmf.utils.build import build_model
from VisualBERT.mmf.utils.configuration import Configuration
from VisualBERT.mmf.utils.env import setup_imports
from omegaconf import OmegaConf
class TestMMBTTorchscript(unittest.TestCase):
def setUp(self):
test_utils.setup_proxy()
setup_imports()
model_name = "mmbt"
args = test_utils.dummy_args(model=model_name)
configuration = Configuration(args)
config = configuration.get_config()
model_config = config.model_config[model_name]
model_config["training_head_type"] = "classification"
model_config["num_labels"] = 2
model_config.model = model_name
self.finetune_model = build_model(model_config)
def test_load_save_finetune_model(self):
self.assertTrue(test_utils.verify_torchscript_models(self.finetune_model))
def test_finetune_model(self):
self.finetune_model.eval()
test_sample = Sample()
test_sample.input_ids = torch.randint(low=0, high=30255, size=(128,)).long()
test_sample.input_mask = torch.ones(128).long()
test_sample.segment_ids = torch.zeros(128).long()
test_sample.image = torch.rand((3, 300, 300)).float()
test_sample_list = SampleList([test_sample.copy()])
with torch.no_grad():
model_output = self.finetune_model.model(test_sample_list)
test_sample_list = SampleList([test_sample])
script_model = torch.jit.script(self.finetune_model.model)
with torch.no_grad():
script_output = script_model(test_sample_list)
self.assertTrue(torch.equal(model_output["scores"], script_output["scores"]))
def test_modal_end_token(self):
self.finetune_model.eval()
        # Suppose 0 for <cls>, 1 for <pad>, 2 for <sep>
CLS = 0
PAD = 1
SEP = 2
size = 128
input_ids = torch.randint(low=0, high=30255, size=(size,)).long()
input_mask = torch.ones(size).long()
input_ids[0] = CLS
length = torch.randint(low=2, high=size - 1, size=(1,))
input_ids[length] = SEP
input_ids[length + 1 :] = PAD
input_mask[length + 1 :] = 0
test_sample = Sample()
test_sample.input_ids = input_ids.clone()
test_sample.input_mask = input_mask.clone()
test_sample.segment_ids = torch.zeros(size).long()
test_sample.image = torch.rand((3, 300, 300)).float()
test_sample_list = SampleList([test_sample])
mmbt_base = self.finetune_model.model.bert
with torch.no_grad():
actual_modal_end_token = mmbt_base.extract_modal_end_token(test_sample_list)
expected_modal_end_token = torch.zeros([1]).fill_(SEP).long()
self.assertTrue(torch.equal(actual_modal_end_token, expected_modal_end_token))
self.assertTrue(torch.equal(test_sample_list.input_ids[0, :-1], input_ids[1:]))
self.assertTrue(
torch.equal(test_sample_list.input_mask[0, :-1], input_mask[1:])
)
class TestMMBTConfig(unittest.TestCase):
def test_mmbt_from_params(self):
# default init
mmbt = MMBT.from_params(
modal_encoder=ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152,
params=ResNet152ImageEncoder.Config(pretrained=False),
),
text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
config = OmegaConf.structured(
MMBT.Config(
modal_encoder=ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152,
params=ResNet152ImageEncoder.Config(pretrained=False),
),
text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
)
self.assertIsNotNone(mmbt)
# Make sure that the config is created from MMBT.Config
self.assertEqual(mmbt.config, config)
def test_mmbt_pretrained(self):
test_utils.setup_proxy()
mmbt = MMBT.from_params()
self.assertIsNotNone(mmbt)
def test_mmbt_directly_from_config(self):
config = OmegaConf.structured(
MMBT.Config(
modal_encoder=ImageEncoderFactory.Config(
type=ImageEncoderTypes.resnet152,
params=ResNet152ImageEncoder.Config(pretrained=False),
),
text_encoder=TextEncoderFactory.Config(type=TextEncoderTypes.identity),
)
)
mmbt = MMBT(config)
self.assertIsNotNone(mmbt)
# Make sure that the config is created from MMBT.Config
self.assertEqual(mmbt.config, config)
| [
"torch.zeros",
"torch.rand",
"torch.no_grad",
"torch.ones",
"torch.randint",
"torch.jit.script",
"torch.equal"
] | 1.7.0 | Fostereee/Transformer-MM-Explainability | 6dc4925b83a38e39069369da599b11d548128eb5 |
1.7 | # Copyright (c) Facebook, Inc. and its affiliates.
"""
Text utils module contains implementations for various decoding strategies like
Greedy, Beam Search and Nucleus Sampling.
In your model's config you can specify ``inference`` attribute to use these strategies
in the following way:
.. code::
model_config:
some_model:
inference:
- type: greedy
- params: {}
"""
import os
import re
from collections import Counter
from itertools import chain
import torch
from VisualBERT.mmf.common.registry import registry
from VisualBERT.mmf.utils.file_io import PathManager
from VisualBERT.mmf.utils.general import get_absolute_path
SENTENCE_SPLIT_REGEX = re.compile(r"(\W+)")
def generate_ngrams(tokens, n=1):
"""Generate ngrams for particular 'n' from a list of tokens
Args:
tokens (List[str]): List of tokens for which the ngram are to be generated
n (int, optional): n for which ngrams are to be generated. Defaults to 1.
Returns:
List[str]: List of ngrams generated.
"""
shifted_tokens = (tokens[i:] for i in range(n))
tuple_ngrams = zip(*shifted_tokens)
return (" ".join(i) for i in tuple_ngrams)
def generate_ngrams_range(tokens, ngram_range=(1, 3)):
"""Generates and returns a list of ngrams for all n present in ngram_range
Args:
tokens (List[str]): List of string tokens for which ngram are to be generated
ngram_range (List[int], optional): List of 'n' for which ngrams are to be
generated. For e.g. if ngram_range = (1, 4) then it will returns
1grams, 2grams and 3grams. Defaults to (1, 3).
Returns:
List[str]: List of ngrams for each n in ngram_range
"""
assert len(ngram_range) == 2, (
"'ngram_range' should be a tuple" " of two elements which is range of numbers"
)
return chain(*(generate_ngrams(tokens, i) for i in range(*ngram_range)))
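# Minimal usage sketch for the ngram helpers above (illustrative).
def _ngram_sketch():
    tokens = ["a", "man", "rides", "a", "horse"]
    bigrams = list(generate_ngrams(tokens, n=2))
    # -> ['a man', 'man rides', 'rides a', 'a horse']
    all_ngrams = list(generate_ngrams_range(tokens, ngram_range=(1, 3)))
    # unigrams followed by bigrams: ngram_range is half-open on the right
    return bigrams, all_ngrams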
def tokenize(sentence, regex=SENTENCE_SPLIT_REGEX, keep=None, remove=None):
if keep is None:
keep = ["'s"]
if remove is None:
remove = [",", "?"]
sentence = sentence.lower()
for token in keep:
sentence = sentence.replace(token, " " + token)
for token in remove:
sentence = sentence.replace(token, "")
tokens = regex.split(sentence)
tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
return tokens
def word_tokenize(word, remove=None):
if remove is None:
remove = [",", "?"]
word = word.lower()
for item in remove:
word = word.replace(item, "")
word = word.replace("'s", " 's")
return word.strip()
def load_str_list(fname):
with PathManager.open(fname) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
return lines
class VocabDict:
UNK_TOKEN = "<unk>"
PAD_TOKEN = "<pad>"
START_TOKEN = "<s>"
END_TOKEN = "</s>"
PAD_INDEX = 0
SOS_INDEX = 1
EOS_INDEX = 2
UNK_INDEX = 3
def __init__(self, vocab_file, data_dir=None):
if not os.path.isabs(vocab_file) and data_dir is not None:
vocab_file = get_absolute_path(os.path.join(data_dir, vocab_file))
if not PathManager.exists(vocab_file):
raise RuntimeError(f"Vocab file {vocab_file} for vocab dict doesn't exist")
self.word_list = load_str_list(vocab_file)
self._build()
def _build(self):
if self.UNK_TOKEN not in self.word_list:
self.word_list = [self.UNK_TOKEN] + self.word_list
self.word2idx_dict = {w: n_w for n_w, w in enumerate(self.word_list)}
# String (word) to integer (index) dict mapping
self.stoi = self.word2idx_dict
# Integer to string (word) reverse mapping
self.itos = self.word_list
self.num_vocab = len(self.word_list)
self.UNK_INDEX = (
self.word2idx_dict[self.UNK_TOKEN]
if self.UNK_TOKEN in self.word2idx_dict
else None
)
self.PAD_INDEX = (
self.word2idx_dict[self.PAD_TOKEN]
if self.PAD_TOKEN in self.word2idx_dict
else None
)
def idx2word(self, n_w):
return self.word_list[n_w]
def __len__(self):
return len(self.word_list)
def get_size(self):
return len(self.word_list)
def get_unk_index(self):
return self.UNK_INDEX
def get_unk_token(self):
return self.UNK_TOKEN
def word2idx(self, w):
if w in self.word2idx_dict:
return self.word2idx_dict[w]
elif self.UNK_INDEX is not None:
return self.UNK_INDEX
else:
raise ValueError(
"word %s not in dictionary \
(while dictionary does not contain <unk>)"
% w
)
def tokenize_and_index(self, sentence):
inds = [self.word2idx(w) for w in tokenize(sentence)]
return inds
class VocabFromText(VocabDict):
DEFAULT_TOKENS = [
VocabDict.PAD_TOKEN,
VocabDict.UNK_TOKEN,
VocabDict.START_TOKEN,
VocabDict.END_TOKEN,
]
def __init__(
self,
sentences,
min_count=1,
regex=SENTENCE_SPLIT_REGEX,
keep=None,
remove=None,
only_unk_extra=False,
):
if keep is None:
keep = []
if remove is None:
remove = []
token_counter = Counter()
for sentence in sentences:
tokens = tokenize(sentence, regex=regex, keep=keep, remove=remove)
token_counter.update(tokens)
token_list = []
for token in token_counter:
if token_counter[token] >= min_count:
token_list.append(token)
extras = self.DEFAULT_TOKENS
if only_unk_extra:
extras = [self.UNK_TOKEN]
self.word_list = extras + token_list
self._build()
class TextDecoder:
"""Base class to be inherited by all decoding strategies. Contains
implementations that are common for all strategies.
Args:
vocab (list): Collection of all words in vocabulary.
"""
def __init__(self, vocab):
self._vocab = vocab
self._vocab_size = vocab.get_size()
# Lists to store completed sequences and scores
self._complete_seqs = []
self._complete_seqs_scores = []
def init_batch(self, sample_list):
img_size = sample_list.image_feature_0.size()
self._batch_size, feature_size_1, feature_size_2 = img_size
t_batch_size = self._batch_size * self._decode_size
self.seqs = sample_list.answers.new_full(
(t_batch_size, 1), self._vocab.SOS_INDEX, dtype=torch.long
)
sample_list.image_feature_0 = (
sample_list.image_feature_0.unsqueeze(1)
.expand(-1, self._decode_size, -1, -1)
.reshape(t_batch_size, feature_size_1, feature_size_2)
)
self.sample_list = sample_list
return sample_list
def add_next_word(self, seqs, prev_word_inds, next_word_inds):
return torch.cat([seqs[prev_word_inds], next_word_inds.unsqueeze(1)], dim=1)
def find_complete_inds(self, next_word_inds):
incomplete_inds = []
for ind, next_word in enumerate(next_word_inds):
if next_word != self._vocab.EOS_INDEX:
incomplete_inds.append(ind)
complete_inds = list(set(range(len(next_word_inds))) - set(incomplete_inds))
return complete_inds, incomplete_inds
def update_data(self, data, prev_word_inds, next_word_inds, incomplete_inds):
data["texts"] = next_word_inds[incomplete_inds].unsqueeze(1)
h1 = data["state"]["td_hidden"][0][prev_word_inds[incomplete_inds]]
c1 = data["state"]["td_hidden"][1][prev_word_inds[incomplete_inds]]
h2 = data["state"]["lm_hidden"][0][prev_word_inds[incomplete_inds]]
c2 = data["state"]["lm_hidden"][1][prev_word_inds[incomplete_inds]]
data["state"] = {"td_hidden": (h1, c1), "lm_hidden": (h2, c2)}
return data
@registry.register_decoder("beam_search")
class BeamSearch(TextDecoder):
def __init__(self, vocab, config):
super().__init__(vocab)
self._decode_size = config["inference"]["params"]["beam_length"]
def init_batch(self, sample_list):
self.sample_list = super().init_batch(sample_list)
# initialize with t_batch_size = _batch_size * _decode_size
self.top_k_scores = sample_list.answers.new_zeros(
(self._batch_size * self._decode_size, 1), dtype=torch.float
)
# maintain _decode_size, _complete_seqs and _complete_seqs_scores
# for each example in a batch.
self._decode_sizes = [self._decode_size] * self._batch_size
for _ in range(self._batch_size):
self._complete_seqs.append([])
self._complete_seqs_scores.append([])
return self.sample_list
def decode(self, t, data, scores):
# Add predicted scores to top_k_scores
scores = torch.nn.functional.log_softmax(scores, dim=1)
scores = self.top_k_scores.expand_as(scores) + scores
# Find next top k scores and words. We flatten the scores tensor here
# and get the top_k_scores and their indices top_k_words
top_k_scores, top_k_words = [], []
ex_start = 0
for decode_size in self._decode_sizes:
ex_end = ex_start + decode_size
if t == 0:
top_k_score, top_k_word = scores[ex_start].topk(
decode_size, 0, True, True
)
else:
top_k_score, top_k_word = (
scores[ex_start:ex_end].view(-1).topk(decode_size, 0, True, True)
)
top_k_scores.extend(top_k_score)
top_k_words.append(top_k_word)
ex_start = ex_end
self.top_k_scores = torch.stack(top_k_scores)
# Convert to vocab indices. top_k_words contain indices from a flattened
# k x vocab_size tensor. To get prev_word_indices we divide top_k_words
# by vocab_size to determine which index in the beam among k generated
# the next top_k_word. To get next_word_indices we take top_k_words
# modulo vocab_size index. For example :
# vocab_size : 9491
# top_k_words : [610, 7, 19592, 9529, 292]
# prev_word_ind : [0, 0, 2, 1, 0]
# next_word_ind : [610, 7, 610, 38, 292]
# further, shift the prev_word_ind by ex_start to find corresponding example
# within a batch.
ex_start = 0
prev_word_inds, next_word_inds = [], []
for ex_idx, decode_size in enumerate(self._decode_sizes):
prev_word_inds.extend((top_k_words[ex_idx] // self._vocab_size) + ex_start)
next_word_inds.extend(top_k_words[ex_idx] % self._vocab_size)
ex_start += decode_size
prev_word_inds = torch.stack(prev_word_inds)
next_word_inds = torch.stack(next_word_inds)
# Add new words to sequences
self.seqs = self.add_next_word(self.seqs, prev_word_inds, next_word_inds)
# Find completed sequences
complete_inds, incomplete_inds = self.find_complete_inds(next_word_inds)
# Add to completed sequences and Reduce beam length
ex_start = 0
for ex_idx, decode_size in enumerate(self._decode_sizes):
for beam_idx in range(ex_start, ex_start + decode_size):
if beam_idx in complete_inds:
top_k_score = self.top_k_scores[beam_idx]
self._complete_seqs[ex_idx].append(self.seqs[beam_idx].tolist())
self._complete_seqs_scores[ex_idx].append(top_k_score)
self._decode_sizes[ex_idx] -= 1
ex_start += decode_size
# Proceed with incomplete sequences
if sum(self._decode_sizes) == 0:
return True, data, 0
self.seqs = self.seqs[incomplete_inds]
self.top_k_scores = self.top_k_scores[incomplete_inds].unsqueeze(1)
# TODO: Make the data update generic for any type of model
# This is specific to BUTD model only.
image_feature_0 = self.sample_list.image_feature_0
self.sample_list.image_feature_0 = image_feature_0[incomplete_inds]
data = self.update_data(data, prev_word_inds, next_word_inds, incomplete_inds)
next_beam_length = len(prev_word_inds[incomplete_inds])
return False, data, next_beam_length
def get_result(self):
captions = []
max_len = 0
for ex_idx in range(len(self._complete_seqs_scores)):
if len(self._complete_seqs_scores[ex_idx]) == 0:
captions.append([0] * 5)
max_len = max(5, max_len)
else:
max_score = max(self._complete_seqs_scores[ex_idx])
max_idx = self._complete_seqs_scores[ex_idx].index(max_score)
captions.append(self._complete_seqs[ex_idx][max_idx])
max_len = max(max_len, len(captions[-1]))
for ex_idx in range(len(captions)):
padded_tokens = [self._vocab.PAD_INDEX] * (max_len - len(captions[ex_idx]))
captions[ex_idx].extend(padded_tokens)
return torch.FloatTensor(captions)
@registry.register_decoder("nucleus_sampling")
class NucleusSampling(TextDecoder):
"""Nucleus Sampling is a new text decoding strategy that avoids likelihood
maximization. Rather, it works by sampling from the smallest set of top
tokens which have a cumulative probability greater than a specified
threshold.
Present text decoding strategies like beam search do not work well on open-ended
generation tasks (even on strong language models like GPT-2). They tend to repeat
text a lot and the main reason behind it is that they try to maximize likelihood,
    in contrast to human-generated text, which has a mix of high and low
probability tokens.
Nucleus Sampling is a stochastic approach and resolves this issue. Moreover,
it improves upon other stochastic methods like top-k sampling by choosing the
right amount of tokens to sample from. The overall result is better text
generation on the same language model.
Link to the paper introducing Nucleus Sampling (Section 6) -
https://arxiv.org/pdf/1904.09751.pdf
Args:
vocab (list): Collection of all words in vocabulary.
sum_threshold (float): Ceiling of sum of probabilities of tokens to
sample from.
"""
def __init__(self, vocab, config):
super().__init__(vocab)
self._decode_size = 1
# Threshold for sum of probability
self._threshold = config["inference"]["params"]["sum_threshold"]
def decode(self, t, data, scores):
# Convert scores to probabilities
scores = torch.nn.functional.softmax(scores, dim=1)
# Sort scores in descending order and then select the top m elements having
# sum more than threshold.
# We get the top_m_scores and their indices top_m_words
if t == 0:
top_m_scores, top_m_words = scores[0].sort(0, True)
else:
top_m_scores, top_m_words = scores.view(-1).sort(0, True)
last_index = 0
score_sum = 0
for score in top_m_scores:
last_index += 1
score_sum += score
if score_sum >= self._threshold:
break
top_m_scores = torch.div(top_m_scores[:last_index], score_sum)
top_m_words = top_m_words[:last_index]
# Zero value inside prev_word_inds because we are predicting a single
# stream of output.
prev_word_ind = torch.tensor([0])
# Get next word based on probabilities of top m words.
next_word_ind = top_m_words[torch.multinomial(top_m_scores, 1)]
# Add next word to sequence
self.seqs = self.add_next_word(self.seqs, prev_word_ind, next_word_ind)
# Check if sequence is complete
complete_inds, incomplete_inds = self.find_complete_inds(next_word_ind)
# If sequence is complete then return
if len(complete_inds) > 0:
self._complete_seqs.extend(self.seqs[complete_inds].tolist())
return True, data, 0
self.seqs = self.seqs[incomplete_inds]
data = self.update_data(data, prev_word_ind, next_word_ind, incomplete_inds)
return False, data, 1
def get_result(self):
if len(self._complete_seqs) == 0:
captions = torch.FloatTensor([0] * 5).unsqueeze(0)
else:
captions = torch.FloatTensor(self._complete_seqs[0]).unsqueeze(0)
return captions
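# --- Added illustrative sketch (not part of the original decoder): a standalone, minimal
# --- demonstration of the nucleus (top-p) truncation idea that ``decode`` above implements.
# --- The logits tensor and threshold passed in are hypothetical example values.
def _toy_nucleus_sample(logits, threshold=0.9):
    probs = torch.nn.functional.softmax(logits, dim=-1)
    sorted_probs, sorted_words = probs.sort(0, True)
    # keep the smallest prefix of tokens whose cumulative probability reaches the threshold
    keep = int((sorted_probs.cumsum(0) < threshold).sum().item()) + 1
    kept_probs = sorted_probs[:keep] / sorted_probs[:keep].sum()
    # sample the next word id from the renormalized nucleus
    return sorted_words[torch.multinomial(kept_probs, 1)]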
| [
"torch.stack",
"torch.FloatTensor",
"torch.nn.functional.log_softmax",
"torch.multinomial",
"torch.tensor",
"torch.nn.functional.softmax",
"torch.div"
] | 1.7.0 | Fostereee/Transformer-MM-Explainability | 6dc4925b83a38e39069369da599b11d548128eb5 |
1.2 | """
``KnowledgeGraphField`` is a ``Field`` which stores a knowledge graph representation.
"""
from typing import Callable, Dict, List, Set
from collections import defaultdict
import editdistance
from overrides import overrides
import torch
from allennlp.common import util
from allennlp.common.checks import ConfigurationError
from allennlp.data.fields.field import Field
from allennlp.data.token_indexers.token_indexer import TokenIndexer, TokenType
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from allennlp.data.tokenizers.token import Token
from allennlp.data.tokenizers import Tokenizer, WordTokenizer
from allennlp.data.vocabulary import Vocabulary
from allennlp.nn import util as nn_util
from allennlp.semparse.contexts.knowledge_graph import KnowledgeGraph
TokenList = List[TokenType]
class KnowledgeGraphField(Field[Dict[str, torch.Tensor]]):
"""
A ``KnowledgeGraphField`` represents a ``KnowledgeGraph`` as a ``Field`` that can be used in a
``Model``. For each entity in the graph, we output two things: a text representation of the
entity, handled identically to a ``TextField``, and a list of linking features for each token
in some input utterance.
The output of this field is a dictionary::
{
"text": Dict[str, torch.Tensor], # each tensor has shape (batch_size, num_entities, num_entity_tokens)
"linking": torch.Tensor # shape (batch_size, num_entities, num_utterance_tokens, num_features)
}
The ``text`` component of this dictionary is suitable to be passed into a
``TextFieldEmbedder`` (which handles the additional ``num_entities`` dimension without any
issues). The ``linking`` component of the dictionary can be used however you want to decide
which tokens in the utterance correspond to which entities in the knowledge graph.
In order to create the ``text`` component, we use the same dictionary of ``TokenIndexers``
that's used in a ``TextField`` (as we're just representing the text corresponding to each
entity). For the ``linking`` component, we use a set of hard-coded feature extractors that
operate between the text corresponding to each entity and each token in the utterance.
Parameters
----------
knowledge_graph : ``KnowledgeGraph``
The knowledge graph that this field stores.
utterance_tokens : ``List[Token]``
The tokens in some utterance that is paired with the ``KnowledgeGraph``. We compute a set
of features for linking tokens in the utterance to entities in the graph.
tokenizer : ``Tokenizer``, optional (default=``WordTokenizer()``)
We'll use this ``Tokenizer`` to tokenize the text representation of each entity.
token_indexers : ``Dict[str, TokenIndexer]``
Token indexers that convert entities into arrays, similar to how text tokens are treated in
a ``TextField``. These might operate on the name of the entity itself, its type, its
neighbors in the graph, etc.
feature_extractors : ``List[str]``, optional
Names of feature extractors to use for computing linking features. These must be
attributes of this object, without the first underscore. The feature extraction functions
are listed as the last methods in this class. For example, to use
:func:`_exact_token_match`, you would pass the string ``exact_token_match``. We will add
an underscore and look for a function matching that name. If this list is omitted, we will
use all available feature functions.
entity_tokens : ``List[List[Token]]``, optional
        If you have pre-computed the tokenization of the table text, you can pass it in here. This
must be a list of the tokens in the entity text, for each entity in the knowledge graph, in
the same order in which the knowledge graph returns entities.
linking_features : ``List[List[List[float]]]``, optional
If you have pre-computed the linking features between the utterance and the table text, you
can pass it in here.
include_in_vocab : ``bool``, optional (default=True)
If this is ``False``, we will skip the ``count_vocab_items`` logic, leaving out all table
entity text from the vocabulary computation. You might want to do this if you have a lot
of rare entities in your tables, and you see the same table in multiple training instances,
so your vocabulary counts get skewed and include too many rare entities.
max_table_tokens : ``int``, optional
If given, we will only keep this number of total table tokens. This bounds the memory
usage of the table representations, truncating cells with really long text. We specify a
total number of tokens, not a max cell text length, because the number of table entities
varies.
"""
def __init__(
self,
knowledge_graph: KnowledgeGraph,
utterance_tokens: List[Token],
token_indexers: Dict[str, TokenIndexer],
tokenizer: Tokenizer = None,
feature_extractors: List[str] = None,
entity_tokens: List[List[Token]] = None,
linking_features: List[List[List[float]]] = None,
include_in_vocab: bool = True,
max_table_tokens: int = None,
) -> None:
self.knowledge_graph = knowledge_graph
self._tokenizer = tokenizer or WordTokenizer(word_splitter=SpacyWordSplitter(pos_tags=True))
if not entity_tokens:
entity_texts = [
knowledge_graph.entity_text[entity].lower() for entity in knowledge_graph.entities
]
# TODO(mattg): Because we do tagging on each of these entities in addition to just
# tokenizations, this is quite slow, and about half of our data processing time just
# goes to this (~15 minutes when there are 7k instances). The reason we do tagging is
# so that we can add lemma features. If we can remove the need for lemma / other
# hand-written features, like with a CNN, we can cut down our data processing time by a
# factor of 2.
self.entity_texts = self._tokenizer.batch_tokenize(entity_texts)
else:
self.entity_texts = entity_tokens
self.utterance_tokens = utterance_tokens
self._token_indexers: Dict[str, TokenIndexer] = token_indexers
self._include_in_vocab = include_in_vocab
self._indexed_entity_texts: Dict[str, TokenList] = None
self._max_table_tokens = max_table_tokens
feature_extractors = (
feature_extractors
if feature_extractors is not None
else [
"number_token_match",
"exact_token_match",
"contains_exact_token_match",
"lemma_match",
"contains_lemma_match",
"edit_distance",
"related_column",
"related_column_lemma",
"span_overlap_fraction",
"span_lemma_overlap_fraction",
]
)
self._feature_extractors: List[
Callable[[str, List[Token], Token, int, List[Token]], float]
] = []
for feature_extractor_name in feature_extractors:
extractor = getattr(self, "_" + feature_extractor_name, None)
if not extractor:
raise ConfigurationError(
f"Invalid feature extractor name: {feature_extractor_name}"
)
self._feature_extractors.append(extractor)
if not linking_features:
# For quicker lookups in our feature functions, we'll additionally store some
# dictionaries that map entity strings to useful information about the entity.
self._entity_text_map: Dict[str, List[Token]] = {}
for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
self._entity_text_map[entity] = entity_text
self._entity_text_exact_text: Dict[str, Set[str]] = {}
for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
self._entity_text_exact_text[entity] = set(e.text for e in entity_text)
self._entity_text_lemmas: Dict[str, Set[str]] = {}
for entity, entity_text in zip(knowledge_graph.entities, self.entity_texts):
self._entity_text_lemmas[entity] = set(e.lemma_ for e in entity_text)
self.linking_features = self._compute_linking_features()
else:
self.linking_features = linking_features
@overrides
def count_vocab_items(self, counter: Dict[str, Dict[str, int]]):
if self._include_in_vocab:
for indexer in self._token_indexers.values():
for entity_text in self.entity_texts:
for token in entity_text:
indexer.count_vocab_items(token, counter)
@overrides
def index(self, vocab: Vocabulary):
self._indexed_entity_texts = {}
for indexer_name, indexer in self._token_indexers.items():
indexer_arrays: Dict[str, List] = defaultdict(list)
for entity_text in self.entity_texts:
for index_name, indexed in indexer.tokens_to_indices(
entity_text, vocab, indexer_name
).items():
indexer_arrays[index_name].append(indexed)
self._indexed_entity_texts.update(indexer_arrays)
@overrides
def get_padding_lengths(self) -> Dict[str, int]:
num_entities = len(self.entity_texts)
num_entity_tokens = max(len(entity_text) for entity_text in self.entity_texts)
if self._max_table_tokens:
# This truncates the number of entity tokens used, enabling larger tables (either in
# the number of entities in the table, or the number of tokens per entity) to fit in
# memory, particularly when using ELMo.
if num_entities * num_entity_tokens > self._max_table_tokens:
num_entity_tokens = int(self._max_table_tokens / num_entities)
padding_lengths = {
"num_entities": num_entities,
"num_utterance_tokens": len(self.utterance_tokens),
}
padding_lengths["num_entity_tokens"] = num_entity_tokens
lengths = []
assert self._indexed_entity_texts is not None, (
"This field is not indexed yet. Call "
".index(vocab) before determining padding "
"lengths."
)
for indexer_name, indexer in self._token_indexers.items():
indexer_lengths = {}
# This is a list of dicts, one for each token in the field.
entity_lengths = [
indexer.get_padding_lengths(token)
for entity_text in self._indexed_entity_texts[indexer_name]
for token in entity_text
]
# Iterate over the keys in the first element of the list. This is fine as for a given
# indexer, all entities will return the same keys, so we can just use the first one.
for key in entity_lengths[0].keys():
indexer_lengths[key] = max(x.get(key, 0) for x in entity_lengths)
lengths.append(indexer_lengths)
# Get all the keys which have been used for padding.
padding_keys = {key for d in lengths for key in d.keys()}
for padding_key in padding_keys:
padding_lengths[padding_key] = max(x.get(padding_key, 0) for x in lengths)
return padding_lengths
@overrides
def as_tensor(self, padding_lengths: Dict[str, int]) -> Dict[str, torch.Tensor]:
tensors = {}
desired_num_entities = padding_lengths["num_entities"]
desired_num_entity_tokens = padding_lengths["num_entity_tokens"]
desired_num_utterance_tokens = padding_lengths["num_utterance_tokens"]
for indexer_name, indexer in self._token_indexers.items():
padded_entities = util.pad_sequence_to_length(
self._indexed_entity_texts[indexer_name],
desired_num_entities,
default_value=lambda: [],
)
padded_tensors = []
for padded_entity in padded_entities:
padded_tensor = indexer.as_padded_tensor(
{"key": padded_entity}, {"key": desired_num_entity_tokens}, padding_lengths
)["key"]
padded_tensors.append(padded_tensor)
tensor = torch.stack(padded_tensors)
tensors[indexer_name] = tensor
padded_linking_features = util.pad_sequence_to_length(
self.linking_features, desired_num_entities, default_value=lambda: []
)
padded_linking_arrays = []
def default_feature_value():
return [0.0] * len(self._feature_extractors)
for linking_features in padded_linking_features:
padded_features = util.pad_sequence_to_length(
linking_features, desired_num_utterance_tokens, default_value=default_feature_value
)
padded_linking_arrays.append(padded_features)
linking_features_tensor = torch.FloatTensor(padded_linking_arrays)
return {"text": tensors, "linking": linking_features_tensor}
def _compute_linking_features(self) -> List[List[List[float]]]:
linking_features = []
for entity, entity_text in zip(self.knowledge_graph.entities, self.entity_texts):
entity_features = []
for token_index, token in enumerate(self.utterance_tokens):
token_features = []
for feature_extractor in self._feature_extractors:
token_features.append(
feature_extractor(
entity, entity_text, token, token_index, self.utterance_tokens
)
)
entity_features.append(token_features)
linking_features.append(entity_features)
return linking_features
@overrides
def empty_field(self) -> "KnowledgeGraphField":
return KnowledgeGraphField(KnowledgeGraph(set(), {}), [], self._token_indexers)
@overrides
def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
batched_text = nn_util.batch_tensor_dicts(
tensor["text"] for tensor in tensor_list # type: ignore
)
batched_linking = torch.stack([tensor["linking"] for tensor in tensor_list])
return {"text": batched_text, "linking": batched_linking}
# Below here we have feature extractor functions. To keep a consistent API for easy logic
# above, some of these functions have unused arguments.
# These feature extractors are generally pretty specific to the logical form language and
# problem setting in WikiTableQuestions. This whole notion of feature extraction should
# eventually be made more general (or just removed, if we can replace it with CNN features...).
# For the feature functions used in the original parser written in PNP, see here:
# https://github.com/allenai/pnp/blob/wikitables2/src/main/scala/org/allenai/wikitables/SemanticParserFeatureGenerator.scala
# One notable difference between how the features work here and how they worked in PNP is that
# we're using the table text when computing string matches, while PNP used the _entity name_.
# It turns out that the entity name is derived from the table text, so this should be roughly
# equivalent, except in the case of some numbers. If there are cells with different text that
# normalize to the same name, you could get `_2` or similar appended to the name, so the way we
# do it here should just be better. But it's a possible minor source of variation from the
# original parser.
# Another difference between these features and the PNP features is that the span overlap used
# a weighting scheme to downweight matches on frequent words (like "the"), and the lemma
# overlap feature value was calculated a little differently. I'm guessing that doesn't make a
# huge difference...
def _number_token_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
# PNP had a "spanFeatures" function that said whether an entity was a-priori known to link
# to a token or set of tokens in the question. This was only used for numbers, and it's
# not totally clear to me how this number feature overlapped with the token match features
# in the original implementation (I think in most cases it was the same, except for things
# like "four million", because the token match is derived from the entity name, which would
# be 4000000, and wouldn't match "four million").
#
# Our implementation basically just adds a duplicate token match feature that's specific to
# numbers. It'll break in some rare cases (e.g., "Which four had four million ..."), but
# those shouldn't be a big deal.
if ":" in entity:
# This check works because numbers are the only entities that don't contain ":". All
# others in both WikiTables languages do (e.g.: fb:row.row.column_name,
# date_column:year, string:usl_a_league etc.).
return 0.0
return self._contains_exact_token_match(entity, entity_text, token, token_index, tokens)
def _exact_token_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
if len(entity_text) != 1:
return 0.0
return self._contains_exact_token_match(entity, entity_text, token, token_index, tokens)
def _contains_exact_token_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
if token.text in self._entity_text_exact_text[entity]:
return 1.0
return 0.0
def _lemma_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
if len(entity_text) != 1:
return 0.0
return self._contains_lemma_match(entity, entity_text, token, token_index, tokens)
def _contains_lemma_match(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
if token.text in self._entity_text_exact_text[entity]:
return 1.0
if token.lemma_ in self._entity_text_lemmas[entity]:
return 1.0
return 0.0
def _edit_distance(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
edit_distance = float(editdistance.eval(" ".join(e.text for e in entity_text), token.text))
return 1.0 - edit_distance / len(token.text)
def _related_column(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
# Check if the entity is a column name in one of the two WikiTables languages.
if not entity.startswith("fb:row.row") and "_column:" not in entity:
return 0.0
for neighbor in self.knowledge_graph.neighbors[entity]:
if token.text in self._entity_text_exact_text[neighbor]:
return 1.0
return 0.0
def _related_column_lemma(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
# Check if the entity is a column name in one of the two WikiTables languages.
if not entity.startswith("fb:row.row") and "_column:" not in entity:
return 0.0
for neighbor in self.knowledge_graph.neighbors[entity]:
if token.text in self._entity_text_exact_text[neighbor]:
return 1.0
if token.lemma_ in self._entity_text_lemmas[neighbor]:
return 1.0
return 0.0
def _span_overlap_fraction(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
entity_words = set(entity_token.text for entity_token in entity_text)
if not entity_words:
# Some tables have empty cells.
return 0
seen_entity_words = set()
token_index_left = token_index
while token_index < len(tokens) and tokens[token_index].text in entity_words:
seen_entity_words.add(tokens[token_index].text)
token_index += 1
while token_index_left >= 0 and tokens[token_index_left].text in entity_words:
seen_entity_words.add(tokens[token_index_left].text)
token_index_left -= 1
return len(seen_entity_words) / len(entity_words)
def _span_lemma_overlap_fraction(
self,
entity: str,
entity_text: List[Token],
token: Token,
token_index: int,
tokens: List[Token],
) -> float:
entity_lemmas = set(entity_token.lemma_ for entity_token in entity_text)
if not entity_lemmas:
# Some tables have empty cells.
return 0
seen_entity_lemmas = set()
token_index_left = token_index
while token_index < len(tokens) and tokens[token_index].lemma_ in entity_lemmas:
seen_entity_lemmas.add(tokens[token_index].lemma_)
token_index += 1
while token_index_left >= 0 and tokens[token_index_left].lemma_ in entity_lemmas:
seen_entity_lemmas.add(tokens[token_index_left].lemma_)
token_index_left -= 1
return len(seen_entity_lemmas) / len(entity_lemmas)
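# --- Added illustrative sketch (not from the original file): a standalone, string-only
# --- re-implementation of the span-overlap idea behind ``_span_overlap_fraction`` above,
# --- making the left/right expansion around ``token_index`` explicit. The inputs are
# --- hypothetical plain strings rather than spaCy tokens.
def _toy_span_overlap(entity_words, utterance, token_index):
    entity_words = set(entity_words)
    if not entity_words:
        return 0.0
    seen = set()
    right, left = token_index, token_index
    # expand rightwards while utterance tokens keep matching entity words
    while right < len(utterance) and utterance[right] in entity_words:
        seen.add(utterance[right])
        right += 1
    # expand leftwards symmetrically (revisiting the anchor token is harmless)
    while left >= 0 and utterance[left] in entity_words:
        seen.add(utterance[left])
        left -= 1
    return len(seen) / len(entity_words)
# e.g. _toy_span_overlap(["new", "york"], ["visit", "new", "york", "city"], 1) == 1.0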
| [
"torch.FloatTensor",
"torch.stack"
] | 1.2.0 | entslscheia/allennlp | eeba62e34c8e211ed5963f830528c957f178607b |
0.4 | import torch
import torch.nn as nn
from torch.optim import Adam
from torch.utils.data import DataLoader
from ..model import BERTLM, BERT
from .optim_schedule import ScheduledOptim
import tqdm
import numpy
def hook(arr, l, iNo):
def trace(module, input, output):
if iNo:
cid = input[0].get_device()
arr[l][cid].append(input[0].cpu().detach().numpy())
print(input[0].shape)
else:
cid = output[0].get_device()
arr[l][cid].append(output[0].cpu().detach().numpy())
print(output[0].shape)
return trace
class BERTTrainer:
"""
    BERTTrainer pretrains a BERT model with two LM training methods:
1. Masked Language Model : 3.3.1 Task #1: Masked LM
2. Next Sentence prediction : 3.3.2 Task #2: Next Sentence Prediction
    Please check the details in README.md with a simple example.
"""
def __init__(self, bert: BERT, vocab_size: int,
train_dataloader: DataLoader, test_dataloader: DataLoader = None,
lr: float = 1e-4, betas=(0.9, 0.999), weight_decay: float = 0.01, warmup_steps=10000,
with_cuda: bool = True, cuda_devices=None, log_freq: int = 10):
"""
:param bert: BERT model which you want to train
:param vocab_size: total word vocab size
:param train_dataloader: train dataset data loader
:param test_dataloader: test dataset data loader [can be None]
:param lr: learning rate of optimizer
:param betas: Adam optimizer betas
:param weight_decay: Adam optimizer weight decay param
        :param with_cuda: training with cuda
:param log_freq: logging frequency of the batch iteration
"""
# Setup cuda device for BERT training, argument -c, --cuda should be true
cuda_condition = torch.cuda.is_available() and with_cuda
self.device = torch.device("cuda:0" if cuda_condition else "cpu")
# This BERT model will be saved every epoch
self.bert = bert
# Initialize the BERT Language Model, with BERT model
self.model = BERTLM(bert, vocab_size).to(self.device)
# Distributed GPU training if CUDA can detect more than 1 GPU
if with_cuda and torch.cuda.device_count() > 1:
print("Using %d GPUS for BERT" % torch.cuda.device_count())
self.model = nn.DataParallel(self.model, device_ids=cuda_devices)
# Setting the train and test data loader
self.train_data = train_dataloader
self.test_data = test_dataloader
# Setting the Adam optimizer with hyper-param
self.optim = Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
self.optim_schedule = ScheduledOptim(self.optim, self.bert.hidden, n_warmup_steps=warmup_steps)
# Using Negative Log Likelihood Loss function for predicting the masked_token
self.criterion = nn.NLLLoss(ignore_index=0)
self.log_freq = log_freq
print("Total Parameters:", sum([p.nelement() for p in self.model.parameters()]))
def train(self, epoch, output_path):
self.iteration(epoch, self.train_data, output_path)
def test(self, epoch):
self.iteration(epoch, self.test_data, None, train=False)
def iteration(self, epoch, data_loader, output_path, train=True):
"""
loop over the data_loader for training or testing
        if in train mode, the backward operation is activated
        and the model is also auto-saved every epoch
:param epoch: current epoch index
:param data_loader: torch.utils.data.DataLoader for iteration
        :param train: boolean value indicating train or test
:return: None
"""
str_code = "train" if train else "test"
# Setting the tqdm progress bar
data_iter = tqdm.tqdm(enumerate(data_loader),
desc="EP_%s:%d" % (str_code, epoch),
total=len(data_loader),
bar_format="{l_bar}{r_bar}")
avg_loss = 0.0
total_correct = 0
total_element = 0
if output_path:
handles = []
ls = range(len(self.bert.transformer_blocks))
cs = range(torch.cuda.device_count())
arrs = [[[[] for c in cs] for l in ls],
[[[] for c in cs] for l in ls]]
for l, layer in enumerate(self.bert.transformer_blocks):
handles.append(layer.register_forward_hook(hook(arrs[0], l, True)))
handles.append(layer.register_full_backward_hook(hook(arrs[1], l, True)))
# handles.append(layer.register_forward_hook(hook(arrs[0], False)))
for i, data in data_iter:
if output_path and (i == 10):
for handle in handles:
handle.remove()
arr = numpy.array(arrs)
print("[TRACE]: " + str(arr.shape))
with open(output_path + ("_ep%d.trace" % epoch), "wb") as no:
numpy.save(no, arr)
# 0. batch_data will be sent into the device(GPU or cpu)
data = {key: value.to(self.device) for key, value in data.items()}
# 1. forward the next_sentence_prediction and masked_lm model
next_sent_output, mask_lm_output = self.model.forward(data["bert_input"], data["segment_label"])
# 2-1. NLL(negative log likelihood) loss of is_next classification result
next_loss = self.criterion(next_sent_output, data["is_next"])
# 2-2. NLLLoss of predicting masked token word
mask_loss = self.criterion(mask_lm_output.transpose(1, 2), data["bert_label"])
# 2-3. Adding next_loss and mask_loss : 3.4 Pre-training Procedure
loss = next_loss + mask_loss
# 3. backward and optimization only in train
if train:
self.optim_schedule.zero_grad()
loss.backward()
self.optim_schedule.step_and_update_lr()
# next sentence prediction accuracy
correct = next_sent_output.argmax(dim=-1).eq(data["is_next"]).sum().item()
avg_loss += loss.item()
total_correct += correct
total_element += data["is_next"].nelement()
post_fix = {
"epoch": epoch,
"iter": i,
"avg_loss": avg_loss / (i + 1),
"avg_acc": total_correct / total_element * 100,
"loss": loss.item()
}
if i % self.log_freq == 0:
data_iter.write(str(post_fix))
print("EP%d_%s, avg_loss=" % (epoch, str_code), avg_loss / len(data_iter), "total_acc=",
total_correct * 100.0 / total_element)
def save(self, epoch, file_path="output/bert_trained.model"):
"""
Saving the current BERT model on file_path
:param epoch: current epoch number
        :param file_path: model output path, which will be file_path + ".ep%d" % epoch
:return: final_output_path
"""
output_path = file_path + ".ep%d" % epoch
torch.save(self.bert.state_dict(), output_path)
print("EP:%d Model Saved on:" % epoch, output_path)
return output_path
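# --- Added illustrative sketch (hypothetical shapes, not part of the original trainer): how
# --- the masked-LM term above reduces to NLLLoss over masked positions only, with label 0
# --- (= ignore_index) marking positions that were not masked.
def _toy_masked_lm_loss():
    vocab_size, seq_len = 10, 6
    log_probs = torch.randn(1, seq_len, vocab_size).log_softmax(-1)  # stand-in for mask_lm_output
    labels = torch.randint(1, vocab_size, (1, seq_len))              # original token ids at masked spots
    labels[0, [0, 2, 5]] = 0                                         # unmasked positions are ignored
    return nn.NLLLoss(ignore_index=0)(log_probs.transpose(1, 2), labels)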
| [
"torch.nn.NLLLoss",
"torch.device",
"torch.cuda.device_count",
"torch.cuda.is_available",
"torch.nn.DataParallel"
] | 0.4.0 | zinechant/BERT-pytorch | 7c8bc555f29ff7ba336b38f2eddd072d7910e2bd |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from functools import partial
import numpy as np
import pytest
from sklearn.metrics import auc as _sk_auc
from torch import tensor
from tests.helpers import seed_all
from tests.helpers.testers import NUM_BATCHES, MetricTester
from torchmetrics.classification.auc import AUC
from torchmetrics.functional import auc
seed_all(42)
def sk_auc(x, y, reorder=False):
x = x.flatten()
y = y.flatten()
if reorder:
idx = np.argsort(x, kind='stable')
x = x[idx]
y = y[idx]
return _sk_auc(x, y)
Input = namedtuple('Input', ["x", "y"])
_examples = []
# generate already ordered samples, sorted in both directions
for batch_size in (8, 4049):
for i in range(4):
x = np.random.rand((NUM_BATCHES * batch_size))
y = np.random.rand((NUM_BATCHES * batch_size))
idx = np.argsort(x, kind='stable')
x = x[idx] if i % 2 == 0 else x[idx[::-1]]
        y = y[idx] if i % 2 == 0 else y[idx[::-1]]
x = x.reshape(NUM_BATCHES, batch_size)
y = y.reshape(NUM_BATCHES, batch_size)
_examples.append(Input(x=tensor(x), y=tensor(y)))
@pytest.mark.parametrize("x, y", _examples)
class TestAUC(MetricTester):
@pytest.mark.parametrize("ddp", [False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
def test_auc(self, x, y, ddp, dist_sync_on_step):
self.run_class_metric_test(
ddp=ddp,
preds=x,
target=y,
metric_class=AUC,
sk_metric=sk_auc,
dist_sync_on_step=dist_sync_on_step,
)
@pytest.mark.parametrize("reorder", [True, False])
def test_auc_functional(self, x, y, reorder):
self.run_functional_metric_test(
x, y, metric_functional=auc, sk_metric=partial(sk_auc, reorder=reorder), metric_args={"reorder": reorder}
)
@pytest.mark.parametrize(['x', 'y', 'expected'], [
pytest.param([0, 1], [0, 1], 0.5),
pytest.param([1, 0], [0, 1], 0.5),
pytest.param([1, 0, 0], [0, 1, 1], 0.5),
pytest.param([0, 1], [1, 1], 1),
pytest.param([0, 0.5, 1], [0, 0.5, 1], 0.5),
])
def test_auc(x, y, expected):
# Test Area Under Curve (AUC) computation
assert auc(tensor(x), tensor(y), reorder=True) == expected
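# --- Added illustrative helper (not part of the original tests): the expected values above
# --- come from the trapezoidal rule, which is also what sklearn's ``auc`` computes.
def _toy_trapezoid_auc(x, y):
    area = 0.0
    for i in range(1, len(x)):
        area += (x[i] - x[i - 1]) * (y[i] + y[i - 1]) / 2.0
    return area
# e.g. _toy_trapezoid_auc([0, 0.5, 1], [0, 0.5, 1]) == 0.5, matching the last parametrized case.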
| [
"torch.tensor"
] | 1.3.1 | hlin09/metrics | cceced613f4323a1f5124099a969f2cf32a80d7e |
1.9 | from torch import nn
from tsl.nn.utils import utils
class Dense(nn.Module):
r"""
A simple fully-connected layer.
Args:
input_size (int): Size of the input.
output_size (int): Size of the output.
activation (str, optional): Activation function.
dropout (float, optional): Dropout rate.
bias (bool, optional): Whether to use a bias.
"""
def __init__(self, input_size, output_size, activation='linear', dropout=0., bias=True):
super(Dense, self).__init__()
self.layer = nn.Sequential(
nn.Linear(input_size, output_size, bias=bias),
utils.get_layer_activation(activation)(),
nn.Dropout(dropout) if dropout > 0. else nn.Identity()
)
def forward(self, x):
return self.layer(x)
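# --- Added usage sketch (hypothetical sizes, not part of the original module; 'relu' is
# --- assumed to be a supported activation name): the layer is just Linear -> activation ->
# --- optional Dropout applied to the trailing feature dimension.
if __name__ == '__main__':
    import torch
    layer = Dense(input_size=16, output_size=32, activation='relu', dropout=0.1)
    out = layer(torch.randn(8, 16))  # -> shape (8, 32)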
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Identity"
] | 1.9 | TorchSpatiotemporal/tsl | da13493b0cf83826bf41fe78a67e8d4ce1d7a8a0 |
1.7 | #!/usr/bin/env python
from aparse import click
from typing import List
from viewformer.utils import SplitIndices
from viewformer.data import transform_dataset
# Use memory growth for tf
try:
import tensorflow as tf
gpus = tf.config.list_physical_devices('GPU')
if gpus:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except ImportError:
pass
class LatentCodeTransformer:
def _convert_image_type(self, image):
if image.dtype == 'uint8':
image = (image.astype('float32') / 255.) * 2. - 1.
if image.shape[-1] == 3:
image = image.transpose((0, 3, 1, 2))
return image
def update_dataset_info(self, dataset_info):
dataset_info['token_image_size'] = self.image_size // self.model.config.stride
self.dataset_info = dataset_info
return dataset_info
def __init__(self, model, batch_size: int = None, device=None):
if device is not None:
model = model.to(device)
self.model = model
self.image_size = model.config.image_size
self.batch_size = batch_size if batch_size is not None else model.config.batch_size
self.device = device
def output_features(self, features):
if features is not None and 'cameras-gqn' in features:
return ['codes', 'cameras-gqn']
else:
return ['codes', 'cameras']
def __call__(self, split, dataset):
import torch
import webdataset as wds
with torch.no_grad():
dataset = wds.filters.map_(dataset, lambda x: (torch.from_numpy(x['cameras']), torch.from_numpy(self._convert_image_type(x['frames'])), [len(x['frames'])] * len(x['frames'])))
dataset = wds.filters.unbatched_(dataset)
dataset = wds.filters.batched_(dataset, self.batch_size)
past_cameras = None
past_codes = None
def update_cummulative_variable(past, value, sequence_sizes):
sequence_sizes = list(sequence_sizes)
output = []
if past is not None:
value = torch.cat([past, value], 0)
sequence_sizes = ([sequence_sizes[0]] * len(past)) + sequence_sizes
while len(sequence_sizes) > 0 and len(value) >= sequence_sizes[0]:
output.append(value[:sequence_sizes[0]])
value = value[sequence_sizes[0]:]
sequence_sizes = sequence_sizes[sequence_sizes[0]:]
past = value
return past, output
if hasattr(self.model, 'encode'):
predict_step = lambda x: self.model.encode(x.to(self.device))[-1].detach().cpu()
else:
predict_step = lambda x: self.model(x.to(self.device))[-1].detach().cpu()
for batch_id, (cameras, frames, sequence_sizes) in enumerate(dataset):
codes = predict_step(frames)
past_codes, codes = update_cummulative_variable(past_codes, codes, sequence_sizes)
past_cameras, cameras = update_cummulative_variable(past_cameras, cameras, sequence_sizes)
for cur_cameras, cur_codes in zip(cameras, codes):
yield dict(cameras=cur_cameras, codes=cur_codes)
@click.command('generate-codes')
def main(dataset: str, output: str, model: str,
shards: SplitIndices = None,
batch_size: int = None,
splits: List[str] = None,
profile_batch_id: int = None, use_gpu: bool = True):
import torch
from viewformer.utils.torch import load_model
device = 'cpu' if not use_gpu or torch.cuda.device_count() == 0 else 'cuda'
device = torch.device(device)
model = load_model(model)
transformer = LatentCodeTransformer(model, batch_size=batch_size, device=device)
transform_dataset(dataset, output, transformer,
splits=splits,
shards=shards)
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.cat",
"torch.no_grad",
"torch.cuda.device_count",
"torch.from_numpy"
] | 1.7.1 | jkulhanek/viewformer | 9ad2c5a2f7abe4b7ff490ced0132bf3d2f07e29c |
1.10 | import math
import numpy as np
import torch
import torchvision
import wandb
from torch.nn import functional as F
from torch import LongTensor
from lambo import transforms as gfp_transforms, dataset as gfp_dataset
from lambo.models.shared_elements import check_early_stopping
from lambo.utils import str_to_tokens
def sample_tokens(base_tokens, logit_batch, enc_tokenizer, replacement=False, temp=1.):
logit_batch /= temp
# don't sample special tokens
non_viable_idxs = np.array(enc_tokenizer.special_idxs)[None, None, :]
np.put_along_axis(logit_batch, non_viable_idxs, -1e10, axis=-1)
if not replacement and base_tokens is not None:
# don't sample the original tokens
base_tokens = base_tokens.numpy().astype(int)[..., None]
np.put_along_axis(logit_batch, base_tokens, -1e10, axis=-1)
# sample tokens
token_samples = torch.distributions.Categorical(logits=logit_batch).sample()
# calculate entropy
entropy = -(
F.softmax(logit_batch, dim=-1) * F.log_softmax(logit_batch, dim=-1)
).sum(-1)
return token_samples, entropy
def sample_mask(
token_batch: LongTensor,
tokenizer,
mask_ratio: float = 0.125,
mask_size=None
):
"""
Args:
token_batch: (batch_size, num_tokens)
tokenizer: only necessary to avoid masking special tokens
mask_ratio: proportion of tokens to mask
mask_size: (optional) override mask_ratio with a specific mask size
Returns:
mask_idxs: (batch_size, mask_size) np.ndarray of position indexes to mask
"""
if mask_size is None:
mask_size = math.ceil(token_batch.shape[-1] * mask_ratio)
special_idxs = torch.tensor(tokenizer.special_idxs).view(-1, 1, 1)
is_non_special = token_batch.ne(special_idxs).prod(dim=0).float()
mask_weights = is_non_special / is_non_special.sum(dim=-1, keepdims=True)
mask_idxs = torch.multinomial(mask_weights, mask_size, replacement=False)
return mask_idxs.numpy()
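# --- Added toy stand-in (an assumption, not the project's real tokenizer): only the
# --- ``special_idxs`` attribute is needed by ``sample_mask`` above.
class _ToyTokenizer:
    special_idxs = [0, 1]
# e.g. sample_mask(torch.tensor([[0, 5, 6, 7, 1]]), _ToyTokenizer(), mask_ratio=0.4) returns
# two maskable positions drawn from {1, 2, 3}; the special ids 0 and 1 are never selected.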
def evaluate_windows(base_seqs, encoder, mask_size, replacement=True, encoder_obj='mlm'):
window_mask_idxs = {}
window_entropy = {}
window_features = {}
for idx, seq in enumerate(base_seqs):
window_mask_idxs[idx] = []
window_entropy[idx] = []
window_features[idx] = []
# avoids evaluating windows corresponding to padding tokens
tokens = str_to_tokens(np.array([seq]), encoder.tokenizer)
# assert torch.all(tokens.ne(encoder.tokenizer.padding_idx)) # SELFIES no-op token may trigger
mask_size = min(mask_size, tokens.shape[-1] - 2)
offset = np.random.randint(1, mask_size + 1)
for mask_start in range(offset, tokens.shape[-1] - 1, mask_size):
if mask_start + mask_size < tokens.shape[-1] - 1:
mask_idxs = np.arange(mask_start, mask_start + mask_size).reshape(1, -1)
else:
mask_stop = tokens.shape[-1] - 1
mask_idxs = np.arange(mask_stop - mask_size, mask_stop).reshape(1, -1)
with torch.no_grad():
masked_inputs = tokens.clone().to(encoder.device)
np.put_along_axis(masked_inputs, mask_idxs, encoder.tokenizer.masking_idx, axis=1)
tgt_tok_logits, tgt_mask = encoder.logits_from_tokens(masked_inputs)
if encoder_obj == 'mlm':
_, logit_entropy = sample_tokens(
tokens, tgt_tok_logits, encoder.tokenizer, replacement
)
logit_entropy = np.take_along_axis(logit_entropy, mask_idxs, axis=1)
elif encoder_obj == 'lanmt':
tgt_tok_idxs, logit_entropy = encoder.sample_tgt_tok_idxs(
tgt_tok_logits, tgt_mask, temp=1.
)
else:
raise ValueError
window_mask_idxs[idx].append(mask_idxs.copy())
window_entropy[idx].append(logit_entropy.mean().item())
return window_mask_idxs, window_entropy
def mlm_train_step(model, optimizer, token_batch, mask_ratio, loss_scale=1.):
optimizer.zero_grad(set_to_none=True)
# replace random tokens with mask token
mask_idxs = sample_mask(token_batch, model.tokenizer, mask_ratio)
masked_token_batch = token_batch.clone().to(model.device)
np.put_along_axis(masked_token_batch, mask_idxs, model.tokenizer.masking_idx, axis=1)
# get predicted logits for masked tokens
logits, _ = model.logits_from_tokens(masked_token_batch)
vocab_size = logits.shape[-1]
masked_logits = np.take_along_axis(logits, mask_idxs[..., None], axis=1).view(-1, vocab_size)
# use the ground-truth tokens as labels
masked_tokens = np.take_along_axis(token_batch, mask_idxs, axis=1)
masked_tokens = masked_tokens.view(-1).to(model.device)
loss = loss_scale * F.cross_entropy(masked_logits, masked_tokens)
loss.backward()
optimizer.step()
return loss, masked_logits, masked_tokens
def mlm_train_epoch(model, optimizer, train_loader, mask_ratio):
metrics = dict(
train_loss=0.,
train_perplexity=0.,
)
model.train()
for minibatch in train_loader:
if isinstance(minibatch, tuple):
token_batch = minibatch[0]
else:
assert torch.is_tensor(minibatch)
token_batch = minibatch
loss, masked_logits, masked_tokens = mlm_train_step(model, optimizer, token_batch, mask_ratio)
# logging
log_prob = F.log_softmax(masked_logits, dim=-1)
log_prob = np.take_along_axis(log_prob, masked_tokens.cpu().numpy()[..., None], axis=1)
metrics['train_perplexity'] += 2 ** (
-(log_prob / math.log(2)).mean().detach()
) / len(train_loader)
metrics['train_loss'] += loss.detach() / len(train_loader)
metrics = {key: val.item() for key, val in metrics.items()}
return metrics
def mlm_eval_epoch(model, eval_loader, mask_ratio, split):
metrics = dict(
perplexity=0.,
)
model.eval()
for minibatch in eval_loader:
if isinstance(minibatch, tuple):
token_batch = minibatch[0]
else:
assert torch.is_tensor(minibatch)
token_batch = minibatch
# replace random tokens with mask token
mask_idxs = sample_mask(token_batch, model.tokenizer, mask_ratio)
masked_token_batch = token_batch.clone().to(model.device)
np.put_along_axis(masked_token_batch, mask_idxs, model.tokenizer.masking_idx, axis=1)
# get predicted logits for masked tokens
logits, _ = model.logits_from_tokens(masked_token_batch)
vocab_size = logits.shape[-1]
masked_logits = np.take_along_axis(logits, mask_idxs[..., None], axis=1).view(-1, vocab_size)
# use the ground-truth tokens as labels
masked_tokens = np.take_along_axis(token_batch, mask_idxs, axis=1)
masked_tokens = masked_tokens.view(-1).to(model.device)
# logging
log_prob = F.log_softmax(masked_logits, dim=-1)
log_prob = np.take_along_axis(log_prob, masked_tokens.cpu().numpy()[..., None], axis=1)
metrics['perplexity'] += 2 ** (
-(log_prob / math.log(2)).mean().detach()
) / len(eval_loader)
metrics = {key: val.item() for key, val in metrics.items()}
metrics = {f'{split}_{key}': val for key, val in metrics.items()}
return metrics
def fit_masked_language_model(model, train_seqs, num_epochs, batch_size, lr, patience, mask_ratio, max_shift,
weights=None, log_prefix=''):
# random translation data augmentation, apply tokenizer
train_transform = []
if max_shift > 0:
train_transform.append(gfp_transforms.SequenceTranslation(max_shift))
train_transform.append(gfp_transforms.StringToLongTensor(model.tokenizer))
train_transform = torchvision.transforms.Compose(train_transform)
# make dataset, dataloader
train_dataset = gfp_dataset.TransformTensorDataset([train_seqs], train_transform)
if weights is None:
loader_kwargs = dict(batch_size=batch_size, shuffle=True)
else:
sampler = torch.utils.data.WeightedRandomSampler(weights, batch_size, replacement=True)
batch_sampler = torch.utils.data.BatchSampler(sampler, batch_size=batch_size, drop_last=False)
loader_kwargs = dict(batch_sampler=batch_sampler)
train_loader = torch.utils.data.DataLoader(
train_dataset, collate_fn=gfp_transforms.padding_collate_fn, **loader_kwargs
)
optimizer = torch.optim.Adam(model.param_groups(lr))
lr_sched = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, patience=math.ceil(patience / 2)
)
records = []
best_score, best_epoch, best_weights = None, 0, None
model.requires_grad_(True)
for epoch in range(num_epochs):
metrics = {}
metrics.update(
mlm_train_epoch(model, optimizer, train_loader, mask_ratio)
)
# use avg. train loss as convergence crit.
lr_sched.step(metrics['train_loss'])
best_score, best_epoch, best_weights, stop = check_early_stopping(
model,
best_score,
best_epoch,
best_weights,
metrics['train_loss'],
epoch + 1,
patience,
save_weights=True,
)
# logging
metrics.update(dict(best_score=best_score, best_epoch=best_epoch))
if len(log_prefix) > 0:
metrics = {'/'.join((log_prefix, key)): val for key, val in metrics.items()}
try:
wandb.log(metrics)
except:
pass
records.append(metrics)
if stop:
break
model.load_state_dict(best_weights)
model.requires_grad_(False)
return records
| [
"torch.distributions.Categorical",
"torch.nn.functional.softmax",
"torch.is_tensor",
"torch.no_grad",
"torch.nn.functional.log_softmax",
"torch.multinomial",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.utils.data.BatchSampler",
"torch.tensor",
"torch.utils.data.WeightedRandomSampler"
] | 1.10.0 | samuelstanton/lambo | 7b67684b884f75f7007501978c5299514d0efb75 |
1.1 | import pandas as pd
from zipfile import ZipFile
import torch as th
import cv2
import numpy as np
import os
from glob import glob
import pydicom
from matplotlib import pyplot as plt
from segmentation_model import FPNSegmentation
def main():
train_image_fns = sorted(glob(os.path.join(
'dicom-images-train', '*/*/*.dcm')))
m = {os.path.basename(fn): fn for fn in train_image_fns}
ref_file = 'Model_000_f00/f00-PREDS_VAL.zip'
slug = 'r50d'
weight = 'Model_000_f00/[email protected]'
model = FPNSegmentation(slug)
model.load_state_dict(th.load(weight))
model = model.cuda()
model.eval()
with ZipFile(ref_file) as f:
for fn in f.namelist()[::10]:
path = m[fn.replace('.png', '.dcm')]
img = pydicom.read_file(path).pixel_array
# pimg = cv2.resize(img, (640, 640), interpolation=cv2.INTER_CUBIC)
pimg = img.copy()
X = th.from_numpy(pimg).unsqueeze(0).unsqueeze(0)
with th.no_grad():
X = X.cuda().float()
y_pred = model(X).cpu().numpy().squeeze()
y_pred_flip = th.flip(model(th.flip(X, (-1, ))),
(-1, )).cpu().numpy().squeeze()
y_pred = 0.5 * (y_pred_flip + y_pred)
y_pred = (y_pred * 255).astype(np.uint8)
with f.open(fn) as h:
pred = cv2.imdecode(np.frombuffer(h.read(), 'uint8'), 0)
diff = y_pred != pred
print("DIFF: ", diff.sum())
plt.subplot(2, 2, 1)
plt.imshow(img)
plt.subplot(2, 2, 2)
plt.imshow(y_pred)
plt.subplot(2, 2, 3)
plt.imshow(pred)
plt.subplot(2, 2, 4)
plt.imshow(diff)
plt.show()
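# --- Added sketch (hypothetical helper, not part of the original script): the horizontal-flip
# --- test-time augmentation performed inline in the loop above, factored out for clarity.
def _toy_flip_tta(model, X):
    y = model(X)
    y_flip = th.flip(model(th.flip(X, (-1,))), (-1,))
    return 0.5 * (y + y_flip)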
if __name__ == '__main__':
main()
| [
"torch.no_grad",
"torch.from_numpy",
"torch.load",
"torch.flip"
] | 1.1.0 | arc144/siim-pneumothorax | 98fdb1fe08e9c001e0191d5024ba6c56ec82a9c8 |
1.7 | import torch
from scipy.spatial.distance import cosine
from transformers import BertModel, BertTokenizer
import os
class SentenceSimilarity:
def __init__(self, model_path='bert-base-uncased'):
self.tokenizer = BertTokenizer.from_pretrained(model_path)
self.model = BertModel.from_pretrained(model_path)
self.model.eval()
self.device = torch.device('cuda:0')
self.model = self.model.to(self.device)
def text_to_tensor(self, text):
text = text.strip().lower()
tokens = self.tokenizer.tokenize(text)
tokens_ids = self.tokenizer.convert_tokens_to_ids(tokens)
tokens_ids = self.tokenizer.build_inputs_with_special_tokens(tokens_ids)
tokens_tensor = torch.tensor([tokens_ids])
return tokens_tensor
def get_embedding(self, sent):
tokens_tensor = self.text_to_tensor(sent)
tokens_tensor = tokens_tensor.to(self.device)
with torch.no_grad():
output = self.model(tokens_tensor)[0]
embedding = output[0].mean(dim=0).cpu().numpy()
return embedding
def similarity(self, emb1, emb2):
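        # note: scipy's ``cosine`` is a distance (1 - cosine similarity); the __main__ block
        # below converts it back to a similarity via ``1 - self.similarity(...)``.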
return cosine(emb1, emb2)
if __name__ == '__main__':
ss = SentenceSimilarity()
s1 = 'I am a girl'
s2 = 'I am a boy'
s3 = 'Thank you'
print("1")
e1 = ss.get_embedding(s1)
print(type(e1))
e2 = ss.get_embedding(s2)
e3 = ss.get_embedding(s3)
print("2")
print(1 - ss.similarity(e1, e2))
print(1 - ss.similarity(e1, e3))
print("3")
| [
"torch.device",
"torch.no_grad",
"torch.tensor"
] | 1.7.1 | lydia07/mdsearch | a328e822d6d66869aeefef687887b0a39d4f4512 |
1.2 | import torch
import torch.nn as nn
from ..layers.convolutions import Convolutional, Separable_Conv_dila, Separable_Conv, Deformable_Convolutional
import torch.nn.functional as F
from ..layers.attention_blocks import SELayer
class SPP(nn.Module):
def __init__(self, depth=512):
super(SPP,self).__init__()
self.__maxpool5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
self.__maxpool9 = nn.MaxPool2d(kernel_size=9, stride=1, padding=4)
self.__maxpool13 = nn.MaxPool2d(kernel_size=13, stride=1, padding=6)
self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)
def forward(self, x):
maxpool5 = self.__maxpool5(x)
maxpool9 = self.__maxpool9(x)
maxpool13 = self.__maxpool13(x)
cat_maxpool = torch.cat([x, maxpool5, maxpool9, maxpool13], dim=1)
SPP = self.__outconv(cat_maxpool)
return SPP
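# --- Added sanity sketch (hypothetical input size, not part of the original module): every
# --- pooling branch above uses stride 1 with padding (k - 1) // 2, so the spatial size is
# --- preserved and the four branches can be concatenated channel-wise before the 1x1 conv.
def _toy_spp_shape_check():
    x = torch.rand(1, 512, 19, 19)
    assert SPP(depth=512)(x).shape == (1, 512, 19, 19)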
class SPP_rec(nn.Module):
def __init__(self, depth=512):
super(SPP_rec,self).__init__()
self.__maxpool5 = nn.MaxPool2d(kernel_size=5, stride=1, padding=2)
self.__maxpool9 = nn.MaxPool2d(kernel_size=9, stride=1, padding=4)
self.__maxpool13 = nn.MaxPool2d(kernel_size=13, stride=1, padding=6)
self.__maxpool5w = nn.MaxPool2d(kernel_size=(5,1), stride=1, padding=(2,0))
self.__maxpool5h = nn.MaxPool2d(kernel_size=(1,5), stride=1, padding=(0,2))
self.__maxpool9w = nn.MaxPool2d(kernel_size=(9,3), stride=1, padding=(4,1))
self.__maxpool9h = nn.MaxPool2d(kernel_size=(3,9), stride=1, padding=(1,4))
self.__maxpool13w = nn.MaxPool2d(kernel_size=(13,5), stride=1, padding=(6,2))
self.__maxpool13h = nn.MaxPool2d(kernel_size=(5,13), stride=1, padding=(2,6))
self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)
def forward(self, x):
maxpool5 = self.__maxpool5(x) + self.__maxpool5h(x) + self.__maxpool5w(x)
maxpool9 = self.__maxpool9(x) + self.__maxpool9h(x) + self.__maxpool9w(x)
maxpool13 = self.__maxpool13(x) + self.__maxpool13h(x) + self.__maxpool13w(x)
cat_maxpool = torch.cat([x, maxpool5, maxpool9, maxpool13], dim=1)
SPP_rec = self.__outconv(cat_maxpool)
return SPP_rec
class ASPP_se(nn.Module):
def __init__(self, in_channel=1280, depth=512):
super(ASPP_se,self).__init__()
self.__dilaconv1 = nn.Conv2d(in_channel, depth, 1, 1)
self.__dilaconv5 = nn.Conv2d(in_channel, depth, 3, 1, padding=2, dilation=2)
self.__dilaconv9 = nn.Conv2d(in_channel, depth, 3, 1, padding=4, dilation=4)
self.__dilaconv13 = nn.Conv2d(in_channel, depth, 3, 1, padding=6, dilation=6)
self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)
self.__se = SELayer(depth)
def forward(self, x):
dilaconv1 = self.__dilaconv1(x)
dilaconv5 = self.__dilaconv5(x)
dilaconv9 = self.__dilaconv9(x)
dilaconv13 = self.__dilaconv13(x)
cat_dilaconv = torch.cat([dilaconv1, dilaconv5, dilaconv9, dilaconv13], dim=1)
ASPP_se = self.__se(self.__outconv(cat_dilaconv))
return ASPP_se
class ASPP(nn.Module):
def __init__(self, in_channel=1280, depth=512):
super(ASPP,self).__init__()
self.__dilaconv1 = nn.Conv2d(in_channel, depth, 1, 1)
self.__dilaconv5 = nn.Conv2d(in_channel, depth, 3, 1, padding=2, dilation=2)
self.__dilaconv9 = nn.Conv2d(in_channel, depth, 3, 1, padding=4, dilation=4)
self.__dilaconv13 = nn.Conv2d(in_channel, depth, 3, 1, padding=6, dilation=6)
self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)
def forward(self, x):
dilaconv1 = self.__dilaconv1(x)
dilaconv5 = self.__dilaconv5(x)
dilaconv9 = self.__dilaconv9(x)
dilaconv13 = self.__dilaconv13(x)
cat_dilaconv = torch.cat([dilaconv1, dilaconv5, dilaconv9, dilaconv13], dim=1)
ASPP = self.__outconv(cat_dilaconv)
return ASPP
class Sparable_ASPP(nn.Module):
def __init__(self, in_channel=1280, depth=512):
super(Sparable_ASPP,self).__init__()
self.__dilaconv1 = nn.Conv2d(in_channel, depth, 1, 1)
self.__dilaconv5 = Separable_Conv_dila(in_channel, depth, 1, pad=2, dila=2)
self.__dilaconv9 = Separable_Conv_dila(in_channel, depth, 1, pad=4, dila=4)
self.__dilaconv13 = Separable_Conv_dila(in_channel, depth, 1, pad=6, dila=6)
self.__outconv = nn.Conv2d(depth * 4, depth, 1, 1)
def forward(self, x):
dilaconv1 = self.__dilaconv1(x)
dilaconv5 = self.__dilaconv5(x)
dilaconv9 = self.__dilaconv9(x)
dilaconv13 = self.__dilaconv13(x)
cat_dilaconv = torch.cat([dilaconv1, dilaconv5, dilaconv9, dilaconv13], dim=1)
ASPP = self.__outconv(cat_dilaconv)
return ASPP
class Sparable_ASPP_se(nn.Module):
def __init__(self, in_channel=1024, depth=512):
super(Sparable_ASPP_se,self).__init__()
self.__dilaconv1 = Separable_Conv(in_channel, depth, 1)
self.__dilaconv5 = Separable_Conv_dila(depth, depth, 1, pad=2, dila=2)
self.__dilaconv9 = Separable_Conv_dila(depth, depth//2, 1, pad=4, dila=4)
self.__dilaconv13 = Separable_Conv_dila(depth, depth//2, 1, pad=6, dila=6)
self.__outconv = nn.Conv2d(depth * 3, depth, 1, 1)
#self.__outconv = Convolutional(filters_in=depth * 3, filters_out=depth, kernel_size=1, stride=1, pad=0, norm='bn', activate='leaky')
self.__se = SELayer(depth)
def forward(self, x):
dilaconv1 = self.__dilaconv1(x)
dilaconv5 = self.__dilaconv5(dilaconv1)
dilaconv9 = self.__dilaconv9(dilaconv1)
dilaconv13 = self.__dilaconv13(dilaconv1)
cat_dilaconv = torch.cat([dilaconv1, dilaconv5, dilaconv9, dilaconv13], dim=1)
ASPP_se = self.__se(self.__outconv(cat_dilaconv))
#ASPP_se = self.__outconv(cat_dilaconv)
return ASPP_se
class ASFF(nn.Module):
def __init__(self, level, vis=False):
super(ASFF, self).__init__()
self.level = level
self.dim = [512,256,128]
self.inter_dim = self.dim[self.level]
if level == 0:
self.stride_level_1 = Convolutional(256, self.inter_dim, 3, 2, pad=1, norm='bn', activate='relu6')
self.stride_level_2 = Convolutional(128, self.inter_dim, 3, 2, pad=1, norm='bn', activate='relu6')
self.expand = Convolutional(self.inter_dim, 1024, 3, 1, pad=1, norm='bn', activate='relu6')
elif level == 1:
self.compress_level_0 = Convolutional(512, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')
self.stride_level_2 = Convolutional(128, self.inter_dim, 3, 2, pad=1, norm='bn', activate='relu6')
self.expand = Convolutional(self.inter_dim, 512, 3, 1, pad=1, norm='bn', activate='relu6')
elif level == 2:
self.compress_level_0 = Convolutional(512, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')
self.compress_level_1 = Convolutional(256, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')
self.expand = Convolutional(self.inter_dim, 256, 3, 1, pad=1, norm='bn', activate='relu6')
compress_c = 16
self.weight_level_0 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')
self.weight_level_1 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')
self.weight_level_2 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')
self.weight_levels = nn.Conv2d(compress_c * 3, 3, kernel_size=1, stride=1, padding=0)
self.vis = vis
def forward(self, x_level_0, x_level_1, x_level_2):
if self.level == 0:
level_0_resized = x_level_0
level_1_resized = self.stride_level_1(x_level_1)
level_2_downsampled_inter = F.max_pool2d(x_level_2, 3, stride=2, padding=1)
level_2_resized = self.stride_level_2(level_2_downsampled_inter)
elif self.level == 1:
level_0_compressed = self.compress_level_0(x_level_0)
level_0_resized = F.interpolate(level_0_compressed, scale_factor=2, mode='nearest')
level_1_resized = x_level_1
level_2_resized = self.stride_level_2(x_level_2)
elif self.level == 2:
level_0_compressed = self.compress_level_0(x_level_0)
level_0_resized = F.interpolate(level_0_compressed, scale_factor=4, mode='nearest')
level_1_compressed = self.compress_level_1(x_level_1)
level_1_resized = F.interpolate(level_1_compressed, scale_factor=2, mode='nearest')
level_2_resized = x_level_2
level_0_weight_v = self.weight_level_0(level_0_resized)
level_1_weight_v = self.weight_level_1(level_1_resized)
level_2_weight_v = self.weight_level_2(level_2_resized)
levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
levels_weight = self.weight_levels(levels_weight_v)
levels_weight = F.softmax(levels_weight, dim=1)
fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
level_1_resized * levels_weight[:, 1:2, :, :] + \
level_2_resized * levels_weight[:, 2:, :, :]
out = self.expand(fused_out_reduced)
if self.vis:
return out, levels_weight, fused_out_reduced.sum(dim=1)
else:
return out
class ASFF_Mobile(nn.Module):
def __init__(self, level, vis=False):
super(ASFF_Mobile, self).__init__()
self.level = level
self.dim = [512,256,128]
self.inter_dim = self.dim[self.level]
if level == 0:
self.stride_level_1 = Separable_Conv(256, self.inter_dim, 2)
self.stride_level_2 = Separable_Conv(128, self.inter_dim, 2)
self.expand = Separable_Conv(self.inter_dim, 1024, 1)
elif level == 1:
self.compress_level_0 = Convolutional(512, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')
self.stride_level_2 = Separable_Conv(128, self.inter_dim, 2)
self.expand = Separable_Conv(self.inter_dim, 512, 1)
elif level == 2:
self.compress_level_0 = Convolutional(512, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')
self.compress_level_1 = Convolutional(256, self.inter_dim, 1, 1, pad=0, norm='bn', activate='relu6')
self.expand = Separable_Conv(self.inter_dim, 256, 1)
compress_c = 16
self.weight_level_0 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')
self.weight_level_1 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')
self.weight_level_2 = Convolutional(self.inter_dim, compress_c, 1, 1, pad=0, norm='bn', activate='relu6')
self.weight_levels = nn.Conv2d(compress_c * 3, 3, kernel_size=1, stride=1, padding=0)
self.vis = vis
def forward(self, x_level_0, x_level_1, x_level_2):
if self.level == 0:
level_0_resized = x_level_0
level_1_resized = self.stride_level_1(x_level_1)
level_2_downsampled_inter = F.max_pool2d(x_level_2, 3, stride=2, padding=1)
level_2_resized = self.stride_level_2(level_2_downsampled_inter)
elif self.level == 1:
level_0_compressed = self.compress_level_0(x_level_0)
level_0_resized = F.interpolate(level_0_compressed, scale_factor=2, mode='nearest')
level_1_resized = x_level_1
level_2_resized = self.stride_level_2(x_level_2)
elif self.level == 2:
level_0_compressed = self.compress_level_0(x_level_0)
level_0_resized = F.interpolate(level_0_compressed, scale_factor=4, mode='nearest')
level_1_compressed = self.compress_level_1(x_level_1)
level_1_resized = F.interpolate(level_1_compressed, scale_factor=2, mode='nearest')
level_2_resized = x_level_2
level_0_weight_v = self.weight_level_0(level_0_resized)
level_1_weight_v = self.weight_level_1(level_1_resized)
level_2_weight_v = self.weight_level_2(level_2_resized)
levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
levels_weight = self.weight_levels(levels_weight_v)
levels_weight = F.softmax(levels_weight, dim=1)
fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
level_1_resized * levels_weight[:, 1:2, :, :] + \
level_2_resized * levels_weight[:, 2:, :, :]
out = self.expand(fused_out_reduced)
if self.vis:
return out, levels_weight, fused_out_reduced.sum(dim=1)
else:
return out
class FeatureAdaption(nn.Module):
def __init__(self, in_ch, out_ch, n_anchors):
super(FeatureAdaption, self).__init__()
self.sep=False
self.conv_offset = nn.Conv2d(in_channels=2*n_anchors, out_channels=2*9*n_anchors, groups = n_anchors, kernel_size=1,stride=1,padding=0)
self.dconv = Deformable_Convolutional(filters_in=in_ch, filters_out=out_ch, kernel_size=3, stride=1, pad=1, groups=n_anchors)
def forward(self, input, wh_pred):
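        # Offsets for the deformable conv are predicted from the detached w/h predictions,
        # so gradients do not flow back through the shape-prediction branch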
wh_pred_new = wh_pred.detach()
offset = self.conv_offset(wh_pred_new)
out = self.dconv(input, offset)
return out
class Features_Fusion(nn.Module):
def __init__(self, in_channels, out_channels, r=16):
super(Features_Fusion,self).__init__()
self.out_channels = out_channels
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_fc1 = Convolutional(in_channels, in_channels // r, kernel_size=1, stride=1, pad=0, norm='bn', activate='leaky')
self.conv_fc2 = nn.Conv2d(in_channels // r, out_channels * 2, kernel_size=1, padding=0, bias=False)
self.softmax = nn.Softmax(dim=2)
def forward(self, x1, x2):
batch_size = x1.size(0)
        x_mix = torch.add(x1, x2)  # element-wise sum of the two inputs gives the mixed feature U
        x_avg = self.avg_pool(x_mix)
        x_fcout = self.conv_fc2(self.conv_fc1(x_avg))  # squeeze then expand channels; first half of the output is a, second half is b
        x_reshape = x_fcout.reshape(batch_size, self.out_channels, 2, -1)  # reshape into the outputs of the two fc branches
        x_softmax = self.softmax(x_reshape)  # softmax over the two fc branches at each position
        w1 = x_softmax[:, :, 0:1, :]  # split the tensor into two weight blocks along the branch dimension
        w2 = x_softmax[:, :, 1:2, :]
        out = x1 * w1 + x2 * w2  # element-wise sum of the two re-weighted features
return out | [
"torch.cat",
"torch.nn.Softmax",
"torch.nn.MaxPool2d",
"torch.nn.functional.interpolate",
"torch.add",
"torch.nn.Conv2d",
"torch.nn.functional.softmax",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.functional.max_pool2d"
] | 1.2.0 | Shank2358/NPMMR-Det | 869f3f537af9bc656f2bfdfa97ebb95bf70847a7 |
0.4 | import argparse
from random import choice
import featureflow as ff
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch.optim import Adam
import zounds
from zounds.learn import Conv1d, ConvTranspose1d, to_var, from_var
from zounds.timeseries import categorical, inverse_categorical
samplerate = zounds.SR11025()
BaseModel = zounds.resampled(resample_to=samplerate, store_resampled=True)
window_size = 8192
wscheme = zounds.SampleRate(
frequency=samplerate.frequency * (window_size // 2),
duration=samplerate.frequency * window_size)
@zounds.simple_lmdb_settings('ae', map_size=1e10, user_supplied_id=True)
class Sound(BaseModel):
windowed = zounds.ArrayWithUnitsFeature(
zounds.SlidingWindow,
wscheme=wscheme,
needs=BaseModel.resampled)
mu_law = zounds.ArrayWithUnitsFeature(
zounds.mu_law,
needs=windowed)
categorical = zounds.ArrayWithUnitsFeature(
categorical,
needs=windowed)
# TODO: Factor out the part of the pipeline that starts with samples and
# shuffled
@zounds.simple_settings
class AutoEncoderPipeline(ff.BaseModel):
samples = ff.PickleFeature(ff.IteratorNode)
shuffled = ff.PickleFeature(
zounds.ShuffledSamples,
nsamples=int(1e5),
dtype=np.float32,
needs=samples)
scaled = ff.PickleFeature(
zounds.InstanceScaling,
needs=shuffled)
autoencoder = ff.PickleFeature(
zounds.PyTorchAutoEncoder,
trainer=ff.Var('trainer'),
needs=scaled)
pipeline = ff.PickleFeature(
zounds.PreprocessingPipeline,
needs=(scaled, autoencoder,),
store=True)
@zounds.simple_settings
class CategoricalAutoEncoderPipeline(ff.BaseModel):
samples = ff.PickleFeature(ff.IteratorNode)
shuffled = ff.PickleFeature(
zounds.ShuffledSamples,
nsamples=int(1e5),
dtype=np.float32,
needs=samples)
autoencoder = ff.PickleFeature(
zounds.PyTorchAutoEncoder,
trainer=ff.Var('trainer'),
needs=shuffled)
pipeline = ff.PickleFeature(
zounds.PreprocessingPipeline,
needs=(autoencoder,),
store=True)
class EncoderLayer(Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(EncoderLayer, self).__init__(
in_channels, out_channels, kernel_size, stride, padding)
class DecoderLayer(ConvTranspose1d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dropout=True,
activation=lambda x: F.leaky_relu(x, 0.2)):
super(DecoderLayer, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
activation,
dropout)
class Encoder(nn.Module):
def __init__(self, in_channels):
super(Encoder, self).__init__()
self.in_channels = in_channels
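        # Strides 8*4*4*4*4 = 2048 shrink the 8192-sample window to length 4, and the final
        # kernel-4 / stride-1 conv collapses it to a single 512-dimensional latent vector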
self.main = nn.Sequential(
EncoderLayer(in_channels, 64, 16, 8, 4),
EncoderLayer(64, 128, 8, 4, 2),
EncoderLayer(128, 128, 8, 4, 2),
EncoderLayer(128, 128, 8, 4, 2),
EncoderLayer(128, 256, 8, 4, 2),
EncoderLayer(256, 512, 4, 1, 0))
def forward(self, x):
x = x.view(-1, self.in_channels, window_size)
return self.main(x).view(-1, 512)
class Decoder(nn.Module):
def __init__(self, out_channels, output_activation):
super(Decoder, self).__init__()
act = output_activation
self.out_channels = out_channels
self.main = nn.Sequential(
DecoderLayer(512, 256, 4, 1, 0),
DecoderLayer(256, 128, 8, 4, 2),
DecoderLayer(128, 128, 8, 4, 2),
DecoderLayer(128, 128, 8, 4, 2),
DecoderLayer(128, 64, 8, 4, 2),
DecoderLayer(
64, self.out_channels, 16, 8, 4, dropout=False, activation=act))
def forward(self, x):
x = x.view(-1, 512, 1)
x = self.main(x)
x = x.view(-1, self.out_channels, window_size)
x = x.squeeze()
return x
class AutoEncoder(nn.Module):
def __init__(self, channels, output_activation):
super(AutoEncoder, self).__init__()
self.encoder = Encoder(channels)
self.decoder = Decoder(channels, output_activation)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
class RawSamplesAutoEncoder(AutoEncoder):
def __init__(self):
super(RawSamplesAutoEncoder, self).__init__(
channels=1, output_activation=F.tanh)
class CategoricalAutoEncoder(AutoEncoder):
def __init__(self):
super(CategoricalAutoEncoder, self).__init__(
channels=256, output_activation=F.log_softmax)
def raw_samples_synthesize(x):
# TODO: it should be possible to apply windowing at the synthesis step
synth = zounds.WindowedAudioSynthesizer()
return synth.synthesize(x)
def categorical_synthesize(x):
samples = inverse_categorical(x.reshape(-1, 8192, 256))
samples = zounds.ArrayWithUnits(samples, dimensions=[
zounds.TimeDimension(*wscheme),
zounds.TimeDimension(*samplerate)
])
return raw_samples_synthesize(samples)
def preprocess_categorical(x):
return categorical(x).reshape((-1, 256, 8192))
class CategoricalLoss(nn.NLLLoss):
def __init__(self):
super(CategoricalLoss, self).__init__()
def forward(self, input, target):
input = input.view(-1, 256)
target = target.view(-1, 256)
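        # targets are one-hot over the 256 mu-law bins; NLLLoss expects class indices,
        # so recover them with an argmax over the class dimension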
values, indices = target.max(dim=1)
return super(CategoricalLoss, self).forward(input, indices)
class FrequencyBandLoss(nn.MSELoss):
def __init__(self):
super(FrequencyBandLoss, self).__init__()
def forward(self, input, target):
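        # build the regression target by zeroing the lowest 50 rFFT bins of the (detached)
        # target samples, so the loss emphasizes higher-frequency content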
target_samples = from_var(target).squeeze()
target_fft = np.fft.rfft(target_samples, axis=-1, norm='ortho')
target_fft[:, :50] = 0
recon = np.fft.irfft(target_fft, axis=-1, norm='ortho')
recon = to_var(recon)
return super(FrequencyBandLoss, self).forward(input, recon)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--internet-archive-id',
type=str,
help='the internet archive id to use for training')
parser.add_argument(
'--epochs',
type=int,
help='the number of epochs to train the network')
parser.add_argument(
'--force-train',
action='store_true',
help='re-train the network, even if it has already been trained')
parser.add_argument(
'--categorical',
action='store_true',
help='use a categorical distribution of samples')
args = parser.parse_args()
if args.internet_archive_id:
zounds.ingest(
zounds.InternetArchive(args.internet_archive_id),
Sound,
multi_threaded=True)
if args.categorical:
network = CategoricalAutoEncoder()
loss = CategoricalLoss()
synthesize = categorical_synthesize
pipeline_cls = CategoricalAutoEncoderPipeline
data_preprocessor = label_preprocessor = preprocess_categorical
batch_size = 16
else:
network = RawSamplesAutoEncoder()
loss = FrequencyBandLoss()
synthesize = raw_samples_synthesize
pipeline_cls = AutoEncoderPipeline
data_preprocessor = label_preprocessor = lambda x: x
batch_size = 64
gen = (snd.windowed for snd in Sound
if args.internet_archive_id in snd._id)
if args.force_train or not AutoEncoderPipeline.exists():
trainer = zounds.SupervisedTrainer(
network,
loss,
lambda model: Adam(model.parameters(), lr=0.0001),
epochs=args.epochs,
batch_size=batch_size,
holdout_percent=0.25,
data_preprocessor=data_preprocessor,
label_preprocessor=label_preprocessor)
gen = (snd.windowed for snd in Sound
if args.internet_archive_id in snd._id)
pipeline_cls.process(samples=gen, trainer=trainer)
# instantiate the trained pipeline
pipeline = pipeline_cls()
snds = [snd for snd in Sound if args.internet_archive_id in snd._id]
snd = choice(snds)
time_slice = zounds.TimeSlice(duration=zounds.Seconds(10))
encoded = pipeline.pipeline.transform(
data_preprocessor(snd.windowed[time_slice]))
recon = encoded.inverse_transform()
samples = synthesize(recon)
# start up an in-browser REPL to interact with the results
app = zounds.ZoundsApp(
model=Sound,
audio_feature=Sound.ogg,
visualization_feature=Sound.windowed,
globals=globals(),
locals=locals())
app.start(8888)
| [
"torch.nn.functional.leaky_relu"
] | 0.4.1 | FelixAbrahamsson/zounds | 197c358acf3bea4252cfc2561da70cbe799e2c75 |
1.7 | import numpy as np
import os
import json
from PIL import Image
import pickle
import streamlit as st
from streamlit.hashing import _CodeHasher
from streamlit.report_thread import get_report_ctx
from streamlit.server.server import Server
import sys
import urllib
import torch
import random
import biggan
from torchvision.utils import make_grid
from io import BytesIO
import base64
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
def main():
first_run = not os.path.exists('state.json')
state = {}
st.title("Microstructure GAN demo")
"""This is a demonstration of conditional image generation of micrographs using [BigGAN-deep architecture](https://arxiv.org/abs/1809.11096)
    The images are generated using three conditional inputs: Annealing Temperature, Annealing Time, and the type of cooling used.
    The GAN is trained using [Omni Loss](https://arxiv.org/abs/2011.13074) on [UHCSDB](http://uhcsdb.materials.cmu.edu/) images"""
st.sidebar.title('Processing Conditions',)
state['anneal_temp'] = st.sidebar.selectbox('Annealing Temperature °C',[700,750,800,900,970,1000,1100])
state['anneal_time'] = st.sidebar.selectbox('Annealing Time (M: Minutes, H: Hours)',['5M','90M','1H','3H','8H','24H','48H','85H'])
state['cooling'] = st.sidebar.selectbox('Cooling Type',['Quench','Furnace Cool','Air Cool','650C-1H'])
temp_dict = {970: 0, 800: 1, 900: 2, 1100: 3, 1000: 4, 700: 5, 750: 6}
time_dict = {'90M': 0, '24H': 1, '3H': 2, '5M': 3, '8H': 4, '85H': 5, '1H': 6, '48H': 7}
cool_dict = {'Quench': 0, 'Air Cool': 1, 'Furnace Cool': 2, '650C-1H': 3}
model = load_gan()
st.sidebar.subheader('Generate a new latent Vector')
state['seed'] = 7
if st.sidebar.button('New z'):
state['seed'] = random.randint(0,1000)
rng = np.random.RandomState(state['seed'])
noise = torch.tensor(rng.normal(0, 1, (1, 384))).float()
state['noise'] = noise.numpy()
y_temp = temp_dict[state['anneal_temp']]
y_time = time_dict[state['anneal_time']]
y_cool = cool_dict[state['cooling']]
state['image_out'] = generate_img(model, noise, y_temp, y_time, y_cool)
st.subheader('Generated Microstructure for the given processing conditions')
st.text("")
st.text(f"Random seed: {state['seed']}")
st.image(np.array(state['image_out']), use_column_width=False)
save_bool = st.button('Save Image')
if save_bool:
with open('state.json', 'r') as fp:
state_old = json.load(fp)
st.text(f"The following image was saved. It was generated using a random seed: {state_old['seed']}")
st.image(np.array(state_old['image_out']), use_column_width=False)
if not os.path.exists('Generated Images'):
os.makedirs('Generated Images')
im = Image.fromarray((np.array(state_old['image_out']).reshape(256,256) * 255).astype(np.uint8))
im.save(f"./Generated Images/{state_old['anneal_temp']}-{state_old['anneal_time']}-{state_old['cooling']}-{state_old['seed']}.png")
state['save_bool'] = save_bool
with open('state.json', 'w') as fp:
json.dump(state, fp, cls=NumpyEncoder)
@st.cache(suppress_st_warning=True)
def load_gan():
model = biggan.Generator()
model.load_state_dict(torch.load('BigGAN-deep.pth', map_location=torch.device('cpu')))
return model
@st.cache(suppress_st_warning=True)
def generate_img(model,noise, y_temp, y_time, y_cool):
y_temp = torch.tensor([y_temp])
y_time = torch.tensor([y_time])
y_cool = torch.tensor([y_cool])
with torch.no_grad():
synthetic = model(noise, y_temp, y_time, y_cool)[0]
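        # map the generator output from [-1, 1] to [0, 1] for display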
synthetic = 0.5 * synthetic + 0.5
#synthetic = make_grid(synthetic, normalize=True)
return np.transpose(synthetic.numpy() ,(1,2,0))
main() | [
"torch.device",
"torch.no_grad",
"torch.tensor"
] | 1.7.0 | Praveenstein/bigGanMicro | d669874c0226907fa41b2140cdc8c46bdef2a283 |
1.9 | import argparse
from collections import defaultdict
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer
from tqdm import tqdm
from util.util_funcs import load_jsonl
model = T5ForConditionalGeneration.from_pretrained("t5-small")
tokenizer = T5Tokenizer.from_pretrained("t5-small")
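# Map the textual MNLI labels emitted by T5 to the FEVER veracity labels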
MNLI_TO_FEVER_MAP = {
"▁entailment": "SUPPORTS",
"▁neutral": "NOT ENOUGH INFO",
"▁contradiction": "REFUTES",
}
stats = defaultdict(int)
def predict_veracity(claim, evidence):
# task = "rte"
task = "mnli"
if task == "mnli":
input_str = "{} premise: {} hypothesis: {}".format(task, evidence, claim)
if task == "rte":
input_str = "{} sentence1: {} sentence2: {}".format(task, claim, evidence)
input_ids = tokenizer(input_str, return_tensors="pt").input_ids
result = model.generate(input_ids)
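    # T5 emits the label as text; squeeze the batch dimension and decode the ids back to tokens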
result = torch.squeeze(result)
target = tokenizer.convert_ids_to_tokens(result, skip_special_tokens=True)
return target
def get_veracity_label(claim, evidence):
predicted_label = predict_veracity(claim, evidence)
predicted_label = "".join(predicted_label)
if predicted_label not in MNLI_TO_FEVER_MAP.keys():
return "NOT ENOUGH INFO"
else:
return MNLI_TO_FEVER_MAP[predicted_label]
def test_model(data):
num_correct = 0
counter = 0
for d in tqdm(data):
# if counter > 200: break
claim = d["claim"]
evidence = d["evidence"]
label = d["label"]
stats["nr_of_{}_samples".format(label)] += 1
predicted_label = predict_veracity(claim, evidence)
predicted_label = "".join(predicted_label)
if predicted_label not in MNLI_TO_FEVER_MAP.keys():
            # Assume that any invalid predicted label means not enough information
if label == "NOT ENOUGH INFO":
stats["nr_of_correct_{}_samples".format(label)] += 1
num_correct += 1
else:
if label == MNLI_TO_FEVER_MAP[predicted_label]:
stats["nr_of_correct_{}_samples".format(label)] += 1
num_correct += 1
counter += 1
accuracy = num_correct / counter
print("Accuracy for {} samples: {}".format(len(data), accuracy))
print()
print("========== STATS ============")
for label in MNLI_TO_FEVER_MAP.values():
print(
"Nr of {} samples: {}".format(
label, stats["nr_of_{}_samples".format(label)]
)
)
print(
"Nr of correct {} samples: {}".format(
label, stats["nr_of_correct_{}_samples".format(label)]
)
)
if stats["nr_of_{}_samples".format(label)] > 0:
amount_correct = (
stats["nr_of_correct_{}_samples".format(label)]
/ stats["nr_of_{}_samples".format(label)]
)
else:
amount_correct = 1.0
print("Amount of correct {} samples: {}".format(label, amount_correct))
print()
print("=============================")
def main():
parser = argparse.ArgumentParser(
description="Extracts the text from the feverous db and creates a corpus"
)
parser.add_argument(
"--data_path",
default=None,
type=str,
help="Path to the file containing the training data",
)
args = parser.parse_args()
if not args.data_path:
raise RuntimeError("Invalid train data path")
data = load_jsonl(args.data_path)
test_model(data)
if __name__ == "__main__":
main()
| [
"torch.squeeze"
] | 1.9.0 | Martin36/FEVER2021_SharedTask | 4dd49e0ddf2909a93d44dab22eae988a067fc355 |
1.9 | import os, sys
import torch
import argparse
import shutil
import pandas as pd
from tqdm import tqdm
from transformers import TapasTokenizer
from data_processing.create_tapas_tables import create_tables
from collections import defaultdict
from util.util_funcs import load_jsonl, get_tables_from_docs, store_jsonl
DIR_PATH = os.path.abspath(os.getcwd())
FEVEROUS_PATH = DIR_PATH + "/FEVEROUS/src"
sys.path.insert(0, FEVEROUS_PATH)
from database.feverous_db import FeverousDB
from utils.wiki_page import WikiPage
stats = defaultdict(int)
def predict(model, tokenizer, data_path, device):
data = pd.read_csv(data_path)
cell_classification_threshold = 0.1
claim_to_cell_id_map = defaultdict(list)
with torch.no_grad():
for idx, item in tqdm(data.iterrows()):
table = pd.read_csv(item.table_file).astype(str)
try:
batch = tokenizer(
table=table,
queries=item.question,
truncation=True,
answer_coordinates=[],
answer_text=[],
padding="max_length",
return_tensors="pt",
)
batch = {key: val for key, val in batch.items()}
if torch.gt(batch["numeric_values"], 1e20).any():
stats["tables_with_too_large_numbers"] += 1
continue
batch["float_answer"] = torch.tensor(0.0)
except:
e = sys.exc_info()[0]
stats["tokenizing_errors"] += 1
continue
input_ids = batch["input_ids"].to(device)
attention_mask = batch["attention_mask"].to(device)
token_type_ids = batch["token_type_ids"].to(device)
labels = batch["labels"].to(device)
numeric_values = batch["numeric_values"].to(device)
numeric_values_scale = batch["numeric_values_scale"].to(device)
float_answer = batch["float_answer"].to(device)
float_answer = torch.reshape(float_answer, (1, 1))
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=labels,
numeric_values=numeric_values,
numeric_values_scale=numeric_values_scale,
float_answer=float_answer,
)
logits = outputs.logits.cpu()
logits_agg = outputs.logits_aggregation.cpu()
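            # map token-level logits back to (row, column) cell coordinates, keeping only
            # cells scored above the classification threshold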
output_labels = tokenizer.convert_logits_to_predictions(
batch,
logits,
logits_agg,
cell_classification_threshold=cell_classification_threshold,
)
output_cells = output_labels[0][0]
# Keep only the top 5 cells,
# assuming that they are ordered by score
for output_cell in output_cells[:6]:
table_id_split = item.table_id.split("_")
page_name = table_id_split[0]
table_id = table_id_split[1]
# Example format: 'Algebraic logic_cell_0_9_1'
cell_id = "{}_cell_{}_{}_{}".format(
page_name, table_id, output_cell[0], output_cell[1]
)
claim_to_cell_id_map[item.question].append(cell_id)
return claim_to_cell_id_map
def main():
parser = argparse.ArgumentParser(
description="Retrieves the top tables cells from the top tables"
)
parser.add_argument(
"--db_path", default=None, type=str, help="Path to the FEVEROUS database"
)
parser.add_argument(
"--data_file",
default=None,
type=str,
help="Path to the csv file containing the evaluation examples",
)
parser.add_argument(
"--model_file",
default=None,
type=str,
help="Path to the trained veracity prediction model",
)
parser.add_argument(
"--tapas_model_name",
default="google/tapas-tiny",
type=str,
help="Name of the pretrained tapas model",
)
parser.add_argument(
"--batch_size",
default=1,
type=int,
help="The size of each training batch. Reduce this is you run out of memory",
)
parser.add_argument(
"--out_dir",
default=None,
type=str,
help="Path to the csv file containing the evaluation examples",
)
parser.add_argument(
"--out_file",
default=None,
type=str,
help="Path to the csv file containing the evaluation examples",
)
args = parser.parse_args()
if not args.db_path:
raise RuntimeError("Invalid database path")
if ".db" not in args.db_path:
raise RuntimeError("The database path should include the name of the db file")
if not args.data_file:
raise RuntimeError("Invalid in file path")
if ".jsonl" not in args.data_file:
raise RuntimeError(
"The train csv path should include the name of the .csv file"
)
if not args.model_file:
raise RuntimeError("Invalid model path")
if ".pth" not in args.model_file:
raise RuntimeError("The model path should include the name of the .pth file")
if not args.out_dir:
raise RuntimeError("Invalid out file path")
if not args.out_file:
raise RuntimeError("Invalid out file path")
if ".jsonl" not in args.out_file:
raise RuntimeError(
"The train csv path should include the name of the .jsonl file"
)
db = FeverousDB(args.db_path)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = torch.load(args.model_file, map_location=device)
tokenizer = TapasTokenizer.from_pretrained(args.tapas_model_name)
model.eval()
tapas_tables_folder = args.out_dir + "torch_tables/"
tapas_tables_folder = os.path.dirname(tapas_tables_folder)
if not os.path.exists(tapas_tables_folder):
print("Output directory doesn't exist. Creating {}".format(tapas_tables_folder))
os.makedirs(tapas_tables_folder)
top_tables_data = load_jsonl(args.data_file)
results = []
tapas_input_data_list = []
batch_counter = 0
for i, d in enumerate(top_tables_data):
claim = d["claim"]
doc_names = []
for table_id in d["table_ids"]:
table_id_split = table_id.split("_")
doc_names.append(table_id_split[0])
doc_names = set(doc_names)
doc_tables_dict = get_tables_from_docs(db, doc_names)
top_tables = d["table_ids"]
# First we need to convert the table data to the correct format
filtered_tables = []
ordered_table_ids = []
for doc_name, table_dicts in doc_tables_dict.items():
for j, table_dict in enumerate(table_dicts):
table_id = "{}_{}".format(doc_name, j)
if table_id in top_tables:
filtered_tables.append(table_dict)
ordered_table_ids.append(table_id)
tapas_input_data = {
"id": i, # This is actually useless
"claim": claim,
"label": "",
"has_tables": len(top_tables) > 0,
"table_dicts": filtered_tables,
"table_ids": ordered_table_ids,
"evidence": [],
}
tapas_input_data_list.append(tapas_input_data)
if len(tapas_input_data_list) == args.batch_size:
batch_counter += 1
print("=======================================")
print(
"predicting for batch: {}/{}".format(
batch_counter, int(len(top_tables_data) / args.batch_size)
)
)
print("=======================================")
tapas_data_file = create_tables(
tapas_input_data_list,
args.out_dir,
tapas_tables_folder + "/",
write_to_files=True,
is_predict=True,
)
claim_to_cell_id_map = predict(model, tokenizer, tapas_data_file, device)
result_objs = [
{"claim": claim, "cell_ids": cell_ids}
for claim, cell_ids in claim_to_cell_id_map.items()
]
results += result_objs
tapas_input_data_list = []
# Remove the previously created tables
shutil.rmtree(tapas_tables_folder)
os.makedirs(tapas_tables_folder)
    # Run prediction for the last (possibly incomplete) batch
print("=======================================")
print("predicting for last batch")
print("=======================================")
tapas_data_file = create_tables(
tapas_input_data_list,
args.out_dir,
tapas_tables_folder + "/",
write_to_files=True,
is_predict=True,
)
claim_to_cell_id_map = predict(model, tokenizer, tapas_data_file, device)
result_objs = [
{"claim": claim, "cell_ids": cell_ids}
for claim, cell_ids in claim_to_cell_id_map.items()
]
results += result_objs
store_jsonl(results, args.out_file)
print("Stored top tables cells in '{}'".format(args.out_file))
if __name__ == "__main__":
main()
| [
"torch.device",
"torch.gt",
"torch.no_grad",
"torch.cuda.is_available",
"torch.tensor",
"torch.load",
"torch.reshape"
] | 1.9.0 | Martin36/FEVER2021_SharedTask | 4dd49e0ddf2909a93d44dab22eae988a067fc355 |
1.8 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import warnings
from urllib.error import ContentTooShortError, HTTPError
import numpy as np
import torch
from torch.utils.data import DataLoader
import monai
from monai.apps import download_and_extract
from monai.data import decollate_batch
from monai.metrics import ROCAUCMetric
from monai.networks import eval_mode
from monai.networks.nets import DenseNet121
from monai.transforms import (
Activations,
AddChannel,
AsDiscrete,
Compose,
LoadImage,
RandFlip,
RandRotate,
RandZoom,
ScaleIntensity,
ToTensor,
Transpose,
)
from monai.utils import set_determinism
from tests.testing_data.integration_answers import test_integration_value
from tests.utils import DistTestCase, TimedCall, skip_if_quick
TEST_DATA_URL = "https://drive.google.com/uc?id=1QsnnkvZyJPcbRoV_ArW8SnE1OTuoVbKE"
MD5_VALUE = "0bc7306e7427e00ad1c5526a6677552d"
TASK = "integration_classification_2d"
class MedNISTDataset(torch.utils.data.Dataset):
def __init__(self, image_files, labels, transforms):
self.image_files = image_files
self.labels = labels
self.transforms = transforms
def __len__(self):
return len(self.image_files)
def __getitem__(self, index):
return self.transforms(self.image_files[index]), self.labels[index]
def run_training_test(root_dir, train_x, train_y, val_x, val_y, device="cuda:0", num_workers=10):
monai.config.print_config()
# define transforms for image and classification
train_transforms = Compose(
[
LoadImage(image_only=True),
AddChannel(),
Transpose(indices=[0, 2, 1]),
ScaleIntensity(),
RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),
RandFlip(spatial_axis=0, prob=0.5),
RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),
ToTensor(),
]
)
train_transforms.set_random_state(1234)
val_transforms = Compose(
[LoadImage(image_only=True), AddChannel(), Transpose(indices=[0, 2, 1]), ScaleIntensity(), ToTensor()]
)
y_pred_trans = Compose([ToTensor(), Activations(softmax=True)])
y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=True, num_classes=len(np.unique(train_y)))])
auc_metric = ROCAUCMetric()
# create train, val data loaders
train_ds = MedNISTDataset(train_x, train_y, train_transforms)
train_loader = DataLoader(train_ds, batch_size=300, shuffle=True, num_workers=num_workers)
val_ds = MedNISTDataset(val_x, val_y, val_transforms)
val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)
model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(train_y))).to(device)
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), 1e-5)
epoch_num = 4
val_interval = 1
# start training validation
best_metric = -1
best_metric_epoch = -1
epoch_loss_values = []
metric_values = []
model_filename = os.path.join(root_dir, "best_metric_model.pth")
for epoch in range(epoch_num):
print("-" * 10)
print(f"Epoch {epoch + 1}/{epoch_num}")
model.train()
epoch_loss = 0
step = 0
for batch_data in train_loader:
step += 1
inputs, labels = batch_data[0].to(device), batch_data[1].to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = loss_function(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
epoch_loss /= step
epoch_loss_values.append(epoch_loss)
print(f"epoch {epoch + 1} average loss:{epoch_loss:0.4f}")
if (epoch + 1) % val_interval == 0:
with eval_mode(model):
y_pred = torch.tensor([], dtype=torch.float32, device=device)
y = torch.tensor([], dtype=torch.long, device=device)
for val_data in val_loader:
val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
y_pred = torch.cat([y_pred, model(val_images)], dim=0)
y = torch.cat([y, val_labels], dim=0)
# compute accuracy
acc_value = torch.eq(y_pred.argmax(dim=1), y)
acc_metric = acc_value.sum().item() / len(acc_value)
# decollate prediction and label and execute post processing
y_pred = [y_pred_trans(i) for i in decollate_batch(y_pred)]
y = [y_trans(i) for i in decollate_batch(y)]
# compute AUC
auc_metric(y_pred, y)
auc_value = auc_metric.aggregate()
auc_metric.reset()
metric_values.append(auc_value)
if auc_value > best_metric:
best_metric = auc_value
best_metric_epoch = epoch + 1
torch.save(model.state_dict(), model_filename)
print("saved new best metric model")
print(
f"current epoch {epoch +1} current AUC: {auc_value:0.4f} "
f"current accuracy: {acc_metric:0.4f} best AUC: {best_metric:0.4f} at epoch {best_metric_epoch}"
)
print(f"train completed, best_metric: {best_metric:0.4f} at epoch: {best_metric_epoch}")
return epoch_loss_values, best_metric, best_metric_epoch
def run_inference_test(root_dir, test_x, test_y, device="cuda:0", num_workers=10):
# define transforms for image and classification
val_transforms = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()])
val_ds = MedNISTDataset(test_x, test_y, val_transforms)
val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)
model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(test_y))).to(device)
model_filename = os.path.join(root_dir, "best_metric_model.pth")
model.load_state_dict(torch.load(model_filename))
y_true = []
y_pred = []
with eval_mode(model):
for test_data in val_loader:
test_images, test_labels = test_data[0].to(device), test_data[1].to(device)
pred = model(test_images).argmax(dim=1)
for i in range(len(pred)):
y_true.append(test_labels[i].item())
y_pred.append(pred[i].item())
tps = [np.sum((np.asarray(y_true) == idx) & (np.asarray(y_pred) == idx)) for idx in np.unique(test_y)]
return tps
@skip_if_quick
class IntegrationClassification2D(DistTestCase):
def setUp(self):
set_determinism(seed=0)
self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testing_data")
data_dir = os.path.join(self.data_dir, "MedNIST")
dataset_file = os.path.join(self.data_dir, "MedNIST.tar.gz")
if not os.path.exists(data_dir):
try:
download_and_extract(TEST_DATA_URL, dataset_file, self.data_dir, MD5_VALUE)
except (ContentTooShortError, HTTPError, RuntimeError) as e:
print(str(e))
if isinstance(e, RuntimeError):
# FIXME: skip MD5 check as current downloading method may fail
self.assertTrue(str(e).startswith("md5 check"))
return # skipping this test due the network connection errors
assert os.path.exists(data_dir)
class_names = sorted((x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x))))
image_files = [
[os.path.join(data_dir, class_name, x) for x in sorted(os.listdir(os.path.join(data_dir, class_name)))]
for class_name in class_names
]
image_file_list, image_classes = [], []
for i, _ in enumerate(class_names):
image_file_list.extend(image_files[i])
image_classes.extend([i] * len(image_files[i]))
# split train, val, test
valid_frac, test_frac = 0.1, 0.1
self.train_x, self.train_y = [], []
self.val_x, self.val_y = [], []
self.test_x, self.test_y = [], []
for i in range(len(image_classes)):
rann = np.random.random()
if rann < valid_frac:
self.val_x.append(image_file_list[i])
self.val_y.append(image_classes[i])
elif rann < test_frac + valid_frac:
self.test_x.append(image_file_list[i])
self.test_y.append(image_classes[i])
else:
self.train_x.append(image_file_list[i])
self.train_y.append(image_classes[i])
self.device = "cuda:0" if torch.cuda.is_available() else "cpu:0"
def tearDown(self):
set_determinism(seed=None)
try:
os.remove(os.path.join(self.data_dir, "best_metric_model.pth"))
except FileNotFoundError:
warnings.warn("not found best_metric_model.pth, training skipped?")
pass
def train_and_infer(self, idx=0):
results = []
if not os.path.exists(os.path.join(self.data_dir, "MedNIST")):
# skip test if no MedNIST dataset
return results
set_determinism(seed=0)
losses, best_metric, best_metric_epoch = run_training_test(
self.data_dir, self.train_x, self.train_y, self.val_x, self.val_y, device=self.device
)
infer_metric = run_inference_test(self.data_dir, self.test_x, self.test_y, device=self.device)
print(f"integration_classification_2d {losses}")
print("best metric", best_metric)
print("infer metric", infer_metric)
# check training properties
self.assertTrue(test_integration_value(TASK, key="losses", data=losses, rtol=1e-2))
self.assertTrue(test_integration_value(TASK, key="best_metric", data=best_metric, rtol=1e-4))
np.testing.assert_allclose(best_metric_epoch, 4)
model_file = os.path.join(self.data_dir, "best_metric_model.pth")
self.assertTrue(os.path.exists(model_file))
# check inference properties
self.assertTrue(test_integration_value(TASK, key="infer_prop", data=np.asarray(infer_metric), rtol=1))
results.extend(losses)
results.append(best_metric)
results.extend(infer_metric)
return results
def test_training(self):
repeated = []
for i in range(2):
results = self.train_and_infer(i)
repeated.append(results)
np.testing.assert_allclose(repeated[0], repeated[1])
@TimedCall(seconds=1000, skip_timing=not torch.cuda.is_available(), daemon=False)
def test_timing(self):
self.train_and_infer()
if __name__ == "__main__":
unittest.main()
| [
"torch.cat",
"torch.cuda.is_available",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.load",
"torch.nn.CrossEntropyLoss"
] | 1.8 | themantalope/MONAI | f398298b5aadc076102261a687a158f6ac17ad1c |
1.6 | import torch
import torchvision.transforms as transforms
import numpy as np
import cv2
import logging
from .model import Net
class Extractor(object):
def __init__(self, model_path, use_cuda=True):
self.net = Net(reid=True)
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['net_dict']
self.net.load_state_dict(state_dict)
logger = logging.getLogger("root.tracker")
logger.info("Loading weights from {}... Done!".format(model_path))
self.net.to(self.device)
self.size = (64, 128)
self.norm = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
def _preprocess(self, im_crops):
"""
TODO:
1. to float with scale from 0 to 1
2. resize to (64, 128) as Market1501 dataset did
3. concatenate to a numpy array
3. to torch Tensor
4. normalize
"""
def _resize(im, size):
return cv2.resize(im.astype(np.float32)/255., size)
im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()
return im_batch
def __call__(self, im_crops):
im_batch = self._preprocess(im_crops)
with torch.no_grad():
im_batch = im_batch.to(self.device)
features = self.net(im_batch)
return features.cpu().numpy()
if __name__ == '__main__':
img = cv2.imread("demo.jpg")[:,:,(2,1,0)]
extr = Extractor("checkpoint/ckpt.t7")
feature = extr(img)
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
] | 1.6.0 | ruidongjr/Aldi | 0d2dad1ab180abb59bee15d9e5e851e4de4e8cd5 |
1.3 | import torch
import torch.nn as nn
import torch.nn.functional as F
class VQALoss(nn.Module):
def __init__(self, scale, loss_type='mixed', m=None):
super(VQALoss, self).__init__()
self.loss_type = loss_type
self.scale = scale
self.m = m #
def forward(self, y_pred, y):
relative_score, mapped_score, aligned_score = y_pred
if self.loss_type == 'mixed':
loss = [loss_a(mapped_score[d], y[d]) + loss_m(relative_score[d], y[d]) +
F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y))]
elif self.loss_type == 'correlation' or self.loss_type == 'rank+plcc':
loss = [loss_a(mapped_score[d], y[d]) + loss_m(relative_score[d], y[d]) for d in range(len(y))]
elif self.loss_type == 'rank':
loss = [loss_m(relative_score[d], y[d]) for d in range(len(y))]
elif self.loss_type == 'plcc':
loss = [loss_a(mapped_score[d], y[d]) for d in range(len(y))]
elif self.loss_type == 'rank+l1':
            loss = [loss_m(relative_score[d], y[d]) + F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y))]
elif self.loss_type == 'plcc+l1':
            loss = [loss_a(relative_score[d], y[d]) + F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y))]
elif 'naive' in self.loss_type:
aligned_scores = torch.cat([(aligned_score[d]-self.m[d])/self.scale[d] for d in range(len(y))])
ys = torch.cat([(y[d]-self.m[d])/self.scale[d] for d in range(len(y))])
if self.loss_type == 'naive0':
return F.l1_loss(aligned_scores, ys) #
return loss_a(aligned_scores, ys) + loss_m(aligned_scores, ys) + F.l1_loss(aligned_scores, ys)
else: # default l1
loss = [F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y))]
# print(loss)
# sum_loss = sum([lossi for lossi in loss]) / len(loss)
# sum_loss = len(loss) / sum([1 / lossi for lossi in loss])
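        # weight each dataset's loss with a softmax over the losses, so datasets that are
        # currently harder (larger loss) contribute more to the total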
sum_loss = sum([torch.exp(lossi) * lossi for lossi in loss]) / sum([torch.exp(lossi) for lossi in loss])
return sum_loss
def loss_m(y_pred, y):
"""prediction monotonicity related loss"""
assert y_pred.size(0) > 1 #
return torch.sum(F.relu((y_pred-y_pred.t()) * torch.sign((y.t()-y)))) / y_pred.size(0) / (y_pred.size(0)-1)
def loss_a(y_pred, y):
"""prediction accuracy related loss"""
assert y_pred.size(0) > 1 #
return (1 - torch.cosine_similarity(y_pred.t() - torch.mean(y_pred), y.t() - torch.mean(y))[0]) / 2
| [
"torch.nn.functional.l1_loss",
"torch.exp",
"torch.mean"
] | 1.3.0 | lidq92/MDTVSFA | 22f49a9c1b2faec4a643c92b0f6b69297f4e4121 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging as log
import os
import pickle
from copy import deepcopy
import cloudpickle
import pytest
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
import tests.base.develop_pipelines as tpipes
import tests.base.develop_utils as tutils
from pytorch_lightning import Callback, LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint
from tests.base import BoringModel, EvalModelTemplate, GenericEvalModelTemplate, TrialMNIST
class ModelTrainerPropertyParity(Callback):
def _check_properties(self, trainer, pl_module):
assert trainer.global_step == pl_module.global_step
assert trainer.current_epoch == pl_module.current_epoch
def on_train_start(self, trainer, pl_module):
self._check_properties(trainer, pl_module)
def on_train_batch_start(self, trainer, pl_module, *args, **kwargs):
self._check_properties(trainer, pl_module)
def on_train_batch_end(self, trainer, pl_module, *args, **kwargs):
self._check_properties(trainer, pl_module)
def on_epoch_end(self, trainer, pl_module):
self._check_properties(trainer, pl_module)
def on_train_end(self, trainer, pl_module):
self._check_properties(trainer, pl_module)
@pytest.mark.parametrize("enable_pl_optimizer", [False, True])
def test_model_properties_resume_from_checkpoint(enable_pl_optimizer, tmpdir):
""" Test that properties like `current_epoch` and `global_step`
in model and trainer are always the same. """
model = EvalModelTemplate()
checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_last=True)
trainer_args = dict(
default_root_dir=tmpdir,
max_epochs=1,
logger=False,
enable_pl_optimizer=enable_pl_optimizer,
callbacks=[checkpoint_callback, ModelTrainerPropertyParity()], # this performs the assertions
)
trainer = Trainer(**trainer_args)
trainer.fit(model)
trainer_args.update(max_epochs=2)
trainer = Trainer(**trainer_args, resume_from_checkpoint=str(tmpdir / "last.ckpt"))
trainer.fit(model)
def test_try_resume_from_non_existing_checkpoint(tmpdir):
""" Test that trying to resume from non-existing `resume_from_checkpoint` fail without error."""
model = BoringModel()
checkpoint_cb = ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_last=True)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
logger=False,
callbacks=[checkpoint_cb],
limit_train_batches=0.1,
limit_val_batches=0.1,
)
# Generate checkpoint `last.ckpt` with BoringModel
trainer.fit(model)
# `True` if resume/restore successfully else `False`
assert trainer.checkpoint_connector.restore(str(tmpdir / "last.ckpt"), trainer.on_gpu)
assert not trainer.checkpoint_connector.restore(str(tmpdir / "last_non_existing.ckpt"), trainer.on_gpu)
class CaptureCallbacksBeforeTraining(Callback):
callbacks = []
def on_train_start(self, trainer, pl_module):
self.callbacks = deepcopy(trainer.callbacks)
@pytest.mark.parametrize("enable_pl_optimizer", [False, True])
def test_callbacks_state_resume_from_checkpoint(enable_pl_optimizer, tmpdir):
""" Test that resuming from a checkpoint restores callbacks that persist state. """
model = EvalModelTemplate()
callback_capture = CaptureCallbacksBeforeTraining()
def get_trainer_args():
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_last=True)
trainer_args = dict(
default_root_dir=tmpdir,
max_steps=1,
logger=False,
enable_pl_optimizer=enable_pl_optimizer,
callbacks=[
checkpoint,
callback_capture,
]
)
assert checkpoint.best_model_path == ""
assert checkpoint.best_model_score is None
return trainer_args
# initial training
trainer = Trainer(**get_trainer_args())
trainer.fit(model)
callbacks_before_resume = deepcopy(trainer.callbacks)
# resumed training
trainer = Trainer(**get_trainer_args(), resume_from_checkpoint=str(tmpdir / "last.ckpt"))
trainer.fit(model)
assert len(callbacks_before_resume) == len(callback_capture.callbacks)
for before, after in zip(callbacks_before_resume, callback_capture.callbacks):
if isinstance(before, ModelCheckpoint):
assert before.best_model_path == after.best_model_path
assert before.best_model_score == after.best_model_score
@pytest.mark.parametrize("enable_pl_optimizer", [False, True])
def test_callbacks_references_resume_from_checkpoint(enable_pl_optimizer, tmpdir):
""" Test that resuming from a checkpoint sets references as expected. """
model = EvalModelTemplate()
args = {'default_root_dir': tmpdir, 'max_steps': 1, 'logger': False, "enable_pl_optimizer": enable_pl_optimizer}
# initial training
checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_last=True)
trainer = Trainer(**args, callbacks=[checkpoint])
assert checkpoint is trainer.callbacks[0] is trainer.checkpoint_callback
trainer.fit(model)
# resumed training
new_checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor="early_stop_on", save_last=True)
# pass in a new checkpoint object, which should take
# precedence over the one in the last.ckpt file
trainer = Trainer(**args, callbacks=[new_checkpoint], resume_from_checkpoint=str(tmpdir / "last.ckpt"))
assert checkpoint is not new_checkpoint
assert new_checkpoint is trainer.callbacks[0] is trainer.checkpoint_callback
trainer.fit(model)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_running_test_pretrained_model_distrib_dp(tmpdir):
"""Verify `test()` on pretrained model."""
tutils.set_random_master_port()
model = EvalModelTemplate()
# exp file to get meta
logger = tutils.get_default_logger(tmpdir)
# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
trainer_options = dict(
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.2,
callbacks=[checkpoint],
logger=logger,
gpus=[0, 1],
accelerator='dp',
default_root_dir=tmpdir,
)
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
# correct result and ok accuracy
assert result == 1, 'training failed to complete'
pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
# run test set
new_trainer = Trainer(**trainer_options)
results = new_trainer.test(pretrained_model)
pretrained_model.cpu()
# test we have good test accuracy
acc = results[0]['test_acc']
assert acc > 0.5, f"Model failed to get expected {0.5} accuracy. test_acc = {acc}"
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
tpipes.run_prediction(dataloader, pretrained_model)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_running_test_pretrained_model_distrib_ddp_spawn(tmpdir):
"""Verify `test()` on pretrained model."""
tutils.set_random_master_port()
model = EvalModelTemplate()
# exp file to get meta
logger = tutils.get_default_logger(tmpdir)
# exp file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
trainer_options = dict(
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.2,
callbacks=[checkpoint],
logger=logger,
gpus=[0, 1],
accelerator='ddp_spawn',
default_root_dir=tmpdir,
)
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))
# correct result and ok accuracy
assert result == 1, 'training failed to complete'
pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
# run test set
new_trainer = Trainer(**trainer_options)
results = new_trainer.test(pretrained_model)
pretrained_model.cpu()
acc = results[0]['test_acc']
assert acc > 0.5, f"Model failed to get expected {0.5} accuracy. test_acc = {acc}"
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
tpipes.run_prediction(dataloader, pretrained_model)
def test_running_test_pretrained_model_cpu(tmpdir):
"""Verify test() on pretrained model."""
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
trainer_options = dict(
progress_bar_refresh_rate=0,
max_epochs=3,
limit_train_batches=0.4,
limit_val_batches=0.2,
callbacks=[checkpoint],
logger=logger,
default_root_dir=tmpdir,
)
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
# correct result and ok accuracy
assert result == 1, 'training failed to complete'
pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)
new_trainer = Trainer(**trainer_options)
new_trainer.test(pretrained_model)
# test we have good test accuracy
tutils.assert_ok_model_acc(new_trainer)
@pytest.mark.parametrize('model_template', [EvalModelTemplate, GenericEvalModelTemplate])
def test_load_model_from_checkpoint(tmpdir, model_template):
"""Verify test() on pretrained model."""
hparams = model_template.get_default_hparams()
model = model_template(**hparams)
trainer_options = dict(
progress_bar_refresh_rate=0,
max_epochs=2,
limit_train_batches=0.4,
limit_val_batches=0.2,
callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_top_k=-1)],
default_root_dir=tmpdir,
)
# fit model
trainer = Trainer(**trainer_options)
result = trainer.fit(model)
trainer.test(ckpt_path=None)
# correct result and ok accuracy
assert result == 1, 'training failed to complete'
# load last checkpoint
last_checkpoint = sorted(glob.glob(os.path.join(trainer.checkpoint_callback.dirpath, "*.ckpt")))[-1]
# Since `EvalModelTemplate` has `_save_hparams = True` by default, check that ckpt has hparams
ckpt = torch.load(last_checkpoint)
assert model_template.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), 'hyper_parameters missing from checkpoints'
# Ensure that model can be correctly restored from checkpoint
pretrained_model = model_template.load_from_checkpoint(last_checkpoint)
# test that hparams loaded correctly
for k, v in hparams.items():
assert getattr(pretrained_model, k) == v
# assert weights are the same
for (old_name, old_p), (new_name, new_p) in zip(model.named_parameters(), pretrained_model.named_parameters()):
assert torch.all(torch.eq(old_p, new_p)), 'loaded weights are not the same as the saved weights'
# Check `test` on pretrained model:
new_trainer = Trainer(**trainer_options)
new_trainer.test(pretrained_model)
# test we have good test accuracy
tutils.assert_ok_model_acc(new_trainer)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_dp_resume(tmpdir):
"""Make sure DP continues training correctly."""
hparams = EvalModelTemplate.get_default_hparams()
model = EvalModelTemplate(**hparams)
trainer_options = dict(max_epochs=1, gpus=2, accelerator='dp', default_root_dir=tmpdir)
# get logger
logger = tutils.get_default_logger(tmpdir)
# exp file to get weights
# logger file to get weights
checkpoint = tutils.init_checkpoint_callback(logger)
# add these to the trainer options
trainer_options['logger'] = logger
trainer_options['checkpoint_callback'] = checkpoint
# fit model
trainer = Trainer(**trainer_options)
trainer.is_slurm_managing_tasks = True
result = trainer.fit(model)
# track epoch before saving. Increment since we finished the current epoch, don't want to rerun
real_global_epoch = trainer.current_epoch + 1
# correct result and ok accuracy
assert result == 1, 'amp + dp model failed to complete'
# ---------------------------
# HPC LOAD/SAVE
# ---------------------------
# save
trainer.checkpoint_connector.hpc_save(tmpdir, logger)
# init new trainer
new_logger = tutils.get_default_logger(tmpdir, version=logger.version)
trainer_options['logger'] = new_logger
trainer_options['checkpoint_callback'] = ModelCheckpoint(dirpath=tmpdir)
trainer_options['limit_train_batches'] = 0.5
trainer_options['limit_val_batches'] = 0.2
trainer_options['max_epochs'] = 1
new_trainer = Trainer(**trainer_options)
# set the epoch start hook so we can predict before the model does the full training
def assert_good_acc():
assert new_trainer.current_epoch == real_global_epoch and new_trainer.current_epoch > 0
# if model and state loaded correctly, predictions will be good even though we
# haven't trained with the new loaded model
dp_model = new_trainer.model
dp_model.eval()
dataloader = trainer.train_dataloader
tpipes.run_prediction(dataloader, dp_model, dp=True)
# new model
model = EvalModelTemplate(**hparams)
model.on_train_start = assert_good_acc
# fit new model which should load hpc weights
new_trainer.fit(model)
# test freeze on gpu
model.freeze()
model.unfreeze()
def test_model_saving_loading(tmpdir):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
max_epochs=1,
logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
default_root_dir=tmpdir,
)
result = trainer.fit(model)
    # training complete
assert result == 1, 'amp + ddp model failed to complete'
# make a prediction
dataloaders = model.test_dataloader()
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
for dataloader in dataloaders:
for batch in dataloader:
break
x, y = batch
x = x.view(x.size(0), -1)
# generate preds before saving model
model.eval()
pred_before_saving = model(x)
# save model
new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)
hparams_path = os.path.join(hparams_path, 'hparams.yaml')
model_2 = EvalModelTemplate.load_from_checkpoint(checkpoint_path=new_weights_path, hparams_file=hparams_path,)
model_2.eval()
# make prediction
# assert that both predictions are the same
new_pred = model_2(x)
assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1
@pytest.mark.parametrize('url_ckpt', [True, False])
def test_strict_model_load_more_params(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv('TORCH_HOME', tmpdir)
model = EvalModelTemplate()
# Extra layer
model.c_d3 = torch.nn.Linear(model.hidden_dim, model.hidden_dim)
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=1, logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
result = trainer.fit(model)
    # training complete
assert result == 1
# save model
new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmpdir), 'hparams.yaml')
hparams_url = f'http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}'
ckpt_path = hparams_url if url_ckpt else new_weights_path
EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False,
)
with pytest.raises(RuntimeError, match=r'Unexpected key\(s\) in state_dict: "c_d3.weight", "c_d3.bias"'):
EvalModelTemplate.load_from_checkpoint(
checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True,
)
@pytest.mark.parametrize('url_ckpt', [True, False])
def test_strict_model_load_less_params(monkeypatch, tmpdir, tmpdir_server, url_ckpt):
"""Tests use case where trainer saves the model, and user loads it from tags independently."""
# set $TORCH_HOME, which determines torch hub's cache path, to tmpdir
monkeypatch.setenv('TORCH_HOME', tmpdir)
model = EvalModelTemplate()
# logger file to get meta
logger = tutils.get_default_logger(tmpdir)
# fit model
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=1, logger=logger,
callbacks=[ModelCheckpoint(dirpath=tmpdir)],
)
result = trainer.fit(model)
    # training complete
assert result == 1
# save model
new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')
trainer.save_checkpoint(new_weights_path)
# load new model
hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmpdir), 'hparams.yaml')
hparams_url = f'http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}'
ckpt_path = hparams_url if url_ckpt else new_weights_path
class CurrentModel(EvalModelTemplate):
def __init__(self):
super().__init__()
self.c_d3 = torch.nn.Linear(7, 7)
CurrentModel.load_from_checkpoint(
checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False,
)
with pytest.raises(RuntimeError, match=r'Missing key\(s\) in state_dict: "c_d3.weight", "c_d3.bias"'):
CurrentModel.load_from_checkpoint(
checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True,
)
def test_model_pickle(tmpdir):
model = EvalModelTemplate()
pickle.dumps(model)
cloudpickle.dumps(model)
| [
"torch.nn.Linear",
"torch.eq",
"torch.load",
"torch.cuda.device_count"
] | 1.3 | arnaudgelas/pytorch-lightning | cc624358c8e396e966f9c51b3010f6a986047fc6 |
1.7 |
import torch
import numpy as np
from src.trainers.base_trainer import BaseTrainer
from src.evaluation.metrics import Metrics
class LSTMAttnTrainer(BaseTrainer):
"""
Trainer class. Optimizer is by default handled by BaseTrainer.
"""
def __init__(self, model, config):
super(LSTMAttnTrainer, self).__init__(model, config)
self._log_interval = config['log_interval']
self._batch_size = config['dataloader_params']['batch_size']
self._logger.info('Batch size: %d', self._batch_size)
def _train_epoch(self, epoch, train_iter, dev_iter):
"""
:param epoch:
:param train_iter:
:param dev_iter:
:return:
"""
# turn on training mode which enables dropout
self._model.train()
total_loss = 0
predicted_values = []
target_values = []
labels = np.arange(self._model.num_classes)
for batch_idx, batch in enumerate(train_iter):
(data, lengths), target = self._to_tensor(batch.text, batch.label)
self._optimizer.zero_grad()
output, attn_w = self._model(data, lengths)
# output = self._model(data, lengths)
loss = self._loss_function(output, target, reduction='sum')
loss.backward()
self._optimizer.step()
total_loss += loss.item()
predictions = torch.max(output, 1)[1].view(target.size())
predicted_values.extend(predictions.data.tolist())
target_values.extend(target.data.tolist())
if (batch_idx + 1) % self._log_interval == 0:
results = Metrics.metrics(
predicted_values, target_values, labels
)
self._logger.info(
'Epoch: {:3d} [{:5d}/{:5.0f} batches] '
'Current loss: {:5.6f}, Total average loss: {:5.6f}, '
'F-score: {:5.2f}'.format(
epoch, (batch_idx + 1),
len(train_iter.dataset) / self._batch_size,
loss.item() / self._batch_size,
total_loss / results['n_samples'],
results['f_score']
)
)
results_train = Metrics.metrics(predicted_values, target_values, labels)
results_train['loss'] = total_loss / results_train['n_samples']
results_val, _ = self.evaluate(dev_iter)
log = {'epoch': epoch}
log.update({'train_{}'.format(k): v for k, v in results_train.items()})
log.update({'val_{}'.format(k): v for k, v in results_val.items()})
return log
def evaluate(self, data_iter):
"""
Validate after training an epoch
:param data_iter: iterator over validation/test batches
:return: tuple of (metrics dict, list of predicted labels)
"""
# switch to evaluation mode (disables dropout)
self._model.eval()
total_loss = 0
predicted_values = []
target_values = []
labels = np.arange(self._model.num_classes)
with torch.no_grad():
for batch_idx, batch in enumerate(data_iter):
(data, lengths), target = self._to_tensor(
batch.text, batch.label
)
output, attn_w = self._model(data, lengths)
# output = self._model(data, lengths)
loss = self._loss_function(output, target, reduction='sum')
total_loss += loss.item()
predictions = torch.max(output, 1)[1].view(target.size())
predicted_values.extend(predictions.data.tolist())
target_values.extend(target.data.tolist())
results = Metrics.metrics(predicted_values, target_values, labels)
results['loss'] = total_loss / results['n_samples']
self._logger.info(
'Evaluation: Loss: {:5.6f}, F-score: {:5.2f}% ({}/{})'.format(
results['loss'], results['f_score'],
results['correct'], results['n_samples']
)
)
return results, predicted_values
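# Illustrative sketch of the slice of `config` that LSTMAttnTrainer reads directly
# (an assumption, not taken from the original file); BaseTrainer is expected to
# consume further keys (optimizer, loss function, logging) that are not shown here.
#
# config = {
#     'log_interval': 50,                       # log every 50 batches
#     'dataloader_params': {'batch_size': 32},  # within this trainer, only used for loss reporting
# }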
| [
"torch.no_grad",
"torch.max"
] | 1.7.1 | paxtonedgar/MisInfo | 81b32fa3e7d0d204feb83e10169093f45727a2ea |
1.8 | import pytest
import torch
from torch.autograd import gradcheck
import kornia
import kornia.geometry.transform.imgwarp
import kornia.testing as utils # test utils
from kornia.testing import assert_close
class TestAngleToRotationMatrix:
def test_shape(self, device):
inp = torch.ones(1, 3, 4, 4).to(device)
rotmat = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix(inp)
assert rotmat.shape == (1, 3, 4, 4, 2, 2)
def test_angles(self, device):
ang_deg = torch.tensor([0, 90.0], device=device)
expected = torch.tensor([[[1.0, 0.0], [0.0, 1.0]], [[0, 1.0], [-1.0, 0]]], device=device)
rotmat = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix(ang_deg)
assert_close(rotmat, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 5, 4
img = torch.rand(batch_size, channels, height, width, device=device)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.geometry.transform.imgwarp.angle_to_rotation_matrix, (img,), raise_exception=True)
@pytest.mark.jit
@pytest.mark.skip("Problems with kornia.pi")
def test_jit(self, device, dtype):
B, C, H, W = 2, 1, 32, 32
patches = torch.rand(B, C, H, W, device=device, dtype=dtype)
model = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix
model_jit = torch.jit.script(kornia.geometry.transform.imgwarp.angle_to_rotation_matrix)
assert_close(model(patches), model_jit(patches))
class TestGetLAFScale:
def test_shape(self, device):
inp = torch.ones(1, 3, 2, 3, device=device)
rotmat = kornia.feature.get_laf_scale(inp)
assert rotmat.shape == (1, 3, 1, 1)
def test_scale(self, device):
inp = torch.tensor([[5.0, 1, 0], [1, 1, 0]], device=device).float()
inp = inp.view(1, 1, 2, 3)
expected = torch.tensor([[[[2]]]], device=device).float()
rotmat = kornia.feature.get_laf_scale(inp)
assert_close(rotmat, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 2, 3
img = torch.rand(batch_size, channels, height, width, device=device)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.feature.get_laf_scale, (img,), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels, height, width = 1, 2, 2, 3
img = torch.rand(batch_size, channels, height, width, device=device)
model = kornia.feature.get_laf_scale
model_jit = torch.jit.script(kornia.feature.get_laf_scale)
assert_close(model(img), model_jit(img))
class TestGetLAFCenter:
def test_shape(self, device):
inp = torch.ones(1, 3, 2, 3, device=device)
xy = kornia.feature.get_laf_center(inp)
assert xy.shape == (1, 3, 2)
def test_center(self, device):
inp = torch.tensor([[5.0, 1, 2], [1, 1, 3]], device=device).float()
inp = inp.view(1, 1, 2, 3)
expected = torch.tensor([[[2, 3]]], device=device).float()
xy = kornia.feature.get_laf_center(inp)
assert_close(xy, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 2, 3
img = torch.rand(batch_size, channels, height, width)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.feature.get_laf_center, (img,), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels, height, width = 1, 2, 2, 3
img = torch.rand(batch_size, channels, height, width, device=device)
model = kornia.feature.get_laf_center
model_jit = torch.jit.script(kornia.feature.get_laf_center)
assert_close(model(img), model_jit(img))
class TestGetLAFOri:
def test_shape(self, device):
inp = torch.ones(1, 3, 2, 3, device=device)
ori = kornia.feature.get_laf_orientation(inp)
assert ori.shape == (1, 3, 1)
def test_ori(self, device):
inp = torch.tensor([[1, 1, 2], [1, 1, 3]], device=device).float()
inp = inp.view(1, 1, 2, 3)
expected = torch.tensor([[[45.0]]], device=device).float()
angle = kornia.feature.get_laf_orientation(inp)
assert_close(angle, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 2, 3
img = torch.rand(batch_size, channels, height, width, device=device)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.feature.get_laf_orientation, (img,), raise_exception=True)
@pytest.mark.jit
@pytest.mark.skip("Union")
def test_jit(self, device, dtype):
batch_size, channels, height, width = 1, 2, 2, 3
img = torch.rand(batch_size, channels, height, width, device=device)
model = kornia.feature.get_laf_orientation
model_jit = torch.jit.script(kornia.feature.get_laf_orientation)
assert_close(model(img), model_jit(img))
class TestScaleLAF:
def test_shape_float(self, device):
inp = torch.ones(7, 3, 2, 3, device=device).float()
scale = 23.0
assert kornia.feature.scale_laf(inp, scale).shape == inp.shape
def test_shape_tensor(self, device):
inp = torch.ones(7, 3, 2, 3, device=device).float()
scale = torch.zeros(7, 1, 1, 1, device=device).float()
assert kornia.feature.scale_laf(inp, scale).shape == inp.shape
def test_scale(self, device):
inp = torch.tensor([[5.0, 1, 0.8], [1, 1, -4.0]], device=device).float()
inp = inp.view(1, 1, 2, 3)
scale = torch.tensor([[[[2.0]]]], device=device).float()
out = kornia.feature.scale_laf(inp, scale)
expected = torch.tensor([[[[10.0, 2, 0.8], [2, 2, -4.0]]]], device=device).float()
assert_close(out, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width, device=device)
scale = torch.rand(batch_size, device=device)
scale = utils.tensor_to_gradcheck_var(scale) # to var
laf = utils.tensor_to_gradcheck_var(laf) # to var
assert gradcheck(kornia.feature.scale_laf, (laf, scale), raise_exception=True, atol=1e-4)
@pytest.mark.jit
@pytest.mark.skip("Union")
def test_jit(self, device, dtype):
batch_size, channels, height, width = 1, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width, device=device)
scale = torch.rand(batch_size, device=device)
model = kornia.feature.scale_laf
model_jit = torch.jit.script(kornia.feature.scale_laf)
assert_close(model(laf, scale), model_jit(laf, scale))
class TestSetLAFOri:
def test_shape_tensor(self, device):
inp = torch.ones(7, 3, 2, 3, device=device).float()
ori = torch.ones(7, 3, 1, 1, device=device).float()
assert kornia.feature.set_laf_orientation(inp, ori).shape == inp.shape
def test_ori(self, device):
inp = torch.tensor([[0.0, 5.0, 0.8], [-5.0, 0, -4.0]], device=device).float()
inp = inp.view(1, 1, 2, 3)
ori = torch.zeros(1, 1, 1, 1, device=device).float()
out = kornia.feature.set_laf_orientation(inp, ori)
expected = torch.tensor([[[[5.0, 0.0, 0.8], [0.0, 5.0, -4.0]]]], device=device).float()
assert_close(out, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width, device=device)
ori = torch.rand(batch_size, channels, 1, 1, device=device)
ori = utils.tensor_to_gradcheck_var(ori) # to var
laf = utils.tensor_to_gradcheck_var(laf) # to var
assert gradcheck(kornia.feature.set_laf_orientation, (laf, ori), raise_exception=True, atol=1e-4)
@pytest.mark.jit
@pytest.mark.skip("Union")
def test_jit(self, device, dtype):
batch_size, channels, height, width = 1, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width, device=device)
ori = torch.rand(batch_size, channels, 1, 1, device=device)
model = kornia.feature.set_laf_orientation
model_jit = torch.jit.script(kornia.feature.set_laf_orientation)
assert_close(model(laf, ori), model_jit(laf, ori))
class TestMakeUpright:
def test_shape(self, device):
inp = torch.ones(5, 3, 2, 3, device=device)
rotmat = kornia.feature.make_upright(inp)
assert rotmat.shape == (5, 3, 2, 3)
def test_do_nothing(self, device):
inp = torch.tensor([[1, 0, 0], [0, 1, 0]], device=device).float()
inp = inp.view(1, 1, 2, 3)
expected = torch.tensor([[[[1, 0, 0], [0, 1, 0]]]], device=device).float()
laf = kornia.feature.make_upright(inp)
assert_close(laf, expected)
def test_do_nothing_with_scale(self, device):
inp = torch.tensor([[2, 0, 0], [0, 2, 0]], device=device).float()
inp = inp.view(1, 1, 2, 3)
expected = torch.tensor([[[[2, 0, 0], [0, 2, 0]]]], device=device).float()
laf = kornia.feature.make_upright(inp)
assert_close(laf, expected)
def test_check_zeros(self, device):
inp = torch.rand(4, 5, 2, 3, device=device)
laf = kornia.feature.make_upright(inp)
must_be_zeros = laf[:, :, 0, 1]
assert_close(must_be_zeros, torch.zeros_like(must_be_zeros))
def test_gradcheck(self, device):
batch_size, channels, height, width = 14, 2, 2, 3
img = torch.rand(batch_size, channels, height, width, device=device)
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.feature.make_upright, (img,), raise_exception=True)
@pytest.mark.jit
@pytest.mark.skip("Union")
def test_jit(self, device, dtype):
batch_size, channels, height, width = 1, 2, 2, 3
img = torch.rand(batch_size, channels, height, width, device=device)
model = kornia.feature.make_upright
model_jit = torch.jit.script(kornia.feature.make_upright)
assert_close(model(img), model_jit(img))
class TestELL2LAF:
def test_shape(self, device):
inp = torch.ones(5, 3, 5, device=device)
inp[:, :, 3] = 0
rotmat = kornia.feature.ellipse_to_laf(inp)
assert rotmat.shape == (5, 3, 2, 3)
def test_conversion(self, device):
inp = torch.tensor([[10, -20, 0.01, 0, 0.01]], device=device).float()
inp = inp.view(1, 1, 5)
expected = torch.tensor([[10, 0, 10.0], [0, 10, -20]], device=device).float()
expected = expected.view(1, 1, 2, 3)
laf = kornia.feature.ellipse_to_laf(inp)
assert_close(laf, expected)
def test_gradcheck(self, device):
batch_size, channels, height = 1, 2, 5
img = torch.rand(batch_size, channels, height, device=device).abs()
img[:, :, 2] = img[:, :, 3].abs() + 0.3
img[:, :, 4] += 1.0
# assure it is positive definite
img = utils.tensor_to_gradcheck_var(img) # to var
assert gradcheck(kornia.feature.ellipse_to_laf, (img,), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels, height = 1, 2, 5
img = torch.rand(batch_size, channels, height, device=device).abs()
img[:, :, 2] = img[:, :, 3].abs() + 0.3
img[:, :, 4] += 1.0
model = kornia.feature.ellipse_to_laf
model_jit = torch.jit.script(kornia.feature.ellipse_to_laf)
assert_close(model(img), model_jit(img))
class TestNormalizeLAF:
def test_shape(self, device):
inp = torch.rand(5, 3, 2, 3)
img = torch.rand(5, 3, 10, 10)
assert inp.shape == kornia.feature.normalize_laf(inp, img).shape
def test_conversion(self, device):
w, h = 10, 5
laf = torch.tensor([[1, 0, 1], [0, 1, 1]]).float()
laf = laf.view(1, 1, 2, 3)
img = torch.rand(1, 3, h, w)
expected = torch.tensor([[[[0.2, 0, 0.1], [0, 0.2, 0.2]]]]).float()
lafn = kornia.feature.normalize_laf(laf, img)
assert_close(lafn, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width)
img = torch.rand(batch_size, 3, 10, 32)
img = utils.tensor_to_gradcheck_var(img) # to var
laf = utils.tensor_to_gradcheck_var(laf) # to var
assert gradcheck(kornia.feature.normalize_laf, (laf, img), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels, height, width = 1, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width)
img = torch.rand(batch_size, 3, 10, 32)
model = kornia.feature.normalize_laf
model_jit = torch.jit.script(kornia.feature.normalize_laf)
assert_close(model(laf, img), model_jit(laf, img))
class TestLAF2pts:
def test_shape(self, device):
inp = torch.rand(5, 3, 2, 3, device=device)
n_pts = 13
assert kornia.feature.laf_to_boundary_points(inp, n_pts).shape == (5, 3, n_pts, 2)
def test_conversion(self, device):
laf = torch.tensor([[1, 0, 1], [0, 1, 1]], device=device).float()
laf = laf.view(1, 1, 2, 3)
n_pts = 6
expected = torch.tensor([[[[1, 1], [1, 2], [2, 1], [1, 0], [0, 1], [1, 2]]]], device=device).float()
pts = kornia.feature.laf_to_boundary_points(laf, n_pts)
assert_close(pts, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 3, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width, device=device)
laf = utils.tensor_to_gradcheck_var(laf) # to var
assert gradcheck(kornia.feature.laf_to_boundary_points, (laf), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels, height, width = 3, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width, device=device)
model = kornia.feature.laf_to_boundary_points
model_jit = torch.jit.script(kornia.feature.laf_to_boundary_points)
assert_close(model(laf), model_jit(laf))
class TestDenormalizeLAF:
def test_shape(self, device):
inp = torch.rand(5, 3, 2, 3, device=device)
img = torch.rand(5, 3, 10, 10, device=device)
assert inp.shape == kornia.feature.denormalize_laf(inp, img).shape
def test_conversion(self, device):
w, h = 10, 5
expected = torch.tensor([[1, 0, 1], [0, 1, 1]], device=device).float()
expected = expected.view(1, 1, 2, 3)
img = torch.rand(1, 3, h, w, device=device)
lafn = torch.tensor([[0.2, 0, 0.1], [0, 0.2, 0.2]], device=device).float()
laf = kornia.feature.denormalize_laf(lafn.view(1, 1, 2, 3), img)
assert_close(laf, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width, device=device)
img = torch.rand(batch_size, 3, 10, 32, device=device)
img = utils.tensor_to_gradcheck_var(img) # to var
laf = utils.tensor_to_gradcheck_var(laf) # to var
assert gradcheck(kornia.feature.denormalize_laf, (laf, img), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels, height, width = 1, 2, 2, 3
laf = torch.rand(batch_size, channels, height, width)
img = torch.rand(batch_size, 3, 10, 32)
model = kornia.feature.denormalize_laf
model_jit = torch.jit.script(kornia.feature.denormalize_laf)
assert_close(model(laf, img), model_jit(laf, img))
class TestGenPatchGrid:
def test_shape(self, device):
laf = torch.rand(5, 3, 2, 3, device=device)
img = torch.rand(5, 3, 10, 10, device=device)
PS = 3
from kornia.feature.laf import generate_patch_grid_from_normalized_LAF
grid = generate_patch_grid_from_normalized_LAF(img, laf, PS)
assert grid.shape == (15, 3, 3, 2)
def test_gradcheck(self, device):
laf = torch.rand(5, 3, 2, 3, device=device)
img = torch.rand(5, 3, 10, 10, device=device)
PS = 3
from kornia.feature.laf import generate_patch_grid_from_normalized_LAF
img = utils.tensor_to_gradcheck_var(img) # to var
laf = utils.tensor_to_gradcheck_var(laf) # to var
assert gradcheck(generate_patch_grid_from_normalized_LAF, (img, laf, PS), raise_exception=True)
class TestExtractPatchesSimple:
def test_shape(self, device):
laf = torch.rand(5, 4, 2, 3, device=device)
img = torch.rand(5, 3, 100, 30, device=device)
PS = 10
patches = kornia.feature.extract_patches_simple(img, laf, PS)
assert patches.shape == (5, 4, 3, PS, PS)
# TODO: check what to do to improve timing
# @pytest.mark.skip("The test takes too long to finish.")
def test_gradcheck(self, device):
nlaf = torch.tensor([[0.1, 0.001, 0.5], [0, 0.1, 0.5]], device=device).float()
nlaf = nlaf.view(1, 1, 2, 3)
img = torch.rand(1, 3, 20, 30, device=device)
PS = 11
img = utils.tensor_to_gradcheck_var(img) # to var
nlaf = utils.tensor_to_gradcheck_var(nlaf) # to var
assert gradcheck(kornia.feature.extract_patches_simple, (img, nlaf, PS, False), raise_exception=True)
class TestExtractPatchesPyr:
def test_shape(self, device):
laf = torch.rand(5, 4, 2, 3, device=device)
img = torch.rand(5, 3, 100, 30, device=device)
PS = 10
patches = kornia.feature.extract_patches_from_pyramid(img, laf, PS)
assert patches.shape == (5, 4, 3, PS, PS)
# TODO: check what to do to improve timing
# @pytest.mark.skip("The test takes too long to finish.")
def test_gradcheck(self, device):
nlaf = torch.tensor([[0.1, 0.001, 0.5], [0, 0.1, 0.5]], device=device).float()
nlaf = nlaf.view(1, 1, 2, 3)
img = torch.rand(1, 3, 20, 30, device=device)
PS = 11
img = utils.tensor_to_gradcheck_var(img) # to var
nlaf = utils.tensor_to_gradcheck_var(nlaf) # to var
assert gradcheck(kornia.feature.extract_patches_from_pyramid, (img, nlaf, PS, False), raise_exception=True)
class TestLAFIsTouchingBoundary:
def test_shape(self, device):
inp = torch.rand(5, 3, 2, 3, device=device)
img = torch.rand(5, 3, 10, 10, device=device)
assert (5, 3) == kornia.feature.laf_is_inside_image(inp, img).shape
def test_touch(self, device):
w, h = 10, 5
img = torch.rand(1, 3, h, w, device=device)
laf = torch.tensor([[[[10, 0, 3], [0, 10, 3]], [[1, 0, 5], [0, 1, 2]]]], device=device).float()
expected = torch.tensor([[False, True]], device=device)
assert torch.all(kornia.feature.laf_is_inside_image(laf, img) == expected).item()
@pytest.mark.jit
def test_jit(self, device, dtype):
w, h = 10, 5
img = torch.rand(1, 3, h, w, device=device)
laf = torch.tensor([[[[10, 0, 3], [0, 10, 3]], [[1, 0, 5], [0, 1, 2]]]], device=device).float()
model = kornia.feature.laf_is_inside_image
model_jit = torch.jit.script(kornia.feature.laf_is_inside_image)
assert_close(model(laf, img), model_jit(laf, img))
class TestGetCreateLAF:
def test_shape(self, device):
xy = torch.ones(1, 3, 2, device=device)
ori = torch.ones(1, 3, 1, device=device)
scale = torch.ones(1, 3, 1, 1, device=device)
laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)
assert laf.shape == (1, 3, 2, 3)
def test_laf(self, device):
xy = torch.ones(1, 1, 2, device=device)
ori = torch.zeros(1, 1, 1, device=device)
scale = 5 * torch.ones(1, 1, 1, 1, device=device)
expected = torch.tensor([[[[5, 0, 1], [0, 5, 1]]]], device=device).float()
laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)
assert_close(laf, expected)
def test_laf_def(self, device):
xy = torch.ones(1, 1, 2, device=device)
expected = torch.tensor([[[[1, 0, 1], [0, 1, 1]]]], device=device).float()
laf = kornia.feature.laf_from_center_scale_ori(xy)
assert_close(laf, expected)
def test_cross_consistency(self, device):
batch_size, channels = 3, 2
xy = torch.rand(batch_size, channels, 2, device=device)
ori = torch.rand(batch_size, channels, 1, device=device)
scale = torch.abs(torch.rand(batch_size, channels, 1, 1, device=device))
laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)
scale2 = kornia.feature.get_laf_scale(laf)
assert_close(scale, scale2)
xy2 = kornia.feature.get_laf_center(laf)
assert_close(xy2, xy)
ori2 = kornia.feature.get_laf_orientation(laf)
assert_close(ori2, ori)
def test_gradcheck(self, device):
batch_size, channels = 3, 2
xy = utils.tensor_to_gradcheck_var(torch.rand(batch_size, channels, 2, device=device))
ori = utils.tensor_to_gradcheck_var(torch.rand(batch_size, channels, 1, device=device))
scale = utils.tensor_to_gradcheck_var(torch.abs(torch.rand(batch_size, channels, 1, 1, device=device)))
assert gradcheck(kornia.feature.laf_from_center_scale_ori, (xy, scale, ori), raise_exception=True)
@pytest.mark.skip("Depends on angle-to-rotation-matric")
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels = 3, 2
xy = torch.rand(batch_size, channels, 2, device=device)
ori = torch.rand(batch_size, channels, 1, device=device)
scale = torch.abs(torch.rand(batch_size, channels, 1, 1, device=device))
model = kornia.feature.laf_from_center_scale_ori
model_jit = torch.jit.script(kornia.feature.laf_from_center_scale_ori)
assert_close(model(xy, scale, ori), model_jit(xy, scale, ori))
class TestGetLAF3pts:
def test_shape(self, device):
inp = torch.ones(1, 3, 2, 3, device=device)
out = kornia.feature.laf_to_three_points(inp)
assert out.shape == inp.shape
def test_batch_shape(self, device):
inp = torch.ones(5, 3, 2, 3, device=device)
out = kornia.feature.laf_to_three_points(inp)
assert out.shape == inp.shape
def test_conversion(self, device):
inp = torch.tensor([[1, 0, 2], [0, 1, 3]], device=device).float().view(1, 1, 2, 3)
expected = torch.tensor([[3, 2, 2], [3, 4, 3]], device=device).float().view(1, 1, 2, 3)
threepts = kornia.feature.laf_to_three_points(inp)
assert_close(threepts, expected)
def test_gradcheck(self, device):
batch_size, channels, height, width = 3, 2, 2, 3
inp = torch.rand(batch_size, channels, height, width, device=device)
inp = utils.tensor_to_gradcheck_var(inp) # to var
assert gradcheck(kornia.feature.laf_to_three_points, (inp,), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels, height, width = 3, 2, 2, 3
inp = torch.rand(batch_size, channels, height, width, device=device)
model = kornia.feature.laf_to_three_points
model_jit = torch.jit.script(kornia.feature.laf_to_three_points)
assert_close(model(inp), model_jit(inp))
class TestGetLAFFrom3pts:
def test_shape(self, device):
inp = torch.ones(1, 3, 2, 3, device=device)
out = kornia.feature.laf_from_three_points(inp)
assert out.shape == inp.shape
def test_batch_shape(self, device):
inp = torch.ones(5, 3, 2, 3, device=device)
out = kornia.feature.laf_from_three_points(inp)
assert out.shape == inp.shape
def test_conversion(self, device):
expected = torch.tensor([[1, 0, 2], [0, 1, 3]], device=device).float().view(1, 1, 2, 3)
inp = torch.tensor([[3, 2, 2], [3, 4, 3]], device=device).float().view(1, 1, 2, 3)
threepts = kornia.feature.laf_from_three_points(inp)
assert_close(threepts, expected)
def test_cross_consistency(self, device):
batch_size, channels, height, width = 3, 2, 2, 3
inp = torch.rand(batch_size, channels, height, width, device=device)
inp_2 = kornia.feature.laf_from_three_points(inp)
inp_2 = kornia.feature.laf_to_three_points(inp_2)
assert_close(inp_2, inp)
def test_gradcheck(self, device):
batch_size, channels, height, width = 3, 2, 2, 3
inp = torch.rand(batch_size, channels, height, width, device=device)
inp = utils.tensor_to_gradcheck_var(inp) # to var
assert gradcheck(kornia.feature.laf_from_three_points, (inp,), raise_exception=True)
@pytest.mark.jit
def test_jit(self, device, dtype):
batch_size, channels, height, width = 3, 2, 2, 3
inp = torch.rand(batch_size, channels, height, width, device=device)
model = kornia.feature.laf_from_three_points
model_jit = torch.jit.script(kornia.feature.laf_from_three_points)
assert_close(model(inp), model_jit(inp))
class TestTransformLAFs:
@pytest.mark.parametrize("batch_size", [1, 2, 5])
@pytest.mark.parametrize("num_points", [2, 3, 5])
def test_transform_points(self, batch_size, num_points, device, dtype):
# generate input data
eye_size = 3
lafs_src = torch.rand(batch_size, num_points, 2, 3, device=device, dtype=dtype)
dst_homo_src = utils.create_random_homography(batch_size, eye_size).to(device=device, dtype=dtype)
# transform the points from dst to ref
lafs_dst = kornia.feature.perspective_transform_lafs(dst_homo_src, lafs_src)
# transform the points from ref to dst
src_homo_dst = torch.inverse(dst_homo_src)
lafs_dst_to_src = kornia.feature.perspective_transform_lafs(src_homo_dst, lafs_dst)
# projected should be equal as initial
assert_close(lafs_src, lafs_dst_to_src)
def test_gradcheck(self, device, dtype):
# generate input data
batch_size, num_points = 2, 3
eye_size = 3
points_src = torch.rand(batch_size, num_points, 2, 3, device=device, dtype=dtype)
dst_homo_src = utils.create_random_homography(batch_size, eye_size).to(device=device, dtype=dtype)
# evaluate function gradient
points_src = utils.tensor_to_gradcheck_var(points_src) # to var
dst_homo_src = utils.tensor_to_gradcheck_var(dst_homo_src) # to var
assert gradcheck(kornia.feature.perspective_transform_lafs, (dst_homo_src, points_src), raise_exception=True)
| [
"torch.zeros",
"torch.rand",
"torch.autograd.gradcheck",
"torch.inverse",
"torch.ones",
"torch.tensor",
"torch.zeros_like",
"torch.jit.script"
] | 1.8.1 | saurabhya/kornia | f2b4fe9fb32d99795783f25b5a4c561001783ebf |
1.8 | import hydra
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as T
from hydra.core.config_store import ConfigStore
from hydra.utils import to_absolute_path
import kornia as K
from kornia.x import Configuration, ImageClassifierTrainer, ModelCheckpoint
cs = ConfigStore.instance()
# Registering the Config class with the name 'config'.
cs.store(name="config", node=Configuration)
@hydra.main(config_path=".", config_name="config.yaml")
def my_app(config: Configuration) -> None:
# create the model
model = nn.Sequential(
K.contrib.VisionTransformer(image_size=32, patch_size=16, embed_dim=128, num_heads=3),
K.contrib.ClassificationHead(embed_size=128, num_classes=10),
)
# create the dataset
train_dataset = torchvision.datasets.CIFAR10(
root=to_absolute_path(config.data_path), train=True, download=True, transform=T.ToTensor())
valid_dataset = torchvision.datasets.CIFAR10(
root=to_absolute_path(config.data_path), train=False, download=True, transform=T.ToTensor())
# create the dataloaders
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=config.batch_size, shuffle=True, num_workers=8, pin_memory=True)
valid_dataloader = torch.utils.data.DataLoader(
valid_dataset, batch_size=config.batch_size, shuffle=True, num_workers=8, pin_memory=True)
# create the loss function
criterion = nn.CrossEntropyLoss()
# instantiate the optimizer and scheduler
optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, config.num_epochs * len(train_dataloader))
# define some augmentations
_augmentations = nn.Sequential(
K.augmentation.RandomHorizontalFlip(p=0.75),
K.augmentation.RandomVerticalFlip(p=0.75),
K.augmentation.RandomAffine(degrees=10.),
K.augmentation.PatchSequential(
K.augmentation.ColorJiggle(0.1, 0.1, 0.1, 0.1, p=0.8),
grid_size=(2, 2), # cifar-10 is 32x32 and vit is patch 16
patchwise_apply=False,
),
)
def augmentations(self, sample: dict) -> dict:
out = _augmentations(sample["input"])
return {"input": out, "target": sample["target"]}
model_checkpoint = ModelCheckpoint(
filepath="./outputs", monitor="top5",
)
trainer = ImageClassifierTrainer(
model, train_dataloader, valid_dataloader, criterion, optimizer, scheduler, config,
callbacks={
"augmentations": augmentations, "on_checkpoint": model_checkpoint,
}
)
trainer.fit()
if __name__ == "__main__":
my_app()
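# Illustrative config.yaml covering the fields this script reads from `config`
# (values are assumptions; the real kornia.x Configuration may define additional fields):
#
# data_path: ./data
# batch_size: 64
# num_epochs: 25
# lr: 0.0003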
| [
"torch.nn.CrossEntropyLoss",
"torch.utils.data.DataLoader"
] | 1.8.1 | saurabhya/kornia | f2b4fe9fb32d99795783f25b5a4c561001783ebf |
1.5 | """
Transforms and data augmentation for sequence level images, bboxes and masks.
Mostly copy-paste from https://github.com/Epiphqny/VisTR/blob/master/datasets/transforms.py
"""
import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh, box_iou
from util.misc import interpolate
import numpy as np
from numpy import random as rand
from PIL import Image
import cv2
import pdb
def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
assert mode in ['iou', 'iof']
bboxes1 = bboxes1.astype(np.float32)
bboxes2 = bboxes2.astype(np.float32)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
ious = np.zeros((rows, cols), dtype=np.float32)
if rows * cols == 0:
return ious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
ious = np.zeros((cols, rows), dtype=np.float32)
exchange = True
area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
for i in range(bboxes1.shape[0]):
x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
overlap = np.maximum(x_end - x_start, 0) * np.maximum(y_end - y_start, 0)
if mode == 'iou':
union = area1[i] + area2 - overlap
else:
union = area1[i] if not exchange else area2
union = np.maximum(union, eps)
ious[i, :] = overlap / union
if exchange:
ious = ious.T
return ious
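# Quick sanity check (illustrative):
# bbox_overlaps(np.array([[0, 0, 2, 2]]), np.array([[1, 1, 3, 3]]))
# -> overlap = 1, union = 4 + 4 - 1 = 7, result approximately [[0.143]]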
def crop(image, target, region):
cropped_image = F.crop(image, *region)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd"]
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
# FIXME should we update the area here if there are no boxes?
target['masks'] = target['masks'][:, i:i + h, j:j + w]
fields.append("masks")
# remove elements whose boxes or masks have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target['boxes'].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target['masks'].flatten(1).any(1)
for field in fields:
target[field] = target[field][keep]
return cropped_image, target
def hflip(image, target):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["boxes"] = boxes
if "masks" in target:
target['masks'] = target['masks'].flip(-1)
return flipped_image, target
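# NOTE: RandomVerticalFlip below calls a `vflip` helper that is not defined in this file.
# A minimal sketch is given here, assuming semantics analogous to `hflip`
# (boxes in xyxy format, masks flipped along the vertical axis).
def vflip(image, target):
    flipped_image = F.vflip(image)
    w, h = image.size
    target = target.copy()
    if "boxes" in target:
        boxes = target["boxes"]
        # y1' = h - y2, y2' = h - y1
        boxes = boxes[:, [0, 3, 2, 1]] * torch.as_tensor([1, -1, 1, -1]) + torch.as_tensor([0, h, 0, h])
        target["boxes"] = boxes
    if "masks" in target:
        target['masks'] = target['masks'].flip(-2)
    return flipped_image, target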
def resize(image, target, size, max_size=None):
# size can be min_size (scalar) or (w, h) tuple
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
try:
size = get_size(image.size, size, max_size)
except:
pdb.set_trace()
rescaled_image = F.resize(image, size)
if target is None:
return rescaled_image, None
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["boxes"] = scaled_boxes
if "area" in target:
area = target["area"]
scaled_area = area * (ratio_width * ratio_height)
target["area"] = scaled_area
h, w = size
target["size"] = torch.tensor([h, w])
if "masks" in target:
target['masks'] = interpolate(
target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5
return rescaled_image, target
def pad(image, target, padding):
# assumes that we only pad on the bottom right corners
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, None
target = target.copy()
# should we do something wrt the original size?
target["size"] = torch.tensor(padded_image.size[::-1])
if "masks" in target:
target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))
return padded_image, target
class RandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
region = T.RandomCrop.get_params(img, self.size)
return crop(img, target, region)
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, ref_img: PIL.Image.Image, target: dict, ref_target: dict):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
img, target = crop(img, target, region)
ref_img, ref_target = crop(ref_img, ref_target, region)
return img, ref_img, target, ref_target
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
image_width, image_height = img.size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.))
crop_left = int(round((image_width - crop_width) / 2.))
return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
class RandomContrast(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "contrast upper must be >= lower."
assert self.lower >= 0, "contrast lower must be non-negative."
def __call__(self, image, ref_image, target, ref_target):
if rand.randint(2):
image *= rand.uniform(self.lower, self.upper)
ref_image *= rand.uniform(self.lower, self.upper)
return image, ref_image, target, ref_target
class RandomBrightness(object):
def __init__(self, delta=32):
assert delta >= 0.0
assert delta <= 255.0
self.delta = delta
def __call__(self, image, target):
if rand.randint(2):
delta = rand.uniform(-self.delta, self.delta)
image += delta
return image, target
class RandomSaturation(object):
def __init__(self, lower=0.5, upper=1.5):
self.lower = lower
self.upper = upper
assert self.upper >= self.lower, "saturation upper must be >= lower."
assert self.lower >= 0, "saturation lower must be non-negative."
def __call__(self, image, ref_image, target, ref_target):
if rand.randint(2):
image[:, :, 1] *= rand.uniform(self.lower, self.upper)
ref_image[:, :, 1] *= rand.uniform(self.lower, self.upper)
return image, ref_image, target, ref_target
class RandomHue(object):
def __init__(self, delta=18.0):
assert delta >= 0.0 and delta <= 360.0
self.delta = delta
def __call__(self, image, ref_image, target, ref_target):
if rand.randint(2):
image[:, :, 0] += rand.uniform(-self.delta, self.delta)
image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0
image[:, :, 0][image[:, :, 0] < 0.0] += 360.0
ref_image[:, :, 0] += rand.uniform(-self.delta, self.delta)
ref_image[:, :, 0][ref_image[:, :, 0] > 360.0] -= 360.0
ref_image[:, :, 0][ref_image[:, :, 0] < 0.0] += 360.0
return image, ref_image, target, ref_target
class RandomLightingNoise(object):
def __init__(self):
self.perms = ((0, 1, 2), (0, 2, 1),
(1, 0, 2), (1, 2, 0),
(2, 0, 1), (2, 1, 0))
def __call__(self, image, target):
if rand.randint(2):
swap = self.perms[rand.randint(len(self.perms))]
shuffle = SwapChannels(swap) # shuffle channels
image = shuffle(image)
return image, target
class ConvertColor(object):
def __init__(self, current='BGR', transform='HSV'):
self.transform = transform
self.current = current
def __call__(self, image, ref_image, target, ref_target):
if self.current == 'BGR' and self.transform == 'HSV':
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
ref_image = cv2.cvtColor(ref_image, cv2.COLOR_BGR2HSV)
elif self.current == 'HSV' and self.transform == 'BGR':
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
ref_image = cv2.cvtColor(ref_image, cv2.COLOR_HSV2BGR)
else:
raise NotImplementedError
return image, ref_image, target, ref_target
class SwapChannels(object):
def __init__(self, swaps):
self.swaps = swaps
def __call__(self, image):
image = image[:, :, self.swaps]
return image
class PhotometricDistort(object):
def __init__(self):
self.pd = [
RandomContrast(),
ConvertColor(transform='HSV'),
RandomSaturation(),
RandomHue(),
ConvertColor(current='HSV', transform='BGR'),
RandomContrast()
]
self.rand_brightness = RandomBrightness()
self.rand_light_noise = RandomLightingNoise()
def __call__(self, image, ref_image, target, ref_target):
image = np.asarray(image).astype('float32')
ref_image = np.asarray(ref_image).astype('float32')
image, target = self.rand_brightness(image, target)
ref_image, ref_target = self.rand_brightness(ref_image, ref_target)
if rand.randint(2):
distort = Compose(self.pd[:-1])
else:
distort = Compose(self.pd[1:])
image, ref_image, target, ref_target = distort(image, ref_image, target, ref_target)
image, target = self.rand_light_noise(image, target)
ref_image, ref_target = self.rand_light_noise(ref_image, ref_target)
image = Image.fromarray(image.astype('uint8'))
ref_image = Image.fromarray(ref_image.astype('uint8'))
return image, ref_image, target, ref_target
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, ref_img, target, ref_target):
if random.random() < self.p:
img, target = hflip(img, target)
ref_img, ref_target = hflip(ref_img, ref_target)
return img, ref_img, target, ref_target
class RandomVerticalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return vflip(img, target)
return img, target
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, ref_img, target=None, ref_target=None):
size = random.choice(self.sizes)
img, target = resize(img, target, size, self.max_size)
ref_img, ref_target = resize(ref_img, ref_target, size, self.max_size)
return img, ref_img, target, ref_target
class RandomPad(object):
def __init__(self, max_pad):
self.max_pad = max_pad
def __call__(self, img, target):
pad_x = random.randint(0, self.max_pad)
pad_y = random.randint(0, self.max_pad)
return pad(img, target, (pad_x, pad_y))
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
class ToTensor(object):
def __call__(self, image, ref_image, target, ref_target):
return F.to_tensor(image), F.to_tensor(ref_image), target, ref_target
class RandomErasing(object):
def __init__(self, *args, **kwargs):
self.eraser = T.RandomErasing(*args, **kwargs)
def __call__(self, img, target):
return self.eraser(img), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, ref_image, target=None, ref_target=None):
image = F.normalize(image, mean=self.mean, std=self.std)
ref_image = F.normalize(ref_image, mean=self.mean, std=self.std)
if target is None:
return image, None, ref_image, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
ref_target = ref_target.copy()
if "boxes" in ref_target:
boxes = ref_target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
ref_target["boxes"] = boxes
return image, ref_image, target, ref_target
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, ref_image, target, ref_target):
for t in self.transforms:
image, ref_image, target, ref_target = t(image, ref_image, target, ref_target)
return image, ref_image, target, ref_target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string | [
"torch.all",
"torch.tensor",
"torch.as_tensor",
"torch.nn.functional.pad"
] | 1.5.0 | anirudh-chakravarthy/PropTR | 29448a0c73da6c9918d161228d92409d3d1315db |
0.4 | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Donny You, RainbowSecret, JingyiXie
## Microsoft Research
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch.utils import data
import lib.datasets.tools.transforms as trans
import lib.datasets.tools.cv2_aug_transforms as cv2_aug_trans
import lib.datasets.tools.pil_aug_transforms as pil_aug_trans
from lib.datasets.loader.default_loader import DefaultLoader, CSDataTestLoader
from lib.datasets.loader.ade20k_loader import ADE20KLoader
from lib.datasets.loader.lip_loader import LipLoader
from lib.datasets.tools.collate import collate
from lib.utils.tools.logger import Logger as Log
from lib.utils.distributed import get_world_size, get_rank, is_distributed
import pdb
class DataLoader(object):
def __init__(self, configer):
self.configer = configer
if self.configer.get('data', 'image_tool') == 'pil':
self.aug_train_transform = pil_aug_trans.PILAugCompose(self.configer, split='train')
self.aug_val_transform = pil_aug_trans.PILAugCompose(self.configer, split='val')
elif self.configer.get('data', 'image_tool') == 'cv2':
self.aug_train_transform = cv2_aug_trans.CV2AugCompose(self.configer, split='train')
self.aug_val_transform = cv2_aug_trans.CV2AugCompose(self.configer, split='val')
else:
Log.error('Not support {} image tool.'.format(self.configer.get('data', 'image_tool')))
exit(1)
self.img_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize(div_value=self.configer.get('normalize', 'div_value'),
mean=self.configer.get('normalize', 'mean'),
std=self.configer.get('normalize', 'std')), ])
self.label_transform = trans.Compose([
trans.ToLabel(),
trans.ReLabel(255, -1), ])
def get_trainloader(self):
if self.configer.exists('data', 'use_edge') and self.configer.get('data', 'use_edge') == 'ce2p':
"""
ce2p manner:
load both the ground-truth label and edge.
"""
Log.info('use edge (follow ce2p) for train...')
trainloader = data.DataLoader(
LipLoader(root_dir=self.configer.get('data', 'data_dir'), dataset='train',
aug_transform=self.aug_train_transform,
img_transform=self.img_transform,
label_transform=self.label_transform,
configer=self.configer),
batch_size=self.configer.get('train', 'batch_size'), pin_memory=True,
num_workers=self.configer.get('data', 'workers'),
shuffle=True, drop_last=self.configer.get('data', 'drop_last'),
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('train', 'data_transformer')
)
)
return trainloader
elif self.configer.exists('train', 'loader') and \
(self.configer.get('train', 'loader') == 'ade20k'
or self.configer.get('train', 'loader') == 'pascal_context'
or self.configer.get('train', 'loader') == 'pascal_voc'
or self.configer.get('train', 'loader') == 'coco_stuff'):
"""
ADE20KLoader manner:
support input images of different shapes.
"""
Log.info('use ADE20KLoader (diverse input shape) for train...')
trainloader = data.DataLoader(
ADE20KLoader(root_dir=self.configer.get('data', 'data_dir'), dataset='train',
aug_transform=self.aug_train_transform,
img_transform=self.img_transform,
label_transform=self.label_transform,
configer=self.configer),
batch_size=self.configer.get('train', 'batch_size'), pin_memory=True,
num_workers=self.configer.get('data', 'workers'),
shuffle=True, drop_last=self.configer.get('data', 'drop_last'),
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('train', 'data_transformer')
)
)
return trainloader
else:
"""
Default manner:
support input images of the same shapes.
"""
dataset = DefaultLoader(
root_dir=self.configer.get('data', 'data_dir'), dataset='train',
aug_transform=self.aug_train_transform,
img_transform=self.img_transform,
label_transform=self.label_transform,
configer=self.configer
)
if is_distributed():
sampler = torch.utils.data.distributed.DistributedSampler(dataset)
else:
sampler = None
Log.info('use the DefaultLoader for train...')
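# the configured batch_size and workers are global values; each distributed process
# receives an equal share via the division by get_world_size() below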
trainloader = data.DataLoader(
dataset,
batch_size=self.configer.get('train', 'batch_size') // get_world_size(), pin_memory=True,
num_workers=self.configer.get('data', 'workers') // get_world_size(),
sampler=sampler,
shuffle=(sampler is None),
drop_last=self.configer.get('data', 'drop_last'),
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('train', 'data_transformer')
)
)
return trainloader
def get_valloader(self, dataset=None):
dataset = 'val' if dataset is None else dataset
if self.configer.get('method') == 'fcn_segmentor':
"""
default manner:
load the ground-truth label.
"""
Log.info('use DefaultLoader for val ...')
valloader = data.DataLoader(
DefaultLoader(root_dir=self.configer.get('data', 'data_dir'), dataset=dataset,
aug_transform=self.aug_val_transform,
img_transform=self.img_transform,
label_transform=self.label_transform,
configer=self.configer),
batch_size=self.configer.get('val', 'batch_size'), pin_memory=True,
num_workers=self.configer.get('data', 'workers'), shuffle=False,
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('val', 'data_transformer')
)
)
return valloader
else:
Log.error('Method: {} loader is invalid.'.format(self.configer.get('method')))
return None
def get_testloader(self, dataset=None):
dataset = 'test' if dataset is None else dataset
if self.configer.get('method') == 'fcn_segmentor':
Log.info('use CSDataTestLoader for test ...')
testloader = data.DataLoader(
CSDataTestLoader(root_dir=self.configer.get('data', 'data_dir'), dataset=dataset,
img_transform=self.img_transform,
configer=self.configer),
batch_size=self.configer.get('test', 'batch_size'), pin_memory=True,
num_workers=self.configer.get('data', 'workers'), shuffle=False,
collate_fn=lambda *args: collate(
*args, trans_dict=self.configer.get('test', 'data_transformer')
)
)
return testloader
if __name__ == "__main__":
pass
| [
"torch.utils.data.distributed.DistributedSampler"
] | 0.4.1 | tianweiy/openseg.pytorch | e4159e0b2db86d22149c44f220c5f2e3070a3042 |
1.10 | """
From https://stackoverflow.com/questions/62265351/measuring-f1-score-for-multiclass-classification-natively-in-pytorch
with this modification https://stackoverflow.com/questions/62265351/measuring-f1-score-for-multiclass-classification-natively-in-pytorch#comment122867942_63358412
"""
from typing import Tuple
import torch
class F1Score:
"""
Class for f1 calculation in Pytorch.
"""
def __init__(self, average: str = 'weighted'):
"""
Init.
Args:
average: averaging method
"""
self.average = average
if average not in [None, 'micro', 'macro', 'weighted']:
raise ValueError('Wrong value of average parameter')
@staticmethod
def calc_f1_micro(predictions: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
"""
Calculate f1 micro.
Args:
predictions: tensor with predictions
labels: tensor with original labels
Returns:
f1 score
"""
true_positive = torch.eq(labels, predictions).sum().float()
f1_score = torch.div(true_positive, len(labels))
return f1_score
@staticmethod
def calc_f1_count_for_label(predictions: torch.Tensor,
labels: torch.Tensor, label_id: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Calculate f1 and true count for the label
Args:
predictions: tensor with predictions
labels: tensor with original labels
label_id: id of current label
Returns:
f1 score and true count for label
"""
# label count
true_count = torch.eq(labels, label_id).sum()
# true positives: labels equal to prediction and to label_id
true_positive = torch.logical_and(torch.eq(labels, predictions),
torch.eq(labels, label_id)).sum().float()
# precision for label
precision = torch.div(true_positive, torch.eq(predictions, label_id).sum().float())
# replace nan values with 0
precision = torch.where(torch.isnan(precision),
torch.zeros_like(precision).type_as(true_positive),
precision)
# recall for label
recall = torch.div(true_positive, true_count)
# f1
f1 = 2 * precision * recall / (precision + recall)
# replace nan values with 0
f1 = torch.where(torch.isnan(f1), torch.zeros_like(f1).type_as(true_positive), f1)
return f1, true_count
def __call__(self, predictions: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
"""
Calculate f1 score based on averaging method defined in init.
Args:
predictions: tensor with predictions
labels: tensor with original labels
Returns:
f1 score
"""
# simpler calculation for micro
if self.average == 'micro':
return self.calc_f1_micro(predictions, labels)
f1_score = 0
for label_id in range(0, len(labels.unique())):
f1, true_count = self.calc_f1_count_for_label(predictions, labels, label_id)
if self.average == 'weighted':
f1_score += f1 * true_count
elif self.average == 'macro':
f1_score += f1
if self.average == 'weighted':
f1_score = torch.div(f1_score, len(labels))
elif self.average == 'macro':
f1_score = torch.div(f1_score, len(labels.unique()))
return f1_score
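# Example usage (illustrative):
# f1 = F1Score(average='macro')
# f1(torch.tensor([0, 1, 1, 2]), torch.tensor([0, 1, 2, 2]))
# per-class F1 is 1.0, 2/3 and 2/3, so the macro average is roughly 0.78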
| [
"torch.zeros_like",
"torch.div",
"torch.eq",
"torch.isnan"
] | 1.10.2 | Jakoviz/Infected-sweetpotato-classification | 003befcc5c430f41f8426d9ac94894e20fdfc247 |
1.7 | from torch.testing import assert_allclose
from transformers import AutoModel
from allennlp.common.testing import ModelTestCase
from allennlp.data import Vocabulary
from allennlp_models import vision # noqa: F401
from tests import FIXTURES_ROOT
class TestVEVilbert(ModelTestCase):
def test_model_can_train_save_and_load_small_model(self):
param_file = FIXTURES_ROOT / "vision" / "vilbert_ve" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(param_file)
def test_model_can_train_save_and_load_with_cache(self):
import tempfile
with tempfile.TemporaryDirectory(prefix=self.__class__.__name__) as d:
overrides = {"dataset_reader.feature_cache_dir": str(d)}
import json
overrides = json.dumps(overrides)
param_file = FIXTURES_ROOT / "vision" / "vilbert_ve" / "experiment.jsonnet"
self.ensure_model_can_train_save_and_load(param_file, overrides=overrides)
def test_model_can_train_save_and_load_from_huggingface(self):
param_file = FIXTURES_ROOT / "vision" / "vilbert_ve" / "experiment_from_huggingface.jsonnet"
self.ensure_model_can_train_save_and_load(param_file)
def test_model_loads_weights_correctly(self):
from allennlp_models.vision.models.visual_entailment import VisualEntailmentModel
vocab = Vocabulary()
model_name = "epwalsh/bert-xsmall-dummy"
model = VisualEntailmentModel.from_huggingface_model_name(
vocab=vocab,
model_name=model_name,
image_feature_dim=2048,
image_num_hidden_layers=1,
image_hidden_size=3,
image_num_attention_heads=1,
combined_num_attention_heads=1,
combined_hidden_size=5,
pooled_output_dim=7,
image_intermediate_size=11,
image_attention_dropout=0.0,
image_hidden_dropout=0.0,
image_biattention_id=[0, 1],
text_biattention_id=[0, 1],
text_fixed_layer=0,
image_fixed_layer=0,
)
transformer = AutoModel.from_pretrained(model_name)
# compare embedding parameters
assert_allclose(
transformer.embeddings.word_embeddings.weight.data,
model.backbone.text_embeddings.embeddings.word_embeddings.weight.data,
)
# compare encoder parameters
assert_allclose(
transformer.encoder.layer[0].intermediate.dense.weight.data,
model.backbone.encoder.layers1[0].intermediate.dense.weight.data,
)
| [
"torch.testing.assert_allclose"
] | 1.7.0 | shunk031/allennlp-models | 1e89d5e51cb45f3e77a48d4983bf980088334fac |
1.7 | """
BiMPM (Bilateral Multi-Perspective Matching) model implementation.
"""
from typing import Dict, List, Any
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.modules import FeedForward, Seq2SeqEncoder, Seq2VecEncoder, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
from allennlp.modules.bimpm_matching import BiMpmMatching
@Model.register("bimpm")
class BiMpm(Model):
"""
This `Model` implements BiMPM model described in [Bilateral Multi-Perspective Matching
for Natural Language Sentences](https://arxiv.org/abs/1702.03814) by Zhiguo Wang et al., 2017.
Also please refer to the [TensorFlow implementation](https://github.com/zhiguowang/BiMPM/) and
[PyTorch implementation](https://github.com/galsang/BIMPM-pytorch).
Registered as a `Model` with name "bimpm".
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the `premise` and `hypothesis` `TextFields` we get as input to the
model.
matcher_word : `BiMpmMatching`
BiMPM matching on the output of word embeddings of premise and hypothesis.
encoder1 : `Seq2SeqEncoder`
First encoder layer for the premise and hypothesis
matcher_forward1 : `BiMPMMatching`
BiMPM matching for the forward output of first encoder layer
matcher_backward1 : `BiMPMMatching`
BiMPM matching for the backward output of first encoder layer
encoder2 : `Seq2SeqEncoder`
Second encoder layer for the premise and hypothesis
matcher_forward2 : `BiMPMMatching`
BiMPM matching for the forward output of second encoder layer
matcher_backward2 : `BiMPMMatching`
BiMPM matching for the backward output of second encoder layer
aggregator : `Seq2VecEncoder`
Aggregator of all BiMPM matching vectors
classifier_feedforward : `FeedForward`
Fully connected layers for classification.
dropout : `float`, optional (default=`0.1`)
Dropout percentage to use.
initializer : `InitializerApplicator`, optional (default=`InitializerApplicator()`)
If provided, will be used to initialize the model parameters.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
matcher_word: BiMpmMatching,
encoder1: Seq2SeqEncoder,
matcher_forward1: BiMpmMatching,
matcher_backward1: BiMpmMatching,
encoder2: Seq2SeqEncoder,
matcher_forward2: BiMpmMatching,
matcher_backward2: BiMpmMatching,
aggregator: Seq2VecEncoder,
classifier_feedforward: FeedForward,
dropout: float = 0.1,
initializer: InitializerApplicator = InitializerApplicator(),
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self.text_field_embedder = text_field_embedder
self.matcher_word = matcher_word
self.encoder1 = encoder1
self.matcher_forward1 = matcher_forward1
self.matcher_backward1 = matcher_backward1
self.encoder2 = encoder2
self.matcher_forward2 = matcher_forward2
self.matcher_backward2 = matcher_backward2
self.aggregator = aggregator
matching_dim = (
self.matcher_word.get_output_dim()
+ self.matcher_forward1.get_output_dim()
+ self.matcher_backward1.get_output_dim()
+ self.matcher_forward2.get_output_dim()
+ self.matcher_backward2.get_output_dim()
)
check_dimensions_match(
matching_dim,
self.aggregator.get_input_dim(),
"sum of dim of all matching layers",
"aggregator input dim",
)
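# e.g. with illustrative numbers: if each of the five matchers produces 20-dim matching
# vectors, the aggregator must be configured with a 100-dim input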
self.classifier_feedforward = classifier_feedforward
self.dropout = torch.nn.Dropout(dropout)
self.metrics = {"accuracy": CategoricalAccuracy()}
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)
def forward(
self, # type: ignore
premise: TextFieldTensors,
hypothesis: TextFieldTensors,
label: torch.LongTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
premise : `TextFieldTensors`
The premise from a `TextField`
hypothesis : `TextFieldTensors`
The hypothesis from a `TextField`
label : `torch.LongTensor`, optional (default = `None`)
The label for the pair of the premise and the hypothesis
metadata : `List[Dict[str, Any]]`, optional, (default = `None`)
Additional information about the pair
# Returns
An output dictionary consisting of:
logits : `torch.FloatTensor`
A tensor of shape `(batch_size, num_labels)` representing unnormalised log
probabilities of the entailment label.
loss : `torch.FloatTensor`, optional
A scalar loss to be optimised.
"""
mask_premise = util.get_text_field_mask(premise)
mask_hypothesis = util.get_text_field_mask(hypothesis)
# embedding and encoding of the premise
embedded_premise = self.dropout(self.text_field_embedder(premise))
encoded_premise1 = self.dropout(self.encoder1(embedded_premise, mask_premise))
encoded_premise2 = self.dropout(self.encoder2(encoded_premise1, mask_premise))
# embedding and encoding of the hypothesis
embedded_hypothesis = self.dropout(self.text_field_embedder(hypothesis))
encoded_hypothesis1 = self.dropout(self.encoder1(embedded_hypothesis, mask_hypothesis))
encoded_hypothesis2 = self.dropout(self.encoder2(encoded_hypothesis1, mask_hypothesis))
matching_vector_premise: List[torch.Tensor] = []
matching_vector_hypothesis: List[torch.Tensor] = []
def add_matching_result(matcher, encoded_premise, encoded_hypothesis):
# utility function to get matching result and add to the result list
matching_result = matcher(
encoded_premise, mask_premise, encoded_hypothesis, mask_hypothesis
)
matching_vector_premise.extend(matching_result[0])
matching_vector_hypothesis.extend(matching_result[1])
# calculate matching vectors from word embedding, first layer encoding, and second layer encoding
add_matching_result(self.matcher_word, embedded_premise, embedded_hypothesis)
half_hidden_size_1 = self.encoder1.get_output_dim() // 2
add_matching_result(
self.matcher_forward1,
encoded_premise1[:, :, :half_hidden_size_1],
encoded_hypothesis1[:, :, :half_hidden_size_1],
)
add_matching_result(
self.matcher_backward1,
encoded_premise1[:, :, half_hidden_size_1:],
encoded_hypothesis1[:, :, half_hidden_size_1:],
)
half_hidden_size_2 = self.encoder2.get_output_dim() // 2
add_matching_result(
self.matcher_forward2,
encoded_premise2[:, :, :half_hidden_size_2],
encoded_hypothesis2[:, :, :half_hidden_size_2],
)
add_matching_result(
self.matcher_backward2,
encoded_premise2[:, :, half_hidden_size_2:],
encoded_hypothesis2[:, :, half_hidden_size_2:],
)
# concat the matching vectors
matching_vector_cat_premise = self.dropout(torch.cat(matching_vector_premise, dim=2))
matching_vector_cat_hypothesis = self.dropout(torch.cat(matching_vector_hypothesis, dim=2))
# aggregate the matching vectors
aggregated_premise = self.dropout(
self.aggregator(matching_vector_cat_premise, mask_premise)
)
aggregated_hypothesis = self.dropout(
self.aggregator(matching_vector_cat_hypothesis, mask_hypothesis)
)
# the final forward layer
logits = self.classifier_feedforward(
torch.cat([aggregated_premise, aggregated_hypothesis], dim=-1)
)
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {"logits": logits, "label_probs": probs}
if label is not None:
loss = self.loss(logits, label)
for metric in self.metrics.values():
metric(logits, label)
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()
}
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the probabilities, converts the index to a string label, and
adds a `"label"` key to the dictionary with the result.
"""
predictions = output_dict["label_probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = self.vocab.get_index_to_token_vocabulary("labels").get(
label_idx, str(label_idx)
)
classes.append(label_str)
output_dict["label"] = classes
return output_dict
default_predictor = "textual_entailment"
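# Usage sketch added for illustration (not part of the original file; the archive
# path and sentences are placeholders). A trained "bimpm" archive is typically
# queried through the default "textual_entailment" predictor:
#
#   from allennlp.predictors import Predictor
#   predictor = Predictor.from_path("model.tar.gz", predictor_name="textual_entailment")
#   predictor.predict(premise="A dog is running.", hypothesis="An animal is moving.")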
| [
"torch.nn.Dropout",
"torch.cat",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax"
] | 1.7.0 | shunk031/allennlp-models | 1e89d5e51cb45f3e77a48d4983bf980088334fac |
1.8 | from enum import auto
from typing import Any, Callable, Dict, Optional
import torch
import torch.nn.functional as F
from ..config.config import Config, ConfigEnum
from ..data.labels import LabelType
from ..utils.tensor import prepare_tensor
from .quaternion import qconjugate, qmult
MetricFunction = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
GenericMetricFunction = Callable[[torch.Tensor, torch.Tensor, Optional[str]], torch.Tensor]
def _apply_reduction(x: torch.Tensor, reduction: Optional[str]) -> torch.Tensor:
if reduction is None or reduction == 'none':
return x
if reduction == 'mean':
return torch.mean(x)
elif reduction == 'sum':
return torch.sum(x)
else:
raise RuntimeError(f"Unsupported reduction '{reduction}'")
def _quat_norm(source: torch.Tensor, _target: torch.Tensor, label_type: LabelType,
reduction: Optional[str] = 'mean') -> torch.Tensor:
"""Quaternion norm of source tensor."""
if label_type == LabelType.POSE3D_QUAT:
source_norm = torch.norm(source[:, 3:], p=2, dim=1, keepdim=True)
elif label_type == LabelType.POSE3D_DUAL_QUAT:
source_norm = torch.norm(source[:, :4], p=2, dim=1, keepdim=True)
else:
raise RuntimeError("Unsupported label type for this loss type")
return _apply_reduction(source_norm, reduction)
def _normalize(x: torch.Tensor, label_type: LabelType, eps: float = 1e-8) -> torch.Tensor:
if label_type == LabelType.POSE3D_QUAT:
x_norm = torch.norm(x[:, 3:], p=2, dim=1, keepdim=True) + eps
x = torch.cat((x[:, :3], x[:, 3:] / x_norm), dim=1)
return x
elif label_type == LabelType.POSE3D_DUAL_QUAT:
x_norm = torch.norm(x[:, :4], p=2, dim=1, keepdim=True) + eps
x = x / x_norm
return x
else:
raise RuntimeError("Unsupported label type for normalization")
def trans_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType, p: int = 2,
reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:
"""Translation (translation directly from label or dual quaternion vector) loss."""
if label_type == LabelType.POSE3D_EULER or label_type == LabelType.POSE3D_QUAT:
source_trans = source[:, :3]
target_trans = target[:, :3]
elif label_type == LabelType.POSE3D_DUAL_QUAT:
source = _normalize(source, label_type, eps)
target = _normalize(target, label_type, eps)
source_trans = source[:, 4:]
target_trans = target[:, 4:]
else:
raise RuntimeError("Unsupported label type for this loss type.")
loss = torch.norm(source_trans - target_trans, dim=1, p=p, keepdim=True)
return _apply_reduction(loss, reduction)
def trans_3d_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType, p: int = 2,
reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:
"""Translation in 3D coordinates [x, y, z] loss."""
if label_type == LabelType.POSE3D_EULER or label_type == LabelType.POSE3D_QUAT:
source_trans = source[:, :3]
target_trans = target[:, :3]
elif label_type == LabelType.POSE3D_DUAL_QUAT:
# normalize dual quaternion
source = _normalize(source, label_type, eps)
target = _normalize(target, label_type, eps)
# convert dual quaternion to translation vector
source_trans_quat = 2.0 * qmult(source[:, 4:], qconjugate(source[:, :4]))
target_trans_quat = 2.0 * qmult(target[:, 4:], qconjugate(target[:, :4]))
source_trans = source_trans_quat[:, 1:]
target_trans = target_trans_quat[:, 1:]
else:
raise RuntimeError("Unsupported label type for this loss type.")
loss = torch.norm(source_trans - target_trans, dim=1, p=p, keepdim=True)
return _apply_reduction(loss, reduction)
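# Worked sketch of the dual-quaternion -> translation step used in trans_3d_loss
# above (an illustrative helper added here, not part of the original module).
# For a unit dual quaternion with real part q_r (rotation) and dual part q_d,
# the translation quaternion is t_q = 2 * q_d * conj(q_r); its vector part
# (components 1:4, scalar first) is the 3D translation.
def _dual_quat_to_translation(dual_quat: torch.Tensor) -> torch.Tensor:
    """Extract [x, y, z] translations from a batch of unit dual quaternions of shape [B, 8]."""
    q_r, q_d = dual_quat[:, :4], dual_quat[:, 4:]
    t_q = 2.0 * qmult(q_d, qconjugate(q_r))  # [B, 4]; scalar part is ~0 for a valid pose
    return t_q[:, 1:]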
def dual_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType, p: int = 2,
reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:
"""Dual quaternion vector loss."""
if label_type == LabelType.POSE3D_QUAT:
# translation quaternion
source_trans_quat = source.new_zeros(source.shape[0], 4)
source_trans_quat[:, 1:] = source[:, :3]
target_trans_quat = target.new_zeros(target.shape[0], 4)
target_trans_quat[:, 1:] = target[:, :3]
# dual quaternions
source_dual = 0.5 * qmult(source_trans_quat, source[:, 3:])
target_dual = 0.5 * qmult(target_trans_quat, target[:, 3:])
elif label_type == LabelType.POSE3D_DUAL_QUAT:
source = _normalize(source, label_type, eps)
target = _normalize(target, label_type, eps)
source_dual = source[:, 4:]
target_dual = target[:, 4:]
else:
raise RuntimeError("Unsupported label type for this loss type")
loss = torch.norm(source_dual - target_dual, dim=1, p=p, keepdim=True)
return _apply_reduction(loss, reduction)
def rot_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType, p: int = 2,
reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:
"""Rotation vector (either euler angles or quaternion vector) loss."""
if label_type == LabelType.POSE3D_EULER:
source_rot = source[:, 3:]
target_rot = target[:, 3:]
elif label_type == LabelType.POSE3D_QUAT:
source = _normalize(source, label_type, eps)
target = _normalize(target, label_type, eps)
source_rot = source[:, 3:]
target_rot = target[:, 3:]
elif label_type == LabelType.POSE3D_DUAL_QUAT:
source = _normalize(source, label_type, eps)
target = _normalize(target, label_type, eps)
source_rot = source[:, :4]
target_rot = target[:, :4]
else:
raise RuntimeError("Unsupported label type for this loss type")
loss = torch.norm(source_rot - target_rot, dim=1, p=p, keepdim=True)
return _apply_reduction(loss, reduction)
def quat_norm_loss(source: torch.Tensor, target: torch.Tensor, label_type: LabelType,
reduction: Optional[str] = 'mean') -> torch.Tensor:
"""Quaternion norm loss."""
if label_type != LabelType.POSE3D_QUAT and label_type != LabelType.POSE3D_DUAL_QUAT:
raise RuntimeError("Unsupported label type for this loss type.")
source_norm = _quat_norm(source, target, label_type, reduction=None)
loss = torch.pow(1.0 - source_norm, 2)
return _apply_reduction(loss, reduction)
def dual_constraint_loss(source: torch.Tensor, _target: torch.Tensor, label_type: LabelType,
reduction: Optional[str] = 'mean', eps: float = 1e-8) -> torch.Tensor:
"""Dual quaternion constraint loss."""
if label_type != LabelType.POSE3D_DUAL_QUAT:
raise RuntimeError("Unsupported label type for this loss type.")
source = _normalize(source, label_type, eps)
source_trans_quat = 2.0 * qmult(source[:, 4:], qconjugate(source[:, :4]))
loss = torch.pow(source_trans_quat[:, 0], 2)
return _apply_reduction(loss, reduction)
def _weighted_loss(metric_fn: GenericMetricFunction, source: torch.Tensor, target: torch.Tensor,
weights: torch.Tensor) -> torch.Tensor:
"""Weighted sum of loss function output."""
ret = metric_fn(source, target, 'none')
return torch.sum(weights * torch.mean(ret, 0))
def _weighted_loss_fn(metric_fn: GenericMetricFunction, weights: Optional[torch.Tensor] = None) -> MetricFunction:
"""Create weighted loss function."""
if weights is None:
def func(source, target):
return metric_fn(source, target, 'mean')
return func
else:
def func(source, target):
return _weighted_loss(metric_fn, source, target, weights) # type: ignore
return func
class MetricType(ConfigEnum):
"""Enum with all available loss types."""
MAE = auto()
MSE = auto()
TRANS = auto()
TRANS_3D = auto()
DUAL = auto()
ROT = auto()
QUAT_NORM = auto()
DUAL_CONSTRAINT = auto()
def fn(self, label_type: LabelType, weights: Optional[torch.Tensor] = None, **kwargs: Any) -> MetricFunction:
func: Optional[GenericMetricFunction] = None
if self == MetricType.MAE:
def func(source, target, red): return F.l1_loss(source, target, reduction=red, **kwargs)
elif self == MetricType.MSE:
def func(source, target, red): return F.mse_loss(source, target, reduction=red, **kwargs)
elif self == MetricType.TRANS:
def func(source, target, red): return trans_loss(source, target, label_type, reduction=red, **kwargs)
elif self == MetricType.TRANS_3D:
def func(source, target, red): return trans_3d_loss(source, target, label_type, reduction=red, **kwargs)
elif self == MetricType.DUAL:
def func(source, target, red): return dual_loss(source, target, label_type, reduction=red, **kwargs)
elif self == MetricType.ROT:
def func(source, target, red): return rot_loss(source, target, label_type, reduction=red, **kwargs)
elif self == MetricType.QUAT_NORM:
def func(source, target, red): return quat_norm_loss(source, target, label_type, reduction=red)
elif self == MetricType.DUAL_CONSTRAINT:
def func(source, target, red): return dual_constraint_loss(source, target, label_type, reduction=red)
if func is not None:
return _weighted_loss_fn(func, weights)
else:
raise NotImplementedError("MetricType '{}' not implemented".format(self))
def get_loss_fn(cfg: Config) -> MetricFunction:
"""Create loss function from config."""
label_type = cfg.model.label_type
# build weighted loss functions
loss_functions = list()
for metric_data in cfg.metrics.loss:
# weights
weights = metric_data['weights']
if weights is not None:
weights = prepare_tensor(torch.FloatTensor(weights), device=cfg.device, non_blocking=False)
# function
loss_functions.append(metric_data['type'].fn(label_type, weights=weights, **metric_data['params']))
# sum weighted loss
def func(source: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
loss = torch.stack([f(source, target) for f in loss_functions])
return torch.sum(loss)
return func
def get_metric_fns(cfg: Config) -> Dict[str, MetricFunction]:
"""Create metric functions from config."""
metric_fns = dict()
for metric_data in [*cfg.metrics.loss, *cfg.metrics.other]:
params = metric_data['params'] if 'params' in metric_data else dict()
metric_fns[metric_data['type'].name.lower()] = metric_data['type'].fn(cfg.model.label_type, **params)
return metric_fns
| [
"torch.cat",
"torch.nn.functional.l1_loss",
"torch.norm",
"torch.FloatTensor",
"torch.pow",
"torch.nn.functional.mse_loss",
"torch.mean",
"torch.sum"
] | 1.8.1 | mhorn11/deepclr | 6ee21963a402776851950a51709eef849ff96b5f |
1.0 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
import torch.nn.init as init
import data
from tools.logger import *
from transformer.Models import get_sinusoid_encoding_table
class Encoder(nn.Module):
def __init__(self, hps, vocab):
super(Encoder, self).__init__()
self._hps = hps
self._vocab = vocab
self.sent_max_len = hps.sent_max_len
vocab_size = len(vocab)
logger.info("[INFO] Vocabulary size is %d", vocab_size)
embed_size = hps.word_emb_dim
sent_max_len = hps.sent_max_len
input_channels = 1
out_channels = hps.output_channel
min_kernel_size = hps.min_kernel_size
max_kernel_size = hps.max_kernel_size
width = embed_size
# word embedding
self.embed = nn.Embedding(vocab_size, embed_size, padding_idx=vocab.word2id('[PAD]'))
if hps.word_embedding:
word2vec = data.Word_Embedding(hps.embedding_path, vocab)
word_vecs = word2vec.load_my_vecs(embed_size)
# pretrained_weight = word2vec.add_unknown_words_by_zero(word_vecs, embed_size)
pretrained_weight = word2vec.add_unknown_words_by_avg(word_vecs, embed_size)
pretrained_weight = np.array(pretrained_weight)
self.embed.weight.data.copy_(torch.from_numpy(pretrained_weight))
self.embed.weight.requires_grad = hps.embed_train
# position embedding
self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)
# cnn
self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])
logger.info("[INFO] Initing W for CNN.......")
for conv in self.convs:
init_weight_value = 6.0
init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))
fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)
std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))
def calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
logger.error("[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions")
raise ValueError("[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def forward(self, input):
# input: a batch of Example object [batch_size, N, seq_len]
vocab = self._vocab
batch_size, N, _ = input.size()
input = input.view(-1, input.size(2)) # [batch_size*N, L]
input_sent_len = ((input!=vocab.word2id('[PAD]')).sum(dim=1)).int() # [batch_size*N, 1]
enc_embed_input = self.embed(input) # [batch_size*N, L, D]
input_pos = torch.Tensor([np.hstack((np.arange(1, sentlen + 1), np.zeros(self.sent_max_len - sentlen))) for sentlen in input_sent_len])
if self._hps.cuda:
input_pos = input_pos.cuda()
enc_pos_embed_input = self.position_embedding(input_pos.long()) # [batch_size*N, D]
enc_conv_input = enc_embed_input + enc_pos_embed_input
enc_conv_input = enc_conv_input.unsqueeze(1) # (batch * N,Ci,L,D)
enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs] # kernel_sizes * (batch*N, Co, W)
enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output] # kernel_sizes * (batch*N, Co)
sent_embedding = torch.cat(enc_maxpool_output, 1) # (batch*N, Co * kernel_sizes)
sent_embedding = sent_embedding.view(batch_size, N, -1)
return sent_embedding
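# Shape summary for Encoder.forward (descriptive note added for clarity):
#   input            [batch, N, L]          token ids, N sentences of length L
#   embedded (+pos)  [batch*N, L, D]        word embedding plus sinusoid positions
#   conv outputs     [batch*N, Co, L-h+1]   one per kernel height h
#   max-pooled       [batch*N, Co]          per kernel size
#   sent_embedding   [batch, N, Co * n_kernel_sizes]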
class DomainEncoder(Encoder):
def __init__(self, hps, vocab, domaindict):
super(DomainEncoder, self).__init__(hps, vocab)
# domain embedding
self.domain_embedding = nn.Embedding(domaindict.size(), hps.domain_emb_dim)
self.domain_embedding.weight.requires_grad = True
def forward(self, input, domain):
"""
:param input: [batch_size, N, seq_len], where N is the number of sentences and seq_len the number of tokens
:param domain: [batch_size]
:return: sent_embedding: [batch_size, N, Co * kernel_sizes]
"""
batch_size, N, _ = input.size()
sent_embedding = super().forward(input)
enc_domain_input = self.domain_embedding(domain) # [batch, D]
enc_domain_input = enc_domain_input.unsqueeze(1).expand(batch_size, N, -1) # [batch, N, D]
sent_embedding = torch.cat((sent_embedding, enc_domain_input), dim=2)
return sent_embedding
class MultiDomainEncoder(Encoder):
def __init__(self, hps, vocab, domaindict):
super(MultiDomainEncoder, self).__init__(hps, vocab)
self.domain_size = domaindict.size()
# domain embedding
self.domain_embedding = nn.Embedding(self.domain_size, hps.domain_emb_dim)
self.domain_embedding.weight.requires_grad = True
def forward(self, input, domain):
"""
:param input: [batch_size, N, seq_len], where N is the number of sentences and seq_len the number of tokens
:param domain: [batch_size, domain_size]
:return: sent_embedding: [batch_size, N, Co * kernel_sizes]
"""
batch_size, N, _ = input.size()
# logger.info(domain[:5, :])
sent_embedding = super().forward(input)
domain_padding = torch.arange(self.domain_size).unsqueeze(0).expand(batch_size, -1)
domain_padding = domain_padding.cuda().view(-1) if self._hps.cuda else domain_padding.view(-1) # [batch * domain_size]
enc_domain_input = self.domain_embedding(domain_padding) # [batch * domain_size, D]
enc_domain_input = enc_domain_input.view(batch_size, self.domain_size, -1) * domain.unsqueeze(-1).float() # [batch, domain_size, D]
# logger.info(enc_domain_input[:5,:]) # [batch, domain_size, D]
enc_domain_input = enc_domain_input.sum(1) / domain.sum(1).float().unsqueeze(-1) # [batch, D]
enc_domain_input = enc_domain_input.unsqueeze(1).expand(batch_size, N, -1) # [batch, N, D]
sent_embedding = torch.cat((sent_embedding, enc_domain_input), dim=2)
return sent_embedding
class BertEncoder(nn.Module):
def __init__(self, hps):
super(BertEncoder, self).__init__()
from pytorch_pretrained_bert.modeling import BertModel
self._hps = hps
self.sent_max_len = hps.sent_max_len
self._cuda = hps.cuda
embed_size = hps.word_emb_dim
sent_max_len = hps.sent_max_len
input_channels = 1
out_channels = hps.output_channel
min_kernel_size = hps.min_kernel_size
max_kernel_size = hps.max_kernel_size
width = embed_size
# word embedding
self._bert = BertModel.from_pretrained("/remote-home/dqwang/BERT/pre-train/uncased_L-24_H-1024_A-16")
self._bert.eval()
for p in self._bert.parameters():
p.requires_grad = False
self.word_embedding_proj = nn.Linear(4096, embed_size)
# position embedding
self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)
# cnn
self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])
logger.info("[INFO] Initing W for CNN.......")
for conv in self.convs:
init_weight_value = 6.0
init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))
fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)
std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))
def calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
logger.error("[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions")
raise ValueError("[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def pad_encoder_input(self, input_list):
"""
:param input_list: N [seq_len, hidden_state]
:return: enc_sent_input_pad: list, N [max_len, hidden_state]
"""
max_len = self.sent_max_len
enc_sent_input_pad = []
_, hidden_size = input_list[0].size()
for i in range(len(input_list)):
article_words = input_list[i] # [seq_len, hidden_size]
seq_len = article_words.size(0)
if seq_len > max_len:
pad_words = article_words[:max_len, :]
else:
pad_tensor = torch.zeros(max_len - seq_len, hidden_size).cuda() if self._cuda else torch.zeros(max_len - seq_len, hidden_size)
pad_words = torch.cat([article_words, pad_tensor], dim=0)
enc_sent_input_pad.append(pad_words)
return enc_sent_input_pad
def forward(self, inputs, input_masks, enc_sent_len):
"""
:param inputs: a batch of Example object [batch_size, doc_len=512]
:param input_masks: 0 or 1, [batch, doc_len=512]
:param enc_sent_len: sentence original length [batch, N]
:return:
"""
# Use Bert to get word embedding
batch_size, N = enc_sent_len.size()
input_pad_list = []
for i in range(batch_size):
tokens_id = inputs[i]
input_mask = input_masks[i]
sent_len = enc_sent_len[i]
input_ids = tokens_id.unsqueeze(0)
input_mask = input_mask.unsqueeze(0)
out, _ = self._bert(input_ids, token_type_ids=None, attention_mask=input_mask)
out = torch.cat(out[-4:], dim=-1).squeeze(0) # [doc_len=512, hidden_state=4096]
_, hidden_size = out.size()
# restore the sentence
last_end = 1
enc_sent_input = []
for length in sent_len:
if length != 0 and last_end < 511:
enc_sent_input.append(out[last_end: min(511, last_end + length), :])
last_end += length
else:
pad_tensor = torch.zeros(self.sent_max_len, hidden_size).cuda() if self._hps.cuda else torch.zeros(self.sent_max_len, hidden_size)
enc_sent_input.append(pad_tensor)
# pad the sentence
enc_sent_input_pad = self.pad_encoder_input(enc_sent_input) # [N, seq_len, hidden_state=4096]
input_pad_list.append(torch.stack(enc_sent_input_pad))
input_pad = torch.stack(input_pad_list)
input_pad = input_pad.view(batch_size*N, self.sent_max_len, -1)
enc_sent_len = enc_sent_len.view(-1) # [batch_size*N]
enc_embed_input = self.word_embedding_proj(input_pad) # [batch_size * N, L, D]
sent_pos_list = []
for sentlen in enc_sent_len:
sent_pos = list(range(1, min(self.sent_max_len, sentlen) + 1))
for k in range(self.sent_max_len - sentlen):
sent_pos.append(0)
sent_pos_list.append(sent_pos)
input_pos = torch.Tensor(sent_pos_list).long()
if self._hps.cuda:
input_pos = input_pos.cuda()
enc_pos_embed_input = self.position_embedding(input_pos.long()) # [batch_size*N, D]
enc_conv_input = enc_embed_input + enc_pos_embed_input
enc_conv_input = enc_conv_input.unsqueeze(1) # (batch * N,Ci,L,D)
enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs] # kernel_sizes * (batch*N, Co, W)
enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output] # kernel_sizes * (batch*N, Co)
sent_embedding = torch.cat(enc_maxpool_output, 1) # (batch*N, Co * kernel_sizes)
sent_embedding = sent_embedding.view(batch_size, N, -1)
return sent_embedding
class BertTagEncoder(BertEncoder):
def __init__(self, hps, domaindict):
super(BertTagEncoder, self).__init__(hps)
# domain embedding
self.domain_embedding = nn.Embedding(domaindict.size(), hps.domain_emb_dim)
self.domain_embedding.weight.requires_grad = True
def forward(self, inputs, input_masks, enc_sent_len, domain):
sent_embedding = super().forward(inputs, input_masks, enc_sent_len)
batch_size, N = enc_sent_len.size()
enc_domain_input = self.domain_embedding(domain) # [batch, D]
enc_domain_input = enc_domain_input.unsqueeze(1).expand(batch_size, N, -1) # [batch, N, D]
sent_embedding = torch.cat((sent_embedding, enc_domain_input), dim=2)
return sent_embedding
class ELMoEndoer(nn.Module):
def __init__(self, hps):
super(ELMoEndoer, self).__init__()
self._hps = hps
self.sent_max_len = hps.sent_max_len
from allennlp.modules.elmo import Elmo
elmo_dim = 1024
options_file = "/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json"
weight_file = "/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"
# elmo_dim = 512
# options_file = "/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_options.json"
# weight_file = "/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
embed_size = hps.word_emb_dim
sent_max_len = hps.sent_max_len
input_channels = 1
out_channels = hps.output_channel
min_kernel_size = hps.min_kernel_size
max_kernel_size = hps.max_kernel_size
width = embed_size
# elmo embedding
self.elmo = Elmo(options_file, weight_file, 1, dropout=0)
self.embed_proj = nn.Linear(elmo_dim, embed_size)
# position embedding
self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)
# cnn
self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])
logger.info("[INFO] Initing W for CNN.......")
for conv in self.convs:
init_weight_value = 6.0
init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))
fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)
std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))
def calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
logger.error("[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions")
raise ValueError("[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def forward(self, input):
# input: a batch of Example object [batch_size, N, seq_len, character_len]
batch_size, N, seq_len, _ = input.size()
input = input.view(batch_size * N, seq_len, -1) # [batch_size*N, seq_len, character_len]
input_sent_len = ((input.sum(-1)!=0).sum(dim=1)).int() # [batch_size*N, 1]
logger.debug(input_sent_len.view(batch_size, -1))
enc_embed_input = self.elmo(input)['elmo_representations'][0] # [batch_size*N, L, D]
enc_embed_input = self.embed_proj(enc_embed_input)
# input_pos = torch.Tensor([np.hstack((np.arange(1, sentlen + 1), np.zeros(self.sent_max_len - sentlen))) for sentlen in input_sent_len])
sent_pos_list = []
for sentlen in input_sent_len:
sent_pos = list(range(1, min(self.sent_max_len, sentlen) + 1))
for k in range(self.sent_max_len - sentlen):
sent_pos.append(0)
sent_pos_list.append(sent_pos)
input_pos = torch.Tensor(sent_pos_list).long()
if self._hps.cuda:
input_pos = input_pos.cuda()
enc_pos_embed_input = self.position_embedding(input_pos.long()) # [batch_size*N, D]
enc_conv_input = enc_embed_input + enc_pos_embed_input
enc_conv_input = enc_conv_input.unsqueeze(1) # (batch * N,Ci,L,D)
enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs] # kernel_sizes * (batch*N, Co, W)
enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output] # kernel_sizes * (batch*N, Co)
sent_embedding = torch.cat(enc_maxpool_output, 1) # (batch*N, Co * kernel_sizes)
sent_embedding = sent_embedding.view(batch_size, N, -1)
return sent_embedding
class ELMoEndoer2(nn.Module):
def __init__(self, hps):
super(ELMoEndoer2, self).__init__()
self._hps = hps
self._cuda = hps.cuda
self.sent_max_len = hps.sent_max_len
from allennlp.modules.elmo import Elmo
elmo_dim = 1024
options_file = "/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json"
weight_file = "/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"
# elmo_dim = 512
# options_file = "/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_options.json"
# weight_file = "/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"
embed_size = hps.word_emb_dim
sent_max_len = hps.sent_max_len
input_channels = 1
out_channels = hps.output_channel
min_kernel_size = hps.min_kernel_size
max_kernel_size = hps.max_kernel_size
width = embed_size
# elmo embedding
self.elmo = Elmo(options_file, weight_file, 1, dropout=0)
self.embed_proj = nn.Linear(elmo_dim, embed_size)
# position embedding
self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)
# cnn
self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])
logger.info("[INFO] Initing W for CNN.......")
for conv in self.convs:
init_weight_value = 6.0
init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))
fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)
std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out))
def calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
logger.error("[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions")
raise ValueError("[Error] Fan in and fan out can not be computed for tensor with less than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
else:
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.dim() > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def pad_encoder_input(self, input_list):
"""
:param input_list: N [seq_len, hidden_state]
:return: enc_sent_input_pad: list, N [max_len, hidden_state]
"""
max_len = self.sent_max_len
enc_sent_input_pad = []
_, hidden_size = input_list[0].size()
for i in range(len(input_list)):
article_words = input_list[i] # [seq_len, hidden_size]
seq_len = article_words.size(0)
if seq_len > max_len:
pad_words = article_words[:max_len, :]
else:
pad_tensor = torch.zeros(max_len - seq_len, hidden_size).cuda() if self._cuda else torch.zeros(max_len - seq_len, hidden_size)
pad_words = torch.cat([article_words, pad_tensor], dim=0)
enc_sent_input_pad.append(pad_words)
return enc_sent_input_pad
def forward(self, inputs, input_masks, enc_sent_len):
"""
:param inputs: a batch of Example object [batch_size, doc_len=512, character_len=50]
:param input_masks: 0 or 1, [batch, doc_len=512]
:param enc_sent_len: sentence original length [batch, N]
:return:
sent_embedding: [batch, N, D]
"""
# Use ELMo to get word embedding
batch_size, N = enc_sent_len.size()
input_pad_list = []
elmo_output = self.elmo(inputs)['elmo_representations'][0] # [batch_size, 512, D]
elmo_output = elmo_output * input_masks.unsqueeze(-1).float()
# print("END elmo")
for i in range(batch_size):
sent_len = enc_sent_len[i] # [1, N]
out = elmo_output[i]
_, hidden_size = out.size()
# restore the sentence
last_end = 0
enc_sent_input = []
for length in sent_len:
if length != 0 and last_end < 512:
enc_sent_input.append(out[last_end : min(512, last_end + length), :])
last_end += length
else:
pad_tensor = torch.zeros(self.sent_max_len, hidden_size).cuda() if self._hps.cuda else torch.zeros(self.sent_max_len, hidden_size)
enc_sent_input.append(pad_tensor)
# pad the sentence
enc_sent_input_pad = self.pad_encoder_input(enc_sent_input) # [N, seq_len, hidden_state=4096]
input_pad_list.append(torch.stack(enc_sent_input_pad)) # batch * [N, max_len, hidden_state]
input_pad = torch.stack(input_pad_list)
input_pad = input_pad.view(batch_size * N, self.sent_max_len, -1)
enc_sent_len = enc_sent_len.view(-1) # [batch_size*N]
enc_embed_input = self.embed_proj(input_pad) # [batch_size * N, L, D]
# input_pos = torch.Tensor([np.hstack((np.arange(1, sentlen + 1), np.zeros(self.sent_max_len - sentlen))) for sentlen in input_sent_len])
sent_pos_list = []
for sentlen in enc_sent_len:
sent_pos = list(range(1, min(self.sent_max_len, sentlen) + 1))
for k in range(self.sent_max_len - sentlen):
sent_pos.append(0)
sent_pos_list.append(sent_pos)
input_pos = torch.Tensor(sent_pos_list).long()
if self._hps.cuda:
input_pos = input_pos.cuda()
enc_pos_embed_input = self.position_embedding(input_pos.long()) # [batch_size*N, D]
enc_conv_input = enc_embed_input + enc_pos_embed_input
enc_conv_input = enc_conv_input.unsqueeze(1) # (batch * N,Ci,L,D)
enc_conv_output = [F.relu(conv(enc_conv_input)).squeeze(3) for conv in self.convs] # kernel_sizes * (batch*N, Co, W)
enc_maxpool_output = [F.max_pool1d(x, x.size(2)).squeeze(2) for x in enc_conv_output] # kernel_sizes * (batch*N, Co)
sent_embedding = torch.cat(enc_maxpool_output, 1) # (batch*N, Co * kernel_sizes)
sent_embedding = sent_embedding.view(batch_size, N, -1)
return sent_embedding
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.stack",
"torch.arange",
"torch.from_numpy",
"torch.nn.Conv2d",
"torch.Tensor",
"torch.nn.Embedding"
] | 1.0.0 | KuNyaa/fastNLP | 945b30bb6174751130744231aa26119bf9bb2601 |
1.0 |
import torch
from torch import nn
from fastNLP import seq_len_to_mask
from fastNLP.modules import Embedding
from fastNLP.modules import LSTM
from fastNLP.modules import ConditionalRandomField, allowed_transitions
import torch.nn.functional as F
from fastNLP import Const
class CNNBiLSTMCRF(nn.Module):
def __init__(self, embed, char_embed, hidden_size, num_layers, tag_vocab, dropout=0.5, encoding_type='bioes'):
super().__init__()
self.embedding = embed
self.char_embedding = char_embed
self.lstm = LSTM(input_size=self.embedding.embedding_dim+self.char_embedding.embedding_dim,
hidden_size=hidden_size//2, num_layers=num_layers,
bidirectional=True, batch_first=True)
self.fc = nn.Linear(hidden_size, len(tag_vocab))
transitions = allowed_transitions(tag_vocab.idx2word, encoding_type=encoding_type, include_start_end=True)
self.crf = ConditionalRandomField(len(tag_vocab), include_start_end_trans=True, allowed_transitions=transitions)
self.dropout = nn.Dropout(dropout, inplace=True)
for name, param in self.named_parameters():
if 'fc' in name:
if param.data.dim()>1:
nn.init.xavier_uniform_(param)
else:
nn.init.constant_(param, 0)
if 'crf' in name:
nn.init.zeros_(param)
def _forward(self, words, seq_len, target=None):
word_embeds = self.embedding(words)
char_embeds = self.char_embedding(words)
words = torch.cat((word_embeds, char_embeds), dim=-1)
outputs, _ = self.lstm(words, seq_len)
self.dropout(outputs)  # dropout is constructed with inplace=True, so outputs is modified in place
logits = F.log_softmax(self.fc(outputs), dim=-1)
if target is not None:
loss = self.crf(logits, target, seq_len_to_mask(seq_len, max_len=logits.size(1))).mean()
return {Const.LOSS: loss}
else:
pred, _ = self.crf.viterbi_decode(logits, seq_len_to_mask(seq_len, max_len=logits.size(1)))
return {Const.OUTPUT: pred}
def forward(self, words, seq_len, target):
return self._forward(words, seq_len, target)
def predict(self, words, seq_len):
return self._forward(words, seq_len, None)
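# Contract note (added for clarity): forward() returns {Const.LOSS: ...} for
# fastNLP's Trainer, while predict() returns {Const.OUTPUT: ...} with the
# Viterbi-decoded tag sequence. Both the word and the character embedding are
# indexed with the same `words` ids, which assumes `char_embed` is a word-level
# character encoder (e.g. a CNN char embedding that expands word ids internally).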
| [
"torch.nn.Dropout",
"torch.cat",
"torch.nn.init.constant_",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.zeros_"
] | 1.0.0 | KuNyaa/fastNLP | 945b30bb6174751130744231aa26119bf9bb2601 |
1.4 | """Test suites for numerical compatibility with librosa"""
import os
import unittest
from distutils.version import StrictVersion
import torch
import torchaudio
import torchaudio.functional as F
from torchaudio._internal.module_utils import is_module_available
LIBROSA_AVAILABLE = is_module_available('librosa')
if LIBROSA_AVAILABLE:
import numpy as np
import librosa
import scipy
import pytest
from torchaudio_unittest import common_utils
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class TestFunctional(common_utils.TorchaudioTestCase):
"""Test suite for functions in `functional` module."""
def test_griffinlim(self):
# NOTE: This test is flaky without a fixed random seed
# See https://github.com/pytorch/audio/issues/382
torch.random.manual_seed(42)
tensor = torch.rand((1, 1000))
n_fft = 400
ws = 400
hop = 100
window = torch.hann_window(ws)
normalize = False
momentum = 0.99
n_iter = 8
length = 1000
rand_init = False
init = 'random' if rand_init else None
specgram = F.spectrogram(tensor, 0, window, n_fft, hop, ws, 2, normalize).sqrt()
ta_out = F.griffinlim(specgram, window, n_fft, hop, ws, 1, normalize,
n_iter, momentum, length, rand_init)
lr_out = librosa.griffinlim(specgram.squeeze(0).numpy(), n_iter=n_iter, hop_length=hop,
momentum=momentum, init=init, length=length)
lr_out = torch.from_numpy(lr_out).unsqueeze(0)
self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)
def _test_create_fb(self, n_mels=40, sample_rate=22050, n_fft=2048, fmin=0.0, fmax=8000.0, norm=None):
librosa_fb = librosa.filters.mel(sr=sample_rate,
n_fft=n_fft,
n_mels=n_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm=norm)
fb = F.create_fb_matrix(sample_rate=sample_rate,
n_mels=n_mels,
f_max=fmax,
f_min=fmin,
n_freqs=(n_fft // 2 + 1),
norm=norm)
for i_mel_bank in range(n_mels):
self.assertEqual(
fb[:, i_mel_bank], torch.tensor(librosa_fb[i_mel_bank]), atol=1e-4, rtol=1e-5)
def test_create_fb(self):
self._test_create_fb()
self._test_create_fb(n_mels=128, sample_rate=44100)
self._test_create_fb(n_mels=128, fmin=2000.0, fmax=5000.0)
self._test_create_fb(n_mels=56, fmin=100.0, fmax=9000.0)
self._test_create_fb(n_mels=56, fmin=800.0, fmax=900.0)
self._test_create_fb(n_mels=56, fmin=1900.0, fmax=900.0)
self._test_create_fb(n_mels=10, fmin=1900.0, fmax=900.0)
if StrictVersion(librosa.__version__) < StrictVersion("0.7.2"):
return
self._test_create_fb(n_mels=128, sample_rate=44100, norm="slaney")
self._test_create_fb(n_mels=128, fmin=2000.0, fmax=5000.0, norm="slaney")
self._test_create_fb(n_mels=56, fmin=100.0, fmax=9000.0, norm="slaney")
self._test_create_fb(n_mels=56, fmin=800.0, fmax=900.0, norm="slaney")
self._test_create_fb(n_mels=56, fmin=1900.0, fmax=900.0, norm="slaney")
self._test_create_fb(n_mels=10, fmin=1900.0, fmax=900.0, norm="slaney")
def test_amplitude_to_DB(self):
spec = torch.rand((6, 201))
amin = 1e-10
db_multiplier = 0.0
top_db = 80.0
# Power to DB
multiplier = 10.0
ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
lr_out = librosa.core.power_to_db(spec.numpy())
lr_out = torch.from_numpy(lr_out)
self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)
# Amplitude to DB
multiplier = 20.0
ta_out = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
lr_out = librosa.core.amplitude_to_db(spec.numpy())
lr_out = torch.from_numpy(lr_out)
self.assertEqual(ta_out, lr_out, atol=5e-5, rtol=1e-5)
@pytest.mark.parametrize('complex_specgrams', [
torch.randn(2, 1025, 400, 2)
])
@pytest.mark.parametrize('rate', [0.5, 1.01, 1.3])
@pytest.mark.parametrize('hop_length', [256])
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
def test_phase_vocoder(complex_specgrams, rate, hop_length):
# Due to the cumulative sum, numerical error when using torch.float32 causes
# the bottom-right values of the stretched spectrogram to not match
# librosa's output.
complex_specgrams = complex_specgrams.type(torch.float64)
phase_advance = torch.linspace(0, np.pi * hop_length, complex_specgrams.shape[-3], dtype=torch.float64)[..., None]
complex_specgrams_stretch = F.phase_vocoder(complex_specgrams, rate=rate, phase_advance=phase_advance)
# == Test shape
expected_size = list(complex_specgrams.size())
expected_size[-2] = int(np.ceil(expected_size[-2] / rate))
assert complex_specgrams.dim() == complex_specgrams_stretch.dim()
assert complex_specgrams_stretch.size() == torch.Size(expected_size)
# == Test values
index = [0] * (complex_specgrams.dim() - 3) + [slice(None)] * 3
mono_complex_specgram = complex_specgrams[index].numpy()
mono_complex_specgram = mono_complex_specgram[..., 0] + \
mono_complex_specgram[..., 1] * 1j
expected_complex_stretch = librosa.phase_vocoder(mono_complex_specgram,
rate=rate,
hop_length=hop_length)
complex_stretch = complex_specgrams_stretch[index].numpy()
complex_stretch = complex_stretch[..., 0] + 1j * complex_stretch[..., 1]
assert np.allclose(complex_stretch, expected_complex_stretch, atol=1e-5)
def _load_audio_asset(*asset_paths, **kwargs):
file_path = common_utils.get_asset_path(*asset_paths)
sound, sample_rate = torchaudio.load(file_path, **kwargs)
return sound, sample_rate
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class TestTransforms(common_utils.TorchaudioTestCase):
"""Test suite for functions in `transforms` module."""
def assert_compatibilities(self, n_fft, hop_length, power, n_mels, n_mfcc, sample_rate):
common_utils.set_audio_backend('default')
path = common_utils.get_asset_path('sinewave.wav')
sound, sample_rate = common_utils.load_wav(path)
sound_librosa = sound.cpu().numpy().squeeze() # (64000)
# test core spectrogram
spect_transform = torchaudio.transforms.Spectrogram(
n_fft=n_fft, hop_length=hop_length, power=power)
out_librosa, _ = librosa.core.spectrum._spectrogram(
y=sound_librosa, n_fft=n_fft, hop_length=hop_length, power=power)
out_torch = spect_transform(sound).squeeze().cpu()
self.assertEqual(out_torch, torch.from_numpy(out_librosa), atol=1e-5, rtol=1e-5)
# test mel spectrogram
melspect_transform = torchaudio.transforms.MelSpectrogram(
sample_rate=sample_rate, window_fn=torch.hann_window,
hop_length=hop_length, n_mels=n_mels, n_fft=n_fft)
librosa_mel = librosa.feature.melspectrogram(
y=sound_librosa, sr=sample_rate, n_fft=n_fft,
hop_length=hop_length, n_mels=n_mels, htk=True, norm=None)
librosa_mel_tensor = torch.from_numpy(librosa_mel)
torch_mel = melspect_transform(sound).squeeze().cpu()
self.assertEqual(
torch_mel.type(librosa_mel_tensor.dtype), librosa_mel_tensor, atol=5e-3, rtol=1e-5)
# test s2db
power_to_db_transform = torchaudio.transforms.AmplitudeToDB('power', 80.)
power_to_db_torch = power_to_db_transform(spect_transform(sound)).squeeze().cpu()
power_to_db_librosa = librosa.core.spectrum.power_to_db(out_librosa)
self.assertEqual(power_to_db_torch, torch.from_numpy(power_to_db_librosa), atol=5e-3, rtol=1e-5)
mag_to_db_transform = torchaudio.transforms.AmplitudeToDB('magnitude', 80.)
mag_to_db_torch = mag_to_db_transform(torch.abs(sound)).squeeze().cpu()
mag_to_db_librosa = librosa.core.spectrum.amplitude_to_db(sound_librosa)
self.assertEqual(mag_to_db_torch, torch.from_numpy(mag_to_db_librosa), atol=5e-3, rtol=1e-5)
power_to_db_torch = power_to_db_transform(melspect_transform(sound)).squeeze().cpu()
db_librosa = librosa.core.spectrum.power_to_db(librosa_mel)
db_librosa_tensor = torch.from_numpy(db_librosa)
self.assertEqual(
power_to_db_torch.type(db_librosa_tensor.dtype), db_librosa_tensor, atol=5e-3, rtol=1e-5)
# test MFCC
melkwargs = {'hop_length': hop_length, 'n_fft': n_fft}
mfcc_transform = torchaudio.transforms.MFCC(
sample_rate=sample_rate, n_mfcc=n_mfcc, norm='ortho', melkwargs=melkwargs)
# librosa.feature.mfcc doesn't pass kwargs properly since some of the
# kwargs for melspectrogram and mfcc are the same. We just follow the
# function body in
# https://librosa.github.io/librosa/_modules/librosa/feature/spectral.html#melspectrogram
# to mirror this function call with correct args:
#
# librosa_mfcc = librosa.feature.mfcc(
# y=sound_librosa, sr=sample_rate, n_mfcc = n_mfcc,
# hop_length=hop_length, n_fft=n_fft, htk=True, norm=None, n_mels=n_mels)
librosa_mfcc = scipy.fftpack.dct(db_librosa, axis=0, type=2, norm='ortho')[:n_mfcc]
librosa_mfcc_tensor = torch.from_numpy(librosa_mfcc)
torch_mfcc = mfcc_transform(sound).squeeze().cpu()
self.assertEqual(
torch_mfcc.type(librosa_mfcc_tensor.dtype), librosa_mfcc_tensor, atol=5e-3, rtol=1e-5)
def test_basics1(self):
kwargs = {
'n_fft': 400,
'hop_length': 200,
'power': 2.0,
'n_mels': 128,
'n_mfcc': 40,
'sample_rate': 16000
}
self.assert_compatibilities(**kwargs)
def test_basics2(self):
kwargs = {
'n_fft': 600,
'hop_length': 100,
'power': 2.0,
'n_mels': 128,
'n_mfcc': 20,
'sample_rate': 16000
}
self.assert_compatibilities(**kwargs)
# NOTE: Test passes offline, but fails on TravisCI (and CircleCI), see #372.
@unittest.skipIf('CI' in os.environ, 'Test is known to fail on CI')
def test_basics3(self):
kwargs = {
'n_fft': 200,
'hop_length': 50,
'power': 2.0,
'n_mels': 128,
'n_mfcc': 50,
'sample_rate': 24000
}
self.assert_compatibilities(**kwargs)
def test_basics4(self):
kwargs = {
'n_fft': 400,
'hop_length': 200,
'power': 3.0,
'n_mels': 128,
'n_mfcc': 40,
'sample_rate': 16000
}
self.assert_compatibilities(**kwargs)
def test_MelScale(self):
"""MelScale transform is comparable to that of librosa"""
n_fft = 2048
n_mels = 256
hop_length = n_fft // 4
sample_rate = 44100
sound = common_utils.get_whitenoise(sample_rate=sample_rate, duration=60)
sound = sound.mean(dim=0, keepdim=True)
spec_ta = F.spectrogram(
sound, pad=0, window=torch.hann_window(n_fft), n_fft=n_fft,
hop_length=hop_length, win_length=n_fft, power=2, normalized=False)
spec_lr = spec_ta.cpu().numpy().squeeze()
# Perform MelScale with torchaudio and librosa
melspec_ta = torchaudio.transforms.MelScale(n_mels=n_mels, sample_rate=sample_rate)(spec_ta)
melspec_lr = librosa.feature.melspectrogram(
S=spec_lr, sr=sample_rate, n_fft=n_fft, hop_length=hop_length,
win_length=n_fft, center=True, window='hann', n_mels=n_mels, htk=True, norm=None)
# Note: Using relaxed rtol instead of atol
self.assertEqual(melspec_ta, torch.from_numpy(melspec_lr[None, ...]), atol=1e-8, rtol=1e-3)
def test_InverseMelScale(self):
"""InverseMelScale transform is comparable to that of librosa"""
n_fft = 2048
n_mels = 256
n_stft = n_fft // 2 + 1
hop_length = n_fft // 4
# Prepare mel spectrogram input. We use torchaudio to compute one.
path = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
sound, sample_rate = common_utils.load_wav(path)
sound = sound[:, 2**10:2**10 + 2**14]
sound = sound.mean(dim=0, keepdim=True)
spec_orig = F.spectrogram(
sound, pad=0, window=torch.hann_window(n_fft), n_fft=n_fft,
hop_length=hop_length, win_length=n_fft, power=2, normalized=False)
melspec_ta = torchaudio.transforms.MelScale(n_mels=n_mels, sample_rate=sample_rate)(spec_orig)
melspec_lr = melspec_ta.cpu().numpy().squeeze()
# Perform InverseMelScale with torch audio and librosa
spec_ta = torchaudio.transforms.InverseMelScale(
n_stft, n_mels=n_mels, sample_rate=sample_rate)(melspec_ta)
spec_lr = librosa.feature.inverse.mel_to_stft(
melspec_lr, sr=sample_rate, n_fft=n_fft, power=2.0, htk=True, norm=None)
spec_lr = torch.from_numpy(spec_lr[None, ...])
# Align scales
# librosa returns a magnitude spectrogram while torchaudio returns a power spectrogram
spec_orig = spec_orig.sqrt()
spec_ta = spec_ta.sqrt()
threshold = 2.0
# This threshold was chosen empirically, based on the following observation
#
# torch.dist(spec_lr, spec_ta, p=float('inf'))
# >>> tensor(1.9666)
#
# The spectrograms reconstructed by librosa and torchaudio are not comparable elementwise.
# This is because they use different approximation algorithms and the resulting values can
# differ in magnitude (although most of them are very close).
# See
# https://github.com/pytorch/audio/pull/366 for the discussion of the choice of algorithm
# https://github.com/pytorch/audio/pull/448/files#r385747021 for the distribution of P-inf
# distance over frequencies.
self.assertEqual(spec_ta, spec_lr, atol=threshold, rtol=1e-5)
threshold = 1700.0
# This threshold was chosen empirically, based on the following observations
#
# torch.dist(spec_orig, spec_ta, p=1)
# >>> tensor(1644.3516)
# torch.dist(spec_orig, spec_lr, p=1)
# >>> tensor(1420.7103)
# torch.dist(spec_lr, spec_ta, p=1)
# >>> tensor(943.2759)
assert torch.dist(spec_orig, spec_ta, p=1) < threshold
| [
"torch.Size",
"torch.rand",
"torch.hann_window",
"torch.linspace",
"torch.from_numpy",
"torch.random.manual_seed",
"torch.abs",
"torch.tensor",
"torch.randn",
"torch.dist"
] | 1.4.0 | adefossez/audio | 19fc580da97baf179395bb257647c5c25b993e42 |
0.3 | #!/usr/bin/env python3
import typing # noqa F401
import warnings
import torch
from torch import Tensor
from ..exceptions.warnings import BadInitialCandidatesWarning
def initialize_q_batch(X: Tensor, Y: Tensor, n: int, eta: float = 1.0) -> Tensor:
r"""Heuristic for selecting initial conditions for candidate generation.
This heuristic selects points from `X` (without replacement) with probability
proportional to `exp(eta * Z)`, where `Z = (Y - mean(Y)) / std(Y)` and `eta`
is a temperature parameter.
When using an acquisition function that is non-negative and possibly zero
over large areas of the feature space (e.g. qEI), you should use
`initialize_q_batch_nonneg` instead.
Args:
X: A `b x q x d` tensor of `b` samples of `q`-batches from a `d`-dim.
feature space. Typically, these are generated using qMC sampling.
Y: A tensor of `b` outcomes associated with the samples. Typically, this
is the value of the batch acquisition function to be maximized.
n: The number of initial conditions to be generated. Must be less than `b`.
eta: Temperature parameter for weighting samples.
Returns:
A `n x q x d` tensor of `n` `q`-batch initial conditions.
Example:
# To get `n=10` starting points of q-batch size `q=3` for model with `d=6`:
>>> qUCB = qUpperConfidenceBound(model, beta=0.1)
>>> Xrnd = torch.rand(500, 3, 6)
>>> Xinit = initialize_q_batch(Xrnd, qUCB(Xrnd), 10)
"""
n_samples = X.shape[0]
if n > n_samples:
raise RuntimeError(
f"n ({n}) cannot be larger than the number of "
f"provided samples ({n_samples})"
)
elif n == n_samples:
return X
Ystd = Y.std()
if Ystd == 0:
warnings.warn(
"All acqusition values for raw samples points are the same. "
"Choosing initial conditions at random.",
BadInitialCandidatesWarning,
)
return X[torch.randperm(n=n_samples, device=X.device)][:n]
max_val, max_idx = torch.max(Y, dim=0)
Z = (Y - Y.mean()) / Ystd  # standardize as documented: Z = (Y - mean(Y)) / std(Y)
weights = torch.exp(eta * Z)
idcs = torch.multinomial(weights, n)
# make sure we get the maximum
if max_idx not in idcs:
idcs[-1] = max_idx
return X[idcs]
def initialize_q_batch_nonneg(
X: Tensor, Y: Tensor, n: int, eta: float = 1.0, alpha: float = 1e-4
) -> Tensor:
r"""Heuristic for selecting initial conditions for non-neg. acquisition functions.
This function is similar to `initialize_q_batch`, but designed specifically
for acquisition functions that are non-negative and possibly zero over
large areas of the feature space (e.g. qEI). All samples for which
`Y < alpha * max(Y)` will be ignored (assuming that `Y` contains at least
one positive value).
Args:
X: A `b x q x d` tensor of `b` samples of `q`-batches from a `d`-dim.
feature space. Typically, these are generated using qMC.
Y: A tensor of `b` outcomes associated with the samples. Typically, this
is the value of the batch acquisition function to be maximized.
n: The number of initial conditions to be generated. Must be less than `b`.
eta: Temperature parameter for weighting samples.
alpha: The threshold (as a fraction of the maximum observed value) under
which to ignore samples. All input samples for which
`Y < alpha * max(Y)` will be ignored.
Returns:
A `n x q x d` tensor of `n` `q`-batch initial conditions.
Example:
# To get `n=10` starting points of q-batch size `q=3` for model with `d=6`:
>>> qEI = qExpectedImprovement(model, best_f=0.2)
>>> Xrnd = torch.rand(500, 3, 6)
>>> Xinit = initialize_q_batch(Xrnd, qEI(Xrnd), 10)
"""
n_samples = X.shape[0]
if n > n_samples:
raise RuntimeError("n cannot be larger than the number of provided samples")
elif n == n_samples:
return X
max_val, max_idx = torch.max(Y, dim=0)
if torch.any(max_val <= 0):
warnings.warn(
"All acquisition values for raw sampled points are nonpositive, so "
"initial conditions are being selected randomly.",
BadInitialCandidatesWarning,
)
return X[torch.randperm(n=n_samples, device=X.device)][:n]
# make sure there are at least `n` points with positive acquisition values
pos = Y > 0
num_pos = pos.sum().item()
if num_pos < n:
# select all positive points and then fill remaining quota with randomly
# selected points
remaining_indices = (~pos).nonzero().view(-1)
rand_indices = torch.randperm(remaining_indices.shape[0], device=Y.device)
sampled_remaining_indices = remaining_indices[rand_indices[: n - num_pos]]
pos[sampled_remaining_indices] = 1
return X[pos]
# select points within alpha of max_val, iteratively decreasing alpha by a
# factor of 10 as necessary
alpha_pos = Y >= alpha * max_val
while alpha_pos.sum() < n:
alpha = 0.1 * alpha
alpha_pos = Y >= alpha * max_val
alpha_pos_idcs = torch.arange(len(Y), device=Y.device)[alpha_pos]
weights = torch.exp(eta * (Y[alpha_pos] / max_val - 1))
idcs = alpha_pos_idcs[torch.multinomial(weights, n)]
if max_idx not in idcs:
idcs[-1] = max_idx
return X[idcs]
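# Minimal, self-contained demo of the heuristics above (illustrative only; the
# acquisition values are random stand-ins for a real acquisition function).
if __name__ == "__main__":
    X_rnd = torch.rand(100, 3, 6)   # 100 raw q-batches with q=3, d=6
    Y_rnd = torch.randn(100)        # stand-in acquisition values
    X_init = initialize_q_batch(X_rnd, Y_rnd, n=5, eta=2.0)
    print(X_init.shape)             # torch.Size([5, 3, 6])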
| [
"torch.max",
"torch.any",
"torch.randperm",
"torch.multinomial",
"torch.exp"
] | 0.3.2 | cnheider/botorch | 1d90aaff64b2f1e1f49bcac233b45ba18427f6fd |
1.8 | import cv2
import torch
from Services.NeuralNetwork.tool.torch_utils import do_detect
from Services.NeuralNetwork.tool.darknet2pytorch import Darknet
class NeuralNetwork:
@staticmethod
def isCudaAvailable() -> bool:
return torch.cuda.is_available()
@staticmethod
def getAvailableCalculationDevices() -> dict:
devices = {}
devices["cpu"] = "CPU"
if torch.cuda.is_available():
for device_idx in range(torch.cuda.device_count()):
devices["cuda:%d" % device_idx] = "GPU (%s)" % torch.cuda.get_device_name(device_idx)
return devices
def __init__(self, cfg_file: str, weights_file: str, threshold: float, device_id: str) -> None:
with torch.no_grad():
self._obj_thresh = threshold
self._device_id = device_id
self._model = Darknet(cfg_file)
if(self._model is None):
return
#self._model.print_network()
self._model.load_weights(weights_file)
self._model = self._model.to(device_id)
self._model.eval()
def process(self, img):
with torch.no_grad():
if img is None:
    return
sized = cv2.resize(img, (416, 416))
sized = cv2.cvtColor(sized, cv2.COLOR_BGR2RGB)
boxes = do_detect(self._model, sized, self._obj_thresh, 0.4, self._device_id)
detections = []
img_height, img_width = img.shape[:2]
for box in boxes[0]:
x1 = int(box[0] * img_width)
y1 = int(box[1] * img_height)
x2 = int(box[2] * img_width)
y2 = int(box[3] * img_height)
xywh = (x1, y1, x2-x1, y2-y1)
conf = float(box[5])
id = int(box[6])
detections.append([xywh, id, conf])
return detections
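# Usage sketch (added for illustration; file names and threshold are assumptions):
#   net = NeuralNetwork("yolov4.cfg", "yolov4.weights", threshold=0.5, device_id="cpu")
#   detections = net.process(cv2.imread("frame.jpg"))
#   for (x, y, w, h), class_id, confidence in detections:
#       print(class_id, confidence, (x, y, w, h))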
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.cuda.get_device_name",
"torch.cuda.device_count"
] | 1.8.1 | shell-done/Spongo_IHM | 3492c889b1d60cf50b4b2625b496fd6958309a8e |
3 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This module implements loading meshes from glTF 2 assets stored in a
GLB container file or a glTF JSON file with embedded binary data.
It is experimental.
The module provides a MeshFormatInterpreter called
MeshGlbFormat which must be used explicitly.
e.g.
.. code-block:: python
from pytorch3d.io import IO
from pytorch3d.io.experimental_gltf_io import MeshGlbFormat
io = IO()
io.register_meshes_format(MeshGlbFormat())
io.load_mesh(...)
This implementation is quite restricted in what it supports.
- It does not try to validate the input against the standard.
- It loads the default scene only.
- Only triangulated geometry is supported.
- The geometry of all meshes of the entire scene is aggregated into a single mesh.
Use `load_meshes()` instead to get un-aggregated (but transformed) ones.
- All material properties are ignored except for either vertex color, baseColorTexture
or baseColorFactor. If available, one of these (in this order) is exclusively
  used, which does not match the semantics of the standard.
"""
import json
import struct
import warnings
from base64 import b64decode
from collections import deque
from enum import IntEnum
from io import BytesIO
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union, cast
import numpy as np
import torch
from iopath.common.file_io import PathManager
from PIL import Image
from pytorch3d.io.utils import PathOrStr, _open_file
from pytorch3d.renderer.mesh import TexturesBase, TexturesUV, TexturesVertex
from pytorch3d.structures import Meshes, join_meshes_as_scene
from pytorch3d.transforms import Transform3d, quaternion_to_matrix
from .pluggable_formats import MeshFormatInterpreter, endswith
_GLTF_MAGIC = 0x46546C67
_JSON_CHUNK_TYPE = 0x4E4F534A
_BINARY_CHUNK_TYPE = 0x004E4942
_DATA_URI_PREFIX = "data:application/octet-stream;base64,"
class _PrimitiveMode(IntEnum):
POINTS = 0
LINES = 1
LINE_LOOP = 2
LINE_STRIP = 3
TRIANGLES = 4
TRIANGLE_STRIP = 5
TRIANGLE_FAN = 6
class _ComponentType(IntEnum):
BYTE = 5120
UNSIGNED_BYTE = 5121
SHORT = 5122
UNSIGNED_SHORT = 5123
UNSIGNED_INT = 5125
FLOAT = 5126
_ITEM_TYPES: Dict[int, Any] = {
5120: np.int8,
5121: np.uint8,
5122: np.int16,
5123: np.uint16,
5125: np.uint32,
5126: np.float32,
}
_ElementShape = Union[Tuple[int], Tuple[int, int]]
_ELEMENT_SHAPES: Dict[str, _ElementShape] = {
"SCALAR": (1,),
"VEC2": (2,),
"VEC3": (3,),
"VEC4": (4,),
"MAT2": (2, 2),
"MAT3": (3, 3),
"MAT4": (4, 4),
}
def _read_header(stream: BinaryIO) -> Optional[Tuple[int, int]]:
header = stream.read(12)
magic, version, length = struct.unpack("<III", header)
if magic != _GLTF_MAGIC:
return None
return version, length
def _read_chunks(
stream: BinaryIO, length: int
) -> Optional[Tuple[Dict[str, Any], np.ndarray]]:
"""
Get the json header and the binary data from a
GLB file.
"""
json_data = None
binary_data = None
while stream.tell() < length:
chunk_header = stream.read(8)
chunk_length, chunk_type = struct.unpack("<II", chunk_header)
chunk_data = stream.read(chunk_length)
if chunk_type == _JSON_CHUNK_TYPE:
json_data = json.loads(chunk_data)
elif chunk_type == _BINARY_CHUNK_TYPE:
binary_data = chunk_data
else:
warnings.warn("Unsupported chunk type")
return None
if json_data is None:
raise ValueError("Missing json header")
if binary_data is not None:
binary_data = np.frombuffer(binary_data, dtype=np.uint8)
return json_data, binary_data
def _make_node_transform(node: Dict[str, Any]) -> Transform3d:
"""
    Convert a transform from the json data into a PyTorch3D
Transform3d format.
"""
array = node.get("matrix")
if array is not None: # Stored in column-major order
M = np.array(array, dtype=np.float32).reshape(4, 4, order="F")
return Transform3d(matrix=torch.from_numpy(M))
out = Transform3d()
# Given some of (scale/rotation/translation), we do them in that order to
    # get points into the world space.
# See https://github.com/KhronosGroup/glTF/issues/743 .
array = node.get("scale", None)
if array is not None:
scale_vector = torch.FloatTensor(array)
out = out.scale(scale_vector[None])
# Rotation quaternion (x, y, z, w) where w is the scalar
array = node.get("rotation", None)
if array is not None:
x, y, z, w = array
# We negate w. This is equivalent to inverting the rotation.
# This is needed as quaternion_to_matrix makes a matrix which
# operates on column vectors, whereas Transform3d wants a
# matrix which operates on row vectors.
rotation_quaternion = torch.FloatTensor([-w, x, y, z])
rotation_matrix = quaternion_to_matrix(rotation_quaternion)
out = out.rotate(R=rotation_matrix)
array = node.get("translation", None)
if array is not None:
translation_vector = torch.FloatTensor(array)
out = out.translate(x=translation_vector[None])
return out
class _GLTFLoader:
def __init__(self, stream: BinaryIO) -> None:
self._json_data = None
# Map from buffer index to (decoded) binary data
self._binary_data = {}
version_and_length = _read_header(stream)
if version_and_length is None: # GLTF
stream.seek(0)
json_data = json.load(stream)
else: # GLB
version, length = version_and_length
if version != 2:
warnings.warn("Unsupported version")
return
json_and_binary_data = _read_chunks(stream, length)
if json_and_binary_data is None:
raise ValueError("Data not found")
json_data, binary_data = json_and_binary_data
self._binary_data[0] = binary_data
self._json_data = json_data
self._accessors = json_data.get("accessors", [])
self._buffer_views = json_data.get("bufferViews", [])
self._buffers = json_data.get("buffers", [])
self._texture_map_images = {}
def _access_image(self, image_index: int) -> np.ndarray:
"""
Get the data for an image from the file. This is only called
by _get_texture_map_image which caches it.
"""
image_json = self._json_data["images"][image_index]
buffer_view = self._buffer_views[image_json["bufferView"]]
if "byteStride" in buffer_view:
raise NotImplementedError("strided buffer views")
length = buffer_view["byteLength"]
offset = buffer_view.get("byteOffset", 0)
binary_data = self.get_binary_data(buffer_view["buffer"])
bytesio = BytesIO(binary_data[offset : offset + length].tobytes())
with Image.open(bytesio) as f:
array = np.array(f)
if array.dtype == np.uint8:
return array.astype(np.float32) / 255.0
else:
return array
def _get_texture_map_image(self, image_index: int) -> torch.Tensor:
"""
Return a texture map image as a torch tensor.
Calling this function repeatedly with the same arguments returns
        the very same tensor; this allows a memory optimization to happen
later in TexturesUV.join_scene.
Any alpha channel is ignored.
"""
im = self._texture_map_images.get(image_index)
if im is not None:
return im
im = torch.from_numpy(self._access_image(image_index))[:, :, :3]
self._texture_map_images[image_index] = im
return im
def _access_data(self, accessor_index: int) -> np.ndarray:
"""
Get the raw data from an accessor as a numpy array.
"""
accessor = self._accessors[accessor_index]
buffer_view_index = accessor.get("bufferView")
        # An undefined buffer view (all zeros) is not (yet) supported
if buffer_view_index is None:
raise NotImplementedError("Undefined buffer view")
accessor_byte_offset = accessor.get("byteOffset", 0)
component_type = accessor["componentType"]
element_count = accessor["count"]
element_type = accessor["type"]
# Sparse accessors are not (yet) supported
if accessor.get("sparse") is not None:
raise NotImplementedError("Sparse Accessors")
buffer_view = self._buffer_views[buffer_view_index]
buffer_index = buffer_view["buffer"]
buffer_byte_length = buffer_view["byteLength"]
element_byte_offset = buffer_view.get("byteOffset", 0)
element_byte_stride = buffer_view.get("byteStride", 0)
if element_byte_stride != 0 and element_byte_stride < 4:
raise ValueError("Stride is too small.")
if element_byte_stride > 252:
raise ValueError("Stride is too big.")
element_shape = _ELEMENT_SHAPES[element_type]
item_type = _ITEM_TYPES[component_type]
item_dtype = np.dtype(item_type)
item_count = np.prod(element_shape)
item_size = item_dtype.itemsize
size = element_count * item_count * item_size
if size > buffer_byte_length:
raise ValueError("Buffer did not have enough data for the accessor")
buffer_ = self._buffers[buffer_index]
binary_data = self.get_binary_data(buffer_index)
if len(binary_data) < buffer_["byteLength"]:
raise ValueError("Not enough binary data for the buffer")
if element_byte_stride == 0:
element_byte_stride = item_size * item_count
# The same buffer can store interleaved elements
if element_byte_stride < item_size * item_count:
raise ValueError("Items should not overlap")
dtype = np.dtype(
{
"names": ["element"],
"formats": [str(element_shape) + item_dtype.str],
"offsets": [0],
"itemsize": element_byte_stride,
}
)
byte_offset = accessor_byte_offset + element_byte_offset
if byte_offset % item_size != 0:
raise ValueError("Misaligned data")
byte_length = element_count * element_byte_stride
buffer_view = binary_data[byte_offset : byte_offset + byte_length].view(dtype)[
"element"
]
# Convert matrix data from column-major (OpenGL) to row-major order
if element_type in ("MAT2", "MAT3", "MAT4"):
buffer_view = np.transpose(buffer_view, (0, 2, 1))
return buffer_view
def _get_primitive_attribute(
self, primitive_attributes: Dict[str, Any], key: str, dtype
) -> Optional[np.ndarray]:
accessor_index = primitive_attributes.get(key)
if accessor_index is None:
return None
primitive_attribute = self._access_data(accessor_index)
if key == "JOINTS_0":
pass
        elif dtype == np.uint8:
            # Normalize out-of-place: an in-place division would fail on the
            # integer-typed source array
            primitive_attribute = primitive_attribute / 255.0
        elif dtype == np.uint16:
            primitive_attribute = primitive_attribute / 65535.0
else:
if dtype != np.float32:
raise ValueError("Unexpected data type")
primitive_attribute = primitive_attribute.astype(dtype)
return primitive_attribute
def get_binary_data(self, buffer_index: int):
"""
Get the binary data from a buffer as a 1D numpy array of bytes.
This is implemented for explicit uri data buffers or the main GLB data
segment.
"""
buffer_ = self._buffers[buffer_index]
binary_data = self._binary_data.get(buffer_index)
if binary_data is None: # Lazily decode binary data
uri = buffer_.get("uri")
if not uri.startswith(_DATA_URI_PREFIX):
raise NotImplementedError("Unexpected URI type")
binary_data = b64decode(uri[len(_DATA_URI_PREFIX) :])
binary_data = np.frombuffer(binary_data, dtype=np.uint8)
self._binary_data[buffer_index] = binary_data
return binary_data
def get_texture_for_mesh(
self, primitive: Dict[str, Any], indices: torch.Tensor
) -> Optional[TexturesBase]:
"""
Get the texture object representing the given mesh primitive.
Args:
primitive: the mesh primitive being loaded.
indices: the face indices of the mesh
"""
attributes = primitive["attributes"]
vertex_colors = self._get_primitive_attribute(attributes, "COLOR_0", np.float32)
if vertex_colors is not None:
return TexturesVertex(torch.from_numpy(vertex_colors))
vertex_texcoords_0 = self._get_primitive_attribute(
attributes, "TEXCOORD_0", np.float32
)
if vertex_texcoords_0 is not None:
verts_uvs = torch.from_numpy(vertex_texcoords_0)
verts_uvs[:, 1] = 1 - verts_uvs[:, -1]
faces_uvs = indices
material_index = primitive.get("material", 0)
material = self._json_data["materials"][material_index]
material_roughness = material["pbrMetallicRoughness"]
if "baseColorTexture" in material_roughness:
texture_index = material_roughness["baseColorTexture"]["index"]
texture_json = self._json_data["textures"][texture_index]
# Todo - include baseColorFactor when also given
# Todo - look at the sampler
image_index = texture_json["source"]
map = self._get_texture_map_image(image_index)
elif "baseColorFactor" in material_roughness:
# Constant color?
map = torch.FloatTensor(material_roughness["baseColorFactor"])[
None, None, :3
]
texture = TexturesUV(
# pyre-fixme[61]: `map` may not be initialized here.
maps=[map], # alpha channel ignored
faces_uvs=[faces_uvs],
verts_uvs=[verts_uvs],
)
return texture
return None
def load(self, include_textures: bool) -> List[Tuple[Optional[str], Meshes]]:
"""
Attempt to load all the meshes making up the default scene from
the file as a list of possibly-named Meshes objects.
Args:
include_textures: Whether to try loading textures.
Returns:
            List of (name, mesh) pairs, one pair per loaded mesh primitive, where the
            name is the mesh's optional name property (or None if absent).
"""
if self._json_data is None:
raise ValueError("Initialization problem")
# This loads the default scene from the file.
# This is usually the only one.
# It is possible to have multiple scenes, in which case
# you could choose another here instead of taking the default.
scene_index = self._json_data.get("scene")
if scene_index is None:
raise ValueError("Default scene is not specified.")
scene = self._json_data["scenes"][scene_index]
nodes = self._json_data.get("nodes", [])
meshes = self._json_data.get("meshes", [])
root_node_indices = scene["nodes"]
mesh_transform = Transform3d()
names_meshes_list: List[Tuple[Optional[str], Meshes]] = []
# Keep track and apply the transform of the scene node to mesh vertices
Q = deque([(Transform3d(), node_index) for node_index in root_node_indices])
while Q:
parent_transform, current_node_index = Q.popleft()
current_node = nodes[current_node_index]
transform = _make_node_transform(current_node)
current_transform = transform.compose(parent_transform)
if "mesh" in current_node:
mesh_index = current_node["mesh"]
mesh = meshes[mesh_index]
mesh_name = mesh.get("name", None)
mesh_transform = current_transform
for primitive in mesh["primitives"]:
attributes = primitive["attributes"]
accessor_index = attributes["POSITION"]
positions = torch.from_numpy(
self._access_data(accessor_index).copy()
)
positions = mesh_transform.transform_points(positions)
mode = primitive.get("mode", _PrimitiveMode.TRIANGLES)
if mode != _PrimitiveMode.TRIANGLES:
raise NotImplementedError("Non triangular meshes")
if "indices" in primitive:
accessor_index = primitive["indices"]
indices = self._access_data(accessor_index).astype(np.int64)
else:
indices = np.arange(0, len(positions), dtype=np.int64)
indices = torch.from_numpy(indices.reshape(-1, 3))
texture = None
if include_textures:
texture = self.get_texture_for_mesh(primitive, indices)
mesh_obj = Meshes(
verts=[positions], faces=[indices], textures=texture
)
names_meshes_list.append((mesh_name, mesh_obj))
if "children" in current_node:
children_node_indices = current_node["children"]
Q.extend(
[
(current_transform, node_index)
for node_index in children_node_indices
]
)
return names_meshes_list
def load_meshes(
path: PathOrStr,
path_manager: PathManager,
include_textures: bool = True,
) -> List[Tuple[Optional[str], Meshes]]:
"""
    Loads all the meshes from the default scene in the given GLB file
    and returns them separately.
Args:
path: path to read from
path_manager: PathManager object for interpreting the path
include_textures: whether to load textures
Returns:
List of (name, mesh) pairs, where the name is the optional name property
from the GLB file, or None if it is absent, and the mesh is a Meshes
object containing one mesh.
"""
with _open_file(path, path_manager, "rb") as f:
loader = _GLTFLoader(cast(BinaryIO, f))
names_meshes_list = loader.load(include_textures=include_textures)
return names_meshes_list
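# Minimal usage sketch (assumes a local "scene.glb"; a default PathManager is enough):
#   from iopath.common.file_io import PathManager
#   named_meshes = load_meshes("scene.glb", PathManager())
#   for name, mesh in named_meshes:
#       print(name, mesh.verts_packed().shape)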
class MeshGlbFormat(MeshFormatInterpreter):
"""
Implements loading meshes from glTF 2 assets stored in a
GLB container file or a glTF JSON file with embedded binary data.
This implementation is quite restricted in what it supports.
- It does not try to validate the input against the standard.
- It loads the default scene only.
- Only triangulated geometry is supported.
- The geometry of all meshes of the entire scene is aggregated into a single mesh.
Use `load_meshes()` instead to get un-aggregated (but transformed) ones.
- All material properties are ignored except for either vertex color, baseColorTexture
or baseColorFactor. If available, one of these (in this order) is exclusively
      used, which does not match the semantics of the standard.
"""
def __init__(self) -> None:
self.known_suffixes = (".glb",)
def read(
self,
path: PathOrStr,
include_textures: bool,
device,
path_manager: PathManager,
**kwargs,
) -> Optional[Meshes]:
if not endswith(path, self.known_suffixes):
return None
names_meshes_list = load_meshes(
path=path,
path_manager=path_manager,
include_textures=include_textures,
)
meshes_list = [mesh for name, mesh in names_meshes_list]
mesh = join_meshes_as_scene(meshes_list)
return mesh.to(device)
def save(
self,
data: Meshes,
path: PathOrStr,
path_manager: PathManager,
binary: Optional[bool],
**kwargs,
) -> bool:
return False
| [
"torch.FloatTensor",
"torch.from_numpy"
] | 3 | theycallmepeter/pytorch3d_PBR | bc83c23969ff7843fc05d2da001952b368926174 |
3 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from pytorch3d import _C
def sample_pdf(
bins: torch.Tensor,
weights: torch.Tensor,
n_samples: int,
det: bool = False,
eps: float = 1e-5,
) -> torch.Tensor:
"""
Samples probability density functions defined by bin edges `bins` and
the non-negative per-bin probabilities `weights`.
Args:
bins: Tensor of shape `(..., n_bins+1)` denoting the edges of the sampling bins.
weights: Tensor of shape `(..., n_bins)` containing non-negative numbers
representing the probability of sampling the corresponding bin.
n_samples: The number of samples to draw from each set of bins.
det: If `False`, the sampling is random. `True` yields deterministic
uniformly-spaced sampling from the inverse cumulative density function.
eps: A constant preventing division by zero in case empty bins are present.
Returns:
samples: Tensor of shape `(..., n_samples)` containing `n_samples` samples
drawn from each probability distribution.
Refs:
[1] https://github.com/bmild/nerf/blob/55d8b00244d7b5178f4d003526ab6667683c9da9/run_nerf_helpers.py#L183 # noqa E501
"""
if torch.is_grad_enabled() and (bins.requires_grad or weights.requires_grad):
raise NotImplementedError("sample_pdf differentiability.")
if weights.min() <= -eps:
raise ValueError("Negative weights provided.")
batch_shape = bins.shape[:-1]
n_bins = weights.shape[-1]
if n_bins + 1 != bins.shape[-1] or weights.shape[:-1] != batch_shape:
shapes = f"{bins.shape}{weights.shape}"
raise ValueError("Inconsistent shapes of bins and weights: " + shapes)
output_shape = batch_shape + (n_samples,)
if det:
u = torch.linspace(0.0, 1.0, n_samples, device=bins.device, dtype=torch.float32)
output = u.expand(output_shape).contiguous()
else:
output = torch.rand(output_shape, dtype=torch.float32, device=bins.device)
# pyre-fixme[16]: Module `pytorch3d` has no attribute `_C`.
_C.sample_pdf(
bins.reshape(-1, n_bins + 1),
weights.reshape(-1, n_bins),
output.reshape(-1, n_samples),
eps,
)
return output
def sample_pdf_python(
bins: torch.Tensor,
weights: torch.Tensor,
N_samples: int,
det: bool = False,
eps: float = 1e-5,
) -> torch.Tensor:
"""
This is a pure python implementation of the `sample_pdf` function.
It may be faster than sample_pdf when the number of bins is very large,
because it behaves as O(batchsize * [n_bins + log(n_bins) * n_samples] )
whereas sample_pdf behaves as O(batchsize * n_bins * n_samples).
For 64 bins sample_pdf is much faster.
Samples probability density functions defined by bin edges `bins` and
the non-negative per-bin probabilities `weights`.
Note: This is a direct conversion of the TensorFlow function from the original
release [1] to PyTorch. It requires PyTorch 1.6 or greater due to the use of
torch.searchsorted.
Args:
bins: Tensor of shape `(..., n_bins+1)` denoting the edges of the sampling bins.
weights: Tensor of shape `(..., n_bins)` containing non-negative numbers
representing the probability of sampling the corresponding bin.
N_samples: The number of samples to draw from each set of bins.
det: If `False`, the sampling is random. `True` yields deterministic
uniformly-spaced sampling from the inverse cumulative density function.
eps: A constant preventing division by zero in case empty bins are present.
Returns:
samples: Tensor of shape `(..., N_samples)` containing `N_samples` samples
drawn from each probability distribution.
Refs:
[1] https://github.com/bmild/nerf/blob/55d8b00244d7b5178f4d003526ab6667683c9da9/run_nerf_helpers.py#L183 # noqa E501
"""
# Get pdf
weights = weights + eps # prevent nans
if weights.min() <= 0:
raise ValueError("Negative weights provided.")
pdf = weights / weights.sum(dim=-1, keepdim=True)
cdf = torch.cumsum(pdf, -1)
cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)
# Take uniform samples u of shape (..., N_samples)
if det:
u = torch.linspace(0.0, 1.0, N_samples, device=cdf.device, dtype=cdf.dtype)
u = u.expand(list(cdf.shape[:-1]) + [N_samples]).contiguous()
else:
u = torch.rand(
list(cdf.shape[:-1]) + [N_samples], device=cdf.device, dtype=cdf.dtype
)
# Invert CDF
inds = torch.searchsorted(cdf, u, right=True)
# inds has shape (..., N_samples) identifying the bin of each sample.
below = (inds - 1).clamp(0)
above = inds.clamp(max=cdf.shape[-1] - 1)
# Below and above are of shape (..., N_samples), identifying the bin
# edges surrounding each sample.
inds_g = torch.stack([below, above], -1).view(
*below.shape[:-1], below.shape[-1] * 2
)
cdf_g = torch.gather(cdf, -1, inds_g).view(*below.shape, 2)
bins_g = torch.gather(bins, -1, inds_g).view(*below.shape, 2)
# cdf_g and bins_g are of shape (..., N_samples, 2) and identify
# the cdf and the index of the two bin edges surrounding each sample.
denom = cdf_g[..., 1] - cdf_g[..., 0]
denom = torch.where(denom < eps, torch.ones_like(denom), denom)
t = (u - cdf_g[..., 0]) / denom
# t is of shape (..., N_samples) and identifies how far through
# each sample is in its bin.
samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
return samples
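# Minimal usage sketch with toy shapes (sample_pdf takes the same arguments but needs the
# compiled pytorch3d._C extension):
#   bins = torch.linspace(0.0, 1.0, 65).expand(4, 65)    # (batch, n_bins + 1) bin edges
#   weights = torch.rand(4, 64)                          # (batch, n_bins) unnormalized pdf
#   samples = sample_pdf_python(bins, weights, N_samples=16, det=True)   # (4, 16)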
| [
"torch.rand",
"torch.stack",
"torch.gather",
"torch.linspace",
"torch.zeros_like",
"torch.searchsorted",
"torch.ones_like",
"torch.is_grad_enabled",
"torch.cumsum"
] | 3 | theycallmepeter/pytorch3d_PBR | bc83c23969ff7843fc05d2da001952b368926174 |
3 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from common_testing import TestCaseMixin
from pytorch3d.loss import mesh_edge_loss
from pytorch3d.structures import Meshes
from test_sample_points_from_meshes import init_meshes
class TestMeshEdgeLoss(TestCaseMixin, unittest.TestCase):
def test_empty_meshes(self):
device = torch.device("cuda:0")
target_length = 0
N = 10
V = 32
verts_list = []
faces_list = []
for _ in range(N):
vn = torch.randint(3, high=V, size=(1,))[0].item()
verts = torch.rand((vn, 3), dtype=torch.float32, device=device)
faces = torch.tensor([], dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
mesh = Meshes(verts=verts_list, faces=faces_list)
loss = mesh_edge_loss(mesh, target_length=target_length)
self.assertClose(loss, torch.tensor([0.0], dtype=torch.float32, device=device))
self.assertTrue(loss.requires_grad)
@staticmethod
def mesh_edge_loss_naive(meshes, target_length: float = 0.0):
"""
Naive iterative implementation of mesh loss calculation.
"""
edges_packed = meshes.edges_packed()
verts_packed = meshes.verts_packed()
edge_to_mesh = meshes.edges_packed_to_mesh_idx()
N = len(meshes)
device = meshes.device
valid = meshes.valid
predlosses = torch.zeros((N,), dtype=torch.float32, device=device)
for b in range(N):
if valid[b] == 0:
continue
mesh_edges = edges_packed[edge_to_mesh == b]
verts_edges = verts_packed[mesh_edges]
num_edges = mesh_edges.size(0)
for e in range(num_edges):
v0, v1 = verts_edges[e, 0], verts_edges[e, 1]
predlosses[b] += ((v0 - v1).norm(dim=0, p=2) - target_length) ** 2.0
if num_edges > 0:
predlosses[b] = predlosses[b] / num_edges
return predlosses.mean()
def test_mesh_edge_loss_output(self):
"""
Check outputs of tensorized and iterative implementations are the same.
"""
device = torch.device("cuda:0")
target_length = 0.5
num_meshes = 10
num_verts = 32
num_faces = 64
verts_list = []
faces_list = []
valid = torch.randint(2, size=(num_meshes,))
for n in range(num_meshes):
if valid[n]:
vn = torch.randint(3, high=num_verts, size=(1,))[0].item()
fn = torch.randint(vn, high=num_faces, size=(1,))[0].item()
verts = torch.rand((vn, 3), dtype=torch.float32, device=device)
faces = torch.randint(
vn, size=(fn, 3), dtype=torch.int64, device=device
)
else:
verts = torch.tensor([], dtype=torch.float32, device=device)
faces = torch.tensor([], dtype=torch.int64, device=device)
verts_list.append(verts)
faces_list.append(faces)
meshes = Meshes(verts=verts_list, faces=faces_list)
loss = mesh_edge_loss(meshes, target_length=target_length)
predloss = TestMeshEdgeLoss.mesh_edge_loss_naive(meshes, target_length)
self.assertClose(loss, predloss)
@staticmethod
def mesh_edge_loss(num_meshes: int = 10, max_v: int = 100, max_f: int = 300):
meshes = init_meshes(num_meshes, max_v, max_f, device="cuda:0")
torch.cuda.synchronize()
def compute_loss():
mesh_edge_loss(meshes, target_length=0.0)
torch.cuda.synchronize()
return compute_loss
| [
"torch.zeros",
"torch.device",
"torch.rand",
"torch.cuda.synchronize",
"torch.randint",
"torch.tensor"
] | 3 | theycallmepeter/pytorch3d_PBR | bc83c23969ff7843fc05d2da001952b368926174 |
0.4 | from __future__ import absolute_import, division, print_function
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.infer import EmpiricalMarginal, TracePredictive
from pyro.infer.mcmc import MCMC, NUTS
from tests.common import assert_equal
def model(num_trials):
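    # Generative model: one success probability per group of trials,
    # phi ~ Uniform(0, 1), observed successes obs ~ Binomial(num_trials, phi).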
phi_prior = dist.Uniform(num_trials.new_tensor(0.), num_trials.new_tensor(1.))\
.expand_by([num_trials.shape[0]])
success_prob = pyro.sample("phi", phi_prior)
return pyro.sample("obs", dist.Binomial(num_trials, success_prob))
def test_posterior_predictive():
true_probs = torch.ones(5) * 0.7
num_trials = torch.ones(5) * 1000
num_success = dist.Binomial(num_trials, true_probs).sample()
conditioned_model = poutine.condition(model, data={"obs": num_success})
nuts_kernel = NUTS(conditioned_model, adapt_step_size=True)
mcmc_run = MCMC(nuts_kernel, num_samples=1000, warmup_steps=200).run(num_trials)
posterior_predictive = TracePredictive(model, mcmc_run, num_samples=10000).run(num_trials)
marginal_return_vals = EmpiricalMarginal(posterior_predictive)
assert_equal(marginal_return_vals.mean, torch.ones(5) * 700, prec=30)
| [
"torch.ones"
] | 0.4.0 | neerajprad/pyro | 3b5b2c5de208209365bf26f239f12521de68acc4 |
1.4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from unittest import mock
import torch
from botorch import settings
from botorch.acquisition.monte_carlo import (
MCAcquisitionFunction,
qExpectedImprovement,
qNoisyExpectedImprovement,
qProbabilityOfImprovement,
qSimpleRegret,
qUpperConfidenceBound,
)
from botorch.acquisition.objective import ScalarizedObjective
from botorch.exceptions import BotorchWarning, UnsupportedError
from botorch.sampling.samplers import IIDNormalSampler, SobolQMCNormalSampler
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior
class DummyMCAcquisitionFunction(MCAcquisitionFunction):
def forward(self, X):
pass
class TestMCAcquisitionFunction(BotorchTestCase):
def test_abstract_raises(self):
with self.assertRaises(TypeError):
MCAcquisitionFunction()
# raise if model is multi-output, but no objective is given
no = "botorch.utils.testing.MockModel.num_outputs"
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 2
mm = MockModel(MockPosterior())
with self.assertRaises(UnsupportedError):
DummyMCAcquisitionFunction(model=mm)
class TestQExpectedImprovement(BotorchTestCase):
def test_q_expected_improvement(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(num_samples=2)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# test shifting best_f value
acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
# TODO: Test batched best_f, batched model, batched evaluation
# basic test, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
# test bad objective type
obj = ScalarizedObjective(
weights=torch.rand(2, device=self.device, dtype=dtype)
)
with self.assertRaises(UnsupportedError):
qExpectedImprovement(model=mm, best_f=0, sampler=sampler, objective=obj)
def test_q_expected_improvement_batch(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 2 x 2 x 1
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(num_samples=2)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test shifting best_f value
acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 2.0)
self.assertEqual(res[1].item(), 1.0)
# test batch mode, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# TODO: Test different objectives (incl. constraints)
class TestQNoisyExpectedImprovement(BotorchTestCase):
def test_q_noisy_expected_improvement(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 2 x 1
samples_noisy = torch.tensor([1.0, 0.0], device=self.device, dtype=dtype)
samples_noisy = samples_noisy.view(1, 2, 1)
# X_baseline is `q' x d` = 1 x 1
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
# X is `q x d` = 1 x 1
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(num_samples=2)
acqf = qNoisyExpectedImprovement(
model=mm_noisy, X_baseline=X_baseline, sampler=sampler
)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
# basic test, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qNoisyExpectedImprovement(
model=mm_noisy, X_baseline=X_baseline, sampler=sampler
)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qNoisyExpectedImprovement(
model=mm_noisy, X_baseline=X_baseline, sampler=sampler
)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True, seed=12345)
acqf = qNoisyExpectedImprovement(
model=mm_noisy, X_baseline=X_baseline, sampler=sampler
)
res = acqf(X)
self.assertEqual(res.item(), 1.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
sampler = SobolQMCNormalSampler(num_samples=2)
samples_noisy_pending = torch.tensor(
[1.0, 0.0, 0.0], device=self.device, dtype=dtype
)
samples_noisy_pending = samples_noisy_pending.view(1, 3, 1)
mm_noisy_pending = MockModel(MockPosterior(samples=samples_noisy_pending))
acqf = qNoisyExpectedImprovement(
model=mm_noisy_pending, X_baseline=X_baseline, sampler=sampler
)
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
def test_q_noisy_expected_improvement_batch(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 2 x 3 x 1
samples_noisy = torch.zeros(2, 3, 1, device=self.device, dtype=dtype)
samples_noisy[0, 0, 0] = 1.0
mm_noisy = MockModel(MockPosterior(samples=samples_noisy))
            # X is `b x q x d` = 2 x 1 x 1
X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype)
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(num_samples=2)
acqf = qNoisyExpectedImprovement(
model=mm_noisy, X_baseline=X_baseline, sampler=sampler
)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch mode, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qNoisyExpectedImprovement(
model=mm_noisy, X_baseline=X_baseline, sampler=sampler
)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qNoisyExpectedImprovement(
model=mm_noisy, X_baseline=X_baseline, sampler=sampler
)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test X_pending w/ batch mode, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True, seed=12345)
acqf = qNoisyExpectedImprovement(
model=mm_noisy, X_baseline=X_baseline, sampler=sampler
)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 3, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
def test_prune_baseline(self):
no = "botorch.utils.testing.MockModel.num_outputs"
prune = "botorch.acquisition.monte_carlo.prune_inferior_points"
for dtype in (torch.float, torch.double):
X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype)
with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
mock_num_outputs.return_value = 1
mm = MockModel(mock.Mock())
with mock.patch(prune, return_value=X_pruned) as mock_prune:
acqf = qNoisyExpectedImprovement(
model=mm, X_baseline=X_baseline, prune_baseline=True
)
mock_prune.assert_called_once()
self.assertTrue(torch.equal(acqf.X_baseline, X_pruned))
# TODO: Test different objectives (incl. constraints)
class TestQProbabilityOfImprovement(BotorchTestCase):
def test_q_probability_of_improvement(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(num_samples=2)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.5)
# basic test, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
def test_q_probability_of_improvement_batch(self):
# the event shape is `b x q x t` = 2 x 2 x 1
for dtype in (torch.float, torch.double):
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(num_samples=2)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
# test batch mode, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.5)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# TODO: Test different objectives (incl. constraints)
class TestQSimpleRegret(BotorchTestCase):
def test_q_simple_regret(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(num_samples=2)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# basic test, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
def test_q_simple_regret_batch(self):
# the event shape is `b x q x t` = 2 x 2 x 1
for dtype in (torch.float, torch.double):
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(num_samples=2)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch mode, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qSimpleRegret(model=mm, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# TODO: Test different objectives (incl. constraints)
class TestQUpperConfidenceBound(BotorchTestCase):
def test_q_upper_confidence_bound(self):
for dtype in (torch.float, torch.double):
# the event shape is `b x q x t` = 1 x 1 x 1
samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
mm = MockModel(MockPosterior(samples=samples))
# X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
X = torch.zeros(1, 1, device=self.device, dtype=dtype)
# basic test
sampler = IIDNormalSampler(num_samples=2)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
# basic test, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
res = acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# basic test, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res.item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertEqual(acqf.X_pending, X)
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
def test_q_upper_confidence_bound_batch(self):
# TODO: T41739913 Implement tests for all MCAcquisitionFunctions
for dtype in (torch.float, torch.double):
samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
samples[0, 0, 0] = 1.0
mm = MockModel(MockPosterior(samples=samples))
# X is a dummy and unused b/c of mocking
X = torch.zeros(2, 1, 1, device=self.device, dtype=dtype)
# test batch mode
sampler = IIDNormalSampler(num_samples=2)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# test batch mode, no resample
sampler = IIDNormalSampler(num_samples=2, seed=12345)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, no resample
sampler = SobolQMCNormalSampler(num_samples=2)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X)
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
# test batch mode, qmc, resample
sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
res = acqf(X) # 1-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X)
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
res = acqf(X.expand(2, 1, 1)) # 2-dim batch
self.assertEqual(res[0].item(), 1.0)
self.assertEqual(res[1].item(), 0.0)
# the base samples should have the batch dim collapsed
self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
bs = acqf.sampler.base_samples.clone()
acqf(X.expand(2, 1, 1))
self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
# basic test for X_pending and warning
acqf.set_X_pending()
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(None)
self.assertIsNone(acqf.X_pending)
acqf.set_X_pending(X)
self.assertTrue(torch.equal(acqf.X_pending, X))
res = acqf(X)
X2 = torch.zeros(
1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
)
with warnings.catch_warnings(record=True) as ws, settings.debug(True):
acqf.set_X_pending(X2)
self.assertEqual(acqf.X_pending, X2)
self.assertEqual(len(ws), 1)
self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
# TODO: Test different objectives (incl. constraints)
| [
"torch.zeros",
"torch.rand",
"torch.Size",
"torch.tensor",
"torch.equal"
] | 1.4 | dme65/botorch | 508f215bfe987373924e39444c8fb544d5132178 |
0.4 | import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torchvision import models
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from miscc.config import cfg
from GlobalAttention import GlobalAttentionGeneral as ATT_NET
# FIXME change this to mutable variables.
MAX_OBJECTS = cfg.MAX_OBJECTS
def Level2RNNEncodeMagic(captions, cap_lens, low_level_rnn, high_level_rnn):
''' You are not expected to understand this. '''
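    # What this does, inferred from the code below: (1) keep only captions with a positive
    # length and flatten them across the QA axis, (2) encode each one with the low-level
    # (word) RNN, (3) scatter the embeddings back into a (batch, MAX_QA_NUM, dim) tensor,
    # (4) run the high-level (caption) RNN over each sample's caption embeddings.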
cap_mask = (cap_lens > 0)
actual_captions = captions[cap_mask] # -> (? x n_words)
actual_cap_lens = cap_lens[cap_mask] # -> (?, )
L_hidden = low_level_rnn.init_hidden(actual_captions.size(0))
_, actual_cap_embs = low_level_rnn(actual_captions, actual_cap_lens, L_hidden)
cap_embs = torch.zeros(captions.size(0), cfg.TEXT.MAX_QA_NUM, actual_cap_embs.size(-1)).cuda()
cap_embs[cap_mask] = actual_cap_embs
num_caps = cap_mask.sum(1)
H_hidden = high_level_rnn.init_hidden(captions.size(0))
per_cap_embs, avg_cap_embs = high_level_rnn(cap_embs, num_caps, H_hidden)
return per_cap_embs, avg_cap_embs, num_caps
def stn(image, transformation_matrix, size):
grid = torch.nn.functional.affine_grid(transformation_matrix, torch.Size(size))
out_image = torch.nn.functional.grid_sample(image, grid)
return out_image
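# Usage sketch (shapes only): warp a (N, C, H, W) map with per-sample 2x3 affine matrices.
#   theta = torch.eye(2, 3, device=img.device).repeat(img.size(0), 1, 1)  # identity warp
#   warped = stn(img, theta, img.shape)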
class GLU(nn.Module):
def __init__(self):
super(GLU, self).__init__()
def forward(self, x):
nc = x.size(1)
assert nc % 2 == 0, 'channels dont divide 2!'
nc = int(nc/2)
return x[:, :nc] * torch.sigmoid(x[:, nc:])
def conv1x1(in_planes, out_planes, bias=False):
"1x1 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
padding=0, bias=bias)
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
# Upscale the spatial size by a factor of 2
def upBlock(in_planes, out_planes):
block = nn.Sequential(
# nn.functional.interpolate(scale_factor=2, mode="nearest"),
nn.Upsample(scale_factor=2, mode='nearest'),
conv3x3(in_planes, out_planes * 2),
nn.BatchNorm2d(out_planes * 2),
GLU())
return block
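# Note (added): conv3x3 deliberately outputs out_planes * 2 channels; the GLU above
# gates one half with the sigmoid of the other, so upBlock's final output has
# out_planes channels at twice the input's spatial resolution.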
# Keep the spatial size
def Block3x3_relu(in_planes, out_planes):
block = nn.Sequential(
conv3x3(in_planes, out_planes * 2),
nn.BatchNorm2d(out_planes * 2),
GLU())
return block
class ResBlock(nn.Module):
def __init__(self, channel_num):
super(ResBlock, self).__init__()
self.block = nn.Sequential(
conv3x3(channel_num, channel_num * 2),
nn.BatchNorm2d(channel_num * 2),
GLU(),
conv3x3(channel_num, channel_num),
nn.BatchNorm2d(channel_num))
def forward(self, x):
residual = x
out = self.block(x)
out += residual
return out
class BBOX_NET(nn.Module):
# some code is modified from vae examples
# (https://github.com/pytorch/examples/blob/master/vae/main.py)
def __init__(self):
super(BBOX_NET, self).__init__()
self.c_dim = cfg.GAN.CONDITION_DIM
self.encode = nn.Sequential(
# 128 * 16 x 16
conv3x3(self.c_dim, self.c_dim // 2, stride=2),
nn.LeakyReLU(0.2, inplace=True),
# 64 x 8 x 8
conv3x3(self.c_dim // 2, self.c_dim // 4, stride=2),
nn.BatchNorm2d(self.c_dim // 4),
nn.LeakyReLU(0.2, inplace=True),
# 32 x 4 x 4
conv3x3(self.c_dim // 4, self.c_dim // 8, stride=2),
nn.BatchNorm2d(self.c_dim // 8),
nn.LeakyReLU(0.2, inplace=True),
# 16 x 2 x 2
)
def forward(self, labels, transf_matr_inv):
label_layout = torch.cuda.FloatTensor(labels.shape[0], self.c_dim, 16, 16).fill_(0)
for idx in range(MAX_OBJECTS):
current_label = labels[:, idx]
current_label = current_label.view(current_label.shape[0], current_label.shape[1], 1, 1)
current_label = current_label.repeat(1, 1, 16, 16)
current_label = stn(current_label, transf_matr_inv[:, idx], current_label.shape)
label_layout += current_label
layout_encoding = self.encode(label_layout).view(labels.shape[0], -1)
return layout_encoding
# ############## Text2Image Encoder-Decoder #######
class HigherLevelRNN(nn.Module):
def __init__(self, ninput=256, drop_prob=0.5,
nhidden=128, nlayers=1, bidirectional=True):
super(HigherLevelRNN, self).__init__()
self.n_steps = cfg.TEXT.MAX_QA_NUM
self.ninput = ninput # size of each embedding vector
self.drop_prob = drop_prob # probability of an element to be zeroed
self.nlayers = nlayers # Number of recurrent layers
self.bidirectional = bidirectional
self.rnn_type = cfg.RNN_TYPE
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
# number of features in the hidden state
self.nhidden = nhidden // self.num_directions
self.define_module()
def define_module(self):
if self.rnn_type == 'LSTM':
# dropout: If non-zero, introduces a dropout layer on
# the outputs of each RNN layer except the last layer
self.rnn = nn.LSTM(self.ninput, self.nhidden,
self.nlayers, batch_first=True,
dropout=self.drop_prob,
bidirectional=self.bidirectional)
elif self.rnn_type == 'GRU':
self.rnn = nn.GRU(self.ninput, self.nhidden,
self.nlayers, batch_first=True,
dropout=self.drop_prob,
bidirectional=self.bidirectional)
else:
raise NotImplementedError
def init_weights(self):
return
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers * self.num_directions,
bsz, self.nhidden).zero_()),
Variable(weight.new(self.nlayers * self.num_directions,
bsz, self.nhidden).zero_()))
else:
return Variable(weight.new(self.nlayers * self.num_directions,
bsz, self.nhidden).zero_())
def forward(self, emb, cap_lens, hidden, mask=None):
# input: torch.LongTensor of size batch x n_steps
# emb: batch x n_steps x ninput
# Returns: a PackedSequence object
cap_lens = cap_lens.data.tolist()
emb = pack_padded_sequence(emb, cap_lens, batch_first=True)
# #hidden and memory (num_layers * num_directions, batch, hidden_size):
# tensor containing the initial hidden state for each element in batch.
# #output (batch, seq_len, hidden_size * num_directions)
# #or a PackedSequence object:
# tensor containing output features (h_t) from the last layer of RNN
output, hidden = self.rnn(emb, hidden)
# PackedSequence object
# --> (batch, seq_len, hidden_size * num_directions)
output = pad_packed_sequence(output, batch_first=True)[0]
# output = self.drop(output)
# --> batch x hidden_size*num_directions x seq_len
words_emb = output.transpose(1, 2)
# --> batch x num_directions*hidden_size
if self.rnn_type == 'LSTM':
sent_emb = hidden[0].transpose(0, 1).contiguous()
else:
sent_emb = hidden.transpose(0, 1).contiguous()
sent_emb = sent_emb.view(-1, self.nhidden * self.num_directions)
return words_emb, sent_emb
class RNN_ENCODER(nn.Module):
def __init__(self, ntoken, ninput=300, drop_prob=0.5,
nhidden=128, nlayers=1, bidirectional=True):
super(RNN_ENCODER, self).__init__()
self.n_steps = cfg.TEXT.WORDS_NUM
self.ntoken = ntoken # size of the dictionary
self.ninput = ninput # size of each embedding vector
self.drop_prob = drop_prob # probability of an element to be zeroed
self.nlayers = nlayers # Number of recurrent layers
self.bidirectional = bidirectional
self.rnn_type = cfg.RNN_TYPE
if bidirectional:
self.num_directions = 2
else:
self.num_directions = 1
# number of features in the hidden state
self.nhidden = nhidden // self.num_directions
self.define_module()
self.init_weights()
def define_module(self):
self.encoder = nn.Embedding(self.ntoken, self.ninput)
self.drop = nn.Dropout(self.drop_prob)
if self.rnn_type == 'LSTM':
# dropout: If non-zero, introduces a dropout layer on
# the outputs of each RNN layer except the last layer
self.rnn = nn.LSTM(self.ninput, self.nhidden,
self.nlayers, batch_first=True,
dropout=self.drop_prob,
bidirectional=self.bidirectional)
elif self.rnn_type == 'GRU':
self.rnn = nn.GRU(self.ninput, self.nhidden,
self.nlayers, batch_first=True,
dropout=self.drop_prob,
bidirectional=self.bidirectional)
else:
raise NotImplementedError
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
# Do not need to initialize RNN parameters, which have been initialized
# http://pytorch.org/docs/master/_modules/torch/nn/modules/rnn.html#LSTM
# self.decoder.weight.data.uniform_(-initrange, initrange)
# self.decoder.bias.data.fill_(0)
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers * self.num_directions,
bsz, self.nhidden).zero_()),
Variable(weight.new(self.nlayers * self.num_directions,
bsz, self.nhidden).zero_()))
else:
return Variable(weight.new(self.nlayers * self.num_directions,
bsz, self.nhidden).zero_())
def forward(self, captions, cap_lens, hidden, mask=None):
# input: torch.LongTensor of size batch x n_steps
# --> emb: batch x n_steps x ninput
emb = self.drop(self.encoder(captions))
#
# Returns: a PackedSequence object
cap_lens = cap_lens.data.tolist()
emb = pack_padded_sequence(emb, cap_lens, batch_first=True, enforce_sorted=False)
# #hidden and memory (num_layers * num_directions, batch, hidden_size):
# tensor containing the initial hidden state for each element in batch.
# #output (batch, seq_len, hidden_size * num_directions)
# #or a PackedSequence object:
# tensor containing output features (h_t) from the last layer of RNN
output, hidden = self.rnn(emb, hidden)
# PackedSequence object
# --> (batch, seq_len, hidden_size * num_directions)
output = pad_packed_sequence(output, batch_first=True)[0]
# output = self.drop(output)
# --> batch x hidden_size*num_directions x seq_len
words_emb = output.transpose(1, 2)
# --> batch x num_directions*hidden_size
if self.rnn_type == 'LSTM':
sent_emb = hidden[0].transpose(0, 1).contiguous()
else:
sent_emb = hidden.transpose(0, 1).contiguous()
sent_emb = sent_emb.view(-1, self.nhidden * self.num_directions)
return words_emb, sent_emb
class CNN_ENCODER(nn.Module):
def __init__(self, nef):
super(CNN_ENCODER, self).__init__()
if cfg.TRAIN.FLAG:
self.nef = nef
else:
self.nef = 256 # define a uniform ranker
model = models.inception_v3()
url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'
model.load_state_dict(model_zoo.load_url(url))
for param in model.parameters():
param.requires_grad = False
print('Load pretrained model from ', url)
# print(model)
self.define_module(model)
self.init_trainable_weights()
def define_module(self, model):
self.Conv2d_1a_3x3 = model.Conv2d_1a_3x3
self.Conv2d_2a_3x3 = model.Conv2d_2a_3x3
self.Conv2d_2b_3x3 = model.Conv2d_2b_3x3
self.Conv2d_3b_1x1 = model.Conv2d_3b_1x1
self.Conv2d_4a_3x3 = model.Conv2d_4a_3x3
self.Mixed_5b = model.Mixed_5b
self.Mixed_5c = model.Mixed_5c
self.Mixed_5d = model.Mixed_5d
self.Mixed_6a = model.Mixed_6a
self.Mixed_6b = model.Mixed_6b
self.Mixed_6c = model.Mixed_6c
self.Mixed_6d = model.Mixed_6d
self.Mixed_6e = model.Mixed_6e
self.Mixed_7a = model.Mixed_7a
self.Mixed_7b = model.Mixed_7b
self.Mixed_7c = model.Mixed_7c
self.emb_features = conv1x1(768, self.nef)
self.emb_cnn_code = nn.Linear(2048, self.nef)
def init_trainable_weights(self):
initrange = 0.1
self.emb_features.weight.data.uniform_(-initrange, initrange)
self.emb_cnn_code.weight.data.uniform_(-initrange, initrange)
def forward(self, x):
features = None
# --> fixed-size input: batch x 3 x 299 x 299
# x = nn.functional.interpolate(x, size=(299, 299), mode='bilinear')
x = nn.Upsample(size=(299, 299), mode='bilinear')(x)
# 299 x 299 x 3
x = self.Conv2d_1a_3x3(x)
# 149 x 149 x 32
x = self.Conv2d_2a_3x3(x)
# 147 x 147 x 32
x = self.Conv2d_2b_3x3(x)
# 147 x 147 x 64
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 73 x 73 x 64
x = self.Conv2d_3b_1x1(x)
# 73 x 73 x 80
x = self.Conv2d_4a_3x3(x)
# 71 x 71 x 192
x = F.max_pool2d(x, kernel_size=3, stride=2)
# 35 x 35 x 192
x = self.Mixed_5b(x)
# 35 x 35 x 256
x = self.Mixed_5c(x)
# 35 x 35 x 288
x = self.Mixed_5d(x)
# 35 x 35 x 288
x = self.Mixed_6a(x)
# 17 x 17 x 768
x = self.Mixed_6b(x)
# 17 x 17 x 768
x = self.Mixed_6c(x)
# 17 x 17 x 768
x = self.Mixed_6d(x)
# 17 x 17 x 768
x = self.Mixed_6e(x)
# 17 x 17 x 768
# image region features
features = x
# 17 x 17 x 768
x = self.Mixed_7a(x)
# 8 x 8 x 1280
x = self.Mixed_7b(x)
# 8 x 8 x 2048
x = self.Mixed_7c(x)
# 8 x 8 x 2048
x = F.avg_pool2d(x, kernel_size=8)
# 1 x 1 x 2048
# x = F.dropout(x, training=self.training)
# 1 x 1 x 2048
x = x.view(x.size(0), -1)
# 2048
# global image features
cnn_code = self.emb_cnn_code(x)
# 512
if features is not None:
features = self.emb_features(features)
return features, cnn_code
# ############## G networks ###################
class SimpleLrPhiProjector(nn.Module):
''' a fake CA_NET. '''
def __init__(self):
super(SimpleLrPhiProjector, self).__init__()
self.t_dim = cfg.TEXT.EMBEDDING_DIM
self.c_dim = cfg.GAN.CONDITION_DIM
self.fc = nn.Linear(self.t_dim, self.c_dim, bias=True)
self.relu = nn.ReLU()
def forward(self, x):
return self.relu(self.fc(x)), None, None
class CA_NET(nn.Module):
# some code is modified from vae examples
# (https://github.com/pytorch/examples/blob/master/vae/main.py)
def __init__(self):
super(CA_NET, self).__init__()
self.t_dim = cfg.TEXT.EMBEDDING_DIM
self.c_dim = cfg.GAN.CONDITION_DIM
self.fc = nn.Linear(self.t_dim, self.c_dim * 4, bias=True)
self.relu = GLU()
def encode(self, text_embedding):
x = self.relu(self.fc(text_embedding))
mu = x[:, :self.c_dim]
logvar = x[:, self.c_dim:]
return mu, logvar
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
if cfg.CUDA:
eps = torch.cuda.FloatTensor(std.size()).normal_()
else:
eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, text_embedding):
mu, logvar = self.encode(text_embedding)
c_code = self.reparametrize(mu, logvar)
return c_code, mu, logvar
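# --- Added sketch (not part of the original repo): the reparameterisation trick used by
# CA_NET.reparametrize above, written out for a standard Gaussian so the sampling step is
# explicit. The batch size and dimension are illustrative assumptions.
def _reparametrize_example():
    mu = torch.zeros(4, 100)                  # batch of means (100 is an arbitrary dim)
    logvar = torch.zeros(4, 100)              # log sigma^2
    std = (0.5 * logvar).exp()
    eps = torch.randn_like(std)               # eps ~ N(0, I)
    return mu + eps * std                     # gradients flow through mu and logvar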
class INIT_STAGE_G(nn.Module):
def __init__(self, ngf, ncf):
super(INIT_STAGE_G, self).__init__()
self.gf_dim = ngf
self.in_dim = cfg.GAN.Z_DIM + ncf # cfg.TEXT.EMBEDDING_DIM
self.define_module()
def define_module(self):
nz, ngf = self.in_dim, self.gf_dim
linput = 100+cfg.GAN.LABEL_DIM
self.ef_dim = 100
self.bbox_net = BBOX_NET()
nz += 48
self.fc = nn.Sequential(
nn.Linear(nz, ngf * 4 * 4 * 2, bias=False),
nn.BatchNorm1d(ngf * 4 * 4 * 2),
GLU())
# local pathway
self.label = nn.Sequential(
nn.Linear(linput, self.ef_dim, bias=False),
nn.BatchNorm1d(self.ef_dim),
nn.ReLU(True))
self.local1 = upBlock(self.ef_dim, ngf // 2)
self.local2 = upBlock(ngf // 2, ngf // 4)
self.upsample1 = upBlock(ngf, ngf // 2)
self.upsample2 = upBlock(ngf // 2, ngf // 4)
self.upsample3 = upBlock(ngf // 2, ngf // 8)
self.upsample4 = upBlock(ngf // 8, ngf // 16)
def forward(self, z_code, c_code, transf_matrices_inv, label_one_hot):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param c_code: batch x cfg.TEXT.EMBEDDING_DIM
:return: batch x ngf/16 x 64 x 64
"""
local_labels = torch.cuda.FloatTensor(z_code.shape[0], MAX_OBJECTS, self.ef_dim).fill_(0)
# local pathway
h_code_locals = torch.cuda.FloatTensor(z_code.shape[0], self.gf_dim // 4, 16, 16).fill_(0)
for idx in range(MAX_OBJECTS):
current_label = self.label(torch.cat((c_code, label_one_hot[:, idx]), 1))
local_labels[:, idx] = current_label
current_label = current_label.view(current_label.shape[0], self.ef_dim, 1, 1)
current_label = current_label.repeat(1, 1, 4, 4)
h_code_local = self.local1(current_label)
h_code_local = self.local2(h_code_local)
h_code_local = stn(h_code_local, transf_matrices_inv[:, idx], h_code_local.shape)
h_code_locals += h_code_local
bbox_code = self.bbox_net(local_labels, transf_matrices_inv)
c_z_code = torch.cat((c_code, z_code, bbox_code), 1)
# c_z_code = torch.cat((c_code, z_code), 1)
# state size ngf x 4 x 4
out_code = self.fc(c_z_code)
out_code = out_code.view(-1, self.gf_dim, 4, 4)
# state size ngf/3 x 8 x 8
out_code = self.upsample1(out_code)
# state size ngf/4 x 16 x 16
out_code = self.upsample2(out_code)
# combine local and global pathways
out_code = torch.cat((out_code, h_code_locals), 1)
# state size ngf/8 x 32 x 32
out_code32 = self.upsample3(out_code)
# state size ngf/16 x 64 x 64
out_code64 = self.upsample4(out_code32)
return out_code64
class NEXT_STAGE_G(nn.Module):
def __init__(self, ngf, nef, ncf):
super(NEXT_STAGE_G, self).__init__()
self.gf_dim = ngf
self.ef_dim = nef
self.cf_dim = ncf
self.num_residual = cfg.GAN.R_NUM
self.define_module()
def _make_layer(self, block, channel_num):
layers = []
for i in range(cfg.GAN.R_NUM):
layers.append(block(channel_num))
return nn.Sequential(*layers)
def define_module(self):
ngf = self.gf_dim
self.att = ATT_NET(ngf, self.ef_dim)
self.jointConv = Block3x3_relu(ngf + 100, ngf) # FIXME del
self.residual = self._make_layer(ResBlock, ngf) # FIXME ngf * 2
# self.upsample = upBlock(ngf * 2, ngf) # FIXME
self.upsample = upBlock(ngf , ngf)
def forward(self, h_code, c_code, word_embs, mask):
"""
h_code1(query): batch x idf x ih x iw (queryL=ihxiw)
word_embs(context): batch x cdf x sourceL (sourceL=seq_len)
c_code1: batch x idf x queryL
att1: batch x sourceL x queryL
"""
self.att.applyMask(mask)
_c_code, att = self.att(h_code, word_embs)
# FIXME del
s_size = h_code.size(2)
c_code = c_code.view(-1, 100, 1, 1)
c_code = c_code.repeat(1, 1, s_size, s_size)
h_c_code = torch.cat((h_code, c_code), 1)
h_c_code = self.jointConv(h_c_code) # FIXME del
out_code = self.residual(h_c_code)
# state size ngf/2 x 2in_size x 2in_size
out_code = self.upsample(out_code)
return out_code, att
class GET_IMAGE_G(nn.Module):
def __init__(self, ngf):
super(GET_IMAGE_G, self).__init__()
self.gf_dim = ngf
self.img = nn.Sequential(
conv3x3(ngf, 3),
nn.Tanh()
)
def forward(self, h_code):
out_img = self.img(h_code)
return out_img
class G_NET(nn.Module):
def __init__(self):
super(G_NET, self).__init__()
ngf = cfg.GAN.GF_DIM
nef = cfg.TEXT.EMBEDDING_DIM
ncf = cfg.GAN.CONDITION_DIM
if cfg.GAN.B_CA_NET:
self.ca_net = CA_NET()
else:
self.ca_net = SimpleLrPhiProjector()
if cfg.TREE.BRANCH_NUM > 0:
self.h_net1 = INIT_STAGE_G(ngf * 16, ncf)
self.img_net1 = GET_IMAGE_G(ngf)
# gf x 64 x 64
if cfg.TREE.BRANCH_NUM > 1:
self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net2 = GET_IMAGE_G(ngf)
if cfg.TREE.BRANCH_NUM > 2:
self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net3 = GET_IMAGE_G(ngf)
def forward(self, z_code, sent_emb, word_embs, mask, transf_matrices_inv, label_one_hot):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
:param word_embs: batch x cdf x seq_len
:param mask: batch x seq_len
:return:
"""
fake_imgs = []
att_maps = []
c_code, mu, logvar = self.ca_net(sent_emb)
if cfg.TREE.BRANCH_NUM > 0:
h_code1 = self.h_net1(z_code, c_code, transf_matrices_inv, label_one_hot)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
if cfg.TREE.BRANCH_NUM > 1:
h_code2, att1 = \
self.h_net2(h_code1, c_code, word_embs, mask)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
if att1 is not None:
att_maps.append(att1)
if cfg.TREE.BRANCH_NUM > 2:
h_code3, att2 = \
self.h_net3(h_code2, c_code, word_embs, mask)
fake_img3 = self.img_net3(h_code3)
fake_imgs.append(fake_img3)
if att2 is not None:
att_maps.append(att2)
return fake_imgs, att_maps, mu, logvar
class G_DCGAN(nn.Module):
def __init__(self):
super(G_DCGAN, self).__init__()
ngf = cfg.GAN.GF_DIM
nef = cfg.TEXT.EMBEDDING_DIM
ncf = cfg.GAN.CONDITION_DIM
self.ca_net = CA_NET()
# 16gf x 64 x 64 --> gf x 64 x 64 --> 3 x 64 x 64
if cfg.TREE.BRANCH_NUM > 0:
self.h_net1 = INIT_STAGE_G(ngf * 16, ncf)
# gf x 64 x 64
if cfg.TREE.BRANCH_NUM > 1:
self.h_net2 = NEXT_STAGE_G(ngf, nef, ncf)
if cfg.TREE.BRANCH_NUM > 2:
self.h_net3 = NEXT_STAGE_G(ngf, nef, ncf)
self.img_net = GET_IMAGE_G(ngf)
def forward(self, z_code, sent_emb, word_embs, mask):
"""
:param z_code: batch x cfg.GAN.Z_DIM
:param sent_emb: batch x cfg.TEXT.EMBEDDING_DIM
:param word_embs: batch x cdf x seq_len
:param mask: batch x seq_len
:return:
"""
att_maps = []
c_code, mu, logvar = self.ca_net(sent_emb)
if cfg.TREE.BRANCH_NUM > 0:
h_code = self.h_net1(z_code, c_code)
if cfg.TREE.BRANCH_NUM > 1:
h_code, att1 = self.h_net2(h_code, c_code, word_embs, mask)
if att1 is not None:
att_maps.append(att1)
if cfg.TREE.BRANCH_NUM > 2:
h_code, att2 = self.h_net3(h_code, c_code, word_embs, mask)
if att2 is not None:
att_maps.append(att2)
fake_imgs = self.img_net(h_code)
return [fake_imgs], att_maps, mu, logvar
# ############## D networks ##########################
def Block3x3_leakRelu(in_planes, out_planes):
block = nn.Sequential(
conv3x3(in_planes, out_planes),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2, inplace=True)
)
return block
# Downscale the spatial size by a factor of 2
def downBlock(in_planes, out_planes):
block = nn.Sequential(
nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=False),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2, inplace=True)
)
return block
# Downscale the spatial size by a factor of 16
def encode_image_by_16times(ndf):
encode_img = nn.Sequential(
# --> state size. ndf x in_size/2 x in_size/2
nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
        # --> state size 2ndf x in_size/4 x in_size/4
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 4ndf x in_size/8 x in_size/8
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 8ndf x in_size/16 x in_size/16
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True)
)
return encode_img
class D_GET_LOGITS(nn.Module):
def __init__(self, ndf, nef, bcondition=False):
super(D_GET_LOGITS, self).__init__()
self.df_dim = ndf
self.ef_dim = nef
self.bcondition = bcondition
if self.bcondition:
self.jointConv = Block3x3_leakRelu(ndf * 8 + nef, ndf * 8)
self.outlogits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
def forward(self, h_code, c_code=None):
if self.bcondition and c_code is not None:
# conditioning output
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, 4, 4)
# state size (ngf+egf) x 4 x 4
h_c_code = torch.cat((h_code, c_code), 1)
# state size ngf x in_size x in_size
h_c_code = self.jointConv(h_c_code)
else:
h_c_code = h_code
output = self.outlogits(h_c_code)
return output.view(-1)
# For 64 x 64 images
class D_NET64(nn.Module):
def __init__(self, b_jcu=True):
super(D_NET64, self).__init__()
ndf = cfg.GAN.DF_DIM
nef = cfg.TEXT.EMBEDDING_DIM
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
self.define_module()
def define_module(self):
self.act = nn.LeakyReLU(0.2, inplace=True)
ndf = cfg.GAN.DF_DIM
# global pathway
# --> state size. ndf x in_size/2 x in_size/2
self.conv1 = nn.Conv2d(3, ndf, 4, 2, 1, bias=False)
        # --> state size 2ndf x in_size/4 x in_size/4
self.conv2 = nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False)
self.bn2 = nn.BatchNorm2d(ndf * 2)
# --> state size 4ndf x in_size/8 x in_size/8
self.conv3 = nn.Conv2d(ndf * 4, ndf * 4, 4, 2, 1, bias=False)
self.bn3 = nn.BatchNorm2d(ndf * 4)
# --> state size 8ndf x in_size/16 x in_size/16
self.conv4 = nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False)
self.bn4 = nn.BatchNorm2d(ndf * 8)
# local pathway
self.local = nn.Sequential(
nn.Conv2d(3 + cfg.GAN.LABEL_DIM, ndf * 2, 4, 1, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True)
)
def forward(self, image, label, transf_matrices, transf_matrices_inv):
# local pathway
h_code_locals = torch.cuda.FloatTensor(image.shape[0], cfg.GAN.DF_DIM * 2, 16, 16).fill_(0)
for idx in range(MAX_OBJECTS):
current_label = label[:, idx].view(label.shape[0], cfg.GAN.LABEL_DIM, 1, 1)
current_label = current_label.repeat(1, 1, 16, 16)
h_code_local = stn(image, transf_matrices[:, idx], (image.shape[0], image.shape[1], 16, 16))
h_code_local = torch.cat((h_code_local, current_label), 1)
h_code_local = self.local(h_code_local)
h_code_local = stn(h_code_local, transf_matrices_inv[:, idx],
(h_code_local.shape[0], h_code_local.shape[1], 16, 16))
h_code_locals += h_code_local
h_code = self.conv1(image)
h_code = self.act(h_code)
h_code = self.conv2(h_code)
h_code = self.bn2(h_code)
h_code = self.act(h_code)
h_code = torch.cat((h_code, h_code_locals), 1)
h_code = self.conv3(h_code)
h_code = self.bn3(h_code)
h_code = self.act(h_code)
h_code = self.conv4(h_code)
h_code = self.bn4(h_code)
x_code4 = self.act(h_code)
return x_code4
# For 128 x 128 images
class D_NET128(nn.Module):
def __init__(self, b_jcu=True):
super(D_NET128, self).__init__()
ndf = cfg.GAN.DF_DIM
nef = cfg.TEXT.EMBEDDING_DIM
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s32_1 = Block3x3_leakRelu(ndf * 16, ndf * 8)
#
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code8 = self.img_code_s16(x_var) # 8 x 8 x 8df
x_code4 = self.img_code_s32(x_code8) # 4 x 4 x 16df
x_code4 = self.img_code_s32_1(x_code4) # 4 x 4 x 8df
return x_code4
# For 256 x 256 images
class D_NET256(nn.Module):
def __init__(self, b_jcu=True):
super(D_NET256, self).__init__()
ndf = cfg.GAN.DF_DIM
nef = cfg.TEXT.EMBEDDING_DIM
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
if b_jcu:
self.UNCOND_DNET = D_GET_LOGITS(ndf, nef, bcondition=False)
else:
self.UNCOND_DNET = None
self.COND_DNET = D_GET_LOGITS(ndf, nef, bcondition=True)
def forward(self, x_var):
x_code16 = self.img_code_s16(x_var)
x_code8 = self.img_code_s32(x_code16)
x_code4 = self.img_code_s64(x_code8)
x_code4 = self.img_code_s64_1(x_code4)
x_code4 = self.img_code_s64_2(x_code4)
return x_code4
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.LSTM",
"torch.nn.GRU",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.utils.model_zoo.load_url",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.cuda.FloatTensor",
"torch.Size",
"torch.sigmoid",
"torch.nn.functional.avg_pool2d",
"torch.autograd.Variable",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Dropout",
"torch.nn.Sigmoid",
"torch.nn.Upsample",
"torch.nn.functional.grid_sample",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.BatchNorm1d",
"torch.nn.functional.max_pool2d",
"torch.nn.Embedding"
] | 0.4.1 | niwtr/VQA-GAN | 61275bf7e5b3f37fd8fbc0ec9ce4e0045343e299 |
1.0 | """
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.mtransformer import TransformerEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.decoders.decoder import InputFeedRNNDecoder, StdRNNDecoder
from onmt.decoders.mtransformer import TransformerDecoder
from onmt.decoders.cnn_decoder import CNNDecoder
from onmt.modules import Embeddings, CopyGenerator
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
def build_embeddings(opt, word_field, feat_fields, for_encoder=True):
"""
Args:
opt: the option in current environment.
        word_field (Field): the word field, providing the vocabulary and padding token.
        feat_fields ([Field]): a list of feature fields.
for_encoder(bool): build Embeddings for encoder or decoder?
"""
emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
word_padding_idx = word_field.vocab.stoi[word_field.pad_token]
num_word_embeddings = len(word_field.vocab)
feat_pad_indices = [ff.vocab.stoi[ff.pad_token] for ff in feat_fields]
num_feat_embeddings = [len(ff.vocab) for ff in feat_fields]
emb = Embeddings(
word_vec_size=emb_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feat_pad_indices,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam"
)
return emb
def build_encoder(opt, embeddings):
"""
Various encoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this encoder.
"""
if opt.encoder_type == "transformer":
encoder = TransformerEncoder(
opt.model_mode,
opt.model_mode2,
opt.model_ffn_mode,
opt.enc_layers,
opt.enc_rnn_size,
opt.heads,
opt.transformer_ff,
opt.dropout,
embeddings
)
elif opt.encoder_type == "cnn":
encoder = CNNEncoder(
opt.enc_layers,
opt.enc_rnn_size,
opt.cnn_kernel_width,
opt.dropout,
embeddings)
elif opt.encoder_type == "mean":
encoder = MeanEncoder(opt.enc_layers, embeddings)
else:
encoder = RNNEncoder(
opt.rnn_type,
opt.brnn,
opt.enc_layers,
opt.enc_rnn_size,
opt.dropout,
embeddings,
opt.bridge
)
return encoder
def build_decoder(opt, embeddings):
"""
Various decoder dispatcher function.
Args:
opt: the option in current environment.
embeddings (Embeddings): vocab embeddings for this decoder.
"""
if opt.decoder_type == "transformer":
decoder = TransformerDecoder(
opt.model_mode,
opt.model_mode2,
opt.model_ffn_mode,
opt.dec_layers,
opt.dec_rnn_size,
opt.heads,
opt.transformer_ff,
opt.global_attention,
opt.copy_attn,
opt.self_attn_type,
opt.dropout,
embeddings
)
elif opt.decoder_type == "cnn":
decoder = CNNDecoder(
opt.dec_layers,
opt.dec_rnn_size,
opt.global_attention,
opt.copy_attn,
opt.cnn_kernel_width,
opt.dropout,
embeddings
)
else:
dec_class = InputFeedRNNDecoder if opt.input_feed else StdRNNDecoder
decoder = dec_class(
opt.rnn_type,
opt.brnn,
opt.dec_layers,
opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn
)
return decoder
def load_test_model(opt, dummy_opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
fields = inputters.load_fields_from_vocab(
checkpoint['vocab'], data_type=opt.data_type)
model_opt = checkpoint['opt']
for arg in dummy_opt:
if arg not in model_opt:
model_opt.__dict__[arg] = dummy_opt[arg]
#if not hasattr(model_opt, 'model_mode'):
model_opt.model_mode = opt.model_mode
model_opt.model_mode2 = opt.model_mode2
model_opt.model_ffn_mode = opt.model_ffn_mode
print(
"[onmt.model_builder.py] model_opt.model_mode: {}, model_opt.model_mode2: {}, model_opt.model_ffn_mode: {}"
.format(model_opt.model_mode, model_opt.model_mode2, model_opt.model_ffn_mode)
)
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
model.eval()
model.generator.eval()
return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, checkpoint=None):
"""
Args:
model_opt: the option loaded from checkpoint.
fields: `Field` objects for the model.
gpu(bool): whether to use gpu.
        checkpoint: the model generated by the training phase, or a resumed snapshot
model from a stopped training.
Returns:
the NMTModel.
"""
assert model_opt.model_type in ["text", "img", "audio"], \
"Unsupported model type %s" % model_opt.model_type
# for backward compatibility
if model_opt.rnn_size != -1:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
# Build encoder.
if model_opt.model_type == "text":
feat_fields = [fields[k]
for k in inputters.collect_features(fields, 'src')]
src_emb = build_embeddings(model_opt, fields["src"], feat_fields)
#print("[build_base_model in onmt.model_builder.py] fields[\"src\"]: {}".format(fields["src"]))
#print("[build_base_model in onmt.model_builder.py] feat_fields: {}".format(feat_fields))
#print("[build_base_model in onmt.model_builder.py] src_emb: {}".format(src_emb))
print(
"[onmt.model_builder.py] model_opt.model_mode: {}, model_opt.model_mode2: {}, model_opt.model_ffn_mode: {}"
.format(model_opt.model_mode, model_opt.model_mode2, model_opt.model_ffn_mode)
)
encoder = build_encoder(model_opt, src_emb)
elif model_opt.model_type == "img":
# why is build_encoder not used here?
# why is the model_opt.__dict__ check necessary?
if "image_channel_size" not in model_opt.__dict__:
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(
model_opt.enc_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dropout,
image_channel_size
)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(
model_opt.rnn_type,
model_opt.enc_layers,
model_opt.dec_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dec_rnn_size,
model_opt.audio_enc_pooling,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size
)
# Build decoder.
feat_fields = [fields[k]
for k in inputters.collect_features(fields, 'tgt')]
tgt_emb = build_embeddings(
model_opt, fields["tgt"], feat_fields, for_encoder=False)
# Share the embedding matrix - preprocess with share_vocab required.
if model_opt.share_embeddings:
# src/tgt vocab should be the same if `-share_vocab` is specified.
assert fields['src'].vocab == fields['tgt'].vocab, \
"preprocess with -share_vocab if you use share_embeddings"
tgt_emb.word_lut.weight = src_emb.word_lut.weight
decoder = build_decoder(model_opt, tgt_emb)
decoder2 = build_decoder(model_opt, tgt_emb)
# Build NMTModel(= encoder + decoder).
device = torch.device("cuda" if gpu else "cpu")
# model = onmt.models.NMTModel(encoder, decoder)
model = onmt.models.KTransformerModel(encoder, decoder, decoder2)
# Build Generator.
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
vocab_size = len(fields["tgt"].vocab)
pad_idx = fields["tgt"].vocab.stoi[fields["tgt"].pad_token]
generator = CopyGenerator(model_opt.dec_rnn_size, vocab_size, pad_idx)
# Load the model states from checkpoint or initialize them.
if checkpoint is not None:
        # This preserves backward compatibility for models using custom layernorm
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = {fix_key(k): v
for k, v in checkpoint['model'].items()}
# end of patch for backward compatibility
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
model.generator = generator
model.to(device)
return model
def build_model(model_opt, opt, fields, checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
logger.info(model)
return model
| [
"torch.nn.LogSoftmax",
"torch.device",
"torch.nn.init.xavier_uniform_",
"torch.load"
] | 1.0 | miyamotost/ITDD-with-DialogueAct | 827b8b27bacb8a48fea479e709c39eaee3610552 |
1.0 | """ Optimizers class """
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
from onmt.utils import use_gpu
import operator
import functools
from copy import copy
from math import sqrt
def build_optim(model, opt, checkpoint):
""" Build optimizer """
saved_optimizer_state_dict = None
if opt.train_from and opt.reset_optim != 'all':
optim = checkpoint['optim']
# We need to save a copy of optim.optimizer.state_dict() for setting
        # the optimizer state later on in Stage 2 of this method, since
        # the method optim.set_parameters(model) will overwrite
        # optim.optimizer, and with it the values stored in
# optim.optimizer.state_dict()
if opt.reset_optim != 'states':
saved_optimizer_state_dict = optim.optimizer.state_dict()
if opt.reset_optim == 'keep_states':
optim.method = opt.optim
optim.learning_rate = opt.learning_rate
optim.original_lr = opt.learning_rate
optim.max_grad_norm = opt.max_grad_norm
optim.lr_decay = opt.learning_rate_decay
optim.start_decay_steps = opt.start_decay_steps
optim.decay_steps = opt.decay_steps
optim.betas = [opt.adam_beta1, opt.adam_beta2]
optim.adagrad_accum = opt.adagrad_accumulator_init
optim.decay_method = opt.decay_method
optim.warmup_steps = opt.warmup_steps
optim.model_size = opt.rnn_size
else:
optim = Optimizer(
opt.optim, opt.learning_rate, opt.max_grad_norm,
lr_decay=opt.learning_rate_decay,
start_decay_steps=opt.start_decay_steps,
decay_steps=opt.decay_steps,
beta1=opt.adam_beta1,
beta2=opt.adam_beta2,
adagrad_accum=opt.adagrad_accumulator_init,
decay_method=opt.decay_method,
warmup_steps=opt.warmup_steps,
model_size=opt.rnn_size)
# Stage 1:
    # Essentially optim.set_parameters (re-)creates an optimizer using
    # model.parameters() as parameters that will be stored in the
# optim.optimizer.param_groups field of the torch optimizer class.
# Importantly, this method does not yet load the optimizer state, as
# essentially it builds a new optimizer with empty optimizer state and
# parameters from the model.
optim.set_parameters(model)
if opt.train_from and (opt.reset_optim in ['none', 'keep_states']):
# Stage 2: In this stage, which is only performed when loading an
# optimizer from a checkpoint, we load the saved_optimizer_state_dict
# into the re-created optimizer, to set the optim.optimizer.state
# field, which was previously empty. For this, we use the optimizer
# state saved in the "saved_optimizer_state_dict" variable for
# this purpose.
# See also: https://github.com/pytorch/pytorch/issues/2830
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
# Convert back the state values to cuda type if applicable
if use_gpu(opt):
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
# We want to make sure that indeed we have a non-empty optimizer state
# when we loaded an existing model. This should be at least the case
# for Adam, which saves "exp_avg" and "exp_avg_sq" state
# (Exponential moving average of gradient and squared gradient values)
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
return optim
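# --- Added sketch (not part of the original file): the two-stage pattern that build_optim
# follows, shown with a plain torch.optim.Adam so the state-reload step is explicit.
# `model` and `saved_state_dict` are hypothetical arguments.
def _rebuild_and_reload_example(model, saved_state_dict):
    """Stage 1: fresh optimizer over the current parameters; Stage 2: restore the saved state."""
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)   # Stage 1: empty state
    optimizer.load_state_dict(saved_state_dict)                 # Stage 2: exp_avg etc. restored
    return optimizer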
class MultipleOptimizer(object):
""" Implement multiple optimizers needed for sparse adam """
def __init__(self, op):
""" ? """
self.optimizers = op
@property
def param_groups(self):
param_groups = []
for optimizer in self.optimizers:
param_groups.extend(optimizer.param_groups)
return param_groups
def zero_grad(self):
""" ? """
for op in self.optimizers:
op.zero_grad()
def step(self):
""" ? """
for op in self.optimizers:
op.step()
@property
def state(self):
""" ? """
return {k: v for op in self.optimizers for k, v in op.state.items()}
def state_dict(self):
""" ? """
return [op.state_dict() for op in self.optimizers]
def load_state_dict(self, state_dicts):
""" ? """
assert len(state_dicts) == len(self.optimizers)
for i in range(len(state_dicts)):
self.optimizers[i].load_state_dict(state_dicts[i])
class Optimizer(object):
"""
Controller class for optimization. Mostly a thin
wrapper for `optim`, but also useful for implementing
rate scheduling beyond what is currently available.
Also implements necessary methods for training RNNs such
as grad manipulations.
Args:
      method (:obj:`str`): one of [sgd, adagrad, adadelta, adam, sparseadam, adafactor]
lr (float): learning rate
lr_decay (float, optional): learning rate decay multiplier
start_decay_steps (int, optional): step to start learning rate decay
beta1, beta2 (float, optional): parameters for adam
adagrad_accum (float, optional): initialization parameter for adagrad
      decay_method (str, optional): custom decay method, e.g. `noam`
      warmup_steps (int, optional): parameter for `noam` decay
      model_size (int, optional): parameter for `noam` decay
We use the default parameters for Adam that are suggested by
the original paper https://arxiv.org/pdf/1412.6980.pdf
These values are also used by other established implementations,
e.g. https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
https://keras.io/optimizers/
    Recently, slightly different values were used in the paper
    "Attention is all you need"
    https://arxiv.org/pdf/1706.03762.pdf; in particular, beta2=0.98 was
    used there. However, beta2=0.999 is still arguably the more
    established value, so we use that here as well.
"""
def __init__(self, method, learning_rate, max_grad_norm,
lr_decay=1, start_decay_steps=None, decay_steps=None,
beta1=0.9, beta2=0.999,
adagrad_accum=0.0,
decay_method=None,
warmup_steps=4000,
model_size=None):
self.last_ppl = None
self.learning_rate = learning_rate
self.original_lr = learning_rate
self.max_grad_norm = max_grad_norm
self.method = method
self.lr_decay = lr_decay
self.start_decay_steps = start_decay_steps
self.decay_steps = decay_steps
self._step = 0
self.betas = [beta1, beta2]
self.adagrad_accum = adagrad_accum
self.decay_method = decay_method
self.warmup_steps = warmup_steps
self.model_size = model_size
def set_parameters(self, model):
""" ? """
params = [p for p in model.parameters() if p.requires_grad]
if self.method == 'sgd':
self.optimizer = optim.SGD(params, lr=self.learning_rate)
elif self.method == 'adagrad':
self.optimizer = optim.Adagrad(
                params,
lr=self.learning_rate,
initial_accumulator_value=self.adagrad_accum)
elif self.method == 'adadelta':
self.optimizer = optim.Adadelta(params, lr=self.learning_rate)
elif self.method == 'adafactor':
self.optimizer = AdaFactor(params, non_constant_decay=True,
enable_factorization=True,
weight_decay=0)
elif self.method == 'adam':
self.optimizer = optim.Adam(params, lr=self.learning_rate,
betas=self.betas, eps=1e-9)
elif self.method == 'sparseadam':
dense = []
sparse = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# TODO: Find a better way to check for sparse gradients.
if 'embed' in name:
sparse.append(param)
else:
dense.append(param)
self.optimizer = MultipleOptimizer(
[optim.Adam(dense, lr=self.learning_rate,
betas=self.betas, eps=1e-8),
optim.SparseAdam(sparse, lr=self.learning_rate,
betas=self.betas, eps=1e-8)])
else:
raise RuntimeError("Invalid optim method: " + self.method)
def step(self):
"""Update the model parameters based on current gradients.
Optionally, will employ gradient modification or update learning
rate.
"""
self._step += 1
# Decay method used in tensor2tensor.
if self.decay_method == "noam":
lr_scale = (
self.model_size ** (-0.5) *
min(self._step ** (-0.5),
self._step * self.warmup_steps**(-1.5)))
# Decay based on start_decay_steps every decay_steps
elif self.start_decay_steps is not None:
step = self._step - self.start_decay_steps
lr_scale = (self.lr_decay ** (
max(step + self.decay_steps, 0) // self.decay_steps))
else:
lr_scale = 1
self.learning_rate = lr_scale * self.original_lr
for group in self.optimizer.param_groups:
if self.method != 'adafactor':
group['lr'] = self.learning_rate
if self.max_grad_norm:
clip_grad_norm_(group['params'], self.max_grad_norm)
self.optimizer.step()
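# --- Added sketch (not part of the original file): the "noam" schedule computed inside
# Optimizer.step above, isolated as a pure function so the warmup/decay shape is easy to
# inspect. The default model_size, warmup_steps and base_lr values are illustrative.
def _noam_lr_example(step, model_size=512, warmup_steps=4000, base_lr=2.0):
    """lr = base_lr * d_model^-0.5 * min(step^-0.5, step * warmup^-1.5)."""
    scale = model_size ** (-0.5) * min(step ** (-0.5), step * warmup_steps ** (-1.5))
    return base_lr * scale
# e.g. _noam_lr_example(1) is tiny, the rate rises linearly during warmup, peaks around
# step == warmup_steps, then decays as step^-0.5.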
# Code below is an implementation of https://arxiv.org/pdf/1804.04235.pdf
# inspired but modified from https://github.com/DeadAt0m/adafactor-pytorch
class AdaFactor(torch.optim.Optimizer):
def __init__(self, params, lr=None, beta1=0.9, beta2=0.999, eps1=1e-30,
eps2=1e-3, cliping_threshold=1, non_constant_decay=True,
enable_factorization=True, ams_grad=True, weight_decay=0):
enable_momentum = beta1 != 0
if non_constant_decay:
ams_grad = False
defaults = dict(lr=lr, beta1=beta1, beta2=beta2, eps1=eps1,
eps2=eps2, cliping_threshold=cliping_threshold,
weight_decay=weight_decay, ams_grad=ams_grad,
enable_factorization=enable_factorization,
enable_momentum=enable_momentum,
non_constant_decay=non_constant_decay)
super(AdaFactor, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdaFactor, self).__setstate__(state)
def _experimental_reshape(self, shape):
temp_shape = shape[2:]
if len(temp_shape) == 1:
new_shape = (shape[0], shape[1]*shape[2])
else:
tmp_div = len(temp_shape) // 2 + len(temp_shape) % 2
new_shape = (shape[0]*functools.reduce(operator.mul,
temp_shape[tmp_div:], 1),
shape[1]*functools.reduce(operator.mul,
temp_shape[:tmp_div], 1))
return new_shape, copy(shape)
def _check_shape(self, shape):
'''
        Returns (is_matrix, needs_reshape):
        is_matrix - True: use the factored (matrix) algorithm, False: treat as a vector;
        needs_reshape - True: the tensor has to be collapsed to 2-D first
        '''
        if len(shape) > 2:
            return True, True
        elif len(shape) == 2 and (shape[0] == 1 or shape[1] == 1):
            return False, False
        elif len(shape) == 2:
            return True, False
        else:
            return False, False
def _rms(self, x):
return sqrt(torch.mean(x.pow(2)))
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse \
gradients, use SparseAdam instead')
is_matrix, is_need_reshape = self._check_shape(grad.size())
new_shape = p.data.size()
if is_need_reshape and group['enable_factorization']:
new_shape, old_shape = \
self._experimental_reshape(p.data.size())
grad = grad.view(new_shape)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
if group['enable_momentum']:
state['exp_avg'] = torch.zeros(new_shape,
dtype=torch.float32,
device=p.grad.device)
if is_matrix and group['enable_factorization']:
state['exp_avg_sq_R'] = \
torch.zeros((1, new_shape[1]),
dtype=torch.float32,
device=p.grad.device)
state['exp_avg_sq_C'] = \
torch.zeros((new_shape[0], 1),
dtype=torch.float32,
device=p.grad.device)
else:
state['exp_avg_sq'] = torch.zeros(new_shape,
dtype=torch.float32,
device=p.grad.device)
if group['ams_grad']:
state['exp_avg_sq_hat'] = \
torch.zeros(new_shape, dtype=torch.float32,
device=p.grad.device)
if group['enable_momentum']:
exp_avg = state['exp_avg']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r = state['exp_avg_sq_R']
exp_avg_sq_c = state['exp_avg_sq_C']
else:
exp_avg_sq = state['exp_avg_sq']
if group['ams_grad']:
exp_avg_sq_hat = state['exp_avg_sq_hat']
state['step'] += 1
if group['lr'] is None:
# default value from paper
lr_t = min(1e-2, 1 / sqrt(state['step']))
lr_t *= max(group['eps2'], self._rms(p.data))
else:
lr_t = group['lr']
if group['enable_momentum']:
if group['non_constant_decay']:
beta1_t = group['beta1'] * \
(1 - group['beta1'] ** (state['step'] - 1)) \
/ (1 - group['beta1'] ** state['step'])
else:
beta1_t = group['beta1']
exp_avg.mul_(beta1_t).add_(1 - beta1_t, grad)
if group['non_constant_decay']:
beta2_t = group['beta2'] * \
(1 - group['beta2'] ** (state['step'] - 1)) / \
(1 - group['beta2'] ** state['step'])
else:
beta2_t = group['beta2']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r.mul_(beta2_t). \
add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
add_(group['eps1']),
dim=0, keepdim=True))
exp_avg_sq_c.mul_(beta2_t). \
add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
add_(group['eps1']),
dim=1, keepdim=True))
v = torch.mul(exp_avg_sq_c,
exp_avg_sq_r).div_(torch.sum(exp_avg_sq_r))
else:
exp_avg_sq.mul_(beta2_t). \
addcmul_(1 - beta2_t, grad, grad). \
add_((1 - beta2_t)*group['eps1'])
v = exp_avg_sq
g = grad
if group['enable_momentum']:
g = torch.div(exp_avg, 1 - beta1_t ** state['step'])
if group['ams_grad']:
torch.max(exp_avg_sq_hat, v, out=exp_avg_sq_hat)
v = exp_avg_sq_hat
u = torch.div(g, (torch.div(v, 1 - beta2_t **
state['step'])).sqrt().add_(group['eps1']))
else:
u = torch.div(g, v.sqrt())
u.div_(max(1, self._rms(u) / group['cliping_threshold']))
p.data.add_(-lr_t * (u.view(old_shape) if is_need_reshape and
group['enable_factorization'] else u))
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * lr_t, p.data)
return loss
| [
"torch.zeros",
"torch.mul",
"torch.max",
"torch.is_tensor",
"torch.nn.utils.clip_grad_norm_",
"torch.div",
"torch.sum"
] | 1.0 | miyamotost/ITDD-with-DialogueAct | e2267a2351a57be344e3de2ecf2f0d6820bc0cf9 |
1.4 | import numpy as np
import torch
from torch import Tensor
from PIL import Image, ImageEnhance
def torch_none(x: Tensor):
return x
def torch_rot90_(x: Tensor):
return x.transpose_(2, 3).flip(2)
def torch_rot90(x: Tensor):
return x.transpose(2, 3).flip(2)
def torch_rot180(x: Tensor):
return x.flip(2).flip(3)
def torch_rot270(x: Tensor):
return x.transpose(2, 3).flip(3)
def torch_flipud(x: Tensor):
return x.flip(2)
def torch_fliplp(x: Tensor):
return x.flip(3)
def torch_transpose(x: Tensor):
return x.transpose(2, 3)
def torch_transpose_(x: Tensor):
return x.transpose_(2, 3)
def torch_transpose2(x: Tensor):
return x.transpose(3, 2)
def pad_tensor(image_tensor: Tensor, pad_size: int = 32):
"""Pads input tensor to make it's height and width dividable by @pad_size
:param image_tensor: Input tensor of shape NCHW
:param pad_size: Pad size
:return: Tuple of output tensor and pad params. Second argument can be used to reverse pad operation of model output
"""
rows, cols = image_tensor.size(2), image_tensor.size(3)
if rows > pad_size:
pad_rows = rows % pad_size
pad_rows = pad_size - pad_rows if pad_rows > 0 else 0
else:
pad_rows = pad_size - rows
if cols > pad_size:
pad_cols = cols % pad_size
pad_cols = pad_size - pad_cols if pad_cols > 0 else 0
else:
pad_cols = pad_size - cols
if pad_rows == 0 and pad_cols == 0:
return image_tensor, (0, 0, 0, 0)
pad_top = pad_rows // 2
pad_btm = pad_rows - pad_top
pad_left = pad_cols // 2
pad_right = pad_cols - pad_left
pad = [pad_left, pad_right, pad_top, pad_btm]
image_tensor = torch.nn.functional.pad(image_tensor, pad)
return image_tensor, pad
def unpad_tensor(image_tensor, pad):
pad_left, pad_right, pad_top, pad_btm = pad
rows, cols = image_tensor.size(2), image_tensor.size(3)
return image_tensor[..., pad_top:rows - pad_btm, pad_left: cols - pad_right]
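# --- Added usage sketch (not in the original file): a pad_tensor / unpad_tensor round trip.
# The 500 x 357 spatial size is an arbitrary illustration.
def _pad_unpad_example():
    """Pad so H and W are divisible by 32, run a (placeholder) model, then crop back."""
    x = torch.randn(1, 3, 500, 357)
    padded, pad = pad_tensor(x, pad_size=32)   # padded H, W become multiples of 32
    y = padded                                 # stand-in for model(padded)
    y = unpad_tensor(y, pad)
    assert y.shape == x.shape
    return y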
def image_enhance(img, gama=1.55):
# image = img
# if convert:
image = np.asarray(img*255, np.uint8)
# --------- down contrast
image = Image.fromarray(image)
# image.show()
contrast = ImageEnhance.Contrast(image)
image = contrast.enhance(gama)
# ----------
# if convert:
image = np.asarray(image, np.float32) / 255.0
return image
| [
"torch.nn.functional.pad"
] | 1.4.0 | PKSingh0017/MSCG-Net | b7e79d68f14984fe460eff72bcbb8049e4d2bc9f |
1.8 | import copy
import numpy
import string
import time
import torch
import tqdm
from draugr.numpy_utilities import Split
from draugr.python_utilities import (
rgb_drop_alpha_batch_nhwc,
torch_vision_normalize_batch_nchw,
)
from draugr.torch_utilities import (
TorchEvalSession,
TorchTrainSession,
global_torch_device,
to_tensor,
uint_nhwc_to_nchw_float_batch,
)
from draugr.visualisation import confusion_matrix_plot
from matplotlib import pyplot
from munin.generate_report import ReportEntry, generate_html, generate_pdf
from munin.utilities.html_embeddings import generate_math_html, plt_html
from pathlib import Path
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from warg import NOD
__all__ = ["test_model", "pred_target_train_model"]
def test_model(model, data_iterator, latest_model_path, num_columns: int = 2):
model = model.eval().to(global_torch_device())
inputs, labels = next(data_iterator)
inputs = inputs.to(global_torch_device())
labels = labels.to(global_torch_device())
with torch.no_grad():
pred = model(inputs)
y_pred = pred.data.to("cpu").numpy()
y_pred_max = numpy.argmax(y_pred, axis=-1)
accuracy_w = accuracy_score(labels, y_pred_max)
precision_a, recall_a, fscore_a, support_a = precision_recall_fscore_support(
labels, y_pred_max
)
precision_w, recall_w, fscore_w, support_w = precision_recall_fscore_support(
labels, y_pred_max, average="weighted"
)
_, predicted = torch.max(pred, 1)
truth_labels = labels.data.to("cpu").numpy()
input_images_rgb = [
default_torch_retransform(x) for x in inputs.to(global_torch_device())
]
cell_width = (800 / num_columns) - 6 - 6 * 2
pyplot.plot(numpy.random.random((3, 3)))
alphabet = string.ascii_lowercase
class_names = numpy.array([*alphabet])
samples = len(y_pred)
predictions = [
[None for _ in range(num_columns)] for _ in range(samples // num_columns)
]
for i, a, b, c in zip(range(samples), input_images_rgb, y_pred_max, truth_labels):
pyplot.imshow(a)
if b == c:
outcome = "tp"
else:
outcome = "fn"
gd = ReportEntry(
name=i,
figure=plt_html(a, format="jpg", size=(cell_width, cell_width)),
prediction=class_names[b],
truth=class_names[c],
outcome=outcome,
explanation=None,
)
predictions[i // num_columns][i % num_columns] = gd
cfmat = confusion_matrix_plot(y_pred_max, truth_labels, class_names)
title = "Classification Report"
model_name = latest_model_path
confusion_matrix = plt_html(cfmat, format="png", size=(800, 800))
    accuracy = generate_math_html(r"\dfrac{tp+tn}{N}"), None, accuracy_w
    precision = generate_math_html(r"\dfrac{tp}{tp+fp}"), precision_a, precision_w
    recall = generate_math_html(r"\dfrac{tp}{tp+fn}"), recall_a, recall_w
    f1_score = (
        generate_math_html(r"2*\dfrac{precision*recall}{precision+recall}"),
fscore_a,
fscore_w,
)
support = generate_math_html("N_{class_truth}"), support_a, support_w
metrics = NOD.nod_of(
accuracy, precision, f1_score, recall, support
).as_flat_tuples()
bundle = NOD.nod_of(title, model_name, confusion_matrix, metrics, predictions)
file_name = Path(title.lower().replace(" ", "_"))
generate_html(file_name.with_suffix(".html"), **bundle)
generate_pdf(file_name.with_suffix(".html"), file_name.with_suffix(".pdf"))
# plot_utilities.plot_prediction(input_images_rgb, truth_labels, predicted, pred)
# pyplot.show()
def pred_target_train_model(
model,
train_iterator,
criterion,
optimizer,
scheduler,
writer,
interrupted_path,
test_data_iterator=None,
num_updates: int = 250000,
early_stop=None,
) -> torch.nn.Module:
"""
Args:
model:
train_iterator:
criterion:
optimizer:
scheduler:
writer:
interrupted_path:
test_data_iterator:
num_updates:
early_stop:
Returns:
"""
best_model_wts = copy.deepcopy(model.state_dict())
best_val_loss = 1e10
since = time.time()
try:
sess = tqdm.tqdm(range(num_updates), leave=False, disable=False)
val_loss = 0
update_loss = 0
val_acc = 0
last_val = None
last_out = None
with torch.autograd.detect_anomaly():
for update_i in sess:
for phase in [Split.Training, Split.Validation]:
if phase == Split.Training:
with TorchTrainSession(model):
input, true_label = zip(*next(train_iterator))
rgb_imgs = torch_vision_normalize_batch_nchw(
uint_nhwc_to_nchw_float_batch(
rgb_drop_alpha_batch_nhwc(to_tensor(input))
)
)
true_label = to_tensor(true_label, dtype=torch.long)
optimizer.zero_grad()
pred = model(rgb_imgs)
loss = criterion(pred, true_label)
loss.backward()
optimizer.step()
if last_out is None:
last_out = pred
else:
if not torch.dist(last_out, pred) > 0:
print(f"Same output{last_out},{pred}")
last_out = pred
update_loss = loss.data.cpu().numpy()
writer.scalar(f"loss/train", update_loss, update_i)
if scheduler:
scheduler.step()
elif test_data_iterator:
with TorchEvalSession(model):
                            test_rgb_imgs, test_true_label = zip(*next(test_data_iterator))
test_rgb_imgs = torch_vision_normalize_batch_nchw(
uint_nhwc_to_nchw_float_batch(
rgb_drop_alpha_batch_nhwc(to_tensor(test_rgb_imgs))
)
)
test_true_label = to_tensor(
test_true_label, dtype=torch.long
)
with torch.no_grad():
val_pred = model(test_rgb_imgs)
val_loss = criterion(val_pred, test_true_label)
_, cat = torch.max(val_pred, -1)
val_acc = torch.sum(cat == test_true_label) / float(
cat.size(0)
)
writer.scalar(f"loss/acc", val_acc, update_i)
writer.scalar(f"loss/val", val_loss, update_i)
if last_val is None:
last_val = cat
else:
if all(last_val == cat):
print(f"Same val{last_val},{cat}")
last_val = cat
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model_wts = copy.deepcopy(model.state_dict())
sess.write(
f"New best validation model at update {update_i} with test_loss {best_val_loss}"
)
torch.save(model.state_dict(), interrupted_path)
                            if early_stop is not None and val_loss < early_stop:
break
sess.set_description_str(
f"Update {update_i} - {phase} "
f"update_loss:{update_loss:2f} "
f"test_loss:{val_loss}"
f"val_acc:{val_acc}"
)
except KeyboardInterrupt:
print("Interrupt")
finally:
pass
model.load_state_dict(best_model_wts) # load best model weights
time_elapsed = time.time() - since
print(f"{time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s")
print(f"Best val loss: {best_val_loss:3f}")
return model
| [
"torch.max",
"torch.no_grad",
"torch.sum",
"torch.dist",
"torch.autograd.detect_anomaly"
] | 1.8.1 | aivclab/vision | 6c644dd72f68bca608a2900e5d9461e90fe841eb |
1.8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 22/03/2020
"""
"""Credit: mostly based on Ilya's excellent implementation here:
https://github.com/ikostrikov/pytorch-flows"""
import numpy
import torch
from torch import nn
from torch.nn import functional as F
class InverseAutoregressiveFlow(nn.Module):
"""Inverse Autoregressive Flows with LSTM-type update. One block.
Eq 11-14 of https://arxiv.org/abs/1606.04934"""
def __init__(self, num_input, num_hidden, num_context):
super().__init__()
self.made = MADE(
num_input=num_input,
num_output=num_input * 2,
num_hidden=num_hidden,
num_context=num_context,
)
# init such that sigmoid(s) is close to 1 for stability
self.sigmoid_arg_bias = nn.Parameter(torch.ones(num_input) * 2)
self.sigmoid = nn.Sigmoid()
self.log_sigmoid = nn.LogSigmoid()
def forward(self, input, context=None):
"""
Args:
input:
context:
Returns:
"""
m, s = torch.chunk(self.made(input, context), chunks=2, dim=-1)
s = s + self.sigmoid_arg_bias
sigmoid = self.sigmoid(s)
z = sigmoid * input + (1 - sigmoid) * m
return z, -self.log_sigmoid(s)
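# --- Added sketch (not from the original file): the IAF update in isolation.
# z = sigma(s) * x + (1 - sigma(s)) * m, and since m_i, s_i depend only on x_<i the
# per-dimension log|dz/dx| is log sigma(s), which is why the block above returns
# -log_sigmoid(s) as the negative log-determinant term. Shapes/values are illustrative.
def _iaf_update_example():
    x = torch.randn(4, 8)
    m = torch.zeros(4, 8)                       # shift produced by the autoregressive network
    s = torch.ones(4, 8) * 2.0                  # pre-sigmoid gate (bias keeps sigma(s) near 1)
    gate = torch.sigmoid(s)
    z = gate * x + (1 - gate) * m
    neg_log_det = -F.logsigmoid(s)              # matches the module's second output
    return z, neg_log_det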
class FlowSequential(nn.Sequential):
"""Forward pass."""
def forward(self, input, context=None):
"""
Args:
input:
context:
Returns:
"""
total_log_prob = torch.zeros_like(input, device=input.device)
for block in self._modules.values():
input, log_prob = block(input, context)
total_log_prob += log_prob
return input, total_log_prob
class MaskedLinear(nn.Module):
"""Linear layer with some input-output connections masked."""
def __init__(
self, in_features, out_features, mask, context_features=None, bias=True
):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias)
self.register_buffer("mask", mask)
if context_features is not None:
self.cond_linear = nn.Linear(context_features, out_features, bias=False)
def forward(self, input, context=None):
"""
Args:
input:
context:
Returns:
"""
output = F.linear(input, self.mask * self.linear.weight, self.linear.bias)
if context is None:
return output
else:
return output + self.cond_linear(context)
class MADE(nn.Module):
"""Implements MADE: Masked Autoencoder for Distribution Estimation.
Follows https://arxiv.org/abs/1502.03509
This is used to build MAF: Masked Autoregressive Flow (https://arxiv.org/abs/1705.07057)."""
def __init__(self, num_input, num_output, num_hidden, num_context):
super().__init__()
# m corresponds to m(k), the maximum degree of a node in the MADE paper
self._m = []
self._masks = []
self._build_masks(num_input, num_output, num_hidden, num_layers=3)
self._check_masks()
self.input_context_net = MaskedLinear(
num_input, num_hidden, self._masks[0], num_context
)
modules = []
modules.append(nn.ReLU())
modules.append(
MaskedLinear(num_hidden, num_hidden, self._masks[1], context_features=None)
)
modules.append(nn.ReLU())
modules.append(
MaskedLinear(num_hidden, num_output, self._masks[2], context_features=None)
)
self.net = nn.Sequential(*modules)
def _build_masks(self, num_input, num_output, num_hidden, num_layers):
"""Build the masks according to Eq 12 and 13 in the MADE paper."""
rng = numpy.random.RandomState(0)
# assign input units a number between 1 and D
self._m.append(numpy.arange(1, num_input + 1))
for i in range(1, num_layers + 1):
# randomly assign maximum number of input nodes to connect to
if i == num_layers:
# assign output layer units a number between 1 and D
m = numpy.arange(1, num_input + 1)
assert (
num_output % num_input == 0
), "num_output must be multiple of num_input"
self._m.append(
numpy.hstack([m for _ in range(num_output // num_input)])
)
else:
# assign hidden layer units a number between 1 and D-1
self._m.append(rng.randint(1, num_input, size=num_hidden))
# self._m.append(numpy.arange(1, num_hidden + 1) % (num_input - 1) + 1)
if i == num_layers:
mask = self._m[i][None, :] > self._m[i - 1][:, None]
else:
# input to hidden & hidden to hidden
mask = self._m[i][None, :] >= self._m[i - 1][:, None]
# need to transpose for torch linear layer, shape (num_output, num_input)
self._masks.append(torch.from_numpy(mask.astype(numpy.float32).T))
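        # Worked illustration (an assumed example, not from the original source): for
        # num_input = 3 the input units get degrees [1, 2, 3] and each hidden unit gets
        # a degree drawn from {1, 2}. The ">=" mask lets a hidden unit of degree d see
        # only inputs of degree <= d, while the strict ">" mask on the output layer lets
        # an output unit of degree d see only hidden units of degree < d, which keeps
        # the overall connectivity autoregressive.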
def _check_masks(self):
"""Check that the connectivity matrix between layers is lower triangular."""
# (num_input, num_hidden)
prev = self._masks[0].t()
for i in range(1, len(self._masks)):
# num_hidden is second axis
prev = prev @ self._masks[i].t()
final = prev.numpy()
num_input = self._masks[0].shape[1]
num_output = self._masks[-1].shape[0]
assert final.shape == (num_input, num_output)
if num_output == num_input:
assert numpy.triu(final).all() == 0
else:
for submat in numpy.split(
final, indices_or_sections=num_output // num_input, axis=1
):
assert numpy.triu(submat).all() == 0
def forward(self, input, context=None):
"""
Args:
input:
context:
Returns:
"""
# first hidden layer receives input and context
hidden = self.input_context_net(input, context)
# rest of the network is conditioned on both input and context
return self.net(hidden)
class Reverse(nn.Module):
"""An implementation of a reversing layer from
Density estimation using Real NVP
(https://arxiv.org/abs/1605.08803).
From https://github.com/ikostrikov/pytorch-flows/blob/master/main.py"""
def __init__(self, num_input):
super(Reverse, self).__init__()
self.perm = numpy.array(numpy.arange(0, num_input)[::-1])
self.inv_perm = numpy.argsort(self.perm)
def forward(self, inputs, context=None, mode="forward"):
"""
Args:
inputs:
context:
mode:
Returns:
"""
if mode == "forward":
return (
inputs[..., self.perm],
torch.zeros_like(inputs, device=inputs.device),
)
elif mode == "inverse":
return (
inputs[..., self.inv_perm],
torch.zeros_like(inputs, device=inputs.device),
)
else:
raise ValueError("Mode must be one of {forward, inverse}.")
| [
"torch.nn.Linear",
"torch.nn.Sigmoid",
"torch.nn.Sequential",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.functional.linear",
"torch.nn.LogSigmoid",
"torch.zeros_like"
] | 1.8.1 | aivclab/vision | 6c644dd72f68bca608a2900e5d9461e90fe841eb |
1.8 | import logging
import numpy
import os
import random
import shutil
import time
import torch
from draugr import AverageMeter, find_unclaimed_port
from draugr.numpy_utilities import Split
from draugr.torch_utilities import TensorBoardPytorchWriter
from pathlib import Path
from torch import distributed, multiprocessing, nn
from torch.backends import cudnn
from torch.optim import lr_scheduler
from neodroidvision.classification.architectures.self_attention_network import (
SelfAttentionTypeEnum,
make_san,
)
from san_utilities import (
cal_accuracy,
intersection_and_union_gpu,
mixup_data,
mixup_loss,
smooth_loss,
)
def get_logger():
"""
Returns:
"""
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def worker_init_fn(worker_id):
"""
Args:
worker_id:
"""
random.seed(CONFIG.manual_seed + worker_id)
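# Note: this hook only takes effect if it is handed to the data loaders, e.g.
#   torch.utils.data.DataLoader(train_set, ..., worker_init_fn=worker_init_fn)
# The loaders constructed in main_worker below do not currently pass it, so worker
# processes would otherwise rely on PyTorch's default per-worker seeding.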
def is_main_process():
"""
Returns:
"""
return not CONFIG.multiprocessing_distributed or (
CONFIG.multiprocessing_distributed and CONFIG.rank % CONFIG.ngpus_per_node == 0
)
def main_worker(gpu, ngpus_per_node, config):
"""
Args:
gpu:
ngpus_per_node:
config:
"""
global CONFIG, best_acc1
CONFIG, best_acc1 = config, 0
train_set = config.dataset_type(CONFIG.dataset_path, Split.Training)
val_set = config.dataset_type(CONFIG.dataset_path, Split.Validation)
if CONFIG.distributed:
if CONFIG.dist_url == "env://" and CONFIG.rank == -1:
CONFIG.rank = int(os.environ["RANK"])
if CONFIG.multiprocessing_distributed:
CONFIG.rank = CONFIG.rank * ngpus_per_node + gpu
distributed.init_process_group(
backend=CONFIG.dist_backend,
init_method=CONFIG.dist_url,
world_size=CONFIG.world_size,
rank=CONFIG.rank,
)
model = make_san(
self_attention_type=SelfAttentionTypeEnum(CONFIG.self_attention_type),
layers=CONFIG.layers,
kernels=CONFIG.kernels,
num_classes=train_set.response_shape[0],
)
criterion = nn.CrossEntropyLoss(ignore_index=CONFIG.ignore_label)
optimizer = torch.optim.SGD(
model.parameters(),
lr=CONFIG.base_lr,
momentum=CONFIG.momentum,
weight_decay=CONFIG.weight_decay,
)
if CONFIG.scheduler == "step":
scheduler = lr_scheduler.MultiStepLR(
optimizer, milestones=CONFIG.step_epochs, gamma=0.1
)
elif CONFIG.scheduler == "cosine":
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=CONFIG.epochs)
if is_main_process():
global logger, writer
logger = get_logger()
writer = TensorBoardPytorchWriter(str(CONFIG.save_path))
logger.info(CONFIG)
logger.info("=> creating model ...")
logger.info(f"Classes: {train_set.response_shape[0]}")
logger.info(model)
if CONFIG.distributed:
torch.cuda.set_device(gpu)
CONFIG.batch_size = int(CONFIG.batch_size / ngpus_per_node)
CONFIG.batch_size_val = int(CONFIG.batch_size_val / ngpus_per_node)
CONFIG.workers = int((CONFIG.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(
model.cuda(), device_ids=[gpu]
)
else:
model = torch.nn.DataParallel(model.cuda())
if CONFIG.weight:
if Path(CONFIG.weight).is_file():
if is_main_process():
logger.info(f"=> loading weight '{CONFIG.weight}'")
checkpoint = torch.load(CONFIG.weight)
model.load_state_dict(checkpoint["state_dict"])
if is_main_process():
logger.info(f"=> loaded weight '{CONFIG.weight}'")
else:
if is_main_process():
logger.info(f"=> no weight found at '{CONFIG.weight}'")
if CONFIG.resume:
if Path(CONFIG.resume).is_file():
if is_main_process():
logger.info(f"=> loading checkpoint '{CONFIG.resume}'")
checkpoint = torch.load(
CONFIG.resume, map_location=lambda storage, loc: storage.cuda(gpu)
)
CONFIG.start_epoch = checkpoint["epoch"]
best_acc1 = checkpoint["top1_val"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
if is_main_process():
logger.info(
f"=> loaded checkpoint '{CONFIG.resume}' (epoch {checkpoint['epoch']})"
)
else:
if is_main_process():
logger.info(f"=> no checkpoint found at '{CONFIG.resume}'")
if CONFIG.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_set)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_set)
else:
train_sampler = None
val_sampler = None
train_loader = torch.utils.data.DataLoader(
train_set,
batch_size=CONFIG.batch_size,
shuffle=(train_sampler is None),
num_workers=CONFIG.workers,
pin_memory=True,
sampler=train_sampler,
)
val_loader = torch.utils.data.DataLoader(
val_set,
batch_size=CONFIG.batch_size_val,
shuffle=False,
num_workers=CONFIG.workers,
pin_memory=True,
sampler=val_sampler,
)
for epoch in range(CONFIG.start_epoch, CONFIG.epochs):
if CONFIG.distributed:
train_sampler.set_epoch(epoch)
(
loss_train,
mIoU_train,
mAcc_train,
allAcc_train,
top1_train,
top5_train,
) = train(train_loader, model, criterion, optimizer, epoch)
loss_val, mIoU_val, mAcc_val, allAcc_val, top1_val, top5_val = validate(
val_loader, model, criterion
)
scheduler.step()
epoch_log = epoch + 1
if is_main_process():
writer.scalar("loss_train", loss_train, epoch_log)
writer.scalar("mIoU_train", mIoU_train, epoch_log)
writer.scalar("mAcc_train", mAcc_train, epoch_log)
writer.scalar("allAcc_train", allAcc_train, epoch_log)
writer.scalar("top1_train", top1_train, epoch_log)
writer.scalar("top5_train", top5_train, epoch_log)
writer.scalar("loss_val", loss_val, epoch_log)
writer.scalar("mIoU_val", mIoU_val, epoch_log)
writer.scalar("mAcc_val", mAcc_val, epoch_log)
writer.scalar("allAcc_val", allAcc_val, epoch_log)
writer.scalar("top1_val", top1_val, epoch_log)
writer.scalar("top5_val", top5_val, epoch_log)
if (epoch_log % CONFIG.save_freq == 0) and is_main_process():
            filename = CONFIG.save_path / f"train_epoch_{epoch_log}.pth"
            logger.info(f"Saving checkpoint to: {filename}")
torch.save(
{
"epoch": epoch_log,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"top1_val": top1_val,
"top5_val": top5_val,
},
filename,
)
if top1_val > best_acc1:
best_acc1 = top1_val
shutil.copyfile(filename, CONFIG.save_path / "model_best.pth")
if epoch_log / CONFIG.save_freq > 2:
deletename = (
CONFIG.save_path
/ f"train_epoch_{str(epoch_log - CONFIG.save_freq * 2)}.pth"
)
os.remove(deletename)
def train(train_loader, model, criterion, optimizer, epoch):
"""
Args:
train_loader:
model:
criterion:
optimizer:
epoch:
Returns:
"""
batch_time = AverageMeter()
data_time = AverageMeter()
loss_meter = AverageMeter()
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
top1_meter = AverageMeter()
top5_meter = AverageMeter()
model.train()
end = time.time()
max_iter = CONFIG.epochs * len(train_loader)
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if CONFIG.mixup_alpha:
eps = CONFIG.label_smoothing if CONFIG.label_smoothing else 0.0
input, target_a, target_b, lam = mixup_data(
input, target, CONFIG.mixup_alpha
)
output = model(input)
loss = mixup_loss(output, target_a, target_b, lam, eps)
else:
output = model(input)
loss = (
smooth_loss(output, target, CONFIG.label_smoothing)
if CONFIG.label_smoothing
else criterion(output, target)
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
top1, top5 = cal_accuracy(output, target, topk=(1, 5))
n = input.size(0)
if CONFIG.multiprocessing_distributed:
with torch.no_grad():
loss, top1, top5 = loss.detach() * n, top1 * n, top5 * n
count = target.new_tensor([n], dtype=torch.long)
distributed.all_reduce(loss)
distributed.all_reduce(top1)
distributed.all_reduce(top5)
distributed.all_reduce(count)
n = count.item()
loss, top1, top5 = loss / n, top1 / n, top5 / n
loss_meter.update(loss.item(), n), top1_meter.update(
top1.item(), n
), top5_meter.update(top5.item(), n)
output = output.max(1)[1]
intersection, union, target = intersection_and_union_gpu(
output, target, train_loader.dataset.response_shape[0], CONFIG.ignore_label
)
if CONFIG.multiprocessing_distributed:
distributed.all_reduce(intersection)
distributed.all_reduce(union)
distributed.all_reduce(target)
intersection, union, target = (
intersection.cpu().numpy(),
union.cpu().numpy(),
target.cpu().numpy(),
)
intersection_meter.update(intersection), union_meter.update(
union
), target_meter.update(target)
accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
batch_time.update(time.time() - end)
end = time.time()
# calculate remain time
current_iter = epoch * len(train_loader) + i + 1
remain_iter = max_iter - current_iter
remain_time = remain_iter * batch_time.avg
t_m, t_s = divmod(remain_time, 60)
t_h, t_m = divmod(t_m, 60)
remain_time = f"{int(t_h):02d}:{int(t_m):02d}:{int(t_s):02d}"
if ((i + 1) % CONFIG.print_freq == 0) and is_main_process():
logger.info(
f"Epoch: [{epoch + 1}/{CONFIG.epochs}][{i + 1}/{len(train_loader)}] Data {data_time.val:.3f} ("
f"{data_time.avg:.3f}) Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) Remain {remain_time} Loss "
f"{loss_meter.val:.4f} Accuracy {accuracy:.4f} Acc@1 {top1_meter.val:.3f} ({top1_meter.avg:.3f}) "
f"Acc@5 {top5_meter.val:.3f} ({top5_meter.avg:.3f})."
)
if is_main_process():
writer.scalar("loss_train_batch", loss_meter.val, current_iter)
writer.scalar(
"mIoU_train_batch",
numpy.mean(intersection / (union + 1e-10)),
current_iter,
)
writer.scalar(
"mAcc_train_batch",
numpy.mean(intersection / (target + 1e-10)),
current_iter,
)
writer.scalar("allAcc_train_batch", accuracy, current_iter)
writer.scalar("top1_train_batch", top1, current_iter)
writer.scalar("top5_train_batch", top5, current_iter)
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU = numpy.mean(iou_class)
mAcc = numpy.mean(accuracy_class)
allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
if is_main_process():
logger.info(
f"Train result at epoch [{epoch + 1}/{CONFIG.epochs}]: mIoU/mAcc/allAcc/top1/top5 {mIoU:.4f}/"
f"{mAcc:.4f}/{allAcc:.4f}/{top1_meter.avg:.4f}/{top5_meter.avg:.4f}."
)
return loss_meter.avg, mIoU, mAcc, allAcc, top1_meter.avg, top5_meter.avg
def validate(val_loader, model, criterion):
"""
Args:
val_loader:
model:
criterion:
Returns:
"""
if is_main_process():
logger.info(">>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>")
batch_time = AverageMeter()
data_time = AverageMeter()
loss_meter = AverageMeter()
intersection_meter = AverageMeter()
union_meter = AverageMeter()
target_meter = AverageMeter()
top1_meter = AverageMeter()
top5_meter = AverageMeter()
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
data_time.update(time.time() - end)
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
output = model(input)
loss = criterion(output, target)
top1, top5 = cal_accuracy(output, target, topk=(1, 5))
n = input.size(0)
if CONFIG.multiprocessing_distributed:
with torch.no_grad():
loss, top1, top5 = loss.detach() * n, top1 * n, top5 * n
count = target.new_tensor([n], dtype=torch.long)
distributed.all_reduce(loss), distributed.all_reduce(
top1
), distributed.all_reduce(top5), distributed.all_reduce(count)
n = count.item()
loss, top1, top5 = loss / n, top1 / n, top5 / n
loss_meter.update(loss.item(), n), top1_meter.update(
top1.item(), n
), top5_meter.update(top5.item(), n)
output = output.max(1)[1]
intersection, union, target = intersection_and_union_gpu(
output, target, val_loader.dataset.response_shape[0], CONFIG.ignore_label
)
if CONFIG.multiprocessing_distributed:
distributed.all_reduce(intersection), distributed.all_reduce(
union
), distributed.all_reduce(target)
intersection, union, target = (
intersection.cpu().numpy(),
union.cpu().numpy(),
target.cpu().numpy(),
)
intersection_meter.update(intersection), union_meter.update(
union
), target_meter.update(target)
accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
batch_time.update(time.time() - end)
end = time.time()
if ((i + 1) % CONFIG.print_freq == 0) and is_main_process():
logger.info(
f"Test: [{i + 1}/{len(val_loader)}] Data {data_time.val:.3f} ({data_time.avg:.3f}) Batch "
f"{batch_time.val:.3f} ({batch_time.avg:.3f}) Loss {loss_meter.val:.4f} ({loss_meter.avg:.4f}) "
f"Accuracy {accuracy:.4f} Acc@1 {top1_meter.val:.3f} ({top1_meter.avg:.3f}) Acc@5 "
f"{top5_meter.val:.3f} ({top5_meter.avg:.3f})."
)
iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
mIoU = numpy.mean(iou_class)
mAcc = numpy.mean(accuracy_class)
allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
if is_main_process():
logger.info(
f"Val result: mIoU/mAcc/allAcc/top1/top5 {mIoU:.4f}/{mAcc:.4f}/{allAcc:.4f}/{top1_meter.avg:.4f}/"
f"{top5_meter.avg:.4f}."
)
for i in range(val_loader.dataset.response_shape[0]):
if target_meter.sum[i] > 0:
logger.info(
f"Class_{i} Result: iou/accuracy {iou_class[i]:.4f}/{accuracy_class[i]:.4f} Count:"
f"{target_meter.sum[i]}"
)
logger.info("<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<")
return loss_meter.avg, mIoU, mAcc, allAcc, top1_meter.avg, top5_meter.avg
if __name__ == "__main__":
def main():
"""
"""
from samples.classification.san.configs.imagenet_san10_pairwise import (
SAN_CONFIG,
)
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
str(x) for x in SAN_CONFIG.train_gpu
)
if SAN_CONFIG.manual_seed is not None:
random.seed(SAN_CONFIG.manual_seed)
numpy.random.seed(SAN_CONFIG.manual_seed)
            torch.manual_seed(SAN_CONFIG.manual_seed)
            torch.cuda.manual_seed(SAN_CONFIG.manual_seed)
            torch.cuda.manual_seed_all(SAN_CONFIG.manual_seed)
cudnn.benchmark = False
cudnn.deterministic = True
if SAN_CONFIG.dist_url == "env://" and SAN_CONFIG.world_size == -1:
SAN_CONFIG.world_size = int(os.environ["WORLD_SIZE"])
SAN_CONFIG.distributed = (
SAN_CONFIG.world_size > 1 or SAN_CONFIG.multiprocessing_distributed
)
SAN_CONFIG.ngpus_per_node = len(SAN_CONFIG.train_gpu)
if len(SAN_CONFIG.train_gpu) == 1:
SAN_CONFIG.sync_bn = False
SAN_CONFIG.distributed = False
SAN_CONFIG.multiprocessing_distributed = False
if SAN_CONFIG.multiprocessing_distributed:
port = find_unclaimed_port()
SAN_CONFIG.dist_url = f"tcp://127.0.0.1:{port}"
SAN_CONFIG.world_size *= SAN_CONFIG.ngpus_per_node
multiprocessing.spawn(
main_worker,
nprocs=SAN_CONFIG.ngpus_per_node,
args=(SAN_CONFIG.ngpus_per_node, SAN_CONFIG),
)
else:
main_worker(SAN_CONFIG.train_gpu, SAN_CONFIG.ngpus_per_node, SAN_CONFIG)
main()
| [
"torch.cuda.manual_seed",
"torch.cuda.manual_seed_all",
"torch.distributed.init_process_group",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.no_grad",
"torch.multiprocessing.spawn",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.load",
"torch.distributed.all_reduce",
"torch.nn.CrossEntropyLoss"
] | 1.8.1 | aivclab/vision | 6c644dd72f68bca608a2900e5d9461e90fe841eb |
1.8 | import logging
import torch
import torch.utils.data
from pathlib import Path
from torch.nn import Module
from torch.utils.data import DataLoader
from tqdm import tqdm
from typing import Any, List
from warg import NOD
from neodroidvision import PROJECT_APP_PATH
from neodroidvision.data.detection.coco import COCODataset, coco_evaluation
from neodroidvision.data.detection.voc import VOCDataset, voc_evaluation
from neodroidvision.detection.single_stage.ssd.object_detection_dataloader import (
object_detection_data_loaders,
)
from neodroidvision.utilities import (
distributing_utilities,
is_main_process,
synchronise_torch_barrier,
)
__all__ = ["do_ssd_evaluation"]
from draugr.numpy_utilities import Split
def compute_on_dataset(
model: Module,
data_loader: DataLoader,
device: torch.device,
cpu_device=torch.device("cpu"),
) -> dict:
"""
Args:
model:
data_loader:
device:
cpu_device:
Returns:
"""
results_dict = {}
for batch in tqdm(data_loader):
images, targets, image_ids = batch
with torch.no_grad():
results_dict.update(
{
img_id: result
for img_id, result in zip(
image_ids, [o.to(cpu_device) for o in model(images.to(device))]
)
}
)
return results_dict
def accumulate_predictions_from_cuda_devices(predictions_per_gpu: Any) -> list:
"""
:param predictions_per_gpu:
:return:"""
all_predictions = distributing_utilities.all_gather_cuda(predictions_per_gpu)
if not distributing_utilities.is_main_process():
return
predictions = {}
for p in all_predictions: # merge the list of dicts
predictions.update(p)
    image_ids = list(
        sorted(predictions.keys())
    )  # the dict is keyed by image index; sort the keys to rebuild an ordered list
if len(image_ids) != image_ids[-1] + 1:
logger = logging.getLogger("SSD.inference")
logger.warning(
"Number of images that were gathered from multiple processes is not a contiguous set. Some "
"images "
"might be missing from the evaluation"
)
return [predictions[i] for i in image_ids]
def evaluate_dataset(dataset, predictions, output_dir: Path, **kwargs) -> dict:
"""evaluate dataset using different methods based on dataset type.
Args:
dataset: Dataset object
predictions(list[(boxes, labels, scores)]): Each item in the list represents the
prediction results for one image. And the index should match the dataset index.
output_dir: output folder, to save evaluation files or results.
Returns:
evaluation result"""
kws = dict(
dataset=dataset, predictions=predictions, output_dir=output_dir, **kwargs
)
if isinstance(dataset, VOCDataset):
return voc_evaluation(**kws)
elif isinstance(dataset, COCODataset):
return coco_evaluation(**kws)
else:
raise NotImplementedError
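# Illustration only: supporting another dataset type would follow the same dispatch
# pattern, e.g. adding a branch such as
#     elif isinstance(dataset, MyDataset):
#         return my_evaluation(**kws)
# where ``MyDataset`` and ``my_evaluation`` are hypothetical names, not part of
# this repository.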
def inference_ssd(
*,
model: Module,
data_loader: DataLoader,
dataset_name: str,
device: torch.device,
output_folder: Path = None,
use_cached: bool = False,
**kwargs,
) -> dict:
"""
:param model:
:param data_loader:
:param dataset_name:
:param device:
:param output_folder:
:param use_cached:
:param kwargs:
:return:"""
dataset = data_loader.dataset
logger = logging.getLogger("SSD.inference")
logger.info(f"Evaluating {dataset_name} dataset({len(dataset)} images):")
predictions_path = output_folder / "predictions.pth"
if use_cached and predictions_path.exists():
predictions = torch.load(predictions_path, map_location="cpu")
else:
predictions = compute_on_dataset(model, data_loader, device)
synchronise_torch_barrier()
predictions = accumulate_predictions_from_cuda_devices(predictions)
if not is_main_process():
return
if output_folder:
torch.save(predictions, predictions_path)
return evaluate_dataset(
dataset=dataset, predictions=predictions, output_dir=output_folder, **kwargs
)
@torch.no_grad()
def do_ssd_evaluation(
data_root: Path, cfg: NOD, model: Module, distributed: bool, **kwargs) -> List:
"""
Args:
:param data_root:
:param cfg:
:param model:
:param distributed:
:param kwargs:
:return:"""
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model = model.module
model.eval()
device = torch.device(cfg.MODEL.DEVICE)
eval_results = []
for dataset_name, data_loader in zip(
cfg.DATASETS.TEST,
object_detection_data_loaders(
data_root=data_root,
cfg=cfg,
split=Split.Validation,
distributed=distributed,
),
):
eval_results.append(
inference_ssd(
model=model,
data_loader=data_loader,
dataset_name=dataset_name,
device=device,
output_folder=PROJECT_APP_PATH.user_data
/ "results"
/ "inference"
/ dataset_name,
**kwargs,
)
)
return eval_results
| [
"torch.device",
"torch.no_grad",
"torch.save",
"torch.load"
] | 1.8.1 | aivclab/vision | 6c644dd72f68bca608a2900e5d9461e90fe841eb |
1.5 | import torch.nn as nn
class StyleClassifier(nn.Module): # classifies NPI outputs
def __init__(self, n=200, m=768, k=1):
"""
input_activs_shape: tuple of (b, n, m, 1)
b is the number of batches
n x m x 1 slices contain the elements of the original activations, flattened into a 2D array
target_label: tuple of (b, 1, m, 1)
the desired label for the predicted activations, as passed into the NPI network
"""
super(StyleClassifier, self).__init__()
print("Classifier INIT", flush=True)
self.n = n
self.m = m
self.k = k
self.N = self.n * self.m
fact1 = 2 ** 4
fact2 = 2 ** 5
fact3 = 2 ** 6
print("Defining classifier model", flush=True)
self.model = nn.Sequential(
nn.Linear(self.n * self.m * self.k, self.n // fact1),
nn.ReLU(),
nn.Linear(self.n // fact1, self.n // fact2),
nn.ReLU(),
nn.Linear(self.n // fact2, self.n // fact3),
nn.ReLU(),
nn.Linear(self.n // fact3, 1),
nn.Sigmoid(),
)
def forward(self, x):
return self.model(x.view(-1, self.n * self.m * self.k))
class Discriminator(nn.Module): # classifies NPI outputs
def __init__(self, input_activs_shape, input_targ_shape):
"""
input_activs_shape: tuple of (n, m, 1)
n x m x 1 slices contain the elements of the original activations, flattened into a 2D array
target_label: tuple of (b, 1, m, 1)
the desired label for the predicted activations, as passed into the NPI network
"""
super(Discriminator, self).__init__()
print("GenerationClassifier INIT")
self.n = input_activs_shape[0]
self.m = input_activs_shape[1]
self.k = input_activs_shape[2]
self.l = 1
fact1 = 2 ** 3
fact2 = 2 ** 4
fact3 = 2 ** 5
print("Defining GenerationClassifier model")
self.layer1 = nn.Sequential(nn.Linear(self.n * self.m * self.k, self.n // fact1),
nn.ReLU())
self.layer2 = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact1),
nn.ReLU())
self.layer3 = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact2),
nn.ReLU())
self.layer4 = nn.Sequential(nn.Linear(self.n // fact2, self.n // fact2),
nn.ReLU())
self.layer5 = nn.Sequential(nn.Linear(self.n // fact2, self.n // fact3),
nn.ReLU())
self.layer6 = nn.Sequential(nn.Linear(self.n // fact3, self.n // fact3),
nn.ReLU())
self.layer7 = nn.Sequential(nn.Linear(self.n // fact3, self.l * self.k),
nn.Sigmoid())
def forward(self, x):
metadata = {'ordered_hidden_activations': [], 'final_out_preview': None, 'final_out_returned': None}
out1 = self.layer1(x.view(-1, self.n * self.m * self.k))
out2 = self.layer2(out1)
out3 = self.layer3(out2)
out4 = self.layer4(out3)
out5 = self.layer5(out4)
out6 = self.layer6(out5)
final_out = self.layer7(out6)
# metadata['ordered_hidden_activations'] = [out1.detach().data.cpu().numpy(),
# out2.detach().data.cpu().numpy(),
# out3.detach().data.cpu().numpy(),
# out4.detach().data.cpu().numpy(),
# out5.detach().data.cpu().numpy(),
# out6.detach().data.cpu().numpy(),
# ]
# metadata['final_out_preview'] = final_out.detach().data.cpu().numpy()
# metadata['final_out_returned'] = final_out.view(-1, 1, self.l, self.k).detach().data.cpu().numpy()
return final_out.view(-1, 1, self.l, self.k) # , metadata
class ContentClassifier(nn.Module): # classifies NPI outputs
def __init__(self, input_activs_shape, input_targ_shape):
raise NotImplementedError("Content classifier should be pre-trained")
"""
input_activs_shape: tuple of (b, n, m, 1)
b is the number of batches
n x m x 1 slices contain the elements of the original activations, flattened into a 2D array
"""
super(ContentClassifier, self).__init__()
print("ContentClassifier INIT")
self.b = input_activs_shape[0]
self.n = input_activs_shape[1]
self.m = input_activs_shape[2]
self.k = input_activs_shape[3]
self.l = 1 # input_targ_shape[2]
fact1 = 2 ** 3
fact2 = 2 ** 3
fact3 = 2 ** 3
print("Defining ContentClassifier model")
self.linear1 = nn.Sequential(nn.Linear(self.n * self.m * self.k, self.n // fact1),
nn.ReLU())
self.linear1Post = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact1),
nn.ReLU())
self.linear2 = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact1),
nn.ReLU())
self.linear3 = nn.Sequential(nn.Linear(self.n // fact1, self.n // fact2),
nn.ReLU())
self.linear4 = nn.Sequential(nn.Linear(self.n // fact2, self.n // fact2),
nn.ReLU())
self.linear5 = nn.Sequential(nn.Linear(self.n // fact2, self.n // fact3),
nn.ReLU())
self.linear6 = nn.Sequential(nn.Linear(self.n // fact3, self.n // fact3),
nn.ReLU())
self.linear7Pre = nn.Sequential(nn.Linear(self.n // fact3, self.n // fact3),
nn.ReLU())
self.linear7 = nn.Sequential(nn.Linear(self.n // fact3, 1 * self.l * self.k),
nn.Sigmoid())
def forward(self, x):
metadata = {'ordered_hidden_activations': [], 'final_out_preview': None, 'final_out_returned': None}
out1 = self.linear1(x.view(-1, self.n * self.m * self.k))
out1Post = self.linear1Post(out1)
out2 = self.linear2(out1Post)
out3 = self.linear3(out2)
out4 = self.linear4(out3)
out5 = self.linear5(out4)
out6 = self.linear6(out5)
out7Pre = self.linear7Pre(out6)
        final_out = self.linear7(out7Pre)
metadata['ordered_hidden_activations'] = [out1.detach().data.cpu().numpy(),
out1Post.detach().data.cpu().numpy(),
out2.detach().data.cpu().numpy(),
out3.detach().data.cpu().numpy(),
out4.detach().data.cpu().numpy(),
out5.detach().data.cpu().numpy(),
out6.detach().data.cpu().numpy(),
out7Pre.detach().data.cpu().numpy(),
]
metadata['final_out_preview'] = final_out.detach().data.cpu().numpy()
metadata['final_out_returned'] = final_out.view(-1, 1, self.l, self.k).detach().data.cpu().numpy()
return final_out.view(-1, 1, self.l, self.k), metadata
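# A minimal shape-check sketch (not part of the original file); the batch size and
# the Discriminator's target shape below are illustrative assumptions.
if __name__ == "__main__":
    import torch
    clf = StyleClassifier(n=200, m=768, k=1)
    disc = Discriminator(input_activs_shape=(200, 768, 1), input_targ_shape=(1, 768, 1))
    acts = torch.rand(4, 200, 768, 1)  # b=4 stacks of flattened activations
    print(clf(acts).shape)   # torch.Size([4, 1]) -- one style score per example
    print(disc(acts).shape)  # torch.Size([4, 1, 1, 1]) -- one real/fake score per example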
| [
"torch.nn.Linear",
"torch.nn.Sigmoid",
"torch.nn.ReLU"
] | 1.5.1 | NancyFulda/towards-neural-programming-interfaces | 21b467af56848c4fc8642fb0412f9f8d1b7718a2 |
1.4 | '''
fast scnn
author: zacario li
date: 2020-03-27
'''
import time
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
class FastSCNN(nn.Module):
def __init__(self, numClasses, aux=False, **kwargs):
super(FastSCNN, self).__init__()
        # auxiliary head, used to accelerate convergence during training
self.aux = aux
# learning to down-sample (ph1)
self.learningToDownSample = LearningToDownSample(32, 48, 64)
# global feature extractor (ph2)
self.globalFeatureExtractor = GlobalFeatureExtractor(64, [64, 96, 128], 128, 6, [3,3,3])
# feature fusion (ph3)
self.featureFusion = FeatureFusion(64,128, 128)
# classifier (ph4)
self.classifier = Classifier(128, numClasses)
# for training only use
        if self.aux:
self.auxlayer = nn.Sequential(
nn.Conv2d(64, 32, 3, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(True),
nn.Dropout(0.1),
nn.Conv2d(32, numClasses, 1)
)
def forward(self, x):
inputSize = x.shape[2:]
out = []
# ph1
ph1 = self.learningToDownSample(x)
# ph2
x = self.globalFeatureExtractor(ph1)
# ph3
x = self.featureFusion(ph1,x)
# ph4
x = self.classifier(x)
# resize to input img size
x = F.interpolate(x, inputSize, mode='bilinear', align_corners=True)
out.append(x)
# when training, use auxiliary
if self.aux:
auxout = self.auxlayer(ph1)
auxout = F.interpolate(auxout, inputSize, mode='bilinear', align_corners=True)
out.append(auxout)
return out
'''
commonly used modules in the paper
Red: Conv2D
Gray: DWConv
Blue: DSConv
Green: Bottleneck
Pink: Pyramid Pooling
Yellow: Upsample
'''
class _Conv2D(nn.Module):
'''
Red
'''
def __init__(self, inChannels, outChannels, kernel, stride=1, padding=0, **kwargs):
super(_Conv2D, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(inChannels, outChannels, kernel, stride, padding, bias=False),
nn.BatchNorm2d(outChannels),
nn.ReLU(True)
)
def forward(self, x):
x = self.conv(x)
return x
class _DSConv(nn.Module):
'''
Blue
'''
def __init__(self, inChannels, outChannels, stride=1, **kwargs):
super(_DSConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(inChannels,inChannels, 3, stride, 1, groups=inChannels, bias=False),
nn.BatchNorm2d(inChannels),
nn.ReLU(True),
nn.Conv2d(inChannels,outChannels,1,bias=False),
nn.BatchNorm2d(outChannels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _DWConv(nn.Module):
'''
Gray
'''
def __init__(self, inChannels, outChannels, stride=1, **kwargs):
super(_DWConv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(inChannels, outChannels, 3, stride, 1, groups=inChannels,bias=False),
nn.BatchNorm2d(outChannels),
nn.ReLU(True)
)
def forward(self, x):
return self.conv(x)
class _Bottleneck(nn.Module):
'''
Green:
Bottleneck
'''
def __init__(self, inChannels, outChannels, t=6, stride=2, **kwargs):
super(_Bottleneck, self).__init__()
self.shortcut = stride == 1 and inChannels == outChannels
self.block = nn.Sequential(
_Conv2D(inChannels, inChannels*t, 1),
_DWConv(inChannels*t, inChannels*t, stride),
#the last pointwise conv does not use non-linearity f. described in Table 2. Page 4
nn.Conv2d(inChannels*t, outChannels, 1,bias=False),
nn.BatchNorm2d(outChannels)
)
def forward(self, x):
out = self.block(x)
if self.shortcut:
out = x + out
return out
class _PPM(nn.Module):
'''
Pink
'''
def __init__(self,inChannels, outChannels, **kwargs):
super(_PPM, self).__init__()
# described in PSPNet paper(https://arxiv.org/pdf/1612.01105.pdf), 3.2, page 3
tempChannel = int(inChannels/4)
self.p1 = _Conv2D(inChannels, tempChannel, 1)
self.p2 = _Conv2D(inChannels, tempChannel, 1)
self.p3 = _Conv2D(inChannels, tempChannel, 1)
self.p4 = _Conv2D(inChannels, tempChannel, 1)
        # why is a conv2d needed here? The paper does not say anything about it
self.cat = _Conv2D(inChannels*2, outChannels, 1)
def featurePooling(self, x, size):
avgp = nn.AdaptiveAvgPool2d(size)
x = avgp(x)
return x
def upsample(self, x, size):
return F.interpolate(x, size, mode='bilinear', align_corners=True)
def forward(self, x):
size = x.shape[2:]
f1 = self.upsample(self.p1(self.featurePooling(x,1)),size)
f2 = self.upsample(self.p2(self.featurePooling(x,2)),size)
f3 = self.upsample(self.p3(self.featurePooling(x,3)),size)
f4 = self.upsample(self.p4(self.featurePooling(x,6)),size)
x = torch.cat([x, f1, f2, f3, f4],dim=1)
x = self.cat(x)
return x
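# Shape note for _PPM as instantiated below (a reading of the code, not a statement
# from the paper): with inChannels = outChannels = 128 the module pools the feature
# map to 1x1, 2x2, 3x3 and 6x6 grids, projects each branch to 128 / 4 = 32 channels,
# upsamples back to the input resolution, and concatenates with the input
# (128 + 4 * 32 = 256 channels) before the final 1x1 projection back to 128 channels.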
# ph1
class LearningToDownSample(nn.Module):
'''
    ph1 has two dsconv layers, so we need to pass in these channel parameters
'''
def __init__(self, dsc1, dsc2, dsc2out, **kwargs):
super(LearningToDownSample, self).__init__()
# described in paper, Table 1, page 4
self.conv = _Conv2D(3,dsc1, 3, 2)
self.dsc1 = _DSConv(dsc1,dsc2,2)
self.dsc2 = _DSConv(dsc2,dsc2out,2)
def forward(self, x):
x = self.conv(x)
x = self.dsc1(x)
x = self.dsc2(x)
return x
# ph2
class GlobalFeatureExtractor(nn.Module):
'''
ph2
'''
def __init__(self, inChannels=64, btChannels=[64,96,128],
outChannels=128, t=6, numBt=[3,3,3], **kwargs):
super(GlobalFeatureExtractor, self).__init__()
# described in paper, Figure 1, page 2, we have 3 different shape bottlenecks
self.bt1 = self._make_btlayer(_Bottleneck, inChannels, btChannels[0],numBt[0],t,2)
self.bt2 = self._make_btlayer(_Bottleneck, btChannels[0], btChannels[1],numBt[1],t,2)
self.bt3 = self._make_btlayer(_Bottleneck, btChannels[1], btChannels[2],numBt[2],t,1)
self.ppm = _PPM(btChannels[2],outChannels)
def _make_btlayer(self, bt, inChannels, outChannels, numBlock, t=6, stride=1):
layers = []
layers.append(bt(inChannels, outChannels, t, stride))
for i in range(1, numBlock):
layers.append(bt(outChannels, outChannels, t, 1))
return nn.Sequential(*layers)
def forward(self, x):
x = self.bt1(x)
x = self.bt2(x)
x = self.bt3(x)
x = self.ppm(x)
return x
# ph3
class FeatureFusion(nn.Module):
def __init__(self, ph1InChannel, ph2InChannel, outChannels, scale=4, **kwargs):
super(FeatureFusion, self).__init__()
self.scale = scale
self.dwconv = _DWConv(ph2InChannel,outChannels,1)
self.upBranch = nn.Sequential(nn.Conv2d(outChannels, outChannels, 1),
nn.BatchNorm2d(outChannels))
self.downBranch = nn.Sequential(nn.Conv2d(ph1InChannel, outChannels, 1),
nn.BatchNorm2d(outChannels))
self.activation = nn.ReLU(True)
def forward(self, ph1Feature, ph2Feature):
xUp = F.interpolate(ph2Feature, size=ph1Feature.shape[2:], mode='bilinear', align_corners=True)
xUp = self.dwconv(xUp)
xUp = self.upBranch(xUp)
xDown = self.downBranch(ph1Feature)
out = xUp + xDown
out = self.activation(out)
return out
# ph4
class Classifier(nn.Module):
'''
without upsample and softmax
'''
def __init__(self, inChannels, numClasses, stride=1):
super(Classifier, self).__init__()
# described in 3.2.4 Classifier, page 5
self.dsconv1 = _DSConv(inChannels, inChannels, stride)
self.dsconv2 = _DSConv(inChannels, inChannels, stride)
self.conv = nn.Conv2d(inChannels, numClasses, 1)
def forward(self, x):
x = self.dsconv1(x)
x = self.dsconv2(x)
x = self.conv(x)
return x
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
ntimes = 100
model = FastSCNN(4)
model.cuda()
model.eval()
with torch.no_grad():
x = torch.randn(1,3,320,320)
x = x.cuda()
# warmup
out = model(x)
start = time.time()
for i in range(ntimes):
model(x)
print('fps is :', 1.0/((time.time() - start)/ntimes)) | [
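        # Additional timing sketch (not in the original benchmark): CUDA kernels launch
        # asynchronously, so the wall-clock figure above can under-report latency.
        # Synchronising before reading the clock gives a more faithful number; this
        # assumes the same CUDA device the rest of this demo already uses.
        torch.cuda.synchronize()
        start = time.time()
        for i in range(ntimes):
            model(x)
        torch.cuda.synchronize()
        print('fps (synchronized) is :', 1.0 / ((time.time() - start) / ntimes))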
"torch.cat",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"torch.no_grad",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.randn"
] | 1.4.0 | zacario-li/Fast-SCNN_pytorch | c7ff081e3ed626fcf7fc752696a38431f9a00942 |
1.8 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from .fc_encoder import FcEncoder
class BiLSTMEncoder(nn.Module):
def __init__(self, input_size, hidden_size):
super(BiLSTMEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,
bidirectional=True, num_layers=1)
def forward(self, x, states=None):
'''
Parameters:
------------------------
x: input feature seqences
states: (h_0, c_0)
'''
r_out, (h_n, h_c) = self.rnn(x, states)
return r_out, (h_n, h_c)
class AttentiveLSTMEncoder(nn.Module):
def __init__(self, input_size, hidden_size):
super(AttentiveLSTMEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,
bidirectional=True, num_layers=1)
self.layer_norm = nn.LayerNorm(hidden_size * 2)
self.se = nn.Sequential(
nn.Conv1d(hidden_size*2, hidden_size // 2, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.Conv1d(hidden_size // 2, hidden_size // 2, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv1d(hidden_size // 2, hidden_size*2, kernel_size=1),
nn.Sigmoid()
)
self.out_cnn = nn.Sequential(
nn.Conv1d(hidden_size*2, hidden_size*2, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.Conv1d(hidden_size*2, hidden_size*2, kernel_size=3, padding=1),
nn.ReLU(inplace=True)
)
def forward(self, x, states=None):
'''
Parameters:
------------------------
x: input feature seqences
states: (h_0, c_0)
'''
r_out, (h_n, h_c) = self.rnn(x, states)
# attn = self.se(r_out.transpose(1, 2))
# attn = attn.transpose(1, 2)
# return r_out * attn, (h_n, h_c)
return r_out, (h_n, h_c)
class LSTMEncoder(nn.Module):
def __init__(self, input_size, hidden_size):
super(LSTMEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,
num_layers=1)
def forward(self, x, states):
'''
Parameters:
------------------------
x: input feature seqences
states: (h_0, c_0)
'''
r_out, (h_n, h_c) = self.rnn(x, states)
return r_out, (h_n, h_c)
class BiLSTM_official_Encoder(nn.Module):
def __init__(self, input_size, hidden_size):
        super(BiLSTM_official_Encoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,
bidirectional=True, num_layers=1)
def forward(self, x):
'''
Parameters:
------------------------
x: input feature seqences
'''
r_out, (h_n, h_c) = self.rnn(x)
return r_out, (h_n, h_c)
class LSTM_official_Encoder(nn.Module):
def __init__(self, input_size, hidden_size):
super(LSTM_official_Encoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True,
num_layers=1)
def forward(self, x):
'''
Parameters:
------------------------
x: input feature seqences
'''
r_out, (h_n, h_c) = self.rnn(x)
return r_out, (h_n, h_c)
class FcLstmEncoder(nn.Module):
def __init__(self, input_size, hidden_size, bidirectional=False):
super(FcLstmEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.fc = FcEncoder(input_size, [hidden_size, hidden_size], dropout=0.1, dropout_input=False)
self.rnn = nn.LSTM(self.hidden_size, self.hidden_size, batch_first=True,
num_layers=1, bidirectional=bidirectional)
def forward(self, x, states):
x = self.fc(x)
r_out, (h_n, h_c) = self.rnn(x, states)
return r_out, (h_n, h_c)
class AttentionFusionNet(nn.Module):
def __init__(self, a_dim, v_dim, l_dim, hidden_size):
super(AttentionFusionNet, self).__init__()
self.a_dim = a_dim
self.v_dim = v_dim
self.l_dim = l_dim
self.hidden_size = hidden_size
self.mapping = nn.Linear(self.hidden_size, self.hidden_size)
self.modality_context = nn.Parameter(torch.Tensor(self.hidden_size, 1))
self.modality_context.data.normal_(0, 0.05)
self.A_conv = nn.Conv1d(a_dim, hidden_size, kernel_size=1, padding=0)
self.V_conv = nn.Conv1d(v_dim, hidden_size, kernel_size=1, padding=0)
self.L_conv = nn.Conv1d(l_dim, hidden_size, kernel_size=1, padding=0)
        self.rnn = nn.LSTM(self.hidden_size, self.hidden_size, batch_first=True)
def atten_embd(self, a_input, v_input, l_input):
a_input = a_input.unsqueeze(-2) # [batch_size, seq_len, 1, embd_dim]
v_input = v_input.unsqueeze(-2)
l_input = l_input.unsqueeze(-2)
data = torch.cat([a_input, v_input, l_input], dim=-2) # [batch_size, seq_len, 3, embd_dim]
batch_size, seq_len, _, embd_dim = data.size()
proj_data = torch.tanh(self.mapping(data)) # [batch_size, seq_len, 3, hidden_size]
        weight = F.softmax(proj_data @ self.modality_context, dim=-2)  # [batch_size, seq_len, 3, 1]
fusion = torch.sum(data * weight, dim=-2)
return fusion
def forward(self, a_input, v_input, l_input, states):
'''
Input size [batch_size, seq_len, embd_dim]
'''
a_input = self.A_conv(a_input.transpose(1, 2)).permute(0, 2, 1)
v_input = self.V_conv(v_input.transpose(1, 2)).permute(0, 2, 1)
l_input = self.L_conv(l_input.transpose(1, 2)).permute(0, 2, 1)
fusion = self.atten_embd(a_input, v_input, l_input) # [batch_size, seq_len, embd_dim]
r_out, (h_n, h_c) = self.rnn(fusion, states)
return r_out, (h_n, h_c)
class AttentionFusionNet2(nn.Module):
def __init__(self, a_dim, v_dim, l_dim, hidden_size):
super(AttentionFusionNet2, self).__init__()
self.a_dim = a_dim
self.v_dim = v_dim
self.l_dim = l_dim
self.hidden_size = hidden_size
self.mapping = nn.Linear(self.hidden_size, self.hidden_size)
self.A_conv = nn.Conv1d(a_dim, hidden_size, kernel_size=1, padding=0)
self.V_conv = nn.Conv1d(v_dim, hidden_size, kernel_size=1, padding=0)
self.L_conv = nn.Conv1d(l_dim, hidden_size, kernel_size=1, padding=0)
self.context_proj = nn.Linear(3 * hidden_size, hidden_size)
        self.rnn = nn.LSTM(self.hidden_size, self.hidden_size, batch_first=True)
def atten_embd(self, a_input, v_input, l_input):
batch_size, seq_len, embd_dim = a_input.size()
context = torch.cat([a_input, v_input, l_input], dim=-1)
context = torch.tanh(self.context_proj(context)).view(-1, self.hidden_size, 1) # [batch_size * seq_len, hidden_size, 1]
_a_input = a_input.contiguous().view(batch_size * seq_len, 1, self.hidden_size) # [batch_size * seq_len, 1, hidden_size]
_v_input = v_input.contiguous().view(batch_size * seq_len, 1, self.hidden_size) # [batch_size * seq_len, 1, hidden_size]
_l_input = l_input.contiguous().view(batch_size * seq_len, 1, self.hidden_size) # [batch_size * seq_len, 1, hidden_size]
a_weight = torch.bmm(_a_input, context).view(batch_size, -1, 1) # [batch_size, seq_len, 1]
v_weight = torch.bmm(_v_input, context).view(batch_size, -1, 1)
l_weight = torch.bmm(_l_input, context).view(batch_size, -1, 1)
weight = torch.cat([a_weight, v_weight, l_weight], dim=-1) # [batch_size, seq_len, 3]
weight = F.softmax(weight, dim=-1).unsqueeze(-1)
data = torch.cat([a_input.unsqueeze(-2), v_input.unsqueeze(-2), l_input.unsqueeze(-2)], dim=-2)
fusion = torch.sum(data * weight, dim=-2)
return fusion
def forward(self, a_input, v_input, l_input, states):
'''
Input size [batch_size, seq_len, embd_dim]
'''
a_input = self.A_conv(a_input.transpose(1, 2)).permute(0, 2, 1)
v_input = self.V_conv(v_input.transpose(1, 2)).permute(0, 2, 1)
l_input = self.L_conv(l_input.transpose(1, 2)).permute(0, 2, 1)
fusion = self.atten_embd(a_input, v_input, l_input) # [batch_size, seq_len, embd_dim]
r_out, (h_n, h_c) = self.rnn(fusion, states)
return r_out, (h_n, h_c)
"""
class BiLSTMEncoder(nn.Module):
''' LSTM encoder
'''
def __init__(self, input_size, hidden_size, embd_size):
super(LSTMEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.embd_size = embd_size
self.rnn = nn.LSTM(self.input_size, self.hidden_size, bidirectional=True, batch_first=True)
self.fc = nn.Sequential(
nn.Linear(self.hidden_size*2, self.embd_size),
nn.ReLU(),
)
def forward(self, x, length):
batch_size = x.size(0)
# x = pack_padded_sequence(x, length, batch_first=True, enforce_sorted=False)
r_out, (h_n, h_c) = self.rnn(x)
h_n = h_n.contiguous().view(batch_size, -1)
embd = self.fc(h_n)
return embd
class LSTMEncoder(nn.Module):
''' one directional LSTM encoder
'''
def __init__(self, input_size, hidden_size, embd_method='last'):
super(LSTMEncoder, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.rnn = nn.LSTM(self.input_size, self.hidden_size, batch_first=True)
assert embd_method in ['maxpool', 'attention', 'last']
self.embd_method = embd_method
if self.embd_method == 'maxpool':
self.maxpool = nn.MaxPool1d(self.hidden_size)
elif self.embd_method == 'attention':
self.attention_vector_weight = nn.Parameter(torch.Tensor(hidden_size, 1))
self.attention_layer = nn.Sequential(
nn.Linear(self.hidden_size, self.hidden_size),
nn.Tanh(),
)
self.softmax = nn.Softmax(dim=-1)
def embd_attention(self, r_out, h_n):
        '''
        Implementation based on these blog posts:
        https://blog.csdn.net/dendi_hust/article/details/94435919
        https://blog.csdn.net/fkyyly/article/details/82501126
        Paper: Hierarchical Attention Networks for Document Classification
        formulation: lstm_output * softmax(u * tanh(W * lstm_output + Bias))
        W and Bias form the projection; the Bias term is optional
        u is the attention vector, with size equal to hidden_size
        '''
hidden_reps = self.attention_layer(r_out) # [batch_size, seq_len, hidden_size]
atten_weight = (hidden_reps @ self.attention_vector_weight) # [batch_size, seq_len, 1]
atten_weight = self.softmax(atten_weight) # [batch_size, seq_len, 1]
# [batch_size, seq_len, hidden_size] * [batch_size, seq_len, 1] = [batch_size, seq_len, hidden_size]
sentence_vector = torch.sum(r_out * atten_weight, dim=1) # [batch_size, hidden_size]
return sentence_vector
def embd_maxpool(self, r_out, h_n):
embd = self.maxpool(r_out.transpose(1,2)) # r_out.size()=>[batch_size, seq_len, hidden_size]
# r_out.transpose(1, 2) => [batch_size, hidden_size, seq_len]
return embd.squeeze()
def embd_last(self, r_out, h_n):
#Just for one layer and single direction
return h_n.squeeze()
def forward(self, x):
'''
r_out shape: seq_len, batch, num_directions * hidden_size
hn and hc shape: num_layers * num_directions, batch, hidden_size
'''
r_out, (h_n, h_c) = self.rnn(x)
embd = getattr(self, 'embd_'+self.embd_method)(r_out, h_n)
return embd
"""
if __name__ == '__main__':
# model = AttentionFusionNet2(100, 200, 300, 128)
# a_input = torch.rand(12, 30, 100)
# v_input = torch.rand(12, 30, 200)
# l_input = torch.rand(12, 30, 300)
# state = (torch.zeros(1, 12, 128), torch.zeros(1, 12, 128))
# r_out, (h_n, h_c) = model(a_input, v_input, l_input, state)
# print(r_out.shape)
model = AttentiveLSTMEncoder(345, 256)
input = torch.rand(32, 300, 345)
out, _ = model(input)
print(out.shape) | [
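    # A small shape check for the attention-based fusion module defined above; the
    # dimensions and batch size are illustrative assumptions, mirroring the
    # commented-out example at the top of this block.
    fusion_model = AttentionFusionNet(100, 200, 300, 128)
    a_feat = torch.rand(4, 30, 100)
    v_feat = torch.rand(4, 30, 200)
    l_feat = torch.rand(4, 30, 300)
    fused, _ = fusion_model(a_feat, v_feat, l_feat, None)  # None -> zero-initialised LSTM state
    print(fused.shape)  # torch.Size([4, 30, 128])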
"torch.nn.Linear",
"torch.rand",
"torch.cat",
"torch.nn.LayerNorm",
"torch.nn.LSTM",
"torch.nn.Conv1d",
"torch.nn.Sigmoid",
"torch.bmm",
"torch.nn.ReLU",
"torch.nn.functional.softmax",
"torch.Tensor",
"torch.sum"
] | 1.8.0 | AIM3-RUC/ABAW2022 | f1d25dc9914cc6768e58c14cea893c8e00b541bd |
1.1 | # Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example of building a reinforcement learning based,
data augmentation enhanced sentence classifier
based on pre-trained BERT model.
"""
import argparse
import functools
import logging
import os
import torch
import torch.nn.functional as F
import texar.torch as tx
from transformers import BertForMaskedLM
from config import config_data, config_classifier
from utils import model_utils
from forte.models.da_rl import MetaAugmentationWrapper, TexarBertMetaModule
parser = argparse.ArgumentParser()
parser.add_argument(
"--pretrained-model-name",
type=str,
default="bert-base-uncased",
choices=tx.modules.BERTEncoder.available_checkpoints(),
help="Name of the pre-trained downstream checkpoint to load.",
)
parser.add_argument(
"--output-dir",
default="output/",
help="The output directory where the model checkpoints will be written.",
)
parser.add_argument(
"--do-train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do-eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--do-test",
action="store_true",
help="Whether to run test on the test set.",
)
parser.add_argument(
"--augmentation-model-name",
type=str,
default="bert-base-uncased",
choices=tx.modules.BERTEncoder.available_checkpoints(),
help="Name of the pre-trained augmentation model checkpoint to load.",
)
parser.add_argument(
"--num-aug",
type=int,
default=4,
help="number of augmentation samples when fine-tuning aug model",
)
parser.add_argument(
"--classifier-pretrain-epoch",
type=int,
default=10,
help="number of epochs to pretrain the classifier",
)
args = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.root.setLevel(logging.INFO)
class RLAugmentClassifierTrainer:
def __init__(self):
self._prepare_data_iterator()
self._init_aug_model()
self._init_classifier()
def _prepare_data_iterator(self):
tx.utils.maybe_create_dir(args.output_dir)
# Loads data
num_train_data = config_data.num_train_data
self.num_train_steps = int(
num_train_data
/ config_data.train_batch_size
* config_data.max_train_epoch
)
train_dataset = tx.data.RecordData(
hparams=config_data.train_hparam, device=device
)
val_dataset = tx.data.RecordData(
hparams=config_data.eval_hparam, device=device
)
test_dataset = tx.data.RecordData(
hparams=config_data.test_hparam, device=device
)
self.iterator = tx.data.DataIterator(
{"train": train_dataset, "dev": val_dataset, "test": test_dataset}
)
self.val_data_iterator = tx.data.DataIterator({"dev": val_dataset})
self.val_data_iterator.switch_to_dataset("dev")
def _init_aug_model(self):
# pylint: disable=protected-access
# Builds data augmentation BERT
aug_model = BertForMaskedLM.from_pretrained(
args.augmentation_model_name
)
aug_model.to(device)
aug_tokenizer = tx.data.BERTTokenizer(
pretrained_model_name=args.augmentation_model_name
)
input_mask_ids = aug_tokenizer._map_token_to_id("[MASK]")
# Builds augmentation optimizer
aug_lr = 4e-5
param_optimizer = list(aug_model.named_parameters())
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in param_optimizer
if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p
for n, p in param_optimizer
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
aug_optim = tx.core.BertAdam(
optimizer_grouped_parameters,
betas=(0.9, 0.999),
eps=1e-6,
lr=aug_lr,
)
# Builds data augmentation wrapper
self.aug_wrapper = MetaAugmentationWrapper(
aug_model, aug_optim, input_mask_ids, device, args.num_aug
)
def _init_classifier(self):
# Builds BERT for classification task.
config_downstream = {
k: v
for k, v in config_classifier.__dict__.items()
if not k.startswith("__") and k != "hyperparams"
}
self.classifier = tx.modules.BERTClassifier(
pretrained_model_name=args.pretrained_model_name,
hparams=config_downstream,
)
self.classifier.to(device)
# Builds learning rate decay scheduler
classifier_lr = 4e-5
vars_with_decay = []
vars_without_decay = []
for name, param in self.classifier.named_parameters():
if "layer_norm" in name or name.endswith("bias"):
vars_without_decay.append(param)
else:
vars_with_decay.append(param)
opt_params = [
{
"params": vars_with_decay,
"weight_decay": 0.01,
},
{
"params": vars_without_decay,
"weight_decay": 0.0,
},
]
self.optim = tx.core.BertAdam(
opt_params, betas=(0.9, 0.999), eps=1e-6, lr=classifier_lr
)
num_warmup_steps = int(
self.num_train_steps * config_data.warmup_proportion
)
self.scheduler = torch.optim.lr_scheduler.LambdaLR(
self.optim,
functools.partial(
model_utils.get_lr_multiplier,
total_steps=self.num_train_steps,
warmup_steps=num_warmup_steps,
),
)
def pre_train_classifier_epoch(self):
r"""Pre-trains model on the training set
for better weight initialization.
"""
self.iterator.switch_to_dataset("train")
self.classifier.train()
for _ in range(args.classifier_pretrain_epoch):
for batch in self.iterator:
self.optim.zero_grad()
input_ids = batch["input_ids"]
segment_ids = batch["segment_ids"]
labels = batch["label_ids"]
input_length = (1 - (input_ids == 0).int()).sum(dim=1)
logits, _ = self.classifier(
input_ids, input_length, segment_ids
)
loss = self._compute_loss(logits, labels)
loss.backward()
self.optim.step()
self.scheduler.step()
def train_epoch(self):
r"""Trains on the training set, and evaluates on the validation set
periodically.
"""
self.iterator.switch_to_dataset("train")
self.classifier.train()
self.optim.zero_grad()
for batch in self.iterator:
input_ids = batch["input_ids"]
input_mask = batch["input_mask"]
segment_ids = batch["segment_ids"]
labels = batch["label_ids"]
# Train augmentation model params phi.
self.aug_wrapper.reset_model()
# Iterate over training instances.
num_instances = len(input_ids)
for i in range(num_instances):
features = (
input_ids[i],
input_mask[i],
segment_ids[i],
labels[i],
)
# Augmented instance with params phi exposed
(
aug_probs,
input_mask_aug,
segment_ids_aug,
label_ids_aug,
) = self.aug_wrapper.augment_instance(features)
# Compute classifier loss.
self.classifier.zero_grad()
input_length_aug = ((input_mask_aug == 1).int()).sum(dim=1)
logits, _ = self.classifier(
aug_probs, input_length_aug, segment_ids_aug
)
loss = self._compute_loss(logits, label_ids_aug)
# Update classifier params on meta_model.
meta_model = TexarBertMetaModule(self.classifier)
meta_model = self.aug_wrapper.update_meta_model(
meta_model, loss, self.classifier, self.optim
)
# Compute grads of aug_model on validation data.
for val_batch in self.val_data_iterator: # one batch
val_input_ids = val_batch["input_ids"]
val_segment_ids = val_batch["segment_ids"]
val_labels = val_batch["label_ids"]
val_input_length = (1 - (val_input_ids == 0).int()).sum(
dim=1
)
val_logits, _ = meta_model(
val_input_ids, val_input_length, val_segment_ids
)
val_loss = self._compute_loss(val_logits, val_labels)
val_loss = (
val_loss
/ num_instances
/ args.num_aug
/ len(self.val_data_iterator)
)
val_loss.backward()
# Update aug_model param phi.
self.aug_wrapper.update_phi()
# Train classifier with augmented batch
(
input_probs,
input_masks,
segment_ids,
label_ids,
) = self.aug_wrapper.augment_batch(
(input_ids, input_mask, segment_ids, labels)
)
input_length = ((input_masks == 1).int()).sum(dim=1)
self.optim.zero_grad()
logits, _ = self.classifier(input_probs, input_length, segment_ids)
loss = self._compute_loss(logits, label_ids)
loss.backward()
self.optim.step()
self.scheduler.step()
self._display_logging(loss)
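        # Summary of the loop above (a reading of the code, not text from the original
        # authors): each real instance is augmented with the current generator
        # parameters phi, a virtual classifier update is taken on a meta copy, the
        # resulting validation loss is back-propagated into phi, and only then is the
        # classifier itself updated on a freshly augmented batch.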
@torch.no_grad()
def eval_epoch(self):
"""Evaluates on the dev set."""
self.iterator.switch_to_dataset("dev")
self.classifier.eval()
nsamples = 0
avg_rec = tx.utils.AverageRecorder()
for batch in self.iterator:
input_ids = batch["input_ids"]
segment_ids = batch["segment_ids"]
labels = batch["label_ids"]
input_length = (1 - (input_ids == 0).int()).sum(dim=1)
logits, preds = self.classifier(
input_ids, input_length, segment_ids
)
loss = self._compute_loss(logits, labels)
accu = tx.evals.accuracy(labels, preds)
batch_size = input_ids.size()[0]
avg_rec.add([accu, loss], batch_size)
nsamples += batch_size
logging.info(
"eval accu: %.4f; loss: %.4f; nsamples: %d",
avg_rec.avg(0),
avg_rec.avg(1),
nsamples,
)
@torch.no_grad()
def test_epoch(self, test_file):
"""Does predictions on the test set."""
self.iterator.switch_to_dataset("test")
self.classifier.eval()
_all_preds = []
nsamples = 0
avg_rec = tx.utils.AverageRecorder()
for batch in self.iterator:
input_ids = batch["input_ids"]
segment_ids = batch["segment_ids"]
labels = batch["label_ids"]
input_length = (1 - (input_ids == 0).int()).sum(dim=1)
logits, preds = self.classifier(
input_ids, input_length, segment_ids
)
loss = self._compute_loss(logits, labels)
accu = tx.evals.accuracy(labels, preds)
batch_size = input_ids.size()[0]
avg_rec.add([accu, loss], batch_size)
nsamples += batch_size
_all_preds.extend(preds.tolist())
logging.info(
"test accu: %.4f; loss: %.4f; nsamples: %d",
avg_rec.avg(0),
avg_rec.avg(1),
nsamples,
)
output_file = os.path.join(args.output_dir, test_file)
with open(output_file, "w+") as writer:
writer.write("\n".join(str(p) for p in _all_preds))
logging.info("test output written to %s", output_file)
def _compute_loss(self, logits, labels):
r"""Compute loss."""
if self.classifier.is_binary:
loss = F.binary_cross_entropy(
logits.view(-1), labels.view(-1), reduction="mean"
)
else:
loss = F.cross_entropy(
logits.view(-1, self.classifier.num_classes),
labels.view(-1),
reduction="mean",
)
return loss
def _display_logging(self, loss):
step = self.scheduler.last_epoch
dis_steps = config_data.display_steps
if dis_steps > 0 and step % dis_steps == 0:
logging.info("step: %d; loss: %f", step, loss)
eval_steps = config_data.eval_steps
if eval_steps > 0 and step % eval_steps == 0:
self.eval_epoch()
self.classifier.train()
def main():
trainer = RLAugmentClassifierTrainer()
trainer.pre_train_classifier_epoch()
if args.do_train:
for k in range(config_data.max_train_epoch):
logging.info("training epoch %d", k)
trainer.train_epoch()
if args.do_eval:
trainer.eval_epoch()
if args.do_test:
trainer.test_epoch("test_results.tsv")
if __name__ == "__main__":
main()
| [
"torch.no_grad",
"torch.cuda.is_available"
] | 1.1.0 | jzpang/forte | 489fb9cafba6faf5739bda935836b61b5e3d02b6 |
1.8 | import torch
# from torch.autograd import Variable
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.weight_norm import WeightNorm
from Batchtransfer_EMA import BatchInstanceTransNorm as BIT2d
def init_layer(L):
# Initialization using fan-in
if isinstance(L, nn.Conv2d):
n = L.kernel_size[0] * L.kernel_size[1] * L.out_channels
L.weight.data.normal_(0, math.sqrt(2.0 / float(n)))
elif isinstance(L, BIT2d):
L.weight.data.fill_(1)
L.bias.data.fill_(0)
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
# Simple ResNet Block
class SimpleBlock(nn.Module):
maml = False #Default
def __init__(self, indim, outdim, half_res):
super(SimpleBlock, self).__init__()
self.indim = indim
self.outdim = outdim
self.C1 = nn.Conv2d(indim, outdim, kernel_size=3, stride=2 if half_res else 1, padding=1, bias=False)
self.BN1 = BIT2d(outdim)
self.C2 = nn.Conv2d(outdim, outdim, kernel_size=3, padding=1, bias=False)
self.BN2 = BIT2d(outdim)
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
self.parametrized_layers = [self.C1, self.C2, self.BN1, self.BN2]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
self.shortcut = nn.Conv2d(indim, outdim, 1, 2 if half_res else 1, bias=False)
self.BNshortcut = BIT2d(outdim)
self.parametrized_layers.append(self.shortcut)
self.parametrized_layers.append(self.BNshortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
out = self.C1(x)
out = self.BN1(out)
out = self.relu1(out)
out = self.C2(out)
out = self.BN2(out)
short_out = x if self.shortcut_type == 'identity' else self.BNshortcut(self.shortcut(x))
out = out + short_out
out = self.relu2(out)
return out
# Bottleneck block
class BottleneckBlock(nn.Module):
def __init__(self, indim, outdim, half_res):
super(BottleneckBlock, self).__init__()
bottleneckdim = int(outdim/4)
self.indim = indim
self.outdim = outdim
self.C1 = nn.Conv2d(indim, bottleneckdim, kernel_size=1, bias=False)
self.BN1 = BIT2d(bottleneckdim)
self.C2 = nn.Conv2d(bottleneckdim, bottleneckdim, kernel_size=3, stride=2 if half_res else 1,padding=1)
self.BN2 = BIT2d(bottleneckdim)
self.C3 = nn.Conv2d(bottleneckdim, outdim, kernel_size=1, bias=False)
self.BN3 = BIT2d(outdim)
self.relu = nn.ReLU()
self.parametrized_layers = [self.C1, self.BN1, self.C2, self.BN2, self.C3, self.BN3]
self.half_res = half_res
# if the input number of channels is not equal to the output, then need a 1x1 convolution
if indim!=outdim:
self.shortcut = nn.Conv2d(indim, outdim, 1, stride=2 if half_res else 1, bias=False)
self.parametrized_layers.append(self.shortcut)
self.shortcut_type = '1x1'
else:
self.shortcut_type = 'identity'
for layer in self.parametrized_layers:
init_layer(layer)
def forward(self, x):
short_out = x if self.shortcut_type == 'identity' else self.shortcut(x)
out = self.C1(x)
out = self.BN1(out)
out = self.relu(out)
out = self.C2(out)
out = self.BN2(out)
out = self.relu(out)
out = self.C3(out)
out = self.BN3(out)
out = out + short_out
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, list_of_num_layers, list_of_out_dims, flatten=False):
# list_of_num_layers specifies the number of layers in each stage
# list_of_out_dims specifies the number of output channels for each stage
super(ResNet, self).__init__()
assert len(list_of_num_layers) == 4, 'Can have only four stages'
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
bn1 = BIT2d(64)
relu = nn.ReLU()
pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
init_layer(conv1)
init_layer(bn1)
trunk = [conv1, bn1, relu, pool1]
indim = 64
for i in range(4):
for j in range(list_of_num_layers[i]):
half_res = (i>=1) and (j==0)
B = block(indim, list_of_out_dims[i], half_res)
trunk.append(B)
indim = list_of_out_dims[i]
if flatten:
# avgpool = nn.AvgPool2d(7)
avgpool = nn.AdaptiveAvgPool2d((1, 1))
trunk.append(avgpool)
trunk.append(Flatten())
self.final_feat_dim = indim
else:
self.final_feat_dim = [ indim, 7, 7]
self.trunk = nn.Sequential(*trunk)
def forward(self,x):
out = self.trunk(x)
return out
def ResNet10_BITrans(flatten=True):
return ResNet(SimpleBlock, [1, 1, 1, 1], [64, 128, 256, 512], flatten)
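# Hedged usage sketch (not part of the original file; assumes BatchInstanceTransNorm
# preserves shapes the same way BatchNorm2d does):
# model = ResNet10_BITrans(flatten=True)
# feats = model(torch.rand(2, 3, 224, 224))  # -> torch.Size([2, 512])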
| [
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.8.1 | MosyMosy/VDT | e07f28d0cd6367ed30740c147ed2f270ead8fb63 |
1.8 | # -*- coding: utf-8 -*-
# @Time : 2021/5/29
# @Author : Lart Pang
# @GitHub : https://github.com/lartpang
from functools import partial
from torch.utils import data
from utils import builder, misc
def get_tr_loader(cfg, shuffle=True, drop_last=True, pin_memory=True):
dataset = builder.build_obj_from_registry(
registry_name="DATASETS",
obj_name=cfg.datasets.train.dataset_type,
obj_cfg=dict(
root=[(name, path) for name, path in cfg.datasets.train.path.items()],
shape=cfg.datasets.train.shape,
extra_scales=cfg.train.ms.extra_scales if cfg.train.ms.enable else None,
interp_cfg=cfg.datasets.train.get("interp_cfg", None),
),
)
if cfg.use_ddp:
train_sampler = data.distributed.DistributedSampler(dataset, shuffle=shuffle)
shuffle = False
else:
train_sampler = None
shuffle = shuffle
if cfg.train.ms.enable:
collate_fn = getattr(dataset, "collate_fn", None)
assert collate_fn is not None
else:
collate_fn = None
loader = data.DataLoader(
dataset=dataset,
batch_size=cfg.train.batch_size,
sampler=train_sampler,
shuffle=shuffle,
num_workers=cfg.train.num_workers,
drop_last=drop_last,
pin_memory=pin_memory,
collate_fn=collate_fn,
worker_init_fn=partial(misc.customized_worker_init_fn, base_seed=cfg.base_seed)
if cfg.use_custom_worker_init
else None,
)
print(f"Length of Trainset: {len(dataset)}")
return loader
def get_te_loader(cfg, shuffle=False, drop_last=False, pin_memory=True):
# Generator: yields one (name, path, loader) triple per test dataset.
for te_data_name, te_data_path in cfg.datasets.test.path.items():
dataset = builder.build_obj_from_registry(
registry_name="DATASETS",
obj_name=cfg.datasets.test.dataset_type,
obj_cfg=dict(
root=(te_data_name, te_data_path),
shape=cfg.datasets.test.shape,
interp_cfg=cfg.datasets.test.get("interp_cfg", None),
),
)
loader = data.DataLoader(
dataset=dataset,
batch_size=cfg.test.batch_size,
num_workers=cfg.test.num_workers,
shuffle=shuffle,
drop_last=drop_last,
pin_memory=pin_memory,
collate_fn=getattr(dataset, "collate_fn", None),
worker_init_fn=partial(misc.customized_worker_init_fn, base_seed=cfg.base_seed)
if cfg.use_custom_worker_init
else None,
)
print(f"Testing with testset: {te_data_name}: {len(dataset)}")
yield te_data_name, te_data_path, loader
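# Hedged usage sketch (the `cfg` object is an assumption; only the fields read in this
# module are required):
# for name, path, loader in get_te_loader(cfg):
#     for batch in loader:
#         ...  # run inference on `batch`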
| [
"torch.utils.data.distributed.DistributedSampler"
] | 1.8.1 | lartpang/ZoomNet | 1f329e80db5469eaf6a513ec384cd19bafdaece2 |
1.0 | """
Contains classes for the comparison of models on the MNIST dataset.
Main models:
- MLPNet: Feed forward NN with linear layers
- SPNNet: Same as MLPNet but replaces certain layers with SPNLayer
- SPNNeuron: Defines the SPN architecture of a single neuron in a SPNLayer
"""
import logging
import time
import numpy as np
import torch
from torch import distributions as dist
from torch import nn
from torch.nn import functional as F
from src.models.pytorch import GaussianNode, ProductNode, SumNode
from src.models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152
from src.spn.distributions import Normal
from src.spn import layers
from src.utils.utils import count_params
from src.utils.utils import time_delta_now
logger = logging.getLogger(__name__)
###########
# Neurons #
###########
class SPNNeuronShallow(nn.Module):
"""
SPN Neuron implementation using the vectorized layers.
"""
def __init__(self, in_features, n_gaussians=3):
"""
Initialize the SPNNeuronShallow.
Args:
in_features: Number of input features.
n_gaussians: Number of different pairwise independence mixtures of the leaf nodes.
"""
# Init
super().__init__()
self.n_gaussians = n_gaussians
self.in_features = in_features
# NOTE: the fixed layer sizes below assume in_features == 128
# (128 leaves -> pairwise products -> 64 scopes -> sums -> one product over all 64 scopes).
self.gauss = Normal(multiplicity=3, in_features=in_features)
self.prod = layers.Product(in_features=128, cardinality=2)
self.sum = layers.Sum(in_channels=3, in_features=64, out_channels=1)
self.out = layers.Product(in_features=64, cardinality=64)
# Randomize features
self.rand_idxs = torch.tensor(np.random.permutation(in_features))
def forward(self, x):
# Random permutation
x = x[:, self.rand_idxs]
x = self.gauss(x)
x = self.prod(x)
x = self.sum(x)
x = self.out(x)
return x
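# Hedged usage sketch (assumes in_features == 128, matching the fixed layer sizes above):
# neuron = SPNNeuronShallow(in_features=128)
# log_p = neuron(torch.rand(8, 128))  # -> log-likelihoods of shape (8, 1)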
class SPNNeuronBig(nn.Module):
"""
SPN Neuron implementation using the vectorized layers.
"""
def __init__(self, in_features, n_gaussians=3):
"""
Initialize the SPNNeuron.
Args:
in_features: Number of input features.
n_gaussians: Number of different pairwise independence mixtures of the leaf nodes.
"""
# Init
super().__init__()
self.n_gaussians = n_gaussians
self.in_features = in_features
ch = [2 ** i for i in range(4, 0, -1)]
# self.spn = nn.Sequential(
# Normal(multiplicity=n_gaussians, in_channels=1, in_features=in_features),
# layers.Sum(
# in_channels=n_gaussians, in_features=in_features, out_channels=ch[0]
# ),
# layers.Product(in_features=in_features, cardinality=2),
# layers.Sum(
# in_channels=ch[0], in_features=in_features / 2, out_channels=ch[1]
# ),
# layers.Product(in_features=in_features / 2, cardinality=2),
# layers.Sum(
# in_channels=ch[1], in_features=in_features / 4, out_channels=ch[2]
# ),
# layers.Product(in_features=in_features / 4, cardinality=2),
# layers.Sum(
# in_channels=ch[2], in_features=in_features / 8, out_channels=ch[3]
# ),
# layers.Product(in_features=in_features / 8, cardinality=2),
# layers.Sum(in_channels=ch[3], in_features=in_features / 16, out_channels=1),
# layers.Product(in_features=in_features / 16, cardinality=in_features // 16),
# )
ch = [1, 1, 1, 1]
card = 5
self.spn = nn.Sequential(
Normal(multiplicity=n_gaussians, in_features=in_features),
layers.Sum(
in_channels=n_gaussians, in_features=in_features, out_channels=ch[0]
),
layers.Product(in_features=in_features, cardinality=card),
layers.Sum(
in_channels=ch[0], in_features=in_features / 5, out_channels=ch[1]
),
layers.Product(in_features=in_features / 5, cardinality=card),
layers.Sum(
in_channels=ch[1], in_features=in_features / 5 ** 2, out_channels=ch[2]
),
layers.Product(in_features=in_features / 5 ** 2, cardinality=card),
layers.Sum(
in_channels=ch[2], in_features=in_features / 5 ** 3, out_channels=ch[3]
),
layers.Product(in_features=in_features / 5 ** 3, cardinality=card),
layers.Sum(
in_channels=ch[3], in_features=in_features / 5 ** 4, out_channels=1
),
layers.Product(
in_features=in_features / 5 ** 4, cardinality=in_features // 5 ** 4
),
)
# Randomize features
self.rand_idxs = torch.tensor(np.random.permutation(in_features))
def forward(self, x):
x = x[:, self.rand_idxs]
x = self.spn(x)
return x
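# Hedged note/usage sketch: the stacked Sum/Product stages above shrink the feature axis
# by a factor of 5 four times, so in_features is effectively assumed to be 5**4 = 625:
# neuron = SPNNeuronBig(in_features=625)
# log_p = neuron(torch.rand(4, 625))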
# class SPNNeuron(nn.Module):
# def __init__(self, in_features, n_gaussians=3):
# """
# Initialize the SPNNeuron.
# Args:
# in_features: Number of input features.
# n_gaussians: Number of different pairwise independence mixtures of the leaf nodes.
# """
# # Init
# super().__init__()
# self.n_gaussians = n_gaussians
# self.in_features = in_features
# # Create gaussian means and stds
# self.means = nn.Parameter(torch.randn(n_gaussians, 1, in_features))
# self.stds = nn.Parameter(torch.rand(n_gaussians, 1, in_features))
# self.gauss = dist.Normal(loc=self.means, scale=self.stds)
# # Create random sequence of scopes
# scopes = np.random.permutation(in_features)
# # sums = []
# self.product_scopes_left = []
# self.product_scopes_right = []
# # Sum weights
# self.sum_weights = nn.Parameter(torch.rand(n_gaussians, int(in_features / 2)))
# # For two consecutive (random) scopes
# for i in range(0, in_features, 2):
# # Collect scopes
# self.product_scopes_left.append(scopes[i])
# self.product_scopes_right.append(scopes[i + 1])
# def forward(self, x, marginals=[]):
# # First apply gaussian
# # Expand x to match the gaussian mean/std matrix
# batch_size = x.shape[0]
# x = x.expand([self.n_gaussians, batch_size, self.in_features])
# x = self.gauss.log_prob(x)
# # Marginalize certain leaf nodes: Set likelihood of the leaf to 1 (log(1)=0)
# x[:, :, marginals] = 0.0
# # Current dimensions: n_gaussian x batch_size x in_features
# # ______
# # / /|
# # /_____/ |
# # | | /
# # | | /
# # |____|/
# # Apply products between features i and j: X[:, :, i] * X[:, :, j] ( sum in logspace )
# x = x[:, :, self.product_scopes_left] + x[:, :, self.product_scopes_right]
# # Current dimensions: n_gaussian x batch_size x in_features / 2
# # ______
# # / /|
# # /_____/ |
# # | | /
# # | | /
# # |____|/
# # Apply sum over the n_gaussian axis (dim=0)
# x = torch.logsumexp(x + torch.log(self.sum_weights.unsqueeze(1)), dim=0)
# # Current dimensions: batch_size x in_features / 2
# # ____
# # | |
# # | |
# # |____|
# # Apply product over all features ( sum in logspace )
# x = torch.sum(x, dim=1)
# return x
# class MaxOutSpnNeuron(nn.Module):
# def __init__(self, in_features, n_gaussians=3):
# """
# Initialize the SPNNeuron.
# Args:
# in_features: Number of input features.
# n_gaussians: Number of different pairwise independence mixtures of the leaf nodes.
# """
# # Init
# super(MaxOutSpnNeuron, self).__init__()
# self.n_gaussians = n_gaussians
# self.in_features = in_features
# # Create gaussian means and stds
# self.means = nn.Parameter(torch.randn(n_gaussians, 1, in_features))
# self.stds = nn.Parameter(torch.rand(n_gaussians, 1, in_features))
# self.gauss = dist.Normal(loc=self.means, scale=self.stds)
# # Create random sequence of scopes
# scopes = np.random.permutation(in_features)
# # sums = []
# self.product_scopes_left = []
# self.product_scopes_right = []
# # Sum weights
# self.sum_weights = nn.Parameter(torch.rand(n_gaussians, int(in_features / 2)))
# # For two consecutive (random) scopes
# for i in range(0, in_features, 2):
# # Collect scopes
# self.product_scopes_left.append(scopes[i])
# self.product_scopes_right.append(scopes[i + 1])
# def forward(self, x, marginals=[]):
# # First apply gaussian
# # Expand x to match the gaussian mean/std matrix
# batch_size = x.shape[0]
# x = x.expand([self.n_gaussians, batch_size, self.in_features])
# x = self.gauss.log_prob(x)
# # Marginalize certain leaf nodes: Set likelihood of the leaf to 1 (log(1)=0)
# x[:, :, marginals] = 0.0
# # Current dimensions: n_gaussian x batch_size x in_features
# # ______
# # / /|
# # /_____/ |
# # | | /
# # | | /
# # |____|/
# # Apply products between features i and j: X[:, :, i] * X[:, :, j] ( sum in logspace )
# x = x[:, :, self.product_scopes_left] + x[:, :, self.product_scopes_right]
# # Current dimensions: n_gaussian x batch_size x in_features / 2
# # ______
# # / /|
# # /_____/ |
# # | | /
# # | | /
# # |____|/
# # The above is similar to the maxout approach but instead returns a weighted sum for each scope
# x, _ = torch.max(x + torch.log(self.sum_weights.unsqueeze(1)), dim=0)
# # x, _ = torch.max(x, dim=0)
# # Current dimensions: batch_size x in_features / 2
# # ____
# # | |
# # | |
# # |____|
# # Apply product over all features ( sum in logspace )
# x = torch.sum(x, dim=1)
# return x
# class ConditionalSPNNeuron(nn.Module):
# """
# Maps each input feature to the likeliood of that feature, given all other features:
# z_i = P(x_i | X \ {x_i})
# Dimension in: N, dimension out: N
# """
# def __init__(self, in_features: int):
# # Init
# super(ConditionalSPNNeuron, self).__init__()
# self.spn = SPNNeuron(in_features=in_features)
# self.in_features = in_features
# def forward(self, x):
# x_full_pass = self.spn(x)
# x_marginalized = [self.spn(x, i) for i in range(self.in_features)]
# x_stacked = torch.stack(x_marginalized, dim=1)
# x_conditional = x_full_pass.view(-1, 1) - x_stacked
# return x_conditional
# class SPNNeuronOld(nn.Module):
# def __init__(self, in_features, n_mv=2):
# """
# Initialize the SPNNeuron.
# Args:
# in_features: Number of input features.
# n_mv: Number of different pairwise independence mixtures of the leaf nodes.
# """
# # Init
# super(SPNNeuronOld, self).__init__()
# # Create random sequence of scopes
# scopes = np.random.permutation(in_features)
# sums = []
# # For two consecutive (random) scopes
# for i in range(0, in_features, 2):
# scope_1 = scopes[i]
# scope_2 = scopes[i + 1]
# # Create n_mv MultivariateGaussian from these two scopes
# mvs = []
# for _ in range(n_mv):
# # TODO: MVG are currently not trainable
# # mv = MultivariateGaussian(n_vars=2, scope=[scope_1, scope_2])
# # mvs.append(mv)
# g1 = GaussianNode(scope=scope_1)
# g2 = GaussianNode(scope=scope_2)
# prod = ProductNode([g1, g2])
# mvs.append(prod)
# sumnode = SumNode(children=mvs)
# sums.append(sumnode)
# self.root = ProductNode(children=sums)
# def forward(self, x):
# x = self.root(x)
# return x
##########
# Layers #
##########
class SPNOutLayer(nn.Module):
"""
A PyTorch module that contains multiple SPNs with the same structure and treats them as single nodes in a layer.
"""
def __init__(self, neuron: nn.Module, in_features: int, n_labels: int):
"""
Initialize the SPNLayer.
Args:
in_features (int): Number of input features for this layer.
n_labels (int): Number of output labels for this layer.
"""
super().__init__()
# Create out_features number of SPNNeurons
neurons = [neuron(in_features) for _ in range(n_labels)]
self.spns = nn.ModuleList(neurons)
self.class_weights_log = nn.Parameter(
torch.log(torch.ones(n_labels) / n_labels), requires_grad=False
)
self.n_labels = n_labels
def forward(self, x):
# Feed forward each neuron and stack the results
spn_results = [spn(x) for spn in self.spns]
x = torch.stack(spn_results, dim=1)
x.squeeze_(2)
# Normalize: S(y=i | x) = w_i * S_i(x) / sum_j { w_j * S_j(x) }, with w_i = 1 / n_labels
# In logspace: log S(y=i | x) = log S_i(x) + log(w_i) - logsumexp_j { log(w_j) + log S_j(x) }
z = torch.logsumexp(self.class_weights_log + x, dim=1).view(x.shape[0], 1)
y = x + self.class_weights_log - z
# Return the normalized class log-posterior.
return y
class SPNLayer(nn.Module):
"""
A PyTorch module that contains multiple SPNs with the same structure and treats them as single nodes in a layer.
"""
def __init__(self, neuron: nn.Module, in_features: int, out_features: int):
"""
Initialize the SPNLayer.
Args:
in_features (int): Number of input features for this layer.
out_features (int): Number of output features for this layer.
"""
super(SPNLayer, self).__init__()
# Create out_features number of SPNNeurons
neurons = [neuron(in_features) for _ in range(out_features)]
self.spns = nn.ModuleList(neurons)
self.bn = nn.BatchNorm1d(out_features)
def forward(self, x):
# Feed forward each neuron and stack the results
spn_results = [spn(x) for spn in self.spns]
x = torch.stack(spn_results, dim=1)
x = x.squeeze(2)  # only drop the stacked channel dim; keep the batch dim when batch size is 1
x = self.bn(x)
return x
############
# Networks #
############
class ResNet(nn.Module):
def __init__(self, in_features, n_labels, resnet_arch=resnet18, in_channels=1):
"""
Resnet.
Args:
in_features: Number of input features.
n_labels: Number of output labels.
resnet_arch: Resnet architecture.
"""
super(ResNet, self).__init__()
self.n_labels = n_labels
self.resnet = resnet_arch(
pretrained=False, num_classes=128, in_channels=in_channels
)
self.linear = nn.Linear(128, n_labels)
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
def forward(self, x):
x = F.relu(self.resnet(x))
x = self.linear(x)
return x.sigmoid()
class ResNetCifar10(nn.Module):
def __init__(self, in_features, n_labels, resnet_arch=resnet18, in_channels=3):
"""
Resnet.
Args:
in_features: Number of input features.
n_labels: Number of output labels.
resnet_arch: Resnet architecture.
"""
super().__init__()
self.n_labels = n_labels
self.resnet = resnet_arch(
pretrained=False, num_classes=128, in_channels=in_channels
)
self.linear = nn.Linear(128, n_labels)
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
def forward(self, x):
x = F.relu(self.resnet(x))
x = self.linear(x)
return x
# class SPNResnetParallel(nn.Module):
# def __init__(self, in_features, n_labels, resnet_arch):
# """
# Apply Resnet and SPN on the input in parallel, merge results and classify with 256 dimensional linear layer
# afterwards.
# Args:
# in_features: Number of input features.
# n_labels: Number of output labels.
# resnet_arch: Resnet architecture.
# """
# super(SPNResnetParallel, self).__init__()
# self.n_labels = n_labels
# self.resnet = resnet18(pretrained=False, num_classes=128, in_channels=1)
# self.mid = SPNLayer(neuron=SPNNeuron, in_features=in_features, out_features=128)
# self.linear = nn.Linear(128 * 2, n_labels)
# for m in self.modules():
# if isinstance(m, nn.Linear):
# torch.nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
# elif isinstance(m, nn.Conv2d):
# nn.init.xavier_normal_(m.weight)
# def forward(self, x):
# x_resnet = F.relu(self.resnet(x))
# x_spn = self.mid(x.view(x.shape[0], -1))
# x_concat = torch.cat([x_resnet, x_spn], dim=1)
# x = self.linear(x_concat)
# return x.sigmoid()
class SPNNetPure(nn.Module):
def __init__(self, in_features, n_labels, spnneuron=SPNNeuronShallow):
"""
Apply SPN on input and directly produce the output.
Args:
in_features: Number of input features.
n_labels: Number of output labels.
"""
super().__init__()
self.n_labels = n_labels
self.mid = SPNLayer(
neuron=spnneuron, in_features=in_features, out_features=n_labels
)
def forward(self, x):
x = x.view(x.shape[0], -1)
x = self.mid(x)
return x.sigmoid()
class SPNPosteriorNet(nn.Module):
def __init__(
self,
in_features,
n_labels,
resnet_arch=resnet18,
spnneuron=SPNNeuronShallow,
in_channels=1,
):
"""
Apply Resnet and SPN sequentially.
SPN models the posterior distribution P(Y | X)
Args:
in_features: Number of input features.
n_labels: Number of output labels.
resnet_arch: Resnet architecture.
spnneuron: SPN neuron type that defines the SPN architecture.
"""
super().__init__()
self.n_labels = n_labels
self.resnet = resnet_arch(
pretrained=False, num_classes=128, in_channels=in_channels
)
self.mid = SPNOutLayer(neuron=spnneuron, in_features=128, n_labels=n_labels)
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
def forward(self, x):
x = F.relu(self.resnet(x))
x = self.mid(x)
return x
class SPNNet(nn.Module):
def __init__(
self,
in_features,
n_labels,
resnet_arch=resnet18,
spnneuron=SPNNeuronShallow,
in_channels=1,
):
"""
Apply Resnet and SPN sequentially.
Args:
in_features: Number of input features.
n_labels: Number of output labels.
resnet_arch: Resnet architecture.
spnneuron: SPN neuron type that defines the SPN architecture.
"""
super().__init__()
self.n_labels = n_labels
self.resnet = resnet_arch(
pretrained=False, num_classes=128, in_channels=in_channels
)
self.mid = SPNLayer(neuron=spnneuron, in_features=128, out_features=n_labels)
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
def forward(self, x):
x = F.relu(self.resnet(x))
x = self.mid(x)
return x.sigmoid()
class SPNNetCifar10(nn.Module):
def __init__(
self,
in_features,
n_labels,
resnet_arch=resnet18,
spnneuron=SPNNeuronShallow,
in_channels=3,
):
"""
Apply Resnet and SPN sequentially.
Args:
in_features: Number of input features.
n_labels: Number of output labels.
resnet_arch: Resnet architecture.
spnneuron: SPN neuron type that defines the SPN architecture.
"""
super().__init__()
self.n_labels = n_labels
self.resnet = resnet_arch(
pretrained=False, num_classes=128, in_channels=in_channels
)
self.mid = SPNOutLayer(neuron=spnneuron, in_features=128, n_labels=n_labels)
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight)
def forward(self, x):
x = F.relu(self.resnet(x))
x = self.mid(x)
return x
def get_model_by_tag(
tag, device, args, in_features, n_labels, in_channels=1
) -> nn.Module:
"""
Return the model for a given tag.
Args:
tag (str): Model tag.
device: Device to create the model on.
args: Arguments
in_features: Number of input features.
n_labels: Number of output labels.
in_channels: Number of input channels.
Returns:
nn.Module: PyTorch model.
"""
resnet_arch = {
"resnet18": resnet18,
"resnet34": resnet34,
"resnet50": resnet50,
"resnet101": resnet101,
"resnet152": resnet152,
}.get(args.resnet_arch)
logger.info("Selecting model %s with device %s", tag, device)
# Select model
if tag.lower() == "resnet+spn":
model = SPNNet(
in_features=in_features,
n_labels=n_labels,
spnneuron=SPNNeuronShallow,
in_channels=in_channels,
).to(device)
elif tag.lower() == "resnet+posterior+spn":
model = SPNPosteriorNet(
in_features=in_features,
n_labels=n_labels,
spnneuron=SPNNeuronShallow,
in_channels=in_channels,
).to(device)
elif tag.lower() == "spn-shallow":
model = SPNNetPure(
spnneuron=SPNNeuronShallow, in_features=in_features, n_labels=n_labels
).to(device)
elif tag.lower() == "spn-deep":
model = SPNNetPure(
in_features=in_features, n_labels=n_labels, spnneuron=SPNNeuronBig
).to(device)
elif tag.lower() == "resnet":
model = ResNet(
in_features=in_features,
n_labels=n_labels,
resnet_arch=resnet_arch,
in_channels=in_channels,
).to(device)
elif tag.lower() == "resnet-cifar10":
model = ResNetCifar10(
in_features=in_features,
n_labels=n_labels,
resnet_arch=resnet_arch,
in_channels=in_channels,
).to(device)
elif tag.lower() == "resnet+spn-cifar10":
model = SPNNetCifar10(
in_features=in_features,
n_labels=n_labels,
spnneuron=SPNNeuronShallow,
in_channels=in_channels,
).to(device)
else:
raise Exception("Invalid network: %s" % tag)
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args(args=[])
args.resnet_arch = "resnet18"
dev = "cuda:0"
resnet = get_model_by_tag("resnet", torch.device(dev), args, 50 ** 2, 10)
resnetspn = get_model_by_tag("resnet+spn", torch.device(dev), args, 50 ** 2, 10)
shallow = get_model_by_tag("spn-shallow", torch.device(dev), args, 50 ** 2, 10)
x = torch.rand(3, 1, 50, 50).to(torch.device(dev))
for net, name in [
(resnet, "resnet"),
(resnetspn, "resnetspn"),
(shallow, "shallow"),
]:
print(f"{name}: {count_params(net)}")
t = time.time()
net(x)
print(name, "took", time_delta_now(t))
| [
"torch.nn.Linear",
"torch.device",
"torch.rand",
"torch.stack",
"torch.nn.ModuleList",
"torch.nn.init.kaiming_normal_",
"torch.logsumexp",
"torch.ones",
"torch.nn.BatchNorm1d",
"torch.nn.init.xavier_normal_"
] | 1.0.1 | cvoelcker/spn-pytorch-experiments | 495d1fddf00f76fe28e926f7e558b26089e5428e |
1.0 | import logging
import time
from src.utils.utils import time_delta_now
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
logger = logging.getLogger(__name__)
class Sum(nn.Module):
def __init__(self, in_channels, in_features, out_channels, dropout=0.0):
super(Sum, self).__init__()
self.in_channels = in_channels
self.in_features = in_features
self.out_channels = out_channels
self.dropout = dropout
assert out_channels > 0, (
"Number of output channels must be at least 1, but was %s." % out_channels
)
in_features = int(in_features)
# Weights, such that each sumnode has its own weights
ws = torch.randn(in_features, in_channels, out_channels)
self.sum_weights = nn.Parameter(ws)
self._bernoulli_dist = torch.distributions.Bernoulli(probs=dropout)
self.out_shape = f"(N, {in_features}, C_in)"
def forward(self, x):
"""
Sum layer forward pass.
Args:
x: Input of shape [batch, in_features, in_channels].
Returns:
torch.Tensor: Output of shape [batch, in_features, out_channels]
"""
# Apply dropout: Set random sum node children to 0 (-inf in log domain)
if self.dropout > 0.0:
r = self._bernoulli_dist.sample(x.shape).type(torch.bool)
x[r] = np.NINF
# Multiply x with weights in logspace
# Results in shape: [n, d, ic, oc]
x = x.unsqueeze(3) + F.log_softmax(self.sum_weights, dim=1)
# Compute sum via logsumexp along dimension "ic" (in_channels)
# Results in shape: [n, d, oc]
x = torch.logsumexp(x, dim=2)
return x
def __repr__(self):
return "Sum(in_channels={}, in_features={}, out_channels={}, dropout={}, out_shape={})".format(
self.in_channels,
self.in_features,
self.out_channels,
self.dropout,
self.out_shape,
)
class Product(nn.Module):
"""
Product Node Layer that chooses k scopes as children for a product node.
"""
def __init__(self, in_features, cardinality):
"""
Create a product node layer.
Args:
in_features (int): Number of input features.
cardinality (int): Number of random children for each product node.
"""
super(Product, self).__init__()
self.in_features = in_features
self.cardinality = int(cardinality)
in_features = int(in_features)
self._out_features = np.ceil(in_features / cardinality).astype(int)
self.out_shape = f"(N, {self._out_features}, C_in)"
def forward(self, x):
"""
Product layer forward pass.
Args:
x: Input of shape [batch, in_features, channel].
Returns:
torch.Tensor: Output of shape [batch, ceil(in_features/cardinality), channel].
"""
# Only one product node
if self.cardinality == x.shape[1]:
return x.sum(1)
x_split = list(torch.split(x, self.cardinality, dim=1))
# Check if splits have the same shape (If split cannot be made even, the last chunk will be smaller)
if x_split[-1].shape != x_split[0].shape:
# How much is the last chunk smaller
diff = x_split[0].shape[1] - x_split[-1].shape[1]
# Pad the last chunk by the difference with zeros (=marginalized nodes)
x_split[-1] = F.pad(
x_split[-1], pad=[0, 0, 0, diff], mode="constant", value=0.0
)
# Stack along new split axis
x_split_stack = torch.stack(x_split, dim=2)
# Sum over feature axis
result = torch.sum(x_split_stack, dim=1)
return result
def __repr__(self):
return "Product(in_features={}, cardinality={}, out_shape={})".format(
self.in_features, self.cardinality, self.out_shape
)
if __name__ == "__main__":
from src.spn.distributions import Normal
from src.spn.layers import Sum, Product
import torch
from torch import nn
# 1 Sample, 4 features, 1 channel
x = torch.rand(1, 4, 1)
p = Product(in_features=4, cardinality=2)
p(x)
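# Follow-up example (shapes follow the docstrings above): the Product over pairs of the
# 4 features yields (1, 2, 1); a Sum then mixes the single input channel into 3 output channels.
out = p(x)  # -> (1, 2, 1): (batch, features, channels)
s = Sum(in_channels=1, in_features=2, out_channels=3)
y = s(out)
print(y.shape)  # expected: torch.Size([1, 2, 3])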
| [
"torch.rand",
"torch.stack",
"torch.split",
"torch.logsumexp",
"torch.nn.Parameter",
"torch.distributions.Bernoulli",
"torch.nn.functional.log_softmax",
"torch.nn.functional.pad",
"torch.randn",
"torch.sum"
] | 1.0.1 | cvoelcker/spn-pytorch-experiments | 495d1fddf00f76fe28e926f7e558b26089e5428e |
1.0 | import logging
import os
import numpy as np
import torch
from torch import optim
from src.utils.utils import count_params
from src.data.data import store_results
from src.data.data_loader import get_mnist_loaders
from src.models.mnist import evaluate_model, train
from src.models.models import get_model_by_tag
logger = logging.getLogger(__name__)
class MnistExperiment:
"""Main experiment class."""
def __init__(self, args):
"""
Initialize the experiment.
Args:
args: Experiment options.
"""
self.args = args
def run(self):
"""Run the MNIST experiment."""
use_cuda = self.args.cuda and torch.cuda.is_available()
torch.manual_seed(self.args.seed)
if use_cuda:
device = "cuda:%s" % self.args.cuda_device_id
else:
device = "cpu"
logger.info("Selected device: {}".format(device))
# Get the mnist loader
train_loader, test_loader = get_mnist_loaders(use_cuda, self.args)
model = get_model_by_tag(self.args.net, device)
logger.info("Model: {}".format(model))
# with SummaryWriter(comment="Model", log_dir="tensorboard") as w:
# w.add_graph(model, torch.zeros(1, 28, 28), True)
# exit()
logger.info("Number of paramters: %s", count_params(model))
# Define optimizer
optimizer = optim.Adam(model.parameters(), lr=self.args.lr)
# Scheduler for learning rate
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=25, gamma=0.5)
# Collect acc and loss
train_accs, test_accs = [], []
train_losses, test_losses = [], []
# Run epochs
for epoch in range(1, self.args.epochs + 1):
scheduler.step()
# Run train
train(model, device, train_loader, optimizer, epoch, self.args.log_interval)
# Evaluate model on train and test data
train_loss, train_acc = evaluate_model(model, device, train_loader, "Train")
test_loss, test_acc = evaluate_model(model, device, test_loader, "Test")
# Store acc/loss
train_accs.append(train_acc)
train_losses.append(train_loss)
test_accs.append(test_acc)
test_losses.append(test_loss)
# Store results
column_names = ["train_acc", "test_acc", "train_loss", "test_loss"]
data = np.c_[train_accs, test_accs, train_losses, test_losses]
store_results(
result_dir=os.path.join(self.args.result_dir, self.args.experiment_name),
dataset_name="mnist",
column_names=column_names,
data=data,
)
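# Hedged usage sketch (the attribute set on `args` is an assumption inferred from the
# fields accessed in run(); the authoritative option names live in the project's CLI parser):
# from argparse import Namespace
# args = Namespace(cuda=False, cuda_device_id=0, seed=0, net="resnet", lr=1e-3,
#                  epochs=1, log_interval=10, result_dir="results", experiment_name="debug")
# MnistExperiment(args).run()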
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.StepLR"
] | 1.0.1 | cvoelcker/spn-pytorch-experiments | 495d1fddf00f76fe28e926f7e558b26089e5428e |
1.6 | import torch
from layers import Conv2d, Linear
class ConvModel(torch.nn.Module):
def __init__(self, in_channels: int, out_channels: int, dropout: bool = True):
super().__init__()
self.features = torch.nn.Sequential(
Conv2d(in_channels, 32, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2),
Conv2d(32, 64, 3, padding=1),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2),
Conv2d(64, 128, 3, padding=1),
torch.nn.Dropout(0.25 if dropout else 0.0),
torch.nn.ReLU(),
torch.nn.MaxPool2d(2),
Conv2d(128, 256, 3, padding=1),
torch.nn.ReLU(),
torch.nn.Dropout(0.5 if dropout else 0.0)
)
# Global average pooling over the spatial dims, followed by a linear classifier head.
self.out_layer = Linear(256, out_channels)
def forward(self, inp: torch.Tensor) -> torch.Tensor:
# forward() is used so that nn.Module's __call__ machinery (hooks, etc.) still applies.
return self.out_layer(self.features(inp).mean(dim=(2, 3)))
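# Hedged usage sketch (assumes the custom Conv2d/Linear wrappers from `layers` accept the
# same arguments as torch.nn.Conv2d / torch.nn.Linear):
# model = ConvModel(in_channels=3, out_channels=10)
# logits = model(torch.rand(8, 3, 32, 32))  # -> shape (8, 10)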
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.MaxPool2d"
] | 1.6.0 | RobertCsordas/modules | efdb8790b074862581e035c9ab5bf889440a8023 |
1.6 | # Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from tuframework.network_architecture.generic_UNet import Generic_UNet
from tuframework.network_architecture.initialization import InitWeights_He
from tuframework.network_architecture.neural_network import SegmentationNetwork
from tuframework.training.data_augmentation.data_augmentation_moreDA import get_moreDA_augmentation
from tuframework.training.data_augmentation.default_data_augmentation import default_3D_augmentation_params, \
default_2D_augmentation_params, get_patch_size
from tuframework.training.dataloading.dataset_loading import unpack_dataset
from tuframework.training.loss_functions.dice_loss import DC_and_CE_loss
from tuframework.training.network_training.tuTrainer import tuframeworkTrainer
from tuframework.training.network_training.tuTrainerV2 import tuframeworkTrainerV2
from tuframework.utilities.nd_softmax import softmax_helper
from torch import nn
import torch
class tuframeworkTrainerV2_noDeepSupervision(tuframeworkTrainerV2):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,
deterministic, fp16)
self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})
def setup_DA_params(self):
"""
we leave out the creation of self.deep_supervision_scales, so it remains None
:return:
"""
if self.threeD:
self.data_aug_params = default_3D_augmentation_params
self.data_aug_params['rotation_x'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_y'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
self.data_aug_params['rotation_z'] = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
if self.do_dummy_2D_aug:
self.data_aug_params["dummy_2D"] = True
self.print_to_log_file("Using dummy2d data augmentation")
self.data_aug_params["elastic_deform_alpha"] = \
default_2D_augmentation_params["elastic_deform_alpha"]
self.data_aug_params["elastic_deform_sigma"] = \
default_2D_augmentation_params["elastic_deform_sigma"]
self.data_aug_params["rotation_x"] = default_2D_augmentation_params["rotation_x"]
else:
self.do_dummy_2D_aug = False
if max(self.patch_size) / min(self.patch_size) > 1.5:
default_2D_augmentation_params['rotation_x'] = (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)
self.data_aug_params = default_2D_augmentation_params
self.data_aug_params["mask_was_used_for_normalization"] = self.use_mask_for_norm
if self.do_dummy_2D_aug:
self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],
self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
self.basic_generator_patch_size = np.array([self.patch_size[0]] + list(self.basic_generator_patch_size))
patch_size_for_spatialtransform = self.patch_size[1:]
else:
self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],
self.data_aug_params['rotation_y'],
self.data_aug_params['rotation_z'],
self.data_aug_params['scale_range'])
patch_size_for_spatialtransform = self.patch_size
self.data_aug_params["scale_range"] = (0.7, 1.4)
self.data_aug_params["do_elastic"] = False
self.data_aug_params['selected_seg_channels'] = [0]
self.data_aug_params['patch_size_for_spatialtransform'] = patch_size_for_spatialtransform
def initialize(self, training=True, force_load_plans=False):
"""
removed deep supervision
:return:
"""
if not self.was_initialized:
maybe_mkdir_p(self.output_folder)
if force_load_plans or (self.plans is None):
self.load_plans_file()
self.process_plans(self.plans)
self.setup_DA_params()
self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +
"_stage%d" % self.stage)
if training:
self.dl_tr, self.dl_val = self.get_basic_generators()
if self.unpack_data:
print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
print("done")
else:
print(
"INFO: Not unpacking data! Training may be slow due to that. Pray you are not using 2d or you "
"will wait all winter for your model to finish!")
assert self.deep_supervision_scales is None
self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val,
self.data_aug_params[
'patch_size_for_spatialtransform'],
self.data_aug_params,
deep_supervision_scales=self.deep_supervision_scales,
classes=None,
pin_memory=self.pin_memory)
self.print_to_log_file("TRAINING KEYS:\n %s" % (str(self.dataset_tr.keys())),
also_print_to_console=False)
self.print_to_log_file("VALIDATION KEYS:\n %s" % (str(self.dataset_val.keys())),
also_print_to_console=False)
else:
pass
self.initialize_network()
self.initialize_optimizer_and_scheduler()
assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))
else:
self.print_to_log_file('self.was_initialized is True, not running self.initialize again')
self.was_initialized = True
def initialize_network(self):
"""
changed deep supervision to False
:return:
"""
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.InstanceNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.InstanceNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
def run_online_evaluation(self, output, target):
return tuframeworkTrainer.run_online_evaluation(self, output, target)
| [
"torch.cuda.is_available"
] | 1.6.0 | Magnety/tuFramework | b31cb34d476ef306b52da955021f93c91c14ddf4 |
1.4 | import math
import numbers
import torch
from torch import nn
from torch.nn import functional as F
class GaussianSmoothing(nn.Module):
"""
Apply gaussian smoothing on a
1d, 2d or 3d tensor. Filtering is performed seperately for each channel
in the input using a depthwise convolution.
Arguments:
channels (int, sequence): Number of channels of the input tensors. Output will
have this number of channels as well.
kernel_size (int, sequence): Size of the gaussian kernel.
sigma (float, sequence): Standard deviation of the gaussian kernel.
dim (int, optional): The number of dimensions of the data.
Default value is 2 (spatial).
"""
def __init__(self, channels, kernel_size, sigma, dim=2):
super(GaussianSmoothing, self).__init__()
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim
# The gaussian kernel is the product of the
# gaussian function of each dimension.
kernel = 1
meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
# Gaussian pdf: exp(-(x - mean)^2 / (2 * std^2)) / (std * sqrt(2 * pi))
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \
torch.exp(-((mgrid - mean) / std) ** 2 / 2)
# Make sure sum of values in gaussian kernel equals 1.
kernel = kernel / torch.sum(kernel)
# Reshape to depthwise convolutional weight
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.groups = channels
if dim == 1:
self.conv = F.conv1d
elif dim == 2:
self.conv = F.conv2d
elif dim == 3:
self.conv = F.conv3d
else:
raise RuntimeError(
'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
)
def forward(self, input, stride: int = 1):
"""
Apply gaussian filter to input.
Arguments:
input (torch.Tensor): Input to apply gaussian filter on.
stride for applying conv
Returns:
filtered (torch.Tensor): Filtered output.
"""
padding = (self.weight.shape[-1] - 1) // 2
return self.conv(input, weight=self.weight, groups=self.groups, padding=padding, stride=stride)
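# Hedged usage sketch (2-D case; argument names follow the docstring above):
# smoothing = GaussianSmoothing(channels=3, kernel_size=5, sigma=1.0, dim=2)
# blurred = smoothing(torch.rand(1, 3, 100, 100))  # same spatial size, thanks to the padding above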
| [
"torch.exp",
"torch.arange",
"torch.sum"
] | 1.4.0 | PaulCzaban/Time-Travel-Rephotography.github.io | 5d0ce32a48dfd7156a0f8dfddf0eadbb55b0be52 |
1.6 | # %% Load packages
import numpy as np
import torch
from sklearn.metrics import accuracy_score
from bnn_mcmc_examples.examples.mlp.hawks.constants import num_chains
from bnn_mcmc_examples.examples.mlp.hawks.dataloaders import test_dataloader
from bnn_mcmc_examples.examples.mlp.hawks.prior.constants import sampler_output_path, sampler_output_run_paths
# %% Load test data and labels
_, test_labels = next(iter(test_dataloader))
# %% Compute predictive accuracies
accuracies = np.empty(num_chains)
for i in range(num_chains):
test_preds = np.loadtxt(sampler_output_run_paths[i].joinpath('preds_via_mean.txt'), skiprows=0)
accuracies[i] = accuracy_score(test_preds, torch.argmax(test_labels, 1))
# %% Save predictive accuracies
np.savetxt(sampler_output_path.joinpath('accuracies_via_mean.txt'), accuracies)
| [
"torch.argmax"
] | 1.6.0 | papamarkou/bnn_mcmc_examples | 297cdb1e74335860989bebdb4ff6f6322b6adc06 |
1.8 | import os
from typing import Any, Dict, Optional
from unittest import mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.plugins import FullyShardedNativeMixedPrecisionPlugin
from pytorch_lightning.strategies import DDPFullyShardedStrategy
from pytorch_lightning.utilities import _FAIRSCALE_FULLY_SHARDED_AVAILABLE
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
if _FAIRSCALE_FULLY_SHARDED_AVAILABLE:
from fairscale.nn import FullyShardedDataParallel, wrap
def test_invalid_on_cpu(tmpdir):
"""Test to ensure that to raise Misconfiguration for FSDP on CPU."""
with pytest.raises(
MisconfigurationException, match="You selected strategy to be `ddp_fully_sharded`, but GPU is not available."
):
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True, strategy="fsdp")
assert isinstance(trainer.strategy, DDPFullyShardedStrategy)
trainer.strategy.setup_environment()
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0"})
@mock.patch("torch.cuda.device_count", return_value=1)
@mock.patch("torch.cuda.is_available", return_value=True)
@RunIf(fairscale_fully_sharded=True)
def test_fsdp_with_sharded_amp(device_count_mock, mock_cuda_available, tmpdir):
"""Test to ensure that plugin native amp plugin is correctly chosen when using sharded."""
trainer = Trainer(
default_root_dir=tmpdir, fast_dev_run=True, strategy="fsdp", accelerator="gpu", devices=1, precision=16
)
assert isinstance(trainer.strategy, DDPFullyShardedStrategy)
assert isinstance(trainer.strategy.precision_plugin, FullyShardedNativeMixedPrecisionPlugin)
class TestFSDPModel(BoringModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.layer: Optional[torch.nn.Module] = None
def _init_model(self) -> None:
self.layer = torch.nn.Sequential(torch.nn.Linear(32, 32), torch.nn.ReLU(), torch.nn.Linear(32, 2))
def setup(self, stage: str) -> None:
if self.layer is None:
self._init_model()
def configure_sharded_model(self) -> None:
# the model is already wrapped with FSDP: no need to wrap again!
if isinstance(self.layer, FullyShardedDataParallel):
return
for i, layer in enumerate(self.layer):
if i % 2 == 0:
self.layer[i] = wrap(layer)
self.layer = wrap(self.layer)
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
# when loading full state dict, we first need to create a new unwrapped model
self._init_model()
def configure_optimizers(self):
return torch.optim.SGD(self.layer.parameters(), lr=0.1)
def on_train_start(self) -> None:
self._assert_layer_fsdp_instance()
def on_test_start(self) -> None:
self._assert_layer_fsdp_instance()
def on_validation_start(self) -> None:
self._assert_layer_fsdp_instance()
def on_prediction_start(self) -> None:
self._assert_layer_fsdp_instance()
def _assert_layer_fsdp_instance(self) -> None:
assert isinstance(self.layer, FullyShardedDataParallel)
assert isinstance(self.layer.module[0], FullyShardedDataParallel)
assert isinstance(self.layer.module[2], FullyShardedDataParallel)
# Assert that the nested layers are set reshard_after_forward to True
assert self.layer.module[0].reshard_after_forward is True
assert self.layer.module[2].reshard_after_forward is True
if isinstance(self.trainer.precision_plugin, FullyShardedNativeMixedPrecisionPlugin):
assert self.layer.mixed_precision
assert self.layer.module[0].mixed_precision
assert self.layer.module[2].mixed_precision
@RunIf(min_gpus=1, skip_windows=True, standalone=True, fairscale_fully_sharded=True)
def test_fully_sharded_strategy_checkpoint(tmpdir):
"""Test to ensure that checkpoint is saved correctly when using a single GPU, and all stages can be run."""
model = TestFSDPModel()
trainer = Trainer(
default_root_dir=tmpdir,
accelerator="gpu",
devices=1,
strategy="fsdp",
precision=16,
max_epochs=1,
enable_progress_bar=False,
enable_model_summary=False,
)
_run_multiple_stages(trainer, model, os.path.join(tmpdir, "last.ckpt"))
@RunIf(min_gpus=2, skip_windows=True, standalone=True, fairscale_fully_sharded=True)
def test_fully_sharded_strategy_checkpoint_multi_gpus(tmpdir):
"""Test to ensure that checkpoint is saved correctly when using multiple GPUs, and all stages can be run."""
model = TestFSDPModel()
ck = ModelCheckpoint(save_last=True)
trainer = Trainer(
default_root_dir=tmpdir,
accelerator="gpu",
devices=2,
strategy="fsdp",
precision=16,
max_epochs=1,
callbacks=[ck],
enable_progress_bar=False,
enable_model_summary=False,
)
_run_multiple_stages(trainer, model)
def _assert_save_equality(trainer, ckpt_path, cls=TestFSDPModel):
# Use FullySharded to get the state dict for the sake of comparison
model_state_dict = trainer.strategy.lightning_module_state_dict()
if trainer.is_global_zero:
saved_model = cls.load_from_checkpoint(ckpt_path)
# Assert model parameters are identical after loading
for ddp_param, shard_param in zip(model_state_dict.values(), saved_model.state_dict().values()):
assert torch.equal(ddp_param.float().cpu(), shard_param)
def _run_multiple_stages(trainer, model, model_path: Optional[str] = None):
trainer.fit(model)
model_path = model_path if model_path else trainer.checkpoint_callback.last_model_path
trainer.save_checkpoint(model_path, weights_only=True)
_assert_save_equality(trainer, model_path, cls=TestFSDPModel)
# Test entry point
trainer.test(model) # model is wrapped, will not call configure_shared_model
# provide model path, will create a new unwrapped model and load and then call configure_shared_model to wrap
trainer.test(ckpt_path=model_path)
@RunIf(min_gpus=1, skip_windows=True, standalone=True, fairscale_fully_sharded=True)
def test_fsdp_gradient_clipping_raises(tmpdir):
"""Test to ensure that an exception is raised when clipping gradients by value with FSDP."""
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
strategy="fsdp",
fast_dev_run=True,
accelerator="gpu",
devices=1,
precision=16,
gradient_clip_val=1,
gradient_clip_algorithm="norm",
enable_progress_bar=False,
enable_model_summary=False,
)
with pytest.raises(
MisconfigurationException, match="gradient_clip_algorithm='norm'` is currently not supported for `FullySharded"
):
trainer.fit(model)
| [
"torch.nn.Linear",
"torch.nn.ReLU"
] | 1.8 | mathemusician/pytorch-lightning | 15fa5389387b3a220bc044dd30eb0be1e8f64944 |
1.8 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Type
import pytest
import torch
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.strategies import DDPSpawnShardedStrategy
from tests.helpers.boring_model import BoringModel, RandomDataset
from tests.helpers.runif import RunIf
class SeedTrainLoaderModel(BoringModel):
"""Overrides training loader to ensure we enforce the same seed for all DDP processes."""
def train_dataloader(self):
seed_everything(42)
return torch.utils.data.DataLoader(RandomDataset(32, 64))
class SeedTrainLoaderManualModel(SeedTrainLoaderModel):
def training_step(self, batch, batch_idx, optimizer_idx):
# manual
# access your optimizers with use_pl_optimizer=False. Default is True
(opt_a, opt_b) = self.optimizers(use_pl_optimizer=True)
loss_1 = self.step(batch)
self.manual_backward(loss_1)
opt_a.step()
# fake discriminator
loss_2 = self.step(batch[0])
# ensure we forward the correct params to the optimizer
# without retain_graph we can't do multiple backward passes
self.manual_backward(loss_2)
# todo: understand why synchronization breaks there.
# self.manual_backward(loss_2, retain_graph=True)
opt_b.step()
assert self.layer.weight.grad is None or torch.all(self.layer.weight.grad == 0)
def training_epoch_end(self, outputs) -> None:
# outputs should be an array with an entry per optimizer
assert len(outputs) == 2
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
return optimizer, optimizer_2
@property
def automatic_optimization(self) -> bool:
return False
class SeedTrainLoaderMultipleOptimizersModel(SeedTrainLoaderModel):
def training_step(self, batch, batch_idx, optimizer_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs) -> None:
# outputs should be an array with an entry per optimizer
assert len(outputs) == 2
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
return optimizer, optimizer_2
def record_ddp_fit_model_stats(trainer, model, use_cuda):
"""Helper to calculate wall clock time for fit + max allocated memory.
Args:
trainer: The trainer object.
model: The model to fit.
use_cuda: Whether to sync CUDA kernels.
Returns:
Max Memory if using GPUs, and total wall clock time.
"""
max_memory = None
time_start = time.perf_counter()
if use_cuda:
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
trainer.fit(model)
if use_cuda:
torch.cuda.synchronize()
max_memory = torch.cuda.max_memory_allocated() / 2**20
total_time = time.perf_counter() - time_start
return max_memory, total_time
def plugin_parity_test(
model_cls: Type[SeedTrainLoaderModel],
seed: int = 42,
gpus: int = 0,
precision: int = 32,
max_percent_speed_diff: float = 0.1,
):
"""Ensures that the trained model is identical to the standard DDP implementation. Also checks for speed/memory
regressions, we should expect always less memory but performance to fluctuate.
Args:
model_cls: Model class to use for test.
seed: Seed for generators. Note that this does not handle the seed for data-loading on multi-process.
gpus: Number of GPUS to enable.
precision: Whether to use AMP or normal FP32 training.
max_percent_speed_diff: The maximum speed difference compared to normal DDP training.
This is more a safety net for variability in CI which can vary in speed, not for benchmarking.
"""
# Train normal DDP
seed_everything(seed)
ddp_model = model_cls()
use_cuda = gpus > 0
trainer = Trainer(
fast_dev_run=True, max_epochs=1, accelerator="gpu", devices=gpus, precision=precision, strategy="ddp_spawn"
)
max_memory_ddp, ddp_time = record_ddp_fit_model_stats(trainer=trainer, model=ddp_model, use_cuda=use_cuda)
# Reset and train Custom DDP
seed_everything(seed)
custom_plugin_model = model_cls()
trainer = Trainer(
fast_dev_run=True,
max_epochs=1,
accelerator="gpu",
devices=gpus,
precision=precision,
strategy="ddp_sharded_spawn",
)
assert isinstance(trainer.strategy, DDPSpawnShardedStrategy)
max_memory_custom, custom_model_time = record_ddp_fit_model_stats(
trainer=trainer, model=custom_plugin_model, use_cuda=use_cuda
)
# Assert model parameters are identical after fit
for ddp_param, custom_param in zip(ddp_model.parameters(), custom_plugin_model.parameters()):
assert torch.equal(ddp_param, custom_param), "Model parameters are different between DDP and Custom plugin"
# Assert speed parity by ensuring percentage difference between custom/ddp is below threshold
percent_diff = (custom_model_time - ddp_time) / custom_model_time
assert (
percent_diff <= max_percent_speed_diff
), f"Custom DDP was too slow compared to regular DDP, Custom Plugin Time: {custom_model_time}, DDP Time: {ddp_time}"
if use_cuda:
# Assert CUDA memory parity
assert max_memory_custom <= max_memory_ddp, (
"Custom plugin used too much memory compared to DDP, "
f"Custom Mem: {max_memory_custom}, DDP Mem: {max_memory_ddp}"
)
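# Hedged sketch of invoking the parity check above directly (outside pytest). The
# GPU count, precision and tolerance below are illustrative choices, not values
# mandated by this file; the parametrized test underneath is the real entry point.
def _parity_check_example():
    plugin_parity_test(
        model_cls=SeedTrainLoaderModel,
        gpus=2,
        precision=16,
        max_percent_speed_diff=0.25,
    )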
@RunIf(skip_windows=True, fairscale=True)
@pytest.mark.parametrize(
"kwargs",
[
pytest.param(dict(gpus=1, model_cls=SeedTrainLoaderModel), marks=RunIf(min_gpus=1)),
pytest.param(
dict(gpus=1, precision=16, model_cls=SeedTrainLoaderModel), marks=RunIf(min_gpus=1, amp_native=True)
),
pytest.param(dict(gpus=2, model_cls=SeedTrainLoaderModel), marks=RunIf(min_gpus=2)),
pytest.param(
dict(gpus=2, precision=16, model_cls=SeedTrainLoaderModel), marks=RunIf(min_gpus=2, amp_native=True)
),
pytest.param(
dict(gpus=2, model_cls=SeedTrainLoaderMultipleOptimizersModel),
marks=[
RunIf(min_gpus=2),
pytest.mark.skip(reason="TODO: Current issue with multiple optimizers and FairScale."),
],
),
pytest.param(
dict(gpus=2, model_cls=SeedTrainLoaderManualModel),
marks=[
RunIf(min_gpus=2),
pytest.mark.skip(reason="TODO: Current issue with multiple optimizers and FairScale."),
],
),
],
)
def test_ddp_spawn_sharded_strategy(kwargs):
if kwargs["gpus"] > 1:
# TODO: decrease speed diff since only 2 GPUs sharding 2 optimizers
kwargs["max_percent_speed_diff"] = 0.25
plugin_parity_test(**kwargs)
| [
"torch.cuda.reset_peak_memory_stats",
"torch.cuda.synchronize",
"torch.cuda.max_memory_allocated",
"torch.all",
"torch.equal"
] | 1.8 | mathemusician/pytorch-lightning | 15fa5389387b3a220bc044dd30eb0be1e8f64944 |
1.6 | #!/usr/bin/env python3
import warnings
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import captum._utils.common as common
import torch
from captum._utils.av import AV
from captum.attr import LayerActivation
from captum.influence._core.influence import DataInfluence
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
r"""
Additional helper functions to calculate similarity metrics.
"""
def euclidean_distance(test, train) -> Tensor:
r"""
Calculates the pairwise euclidean distance for batches of feature vectors.
Tensors test and train have shape (batch_size_1, *), and (batch_size_2, *).
Returns pairwise euclidean distance Tensor of shape (batch_size_1, batch_size_2).
"""
similarity = torch.cdist(
test.view(test.shape[0], -1).unsqueeze(0),
train.view(train.shape[0], -1).unsqueeze(0),
).squeeze(0)
return similarity
def cosine_similarity(test, train, replace_nan=0) -> Tensor:
r"""
Calculates the pairwise cosine similarity for batches of feature vectors.
Tensors test and train have shape (batch_size_1, *), and (batch_size_2, *).
Returns pairwise cosine similarity Tensor of shape (batch_size_1, batch_size_2).
"""
test = test.view(test.shape[0], -1)
train = train.view(train.shape[0], -1)
if torch.__version__ <= "1.6.0":
test_norm = torch.norm(test, p=None, dim=1, keepdim=True)
train_norm = torch.norm(train, p=None, dim=1, keepdim=True)
else:
test_norm = torch.linalg.norm(test, ord=2, dim=1, keepdim=True)
train_norm = torch.linalg.norm(train, ord=2, dim=1, keepdim=True)
test = torch.where(test_norm != 0.0, test / test_norm, Tensor([replace_nan]))
train = torch.where(train_norm != 0.0, train / train_norm, Tensor([replace_nan])).T
similarity = torch.mm(test, train)
return similarity
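# Hedged shape sketch for the two metrics above; the batch sizes and feature
# shape are arbitrary assumptions, not taken from Captum's tests.
def _similarity_shapes_example():
    test_batch = torch.randn(3, 4, 5, 5)    # batch_size_1 = 3
    train_batch = torch.randn(16, 4, 5, 5)  # batch_size_2 = 16
    assert euclidean_distance(test_batch, train_batch).shape == (3, 16)
    assert cosine_similarity(test_batch, train_batch).shape == (3, 16)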
r"""
Implements abstract DataInfluence class and provides implementation details for
similarity metric-based influence computation. Similarity metrics can be used to compare
intermediate or final activation vectors of a model for different sets of input. Then,
these can be used to draw conclusions about influential instances.
Some standard similarity metrics such as dot product similarity or euclidean distance
are provided, but the user can provide any custom similarity metric as well.
"""
class SimilarityInfluence(DataInfluence):
def __init__(
self,
module: Module,
layers: Union[str, List[str]],
influence_src_dataset: Dataset,
activation_dir: str,
model_id: str = "",
similarity_metric: Callable = cosine_similarity,
similarity_direction: str = "max",
batch_size: int = 1,
**kwargs: Any,
):
r"""
Args:
module (torch.nn.Module): An instance of pytorch model. This model should
define all of its layers as attributes of the model.
layers (str or List of str): The fully qualified layer(s) for which the
activation vectors are computed.
influence_src_dataset (torch.utils.data.Dataset): PyTorch Dataset that is
used to create a PyTorch Dataloader to iterate over the dataset and
                its labels. This is the dataset in which we will be seeking
influential instances. In most cases this is the training dataset.
activation_dir (str): The directory of the path to store
and retrieve activation computations. Best practice would be to use
an absolute path.
model_id (str): The name/version of the model for which layer
activations are being computed. Activations will be stored and
loaded under the subdirectory with this name if provided.
similarity_metric (Callable): This is a callable function that computes a
similarity metric between two representations. For example, the
representations pair could be from the training and test sets.
This function must adhere to certain standards. The inputs should be
torch Tensors with shape (batch_size_i/j, feature dimensions). The
output Tensor should have shape (batch_size_i, batch_size_j) with
scalar values corresponding to the similarity metric used for each
pairwise combination from the two batches.
For example, suppose we use `batch_size_1 = 16` for iterating
through `influence_src_dataset`, and for the `inputs` argument
we pass in a Tensor with 3 examples, i.e. batch_size_2 = 3. Also,
suppose that our inputs and intermediate activations throughout the
model will have dimension (N, C, H, W). Then, the feature dimensions
should be flattened within this function. For example::
>>> av_test.shape
torch.Size([3, N, C, H, W])
>>> av_src.shape
torch.Size([16, N, C, H, W])
                >>> av_test = av_test.view(av_test.shape[0], -1)
>>> av_test.shape
torch.Size([3, N x C x H x W])
and similarly for av_src. The similarity_metric should then use
these flattened tensors to return the pairwise similarity matrix.
For example, `similarity_metric(av_test, av_src)` should return a
tensor of shape (3, 16).
batch_size (int): Batch size for iterating through `influence_src_dataset`.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
"""
self.module = module
self.layers = [layers] if isinstance(layers, str) else layers
self.influence_src_dataset = influence_src_dataset
self.activation_dir = activation_dir
self.model_id = model_id
self.batch_size = batch_size
if similarity_direction == "max" or similarity_direction == "min":
self.similarity_direction = similarity_direction
else:
raise ValueError(
f"{similarity_direction} is not a valid value. "
"Must be either 'max' or 'min'"
)
if similarity_metric is cosine_similarity:
if "replace_nan" in kwargs:
self.replace_nan = kwargs["replace_nan"]
else:
self.replace_nan = -2 if self.similarity_direction == "max" else 2
similarity_metric = partial(cosine_similarity, replace_nan=self.replace_nan)
self.similarity_metric = similarity_metric
self.influence_src_dataloader = DataLoader(
influence_src_dataset, batch_size, shuffle=False
)
def influence( # type: ignore[override]
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
top_k: int = 1,
additional_forward_args: Optional[Any] = None,
load_src_from_disk: bool = True,
**kwargs: Any,
) -> Dict:
r"""
Args:
inputs (tensor or tuple of tensors): Batch of examples for which influential
instances are computed. They are passed to the forward_func. The
first dimension in `inputs` tensor or tuple of tensors corresponds
to the batch size. A tuple of tensors is only passed in if this
is the input form that `module` accepts.
top_k (int): The number of top-matching activations to return
additional_forward_args (optional): Additional arguments that will be
passed to forward_func after inputs.
load_src_from_disk (bool): Loads activations for `influence_src_dataset`
where possible. Setting to False would force regeneration of
activations.
load_input_from_disk (bool): Regenerates activations for inputs by default
and removes previous `inputs` activations that are flagged with
`inputs_id`. Setting to True will load prior matching inputs
activations. Note that this could lead to unexpected behavior if
`inputs_id` is not configured properly and activations are loaded
for a different, prior `inputs`.
inputs_id (str): Used to identify inputs for loading activations.
**kwargs: Additional key-value arguments that are necessary for specific
implementation of `DataInfluence` abstract class.
Returns:
influences (dict): Returns the influential instances retrieved from
`influence_src_dataset` for each test example represented through a
tensor or a tuple of tensor in `inputs`. Returned influential
examples are represented as dict, with keys corresponding to
the layer names passed in `layers`. Each value in the dict is a
tuple containing the indices and values for the top k similarities
from `influence_src_dataset` by the chosen metric. The first value
                in the tuple contains the indices of the top k most similar
                examples, and the second value contains the similarity scores.
The batch dimension corresponds to the batch dimension of `inputs`.
If inputs.shape[0] == 5, then dict[`layer_name`][0].shape[0] == 5.
These tensors will be of shape (inputs.shape[0], top_k).
"""
inputs_batch_size = (
inputs[0].shape[0] if isinstance(inputs, tuple) else inputs.shape[0]
)
influences: Dict[str, Any] = {}
layer_AVDatasets = AV.generate_dataset_activations(
self.activation_dir,
self.module,
self.model_id,
self.layers,
DataLoader(self.influence_src_dataset, self.batch_size, shuffle=False),
identifier="src",
load_from_disk=load_src_from_disk,
return_activations=True,
)
assert layer_AVDatasets is not None and not isinstance(
layer_AVDatasets, AV.AVDataset
)
layer_modules = [
common._get_module_from_name(self.module, layer) for layer in self.layers
]
test_activations = LayerActivation(self.module, layer_modules).attribute(
inputs, additional_forward_args
)
minmax = self.similarity_direction == "max"
# av_inputs shape: (inputs_batch_size, *) e.g. (inputs_batch_size, N, C, H, W)
# av_src shape: (self.batch_size, *) e.g. (self.batch_size, N, C, H, W)
test_activations = (
test_activations if len(self.layers) > 1 else [test_activations]
)
for i, (layer, layer_AVDataset) in enumerate(
zip(self.layers, layer_AVDatasets)
):
topk_val, topk_idx = torch.Tensor(), torch.Tensor().long()
zero_acts = torch.Tensor().long()
av_inputs = test_activations[i]
src_loader = DataLoader(layer_AVDataset)
for j, av_src in enumerate(src_loader):
av_src = av_src.squeeze(0)
similarity = self.similarity_metric(av_inputs, av_src)
msg = (
"Output of custom similarity does not meet required dimensions. "
f"Your output has shape {similarity.shape}.\nPlease ensure the "
"output shape matches (inputs_batch_size, src_dataset_batch_size), "
f"which should be {(inputs_batch_size, self.batch_size)}."
)
assert similarity.shape == (inputs_batch_size, av_src.shape[0]), msg
if hasattr(self, "replace_nan"):
idx = (similarity == self.replace_nan).nonzero()
zero_acts = torch.cat((zero_acts, idx))
r"""
TODO: For models that can have tuples as activations, we should
allow similarity metrics to accept tuples, support topk selection.
"""
topk_batch = min(top_k, self.batch_size)
values, indices = torch.topk(
similarity, topk_batch, dim=1, largest=minmax
)
indices += int(j * self.batch_size)
topk_val = torch.cat((topk_val, values), dim=1)
topk_idx = torch.cat((topk_idx, indices), dim=1)
# can modify how often to sort for efficiency? minor
sort_idx = torch.argsort(topk_val, dim=1, descending=minmax)
topk_val = torch.gather(topk_val, 1, sort_idx[:, :top_k])
topk_idx = torch.gather(topk_idx, 1, sort_idx[:, :top_k])
influences[layer] = (topk_idx, topk_val)
            if torch.numel(zero_acts) != 0:
zero_warning = (
f"Layer {layer} has zero-vector activations for some inputs. This "
"may cause undefined behavior for cosine similarity. The indices "
"for the offending inputs will be included under the key "
f"'zero_acts-{layer}' in the output dictionary. Indices are "
"returned as a tensor with [inputs_idx, src_dataset_idx] pairs "
"which may have corrupted similarity scores."
)
warnings.warn(zero_warning, RuntimeWarning)
key = "-".join(["zero_acts", layer])
influences[key] = zero_acts
return influences
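# Hedged usage sketch for SimilarityInfluence. The layer name, cache directory and
# model id are placeholders invented for illustration; only the constructor and
# influence() signatures come from the class above.
def _similarity_influence_example(model: Module, train_set: Dataset, test_batch: Tensor) -> Dict:
    influence = SimilarityInfluence(
        module=model,
        layers="fc1",                       # hypothetical layer attribute name on `model`
        influence_src_dataset=train_set,
        activation_dir="/tmp/activations",  # hypothetical activation cache directory
        model_id="demo_model",
        similarity_metric=cosine_similarity,
        similarity_direction="max",
        batch_size=16,
    )
    # Returns, per layer, the indices and scores of the 3 most similar training examples.
    return influence.influence(test_batch, top_k=3)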
| [
"torch.numel",
"torch.cat",
"torch.gather",
"torch.norm",
"torch.argsort",
"torch.mm",
"torch.utils.data.DataLoader",
"torch.linalg.norm",
"torch.Tensor",
"torch.topk"
] | 1.6 | i-jones/captum | 567ec6fc67ab85ce07d075b25428be22bb65e31b |
1.6 | #!/usr/bin/env python3
from typing import Any, Callable, Generic, List, Tuple, Type, Union, cast
import torch
import torch.nn.functional as F
from captum._utils.common import (
_format_additional_forward_args,
_format_tensor_into_tuples,
_run_forward,
_validate_target,
)
from captum._utils.gradient import compute_gradients
from captum._utils.typing import ModuleOrModuleList, TargetType
from captum.attr._utils.common import (
_format_input_baseline,
_sum_rows,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
from torch import Tensor
from torch.nn import Module
class Attribution:
r"""
All attribution algorithms extend this class. It enforces its child classes
to extend and override core `attribute` method.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
"""
self.forward_func = forward_func
attribute: Callable
r"""
This method computes and returns the attribution values for each input tensor.
Deriving classes are responsible for implementing its logic accordingly.
Specific attribution algorithms that extend this class take relevant
arguments.
Args:
inputs (tensor or tuple of tensors): Input for which attribution
is computed. It can be provided as a single tensor or
a tuple of multiple tensors. If multiple input tensors
            are provided, the batch sizes must be aligned across all
tensors.
Returns:
*tensor* or tuple of *tensors* of **attributions**:
- **attributions** (*tensor* or tuple of *tensors*):
Attribution values for each
input tensor. The `attributions` have the same shape and
dimensionality as the inputs.
If a single tensor is provided as inputs, a single tensor
is returned. If a tuple is provided for inputs, a tuple of
corresponding sized tensors is returned.
"""
@property
def multiplies_by_inputs(self):
return False
def has_convergence_delta(self) -> bool:
r"""
This method informs the user whether the attribution algorithm provides
a convergence delta (aka an approximation error) or not. Convergence
delta may serve as a proxy of correctness of attribution algorithm's
approximation. If deriving attribution class provides a
`compute_convergence_delta` method, it should
override both `compute_convergence_delta` and `has_convergence_delta` methods.
Returns:
bool:
Returns whether the attribution algorithm
provides a convergence delta (aka approximation error) or not.
"""
return False
compute_convergence_delta: Callable
r"""
The attribution algorithms which derive `Attribution` class and provide
convergence delta (aka approximation error) should implement this method.
Convergence delta can be computed based on certain properties of the
    attribution algorithms.
Args:
attributions (tensor or tuple of tensors): Attribution scores that
are precomputed by an attribution algorithm.
Attributions can be provided in form of a single tensor
or a tuple of those. It is assumed that attribution
tensor's dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
        *args (optional): Additional arguments that are used by the
sub-classes depending on the specific implementation
of `compute_convergence_delta`.
Returns:
*tensor* of **deltas**:
- **deltas** (*tensor*):
            Depending on specific implementation of
sub-classes, convergence delta can be returned per
sample in form of a tensor or it can be aggregated
            across multiple samples and returned in form of a
single floating point tensor.
"""
@classmethod
def get_name(cls: Type["Attribution"]) -> str:
r"""
Create readable class name by inserting a space before any capital
characters besides the very first.
Returns:
str: a readable class name
Example:
for a class called IntegratedGradients, we return the string
'Integrated Gradients'
"""
return "".join(
[
char if char.islower() or idx == 0 else " " + char
for idx, char in enumerate(cls.__name__)
]
)
class GradientAttribution(Attribution):
r"""
All gradient based attribution algorithms extend this class. It requires a
forward function, which most commonly is the forward function of the model
that we want to interpret or the model itself.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
"""
Attribution.__init__(self, forward_func)
self.gradient_func = compute_gradients
@log_usage()
def compute_convergence_delta(
self,
attributions: Union[Tensor, Tuple[Tensor, ...]],
start_point: Union[
None, int, float, Tensor, Tuple[Union[int, float, Tensor], ...]
],
end_point: Union[Tensor, Tuple[Tensor, ...]],
target: TargetType = None,
additional_forward_args: Any = None,
) -> Tensor:
r"""
Here we provide a specific implementation for `compute_convergence_delta`
which is based on a common property among gradient-based attribution algorithms.
In the literature sometimes it is also called completeness axiom. Completeness
        axiom states that the sum of the attributions must be equal to the difference of
        the model's function values at its end and start points. In other words:
sum(attributions) - (F(end_point) - F(start_point)) is close to zero.
Returned delta of this method is defined as above stated difference.
This implementation assumes that both the `start_point` and `end_point` have
the same shape and dimensionality. It also assumes that the target must have
the same number of examples as the `start_point` and the `end_point` in case
it is provided in form of a list or a non-singleton tensor.
Args:
attributions (tensor or tuple of tensors): Precomputed attribution
scores. The user can compute those using any attribution
                algorithm. It is assumed that the shape and the
dimensionality of attributions must match the shape and
the dimensionality of `start_point` and `end_point`.
It also assumes that the attribution tensor's
dimension 0 corresponds to the number of
examples, and if multiple input tensors are provided,
the examples must be aligned appropriately.
start_point (tensor or tuple of tensors, optional): `start_point`
is passed as an input to model's forward function. It
is the starting point of attributions' approximation.
It is assumed that both `start_point` and `end_point`
have the same shape and dimensionality.
end_point (tensor or tuple of tensors): `end_point`
is passed as an input to model's forward function. It
is the end point of attributions' approximation.
It is assumed that both `start_point` and `end_point`
have the same shape and dimensionality.
target (int, tuple, tensor or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples.
`additional_forward_args` is used both for `start_point`
and `end_point` when computing the forward pass.
Default: None
Returns:
*tensor* of **deltas**:
- **deltas** (*tensor*):
This implementation returns convergence delta per
sample. Deriving sub-classes may do any type of aggregation
of those values, if necessary.
"""
end_point, start_point = _format_input_baseline(end_point, start_point)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
# tensorizing start_point in case it is a scalar or one example baseline
# If the batch size is large we could potentially also tensorize only one
# sample and expand the output to the rest of the elements in the batch
start_point = _tensorize_baseline(end_point, start_point)
attributions = _format_tensor_into_tuples(attributions)
# verify that the attributions and end_point match on 1st dimension
for attribution, end_point_tnsr in zip(attributions, end_point):
assert end_point_tnsr.shape[0] == attribution.shape[0], (
"Attributions tensor and the end_point must match on the first"
" dimension but found attribution: {} and end_point: {}".format(
attribution.shape[0], end_point_tnsr.shape[0]
)
)
num_samples = end_point[0].shape[0]
_validate_input(end_point, start_point)
_validate_target(num_samples, target)
with torch.no_grad():
start_out_sum = _sum_rows(
_run_forward(
self.forward_func, start_point, target, additional_forward_args
)
)
end_out_sum = _sum_rows(
_run_forward(
self.forward_func, end_point, target, additional_forward_args
)
)
row_sums = [_sum_rows(attribution) for attribution in attributions]
attr_sum = torch.stack(
[cast(Tensor, sum(row_sum)) for row_sum in zip(*row_sums)]
)
_delta = attr_sum - (end_out_sum - start_out_sum)
return _delta
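# Hedged numeric sketch of the completeness property documented above: for any
# attribution method satisfying it, sum(attributions) ~= F(end_point) - F(start_point).
# The tiny linear model and hand-built attributions are invented for illustration.
def _completeness_axiom_example():
    weights = torch.tensor([[2.0, -1.0, 0.5]])
    forward_func = lambda x: x @ weights.T                   # F(x): one scalar per example
    start_point = torch.zeros(1, 3)                          # baseline
    end_point = torch.tensor([[1.0, 4.0, 2.0]])              # input
    attributions = (end_point - start_point) * weights       # exact for a linear F
    delta = attributions.sum() - (forward_func(end_point) - forward_func(start_point)).sum()
    assert torch.isclose(delta, torch.tensor(0.0), atol=1e-6)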
class PerturbationAttribution(Attribution):
r"""
All perturbation based attribution algorithms extend this class. It requires a
forward function, which most commonly is the forward function of the model
that we want to interpret or the model itself.
"""
def __init__(self, forward_func: Callable) -> None:
r"""
Args:
forward_func (callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
"""
Attribution.__init__(self, forward_func)
@property
def multiplies_by_inputs(self):
return True
class InternalAttribution(Attribution, Generic[ModuleOrModuleList]):
layer: ModuleOrModuleList
r"""
    Shared base class for LayerAttribution and NeuronAttribution,
attribution types that require a model and a particular layer.
"""
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list(int)): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
Attribution.__init__(self, forward_func)
self.layer = layer
self.device_ids = device_ids
class LayerAttribution(InternalAttribution):
r"""
    Layer attribution provides attribution values for the given layer, quantifying
the importance of each neuron within the given layer's output. The output
attribution of calling attribute on a LayerAttribution object always matches
the size of the layer output.
"""
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list(int)): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
InternalAttribution.__init__(self, forward_func, layer, device_ids)
@staticmethod
def interpolate(
layer_attribution: Tensor,
interpolate_dims: Union[int, Tuple[int, ...]],
interpolate_mode: str = "nearest",
) -> Tensor:
r"""
Interpolates given 3D, 4D or 5D layer attribution to given dimensions.
This is often utilized to upsample the attribution of a convolutional layer
to the size of an input, which allows visualizing in the input space.
Args:
layer_attribution (torch.Tensor): Tensor of given layer attributions.
interpolate_dims (int or tuple): Upsampled dimensions. The
number of elements must be the number of dimensions
of layer_attribution - 2, since the first dimension
corresponds to number of examples and the second is
assumed to correspond to the number of channels.
interpolate_mode (str): Method for interpolation, which
must be a valid input interpolation mode for
torch.nn.functional. These methods are
"nearest", "area", "linear" (3D-only), "bilinear"
(4D-only), "bicubic" (4D-only), "trilinear" (5D-only)
based on the number of dimensions of the given layer
attribution.
Returns:
*tensor* of upsampled **attributions**:
- **attributions** (*tensor*):
Upsampled layer attributions with first 2 dimensions matching
                layer_attribution and remaining dimensions given by
interpolate_dims.
"""
return F.interpolate(layer_attribution, interpolate_dims, mode=interpolate_mode)
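# Hedged usage sketch for the interpolate helper above: a 4D layer attribution of
# shape (examples, channels, 7, 7) upsampled to an assumed 28x28 input resolution.
def _interpolate_example():
    layer_attr = torch.randn(2, 8, 7, 7)
    upsampled = LayerAttribution.interpolate(layer_attr, (28, 28), interpolate_mode="bilinear")
    assert upsampled.shape == (2, 8, 28, 28)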
class NeuronAttribution(InternalAttribution):
r"""
    Neuron attribution provides input attribution for a given neuron, quantifying
the importance of each input feature in the activation of a particular neuron.
Calling attribute on a NeuronAttribution object requires also providing
the index of the neuron in the output of the given layer for which attributions
are required.
The output attribution of calling attribute on a NeuronAttribution object
always matches the size of the input.
"""
def __init__(
self,
forward_func: Callable,
layer: Module,
device_ids: Union[None, List[int]] = None,
) -> None:
r"""
Args:
forward_func (callable or torch.nn.Module): This can either be an instance
of pytorch model or any modification of model's forward
function.
layer (torch.nn.Module): Layer for which output attributions are computed.
Output size of attribute matches that of layer output.
device_ids (list(int)): Device ID list, necessary only if forward_func
applies a DataParallel model, which allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
"""
InternalAttribution.__init__(self, forward_func, layer, device_ids)
attribute: Callable
r"""
This method computes and returns the neuron attribution values for each
input tensor. Deriving classes are responsible for implementing
its logic accordingly.
Specific attribution algorithms that extend this class take relevant
arguments.
Args:
inputs: A single high dimensional input tensor or a tuple of them.
neuron_selector (int or tuple): Tuple providing index of neuron in output
of given layer for which attribution is desired. Length of
this tuple must be one less than the number of
dimensions in the output of the given layer (since
dimension 0 corresponds to number of examples).
Returns:
*tensor* or tuple of *tensors* of **attributions**:
- **attributions** (*tensor* or tuple of *tensors*):
Attribution values for
each input vector. The `attributions` have the
dimensionality of inputs.
"""
| [
"torch.nn.functional.interpolate",
"torch.no_grad"
] | 1.6 | i-jones/captum | 567ec6fc67ab85ce07d075b25428be22bb65e31b |
1.7 | import math
from logging import getLogger
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import weight_norm
from torch_scatter import scatter  # segment-wise reductions used by softmax() and GATConv below
from libcity.model import loss
from libcity.model.abstract_traffic_state_model import AbstractTrafficStateModel
def remove_self_loops(edge_index: torch.Tensor):
return edge_index[:, edge_index[0] != edge_index[1]]
def maybe_num_nodes(edge_index: torch.Tensor, num_nodes: Optional[int] = None):
if num_nodes is not None:
return num_nodes
else:
return int(edge_index.max()) + 1
def add_self_loops(edge_index: torch.Tensor, num_nodes: Optional[int] = None):
return torch.cat((edge_index,
torch.arange(maybe_num_nodes(edge_index, num_nodes))
.repeat(2, 1)
.to(edge_index.device)), dim=1)
def softmax(x: torch.Tensor, index: torch.Tensor, num_nodes: Optional[int] = None, dim: int = 0):
N = maybe_num_nodes(index, num_nodes)
x_max = scatter(x, index, dim, dim_size=N, reduce='max').index_select(dim, index)
out = (x - x_max).exp()
out_sum = scatter(out, index, dim, dim_size=N, reduce='sum').index_select(dim, index)
return out / out_sum
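# Hedged sanity sketch for the helpers above: the segment-wise softmax normalizes
# edge scores so they sum to 1 per destination node. The toy scores and indices
# are arbitrary assumptions for illustration.
def _segment_softmax_example():
    edge_scores = torch.tensor([0.1, 2.0, -1.0, 0.5])
    dst_index = torch.tensor([0, 0, 1, 1])   # two incoming edges per destination node
    normalized = softmax(edge_scores, dst_index, num_nodes=2)
    per_node_sums = scatter(normalized, dst_index, dim=0, dim_size=2, reduce='sum')
    assert torch.allclose(per_node_sums, torch.ones(2))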
class STAGGCN(AbstractTrafficStateModel):
def __init__(self, config, data_feature):
super().__init__(config, data_feature)
self._scaler = self.data_feature.get('scaler')
self.adj_mx = self.data_feature.get('adj_mx', 1)
self.num_nodes = self.data_feature.get('num_nodes', 1)
self.input_dim = self.data_feature.get('feature_dim', 1)
self.output_dim = self.data_feature.get('output_dim', 1)
self.ext_dim = self.data_feature.get('ext_dim', 1)
        # The following two items are edge-relation data obtained by STAG-GCN's additional preprocessing of the dataset
        # spatial adjacency edge set obtained by preprocessing the dataset
        self.edge_index = self.data_feature.get('edge_index', torch.tensor([[], []], dtype=torch.long))  # spatial adjacency edges
        # semantic adjacency edge set obtained by preprocessing the dataset
        self.dtw_edge_index = self.data_feature.get('dtw_edge_index', torch.tensor([[], []], dtype=torch.long))  # semantic adjacency edges
self._logger = getLogger()
self.device = config.get('device', torch.device('cpu'))
self.input_window = config.get('input_window', 1)
self.output_window = config.get('output_window', 1)
self.graph_dim = config.get('graph_dim', 32)
self.tcn_dim = config.get('tcn_dim', [10])
self.attn_head = config.get('atten_head', 3)
self.choice = config.get('choice', [1, 1, 1])
self.batch_size = config.get('batch_size', 64)
self.edge_index = self.edge_index.to(self.device)
self.dtw_edge_index = self.dtw_edge_index.to(self.device)
self.model = STAGGCNModel(input_dim=self.input_dim,
output_dim=self.output_dim,
node_num=self.num_nodes,
seq_len=self.input_window,
pred_len=self.output_window,
graph_dim=self.graph_dim,
tcn_dim=self.tcn_dim,
attn_head=self.attn_head,
choice=self.choice).to(self.device)
def forward(self, batch):
x = batch['X'] # shape = (batch_size, input_length, num_nodes, input_dim)
# [batch_size, pred_len, num_nodes, output_dim]
return self.model(x, self.edge_index, self.dtw_edge_index)
def calculate_loss(self, batch):
y_true = batch['y']
y_predicted = self.predict(batch)
y_true = self._scaler.inverse_transform(y_true[..., :self.output_dim])
y_predicted = self._scaler.inverse_transform(y_predicted[..., :self.output_dim])
return loss.masked_mae_torch(y_predicted, y_true)
def predict(self, batch):
# one-inference multi-step prediction
return self.forward(batch)
class STAGGCNModel(nn.Module):
def __init__(self, input_dim=1, output_dim=1,
node_num=325, seq_len=12, pred_len=6, graph_dim=32,
tcn_dim=[10], attn_head=4, choice=[1, 1, 1]):
super(STAGGCNModel, self).__init__()
self.node_num = node_num
self.seq_len = seq_len
self.pred_len = pred_len
self.graph_dim = graph_dim
# self.output_dim = seq_len + np.sum(choice) * graph_dim
self.pred_len_raw = np.sum(choice) * graph_dim
self.STCell = STCell(node_num, seq_len, graph_dim, tcn_dim,
choice=choice, attn_head=attn_head,
input_dim=input_dim, output_dim=output_dim)
self.output_linear = nn.Linear(in_features=self.pred_len_raw, out_features=self.pred_len)
# self.output_linear_0 = nn.Linear(in_features=self.graph_dim, out_features=256)
# self.output_linear_1 = nn.Linear(in_features=256, out_features=self.pred_len)
def forward(self, x, edge_index, dtw_edge_index):
# x: [batch_size, seq_len, num_nodes, input_dim]
        # st_output: [batch_size, num_nodes, output_dim, sum(choice)*graph_dim]
        #         == [batch_size, num_nodes, output_dim, pred_len_raw]
st_output = self.STCell(x, edge_index, dtw_edge_index)
output = st_output
# [batch_size, num_nodes, output_dim, pred_len]
output = self.output_linear(output)
# output = F.relu(self.output_linear_0(output))
# output = self.output_linear_1(output)
# output = torch.reshape(output, (-1, self.node_num, self.pred_len))
# [batch_size, pred_len, num_nodes, output_dim]
return output.permute(0, 3, 1, 2).contiguous()
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
# x: [batch_size*input_dim*num_nodes, n_inputs, seq_len]
# self.conv1(x): [batch_size*input_dim*num_nodes, n_outputs, ...]
# self.chomp1(self.conv2(x)): [batch_size*input_dim*num_nodes, n_outputs, seq_len]
# return: [batch_size*input_dim*num_nodes, n_outputs, seq_len]
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
super(TemporalConvNet, self).__init__()
layers = []
num_levels = len(num_channels)
for i in range(num_levels):
dilation_size = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size - 1) * dilation_size, dropout=dropout)]
self.network = nn.Sequential(*layers)
def forward(self, x):
# x: [batch_size*num_nodes, input_dim, seq_len]
# return: [batch_size*num_nodes, output_dim*num_channels[-1], seq_len]
return self.network(x)
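# Hedged helper making the receptive field of the TCN above explicit: each
# TemporalBlock applies two causal convolutions with dilation 2**i, so level i
# adds 2*(kernel_size-1)*2**i steps of temporal context.
def tcn_receptive_field(num_levels: int, kernel_size: int = 2) -> int:
    return 1 + 2 * (kernel_size - 1) * (2 ** num_levels - 1)
# e.g. the single level with kernel_size=2 used by STCell below sees 3 past steps:
# 1 + 2*1*(2**1 - 1) == 3.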
class LearnedGCN(nn.Module):
def __init__(self, node_num, in_feature, out_feature):
super(LearnedGCN, self).__init__()
self.node_num = node_num
self.in_feature = in_feature
self.out_feature = out_feature
self.source_embed = nn.Parameter(torch.Tensor(self.node_num, 10))
self.target_embed = nn.Parameter(torch.Tensor(10, self.node_num))
self.linear = nn.Linear(self.in_feature, self.out_feature)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.source_embed.size(0))
self.source_embed.data.uniform_(-stdv, stdv)
self.target_embed.data.uniform_(-stdv, stdv)
def forward(self, input):
learned_matrix = F.softmax(F.relu(torch.mm(self.source_embed, self.target_embed)), dim=1)
output = learned_matrix.matmul(input)
output = self.linear(output)
return output
class GATConv(nn.Module):
def __init__(self,
in_channels: int, out_channels: int,
heads: int = 1, concat: bool = True,
negative_slope: float = 0.2, dropout: float = 0.0,
add_self_loops: bool = True, bias: bool = True):
super(GATConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.attn_heads = heads
self.negative_slope = negative_slope
self.dropout = dropout
self.bias = bias
self.concat = concat
self.add_self_loops = add_self_loops
self.linear = nn.Linear(self.in_channels, self.attn_heads * self.out_channels, bias=False)
self.attn_j = nn.Parameter(torch.Tensor(1, self.attn_heads, self.out_channels))
self.attn_i = nn.Parameter(torch.Tensor(1, self.attn_heads, self.out_channels))
if bias and concat:
self.bias = nn.Parameter(torch.Tensor(self.attn_heads * self.out_channels))
elif bias and not concat:
self.bias = nn.Parameter(torch.Tensor(self.out_channels))
else:
self.register_parameter('bias', None)
self._alpha = None
self.init_weights()
def init_weights(self):
self._glorot(self.linear.weight)
self._glorot(self.attn_j)
self._glorot(self.attn_i)
self._zeros(self.bias)
@staticmethod
def _glorot(t: torch.Tensor):
if t is None:
return
stdv = math.sqrt(6. / (t.size(-2) * t.size(-1)))
t.data.uniform_(-stdv, stdv)
@staticmethod
def _zeros(t: torch.Tensor):
if t is None:
return
t.data.fill_(0.)
def forward(self, x: torch.Tensor, edge_index: torch.Tensor):
num_nodes = x.size(0)
edge_index = remove_self_loops(edge_index)
edge_index = add_self_loops(edge_index, num_nodes=num_nodes)
edge_index_j, edge_index_i = edge_index
# x: [num_nodes, num_features]
# [num_edges, attn_heads, out_channels]
x_j = self.linear(x).view(-1, self.attn_heads, self.out_channels)[edge_index_j]
x_i = self.linear(x).view(-1, self.attn_heads, self.out_channels)[edge_index_i]
        # [num_edges, attn_heads]; x_j/x_i are already gathered per edge, so no further indexing is needed
        alpha_j = (x_j * self.attn_j).sum(dim=-1)
        alpha_i = (x_i * self.attn_i).sum(dim=-1)
# message passing
# [num_edges, attn_heads]
alpha = alpha_j + alpha_i
alpha = F.leaky_relu(alpha, self.negative_slope)
alpha = softmax(alpha, edge_index_i, x_i.size(0))
alpha = F.dropout(alpha, p=self.dropout, training=self.training)
# [num_edges, attn_heads, out_channels]
message = x_j * alpha.unsqueeze(-1)
out = scatter(message, edge_index_i, dim=0, reduce='add')
if self.concat:
out = out.view(-1, self.attn_heads * self.out_channels)
else:
out = out.mean(dim=1)
if self.bias is not None:
out += self.bias
return out
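# Hedged shape sketch for the GATConv defined above on a toy 4-node ring graph;
# feature sizes, head count and edges are arbitrary assumptions for illustration.
def _gatconv_shape_example():
    conv = GATConv(in_channels=6, out_channels=8, heads=3, concat=True)
    x = torch.randn(4, 6)
    edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]], dtype=torch.long)
    out = conv(x, edge_index)
    assert out.shape == (4, 3 * 8)   # concat=True stacks the attention heads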
class STCell(nn.Module):
def __init__(self, node_num=524, seq_len=12, graph_dim=16, tcn_dim=[10],
choice=[1, 1, 1], attn_head=2, input_dim=1, output_dim=1):
super(STCell, self).__init__()
self.node_num = node_num
self.seq_len = seq_len
self.graph_dim = graph_dim
self.tcn_dim = tcn_dim
self.pred_len_raw = np.sum(choice) * graph_dim
self.choice = choice
# self.jklayer = JumpingKnowledge("max")
# self.jklayer = JumpingKnowledge("lstm", self.graph_dim, 1)
self.input_dim = input_dim
self.output_dim = output_dim
self.in_features = seq_len * input_dim
self.seq_linear = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.input_dim * seq_len)
if choice[0] == 1:
print("[TCN]")
print("node_num:", node_num, "\tattn_head:", attn_head)
# one node of one input feature per embedding element
self.self_attn = nn.MultiheadAttention(embed_dim=node_num * input_dim, num_heads=attn_head)
# expand convolution output_dimension by output_dim
self.tcn = TemporalConvNet(num_inputs=self.input_dim,
num_channels=[x * self.output_dim for x in self.tcn_dim])
self.tlinear = nn.Linear(in_features=self.output_dim * self.tcn_dim[-1] * self.seq_len,
out_features=self.output_dim * self.graph_dim)
if choice[1] == 1:
print("[SP]")
self.sp_origin = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.output_dim * graph_dim)
self.sp_gconv1 = GATConv(self.input_dim * seq_len, self.output_dim * graph_dim, heads=3, concat=False)
self.sp_gconv2 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)
self.sp_gconv3 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)
self.sp_gconv4 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=1, concat=False)
# self.sp_gconv5 = GATConv(graph_dim, graph_dim, heads = 1, concat = False)
self.sp_source_embed = nn.Parameter(torch.Tensor(self.node_num, 12))
self.sp_target_embed = nn.Parameter(torch.Tensor(12, self.node_num))
self.sp_linear_1 = nn.Linear(self.input_dim * seq_len, self.output_dim * self.graph_dim)
self.sp_linear_2 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)
self.sp_linear_3 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)
self.sp_linear_4 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)
# self.sp_linear_5 = nn.Linear(self.graph_dim, self.graph_dim)
# self.sp_jklayer = JumpingKnowledge("max")
nn.init.xavier_uniform_(self.sp_source_embed)
nn.init.xavier_uniform_(self.sp_target_embed)
if choice[2] == 1:
print("[DTW]")
self.dtw_origin = nn.Linear(in_features=self.input_dim * seq_len, out_features=self.output_dim * graph_dim)
self.dtw_gconv1 = GATConv(self.input_dim * seq_len, self.output_dim * graph_dim, heads=3, concat=False)
self.dtw_gconv2 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)
self.dtw_gconv3 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)
self.dtw_gconv4 = GATConv(self.output_dim * graph_dim, self.output_dim * graph_dim, heads=3, concat=False)
# self.dtw_gconv5 = GATConv(graph_dim, graph_dim, heads = 1, concat = False)
self.dtw_source_embed = nn.Parameter(torch.Tensor(self.node_num, 12))
self.dtw_target_embed = nn.Parameter(torch.Tensor(12, self.node_num))
self.dtw_linear_1 = nn.Linear(self.input_dim * self.seq_len, self.output_dim * self.graph_dim)
self.dtw_linear_2 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)
self.dtw_linear_3 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)
self.dtw_linear_4 = nn.Linear(self.output_dim * self.graph_dim, self.output_dim * self.graph_dim)
# self.dtw_linear_5 = nn.Linear(self.graph_dim, self.graph_dim)
# self.dtw_jklayer = JumpingKnowledge("max")
nn.init.xavier_uniform_(self.dtw_source_embed)
nn.init.xavier_uniform_(self.dtw_target_embed)
def forward(self, x, edge_index, dtw_edge_index):
# x: [batch_size, seq_len, num_nodes, input_dim]
output_list = [0, 0, 0]
batch_size = x.shape[0]
if self.choice[0] == 1:
# [seq_len, batch_size, input_dim*num_nodes]
attn_input = x.permute(1, 0, 3, 2).reshape(self.seq_len, batch_size, -1).contiguous()
# [seq_len, batch_size, input_dim*num_nodes]
# input_dim*num_nodes is the embedding dimension
attn_output, _ = self.self_attn(attn_input, attn_input, attn_input)
# [seq_len, batch_size, input_dim*num_nodes]
attn_output = torch.tanh(attn_output + attn_input)
# [batch_size*num_nodes, input_dim, seq_len]
attn_output = attn_output.reshape(self.seq_len, batch_size, self.input_dim, self.node_num) \
.permute(1, 3, 2, 0) \
.reshape(-1, self.input_dim, self.seq_len)
# [batch_size*num_nodes, input_dim, seq_len]
tcn_input = attn_output
# [batch_size*num_nodes, output_dim*self.tcn_dim[-1], seq_len]
tcn_output = self.tcn(tcn_input)
# [batch_size*num_nodes, output_dim*self.tcn_dim[-1]*seq_len]
tcn_output = torch.reshape(tcn_output,
(-1, self.output_dim * self.tcn_dim[-1] * self.seq_len))
# [batch_size*num_nodes, output_dim*self.graph_dim]
tcn_output = self.tlinear(tcn_output)
# [batch_size, num_nodes, output_dim, self.graph_dim]
tcn_output = torch.reshape(tcn_output, (batch_size, self.node_num, self.output_dim, self.graph_dim))
output_list[0] = tcn_output
if self.choice[1] == 1 or self.choice[2] == 1:
# [batch_size, num_nodes, input_dim*seq_len]
sp_gout_0 = x.permute(0, 2, 3, 1).reshape(-1, self.input_dim * self.seq_len).contiguous()
dtw_gout_0 = sp_gout_0.detach().clone()
if self.choice[1] == 1:
# [batch_size*num_nodes, input_dim*seq_len]
sp_gout_0 = self.seq_linear(sp_gout_0) + sp_gout_0
# [num_nodes, num_nodes]
sp_learned_matrix = F.softmax(F.relu(torch.mm(self.sp_source_embed, self.sp_target_embed)), dim=1)
# GATConv: [input_dim*seq_len, output_dim*graph_dim]
# [batch_size*num_nodes, output_dim*graph_dim]
sp_gout_1 = self.sp_gconv1(sp_gout_0, edge_index)
# [batch_size, num_nodes, input_dim*seq_len]
adp_input_1 = torch.reshape(sp_gout_0, (-1, self.node_num, self.input_dim * self.seq_len))
# [batch_size, num_nodes, output_dim*graph_dim]
sp_adp_1 = self.sp_linear_1(sp_learned_matrix.matmul(F.dropout(adp_input_1, p=0.1)))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_adp_1 = torch.reshape(sp_adp_1, (-1, self.output_dim * self.graph_dim))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_origin = self.sp_origin(sp_gout_0)
# [batch_size*num_nodes, output_dim*graph_dim]
sp_output_1 = torch.tanh(sp_gout_1) * torch.sigmoid(sp_adp_1) + sp_origin * (1 - torch.sigmoid(sp_adp_1))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_gout_2 = self.sp_gconv2(torch.tanh(sp_output_1), edge_index)
# [batch_size, num_nodes, output_dim*graph_dim]
adp_input_2 = torch.reshape(torch.tanh(sp_output_1), (-1, self.node_num, self.output_dim * self.graph_dim))
# [batch_size, num_nodes, output_dim*graph_dim]
sp_adp_2 = self.sp_linear_2(sp_learned_matrix.matmul(F.dropout(adp_input_2, p=0.1)))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_adp_2 = torch.reshape(sp_adp_2, (-1, self.output_dim * self.graph_dim))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_output_2 = F.leaky_relu(sp_gout_2) * torch.sigmoid(sp_adp_2) + \
sp_output_1 * (1 - torch.sigmoid(sp_adp_2))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_gout_3 = self.sp_gconv3(F.relu(sp_output_2), edge_index)
# [batch_size, num_nodes, output_dim*graph_dim]
adp_input_3 = torch.reshape(F.relu(sp_output_2), (-1, self.node_num, self.output_dim * self.graph_dim))
# [batch_size, num_nodes, output_dim*graph_dim]
sp_adp_3 = self.sp_linear_3(sp_learned_matrix.matmul(F.dropout(adp_input_3, p=0.1)))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_adp_3 = torch.reshape(sp_adp_3, (-1, self.output_dim * self.graph_dim))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_output_3 = F.relu(sp_gout_3) * torch.sigmoid(sp_adp_3) + sp_output_2 * (1 - torch.sigmoid(sp_adp_3))
sp_gout_4 = self.sp_gconv4(F.relu(sp_output_3), edge_index)
adp_input_4 = torch.reshape(F.relu(sp_output_3), (-1, self.node_num, self.output_dim * self.graph_dim))
sp_adp_4 = self.sp_linear_4(sp_learned_matrix.matmul(F.dropout(adp_input_4, p=0.1)))
sp_adp_4 = torch.reshape(sp_adp_4, (-1, self.output_dim * self.graph_dim))
# [batch_size*num_nodes, output_dim*graph_dim]
sp_output_4 = F.relu(sp_gout_4) * torch.sigmoid(sp_adp_4) + sp_output_3 * (1 - torch.sigmoid(sp_adp_4))
# sp_gout_5 = self.sp_gconv5(F.relu(sp_output_4), edge_index)
# adp_input_5 = torch.reshape(F.relu(sp_output_4), (-1, self.node_num, self.graph_dim))
# sp_adp_5 = self.sp_linear_5(sp_learned_matrix.matmul(F.dropout(adp_input_5,p=0.1)))
# sp_adp_5 = torch.reshape(sp_adp_5, (-1, self.graph_dim))
# sp_output_5 = F.relu(sp_gout_5) * torch.sigmoid(sp_adp_5) + sp_output_4 * (1 - torch.sigmoid(sp_adp_5))
# [batch_size, num_nodes, output_dim, graph_dim]
sp_output = torch.reshape(sp_output_4, (batch_size, self.node_num, self.output_dim, self.graph_dim))
# sp_output = sp_output_4
output_list[1] = sp_output
if self.choice[2] == 1:
dtw_gout_0 = self.seq_linear(dtw_gout_0) + dtw_gout_0
dtw_learned_matrix = F.softmax(F.relu(torch.mm(self.dtw_source_embed, self.dtw_target_embed)), dim=1)
dtw_gout_1 = self.dtw_gconv1(dtw_gout_0, dtw_edge_index)
adp_input_1 = torch.reshape(dtw_gout_0, (-1, self.node_num, self.input_dim * self.seq_len))
dtw_adp_1 = self.dtw_linear_1(dtw_learned_matrix.matmul(F.dropout(adp_input_1, p=0.1)))
dtw_adp_1 = torch.reshape(dtw_adp_1, (-1, self.output_dim * self.graph_dim))
dtw_origin = self.dtw_origin(dtw_gout_0)
dtw_output_1 = torch.tanh(dtw_gout_1) * torch.sigmoid(dtw_adp_1) + \
dtw_origin * (1 - torch.sigmoid(dtw_adp_1))
dtw_gout_2 = self.dtw_gconv2(torch.tanh(dtw_output_1), dtw_edge_index)
adp_input_2 = torch.reshape(torch.tanh(dtw_output_1), (-1, self.node_num, self.output_dim * self.graph_dim))
dtw_adp_2 = self.dtw_linear_2(dtw_learned_matrix.matmul(F.dropout(adp_input_2, p=0.1)))
dtw_adp_2 = torch.reshape(dtw_adp_2, (-1, self.output_dim * self.graph_dim))
dtw_output_2 = F.leaky_relu(dtw_gout_2) * torch.sigmoid(dtw_adp_2) + \
dtw_output_1 * (1 - torch.sigmoid(dtw_adp_2))
dtw_gout_3 = self.dtw_gconv3(F.relu(dtw_output_2), dtw_edge_index)
adp_input_3 = torch.reshape(F.relu(dtw_output_2), (-1, self.node_num, self.output_dim * self.graph_dim))
dtw_adp_3 = self.dtw_linear_3(dtw_learned_matrix.matmul(F.dropout(adp_input_3, p=0.1)))
dtw_adp_3 = torch.reshape(dtw_adp_3, (-1, self.output_dim * self.graph_dim))
dtw_output_3 = F.relu(dtw_gout_3) * torch.sigmoid(dtw_adp_3) + dtw_output_2 * (1 - torch.sigmoid(dtw_adp_3))
dtw_gout_4 = self.dtw_gconv4(F.relu(dtw_output_3), dtw_edge_index)
adp_input_4 = torch.reshape(F.relu(dtw_output_3), (-1, self.node_num, self.output_dim * self.graph_dim))
dtw_adp_4 = self.dtw_linear_4(dtw_learned_matrix.matmul(F.dropout(adp_input_4, p=0.1)))
dtw_adp_4 = torch.reshape(dtw_adp_4, (-1, self.output_dim * self.graph_dim))
# [batch_size*num_nodes, output_dim*graph_dim]
dtw_output_4 = F.relu(dtw_gout_4) * torch.sigmoid(dtw_adp_4) + dtw_output_3 * (1 - torch.sigmoid(dtw_adp_4))
# dtw_gout_5 = self.dtw_gconv5(F.relu(dtw_output_4), dtw_edge_index)
# adp_input_5 = torch.reshape(F.relu(dtw_output_4), (-1, self.node_num, self.graph_dim))
# dtw_adp_5 = self.dtw_linear_5(dtw_learned_matrix.matmul(F.dropout(adp_input_5,p=0.1)))
# dtw_adp_5 = torch.reshape(dtw_adp_5, (-1, self.graph_dim))
# dtw_output_5 = \
# F.relu(dtw_gout_5) * torch.sigmoid(dtw_adp_5) + dtw_output_4 * (1 - torch.sigmoid(dtw_adp_5))
# [batch_size, num_nodes, output_dim, graph_dim]
dtw_output = torch.reshape(dtw_output_4, (batch_size, self.node_num, self.output_dim, self.graph_dim))
# dtw_output = dtw_output_4
output_list[2] = dtw_output
# output_list[*]: [batch_size, num_nodes, output_dim, graph_dim]
# cell_output: [batch_size, num_nodes, output_dim, sum(choice)*graph_dim]
step = 0
for i in range(len(self.choice)):
if self.choice[i] == 1 and step == 0:
cell_output = output_list[i]
step += 1
elif self.choice[i] == 1:
cell_output = torch.cat((cell_output, output_list[i]), dim=3)
# cell_output = self.jklayer([output_list[0], output_list[1], output_list[2]])
# cell_output = self.out(cell_output)
# cell_output = torch.reshape(cell_output, (-1, self.pred_len_raw))
return cell_output
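# Hedged end-to-end shape sketch for STAGGCNModel/STCell above on a tiny random
# graph; node count, window lengths and hidden dims are arbitrary assumptions.
def _staggcn_shape_example():
    model = STAGGCNModel(input_dim=1, output_dim=1, node_num=5, seq_len=12,
                         pred_len=6, graph_dim=8, tcn_dim=[10], attn_head=1,
                         choice=[1, 1, 1])
    x = torch.randn(2, 12, 5, 1)   # (batch_size, seq_len, num_nodes, input_dim)
    edge_index = torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]], dtype=torch.long)
    out = model(x, edge_index, edge_index)   # reuse the same edges as the semantic graph
    assert out.shape == (2, 6, 5, 1)         # (batch_size, pred_len, num_nodes, output_dim)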
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.MultiheadAttention",
"torch.reshape",
"torch.sigmoid",
"torch.nn.Conv1d",
"torch.tensor",
"torch.nn.functional.relu",
"torch.Tensor",
"torch.device",
"torch.nn.Sequential",
"torch.nn.functional.dropout",
"torch.mm",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.init.xavier_uniform_",
"torch.tanh",
"torch.nn.functional.leaky_relu"
] | 1.7.1 | moghadas76/test_bigcity | 607b9602c5b1113b23e1830455e174b0901d7558 |
1.7 | from libcity.data import get_dataset
from libcity.utils import get_logger, get_executor, get_model
if __name__ == '__main__':
config = {
'log_level': 'INFO',
'input_window': 12,
'output_window': 12,
'train_rate': 0.7,
'eval_rate': 0.1,
'cache_dataset': True,
'batch_size': 64,
'num_workers': 1,
'evaluator': 'TrafficStateEvaluator',
'dataset_class': 'TrafficStatePointDataset',
'executor': 'TrafficStateExecutor',
'model': 'GWNET',
'learning_rate': 0.001,
'learner': 'adam',
'lr_decay': False,
'weight_decay': 0.0001,
'dropout': 0.3,
'max_epoch': 100,
'epoch': 0,
'max_grad_norm': 5,
'clip_grad_norm': True,
'metrics': ['MAE', 'MSE', 'RMSE', 'MAPE', 'masked_MAE', 'masked_MSE', 'masked_RMSE', 'masked_MAPE', 'R2', 'EVAR'],
'gpu': True,
'gpu_id': '1',
'dataset': 'METR_LA',
'weight_col': 'cost',
'data_col': ['traffic_speed'],
'calculate_weight': True,
'add_time_in_day': False,
'add_day_in_week': False,
'scaler': "standard",
'use_early_stop': False,
}
import os
os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu_id']
import torch
config['device'] = torch.device("cuda" if torch.cuda.is_available() and config['gpu'] else "cpu")
logger = get_logger(config)
dataset = get_dataset(config)
train_data, valid_data, test_data = dataset.get_data()
print(len(train_data.dataset), train_data.dataset[0][0].shape, train_data.dataset[0][1].shape,
train_data.batch_size)
print(len(valid_data.dataset), valid_data.dataset[0][0].shape, valid_data.dataset[0][1].shape,
valid_data.batch_size)
print(len(test_data.dataset), test_data.dataset[0][0].shape, test_data.dataset[0][1].shape, test_data.batch_size)
data_feature = dataset.get_data_feature()
print(data_feature['adj_mx'].shape)
print(data_feature['adj_mx'].sum())
model = get_model(config, data_feature)
executor = get_executor(config, model)
executor.train(train_data, valid_data)
model_cache_file = './libcity/cache/model_cache/' + config['model'] + '_' + config['dataset'] + '.m'
executor.save_model(model_cache_file)
executor.load_model(model_cache_file)
    # evaluate; the evaluation results will be stored under cache/evaluate_cache
executor.evaluate(test_data)
| [
"torch.cuda.is_available"
] | 1.7.1 | moghadas76/test_bigcity | 607b9602c5b1113b23e1830455e174b0901d7558 |
1.5 | import os
from glob import glob
from typing import Optional
import cv2
import numpy as np
import torch
import yaml
from fire import Fire
from tqdm import tqdm
from aug import get_normalize
from models.networks import get_generator
class Predictor:
def __init__(self, weights_path: str, model_name: str = ''):
with open('/content/DeblurGANv2/config/config.yaml') as cfg:
            config = yaml.load(cfg, Loader=yaml.SafeLoader)
model = get_generator(model_name or config['model'])
        # load the checkpoint once and restore the generator weights from its 'model' entry
        checkpoint = torch.load(weights_path)
        model.load_state_dict(checkpoint['model'])
self.model = model.cuda()
self.model.train(True)
# GAN inference should be in train mode to use actual stats in norm layers,
# it's not a bug
self.normalize_fn = get_normalize()
@staticmethod
def _array_to_batch(x):
x = np.transpose(x, (2, 0, 1))
x = np.expand_dims(x, 0)
return torch.from_numpy(x)
def _preprocess(self, x: np.ndarray, mask: Optional[np.ndarray]):
x, _ = self.normalize_fn(x, x)
if mask is None:
mask = np.ones_like(x, dtype=np.float32)
else:
mask = np.round(mask.astype('float32') / 255)
h, w, _ = x.shape
block_size = 32
min_height = (h // block_size + 1) * block_size
min_width = (w // block_size + 1) * block_size
pad_params = {'mode': 'constant',
'constant_values': 0,
'pad_width': ((0, min_height - h), (0, min_width - w), (0, 0))
}
x = np.pad(x, **pad_params)
mask = np.pad(mask, **pad_params)
return map(self._array_to_batch, (x, mask)), h, w
@staticmethod
def _postprocess(x: torch.Tensor) -> np.ndarray:
x, = x
x = x.detach().cpu().float().numpy()
x = (np.transpose(x, (1, 2, 0)) + 1) / 2.0 * 255.0
return x.astype('uint8')
def __call__(self, img: np.ndarray, mask: Optional[np.ndarray], ignore_mask=True) -> np.ndarray:
(img, mask), h, w = self._preprocess(img, mask)
with torch.no_grad():
inputs = [img.cuda()]
if not ignore_mask:
inputs += [mask]
pred = self.model(*inputs)
return self._postprocess(pred)[:h, :w, :]
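# A minimal usage sketch (hypothetical paths; assumes an RGB numpy image and a CUDA device):
#   predictor = Predictor(weights_path='/content/best_fpn.h5')
#   deblurred = predictor(img, mask=None)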
def process_video(pairs, predictor, output_dir):
for video_filepath, mask in tqdm(pairs):
video_filename = os.path.basename(video_filepath)
output_filepath = os.path.join(output_dir, os.path.splitext(video_filename)[0]+'_deblur.mp4')
video_in = cv2.VideoCapture(video_filepath)
fps = video_in.get(cv2.CAP_PROP_FPS)
width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
total_frame_num = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))
video_out = cv2.VideoWriter(output_filepath, cv2.VideoWriter_fourcc(*'MP4V'), fps, (width, height))
tqdm.write(f'process {video_filepath} to {output_filepath}, {fps}fps, resolution: {width}x{height}')
for frame_num in tqdm(range(total_frame_num), desc=video_filename):
res, img = video_in.read()
if not res:
break
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
pred = predictor(img, mask)
pred = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)
video_out.write(pred)
def main(img_pattern: str,
mask_pattern: Optional[str] = None,
weights_path='/content/best_fpn.h5',
out_dir='/content/submit/',
side_by_side: bool = False,
video: bool = False):
def sorted_glob(pattern):
return sorted(glob(pattern))
imgs = sorted_glob(img_pattern)
masks = sorted_glob(mask_pattern) if mask_pattern is not None else [None for _ in imgs]
pairs = zip(imgs, masks)
names = sorted([os.path.basename(x) for x in glob(img_pattern)])
print(weights_path)
predictor = Predictor(weights_path=weights_path)
os.makedirs(out_dir, exist_ok=True)
if not video:
for name, pair in tqdm(zip(names, pairs), total=len(names)):
f_img, f_mask = pair
img, mask = map(cv2.imread, (f_img, f_mask))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
pred = predictor(img, mask)
if side_by_side:
pred = np.hstack((img, pred))
pred = cv2.cvtColor(pred, cv2.COLOR_RGB2BGR)
cv2.imwrite(os.path.join(out_dir, name),
pred)
else:
process_video(pairs, predictor, out_dir)
if __name__ == '__main__':
Fire(main)
| [
"torch.no_grad",
"torch.from_numpy",
"torch.load"
] | 1.5.1 | ZurMaD/DeblurGANv2 | bf8ab7d178ecf32db7eba588ede3f3f121d17470 |
1.1 | #!/usr/bin/env python3
"""Script to check whether the installation is done correctly."""
# Copyright 2018 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import importlib
import logging
import sys
import traceback
from distutils.version import LooseVersion
# NOTE: add the libraries which are not included in setup.py
MANUALLY_INSTALLED_LIBRARIES = [
("espnet", None),
("kaldiio", None),
("matplotlib", None),
("chainer", ("6.0.0")),
("chainer_ctc", None),
("warprnnt_pytorch", ("0.1")),
]
# NOTE: list all torch versions which are compatible with espnet
COMPATIBLE_TORCH_VERSIONS = (
"0.4.1",
"1.0.0",
"1.0.1",
"1.0.1.post2",
"1.1.0",
"1.2.0",
"1.3.0",
"1.3.1",
"1.4.0",
"1.5.0",
"1.5.1",
"1.6.0",
)
def main(args):
"""Check the installation."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--no-cuda",
action="store_true",
default=False,
help="Disable cuda-related tests",
)
parser.add_argument(
"--no-cupy",
action="store_true",
default=False,
help="Disable cupy test",
)
args = parser.parse_args(args)
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
logging.info(f"python version = {sys.version}")
library_list = []
if args.no_cuda:
args.no_cupy = True
if not args.no_cupy:
library_list.append(("cupy", ("6.0.0")))
# check torch installation at first
try:
import torch
logging.info(f"pytorch version = {torch.__version__}")
if torch.__version__ not in COMPATIBLE_TORCH_VERSIONS:
logging.warning(f"{torch.__version__} is not tested. please be careful.")
except ImportError:
logging.warning("torch is not installed.")
logging.warning("please try to setup again and then re-run this script.")
sys.exit(1)
# warpctc can be installed only for pytorch < 1.4
if LooseVersion(torch.__version__) < LooseVersion("1.4.0"):
library_list.append(("warpctc_pytorch", ("0.1.1", "0.1.2", "0.1.3")))
library_list.extend(MANUALLY_INSTALLED_LIBRARIES)
# check library availableness
logging.info("library availableness check start.")
logging.info("# libraries to be checked = %d" % len(library_list))
is_correct_installed_list = []
for idx, (name, version) in enumerate(library_list):
try:
importlib.import_module(name)
logging.info("--> %s is installed." % name)
is_correct_installed_list.append(True)
except ImportError:
logging.warning("--> %s is not installed.\n###### Raw Error ######\n%s#######################" % (name, traceback.format_exc()))
is_correct_installed_list.append(False)
logging.info("library availableness check done.")
logging.info(
"%d / %d libraries are correctly installed."
% (sum(is_correct_installed_list), len(library_list))
)
if len(library_list) != sum(is_correct_installed_list):
logging.warning("please try to setup again and then re-run this script.")
sys.exit(1)
# check library version
num_version_specified = sum(
[True if v is not None else False for n, v in library_list]
)
logging.info("library version check start.")
logging.info("# libraries to be checked = %d" % num_version_specified)
is_correct_version_list = []
for idx, (name, version) in enumerate(library_list):
if version is not None:
# Note: temp. fix for warprnnt_pytorch
# not found version with importlib
if name == "warprnnt_pytorch":
import pkg_resources
vers = pkg_resources.get_distribution(name).version
else:
vers = importlib.import_module(name).__version__
if vers is not None:
is_correct = vers in version
if is_correct:
logging.info("--> %s version is matched (%s)." % (name, vers))
is_correct_version_list.append(True)
else:
logging.warning(
"--> %s version is incorrect (%s is not in %s)."
% (name, vers, str(version))
)
is_correct_version_list.append(False)
else:
logging.info(
"--> %s has no version info, but version is specified." % name
)
logging.info("--> maybe it is better to reinstall the latest version.")
is_correct_version_list.append(False)
logging.info("library version check done.")
logging.info(
"%d / %d libraries are correct version."
% (sum(is_correct_version_list), num_version_specified)
)
if sum(is_correct_version_list) != num_version_specified:
logging.info("please try to setup again and then re-run this script.")
sys.exit(1)
# check cuda availableness
if args.no_cuda:
logging.info("cuda availableness check skipped.")
else:
logging.info("cuda availableness check start.")
import chainer
import torch
try:
assert torch.cuda.is_available()
logging.info("--> cuda is available in torch.")
except AssertionError:
logging.warning("--> it seems that cuda is not available in torch.")
try:
assert torch.backends.cudnn.is_available()
logging.info("--> cudnn is available in torch.")
except AssertionError:
logging.warning("--> it seems that cudnn is not available in torch.")
try:
assert chainer.backends.cuda.available
logging.info("--> cuda is available in chainer.")
except AssertionError:
logging.warning("--> it seems that cuda is not available in chainer.")
try:
assert chainer.backends.cuda.cudnn_enabled
logging.info("--> cudnn is available in chainer.")
except AssertionError:
logging.warning("--> it seems that cudnn is not available in chainer.")
try:
from cupy.cuda import nccl # NOQA
logging.info("--> nccl is installed.")
except ImportError:
logging.warning(
"--> it seems that nccl is not installed. multi-gpu is not enabled."
)
logging.warning(
"--> if you want to use multi-gpu, please install it and then re-setup."
)
try:
assert torch.cuda.device_count() > 1
logging.info(
f"--> multi-gpu is available (#gpus={torch.cuda.device_count()})."
)
except AssertionError:
logging.warning("--> it seems that only single gpu is available.")
logging.warning("--> maybe your machine has only one gpu.")
logging.info("cuda availableness check done.")
logging.info("installation check is done.")
if __name__ == "__main__":
main(sys.argv[1:])
| [
"torch.cuda.is_available",
"torch.backends.cudnn.is_available",
"torch.cuda.device_count"
] | 1.1.0 | dertilo/espnet | 4d2414b3d56154ab8c6ded0eb0a3f076e073344b |
1.7 | import glob
import os
import pretrainedmodels
import torch
from torch import nn
from torchvision import models as torch_models
import cifar_models as models
from adversarial_defense.model.denoise_resnet import DenoiseResNet50, DenoiseResNet101, DenoiseResNet152
from adversarial_defense.model.pcl_resnet import PrototypeConformityLossResNet
from cifar_models_myself import Conv3, DenseNet121, DenseNet169, DenseNet201, GoogLeNet, MobileNet, MobileNetV2, \
ResNet18, \
ResNet34, ResNet50, ResNet101, ResNet152, PNASNetA, PNASNetB, EfficientNetB0, DPN26, DPN92, ResNeXt29_2x64d, \
ResNeXt29_4x64d, ResNeXt29_8x64d, ResNeXt29_32x4d, SENet18, ShuffleNetG2, ShuffleNetG3, vgg11, vgg13, vgg16, vgg19, \
PreActResNet18, PreActResNet34, PreActResNet50, PreActResNet101, PreActResNet152, wideresnet28, wideresnet34, \
wideresnet40, carlinet, wideresnet28drop, wideresnet34drop, wideresnet40drop
from cifar_models_myself.miscellaneous import Identity
from config import pretrained_cifar_model_conf, IN_CHANNELS, IMAGE_SIZE, CLASS_NUM, PROJECT_PATH
from cifar_models_myself.efficient_densenet import EfficientDenseNet
from cifar_models_myself.ghostnet import ghost_net
from tiny_imagenet_models.densenet import densenet161, densenet121, densenet169, densenet201
from tiny_imagenet_models.resnext import resnext101_32x4d, resnext101_64x4d
import torchvision.models as vision_models
from tiny_imagenet_models.inception import inception_v3
from tiny_imagenet_models.wrn import tiny_imagenet_wrn
class FeatureDefenseModel(nn.Module):
"""
    A FeatureDefenseModel object wraps a CNN model.
    This model always accepts standard images: in [0, 1] range, RGB order, un-normalized, NCHW format.
"""
def __init__(self, dataset, arch, no_grad=True):
super(FeatureDefenseModel, self).__init__()
# init cnn model
self.in_channels = IN_CHANNELS[dataset]
self.dataset = dataset
if "denoise" in arch.lower():
# CIFAR-100@ResNet50_with_denoise_NonLocal_Filter_3.pth.tar
trained_model_path = "{root}/train_pytorch_model/adversarial_train/feature_denoise/{dataset}@{arch}_NonLocal_Filter_3.pth.tar".format(root=PROJECT_PATH, dataset=dataset, arch=arch)
assert os.path.exists(trained_model_path), "{} does not exist!".format(trained_model_path)
elif dataset.startswith("CIFAR"):
trained_model_path = "{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/{arch}/checkpoint.pth.tar".format(root=PROJECT_PATH, dataset=dataset, arch=arch)
assert os.path.exists(trained_model_path), "{} does not exist!".format(trained_model_path)
elif dataset == "TinyImageNet":
arch = arch.replace("resnet-", "resnet")
trained_model_path = "{root}/train_pytorch_model/real_image_model/{dataset}@{arch}@*.pth.tar".format(root=PROJECT_PATH, dataset=dataset, arch=arch)
trained_model_path_list = list(glob.glob(trained_model_path))
assert len(trained_model_path_list)>0, "{} does not exist!".format(trained_model_path)
trained_model_path = trained_model_path_list[0]
else:
trained_model_path = "{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/checkpoints/{arch}*.pth".format(
root=PROJECT_PATH, dataset=dataset, arch=arch)
trained_model_path_ls = list(glob.glob(trained_model_path))
assert trained_model_path_ls, "{} does not exist!".format(trained_model_path)
trained_model_path = trained_model_path_ls[0]
self.cnn = self.make_model(dataset, arch, self.in_channels, CLASS_NUM[dataset], trained_model_path=trained_model_path)
# init cnn model meta-information
self.mean = torch.FloatTensor(self.cnn.mean).view(1, self.in_channels, 1, 1).cuda()
self.mean.requires_grad =True
self.std = torch.FloatTensor(self.cnn.std).view(1, self.in_channels, 1, 1).cuda()
self.std.requires_grad = True
self.input_space = self.cnn.input_space # 'RGB' or 'GBR'
self.input_range = self.cnn.input_range # [0, 1] or [0, 255]
self.input_size = self.cnn.input_size
self.no_grad = no_grad
self.arch = arch
@staticmethod
def check_arch(arch, dataset):
if dataset == "ImageNet":
return arch in pretrainedmodels.__dict__
elif dataset == "TinyImageNet":
trained_model_path = "{root}/train_pytorch_model/real_image_model/{dataset}@{arch}@*.pth.tar".format(
root=PROJECT_PATH, dataset=dataset, arch=arch)
trained_model_path_list = list(glob.glob(trained_model_path))
return len(trained_model_path_list) > 0
else:
trained_model_path = "{root}/train_pytorch_model/real_image_model/{dataset}-pretrained/{arch}*".format(
root=PROJECT_PATH, dataset=dataset, arch=arch)
trained_model_path = glob.glob(trained_model_path)
if len(trained_model_path) > 0:
return os.path.exists(trained_model_path[0] + "/checkpoint.pth.tar")
else:
return False
def forward(self, x):
# assign dropout probability
# if hasattr(self, 'drop'):
# self.cnn.drop = self.drop
# channel order
if self.input_space == 'BGR':
x = x[:, [2, 1, 0], :, :] # pytorch does not support negative stride index (::-1) yet
# input range
if max(self.input_range) == 255:
x = x * 255
# normalization
x = (x - self.mean.type(x.dtype).to(x.device)) / self.std.type(x.dtype).to(x.device)
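        # run the backbone, optionally without autograd; "pcl" variants also return intermediate feature embeddings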
if self.no_grad:
with torch.no_grad():
if "pcl" in self.arch:
feats128, feats256, feats1024, x = self.cnn(x)
else:
x = self.cnn(x)
else:
if "pcl" in self.arch:
feats128, feats256, feats1024, x = self.cnn(x)
else:
x = self.cnn(x)
x = x.view(x.size(0), -1)
if "pcl" in self.arch:
return feats128, feats256, feats1024, x
return x
def load_weight_from_pth_checkpoint(self, model, fname):
raw_state_dict = torch.load(fname, map_location='cpu')
if "state_dict" in raw_state_dict:
raw_state_dict = raw_state_dict["state_dict"]
state_dict = dict()
for key, val in raw_state_dict.items():
new_key = key.replace('module.', '')
state_dict[new_key] = val
model.load_state_dict(state_dict)
def construct_cifar_model(self, arch, dataset, num_classes):
if "denoise" not in arch.lower():
conf = pretrained_cifar_model_conf[dataset][arch]
arch = arch.split("-")[0]
if arch.startswith('resnext'):
model = models.__dict__[arch](
cardinality=conf["cardinality"],
num_classes=num_classes,
depth=conf["depth"],
widen_factor=conf["widen_factor"],
dropRate=conf["drop"],
)
elif arch.startswith('densenet'):
model = models.__dict__[arch](
num_classes=num_classes,
depth=conf["depth"],
growthRate=conf["growthRate"],
compressionRate=conf["compressionRate"],
dropRate=conf["drop"],
)
elif arch.startswith('wrn'):
model = models.__dict__[arch](
num_classes=num_classes,
depth=conf["depth"],
widen_factor=conf["widen_factor"],
dropRate=conf["drop"],
)
elif arch.endswith('resnet') and "pcl_" not in arch and "denoise" not in arch:
model = models.__dict__[arch](
num_classes=num_classes,
depth=conf["depth"],
block_name=conf["block_name"],
)
elif "pcl_resnet" in arch:
model = PrototypeConformityLossResNet(in_channels=IN_CHANNELS[dataset], depth=conf["depth"], num_classes=CLASS_NUM[dataset])
elif arch == "DenoiseResNet50":
model = DenoiseResNet50(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)
elif arch == "DenoiseResNet101":
model = DenoiseResNet101(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)
elif arch == "DenoiseResNet152":
model = DenoiseResNet152(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)
else:
model = models.__dict__[arch](num_classes=num_classes)
return model
def make_model(self, dataset, arch, in_channel, num_classes, trained_model_path=None):
"""
Make model, and load pre-trained weights.
:param dataset: cifar10 or imagenet
:param arch: arch name, e.g., alexnet_bn
:return: model (in cpu and training mode)
"""
if dataset in ['CIFAR-10',"CIFAR-100", "MNIST","FashionMNIST"]:
assert trained_model_path is not None and os.path.exists(trained_model_path), "Pretrained weight model file {} does not exist!".format(trained_model_path)
if arch == 'gdas':
model = models.gdas(in_channel, num_classes)
model.mean = [125.3 / 255, 123.0 / 255, 113.9 / 255]
model.std = [63.0 / 255, 62.1 / 255, 66.7 / 255]
model.input_space = 'RGB'
model.input_range = [0, 1]
model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]
elif arch == 'pyramidnet272':
model = models.pyramidnet272(in_channel, num_classes)
model.mean = [0.49139968, 0.48215841, 0.44653091]
model.std = [0.24703223, 0.24348513, 0.26158784]
model.input_space = 'RGB'
model.input_range = [0, 1]
model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]
else:
model = self.construct_cifar_model(arch, dataset, num_classes) #
model.mean = [0.4914, 0.4822, 0.4465]
model.std = [0.2023, 0.1994, 0.2010]
model.input_space = 'RGB'
model.input_range = [0, 1]
model.input_size = [in_channel, IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]
# self.load_weight_from_pth_checkpoint(model, trained_model_path)
elif dataset == "TinyImageNet":
model = MetaLearnerModelBuilder.construct_tiny_imagenet_model(arch, dataset)
model.input_space = 'RGB'
model.input_range = [0, 1]
model.mean = [0.4914, 0.4822, 0.4465] # if "defense_resnet" not in arch and "denoise" not in arch: [0,0,0] . [1,1,1]
model.std = [0.2023, 0.1994, 0.2010]
model.input_size = [in_channel,IMAGE_SIZE[dataset][0], IMAGE_SIZE[dataset][1]]
# model.load_state_dict(torch.load(trained_model_path, map_location=lambda storage, location: storage)["state_dict"])
elif dataset == 'ImageNet':
os.environ["TORCH_HOME"] = "{}/train_pytorch_model/real_image_model/ImageNet-pretrained".format(PROJECT_PATH)
model = pretrainedmodels.__dict__[arch](num_classes=1000, pretrained="imagenet")
return model
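# A minimal usage sketch (assumes the matching pretrained checkpoint exists under PROJECT_PATH):
#   model = FeatureDefenseModel("CIFAR-10", "DenoiseResNet50").cuda().eval()
#   logits = model(images)  # images: (B, 3, 32, 32) tensor with values in [0, 1]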
class MetaLearnerModelBuilder(object):
@staticmethod
def construct_cifar_model(arch, dataset):
if arch == "conv3":
network = Conv3(IN_CHANNELS[dataset], IMAGE_SIZE[dataset], CLASS_NUM[dataset])
elif arch == "densenet121":
network = DenseNet121(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "densenet169":
network = DenseNet169(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "densenet201":
network = DenseNet201(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "googlenet":
network = GoogLeNet(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "mobilenet":
network = MobileNet(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "mobilenet_v2":
network = MobileNetV2(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "ghost_net":
network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnet18":
network = ResNet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnet34":
network = ResNet34(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnet50":
network = ResNet50(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnet101":
network = ResNet101(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnet152":
network = ResNet152(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "pnasnetA":
network = PNASNetA(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "pnasnetB":
network = PNASNetB(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "efficientnet":
network = EfficientNetB0(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "dpn26":
network = DPN26(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "dpn92":
network = DPN92(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnext29_2":
network = ResNeXt29_2x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnext29_4":
network = ResNeXt29_4x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnext29_8":
network = ResNeXt29_8x64d(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "resnext29_32":
network = ResNeXt29_32x4d(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "senet18":
network = SENet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "shufflenet_G2":
network = ShuffleNetG2(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "shufflenet_G3":
network = ShuffleNetG3(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "vgg11":
network = vgg11(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "vgg13":
network = vgg13(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "vgg16":
network = vgg16(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "vgg19":
network = vgg19(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "preactresnet18":
network = PreActResNet18(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "preactresnet34":
network = PreActResNet34(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "preactresnet50":
network = PreActResNet50(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "preactresnet101":
network = PreActResNet101(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "preactresnet152":
network = PreActResNet152(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "wideresnet28":
network = wideresnet28(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "wideresnet28drop":
network = wideresnet28drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "wideresnet34":
network = wideresnet34(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "wideresnet34drop":
network = wideresnet34drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "wideresnet40":
network = wideresnet40(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "wideresnet40drop":
network = wideresnet40drop(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == "carlinet":
network = carlinet(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch == 'efficient_densenet':
depth = 40
block_config = [(depth - 4) // 6 for _ in range(3)]
network = EfficientDenseNet(IN_CHANNELS[dataset], block_config=block_config,
num_classes=CLASS_NUM[dataset], small_inputs=dataset != "ImageNet", efficient=False)
return network
@staticmethod
def construct_imagenet_model(arch, dataset):
os.environ["TORCH_HOME"] = "{}/train_pytorch_model/real_image_model/ImageNet-pretrained".format(PROJECT_PATH)
if arch == 'efficient_densenet':
depth = 40
block_config = [(depth - 4) // 6 for _ in range(3)]
return EfficientDenseNet(IN_CHANNELS[dataset],block_config=block_config, num_classes=CLASS_NUM[dataset], small_inputs=False, efficient=False)
elif arch == "ghost_net":
network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])
return network
model = vision_models.__dict__[arch](pretrained=False)
return model
@staticmethod
def construct_tiny_imagenet_model(arch, dataset):
if not arch.startswith("densenet") and not arch.startswith("resnext") and arch in torch_models.__dict__:
network = torch_models.__dict__[arch](pretrained=False)
num_classes = CLASS_NUM[dataset]
if arch.startswith("resnet"):
num_ftrs = network.fc.in_features
network.fc = nn.Linear(num_ftrs, num_classes)
elif arch.startswith("densenet"):
if arch == "densenet161":
network = densenet161(pretrained=False)
elif arch == "densenet121":
network = densenet121(pretrained=False)
elif arch == "densenet169":
network = densenet169(pretrained=False)
elif arch == "densenet201":
network = densenet201(pretrained=False)
elif arch == "resnext32_4":
network = resnext101_32x4d(pretrained=None)
elif arch == "resnext64_4":
network = resnext101_64x4d(pretrained=None)
elif arch == "ghost_net":
network = ghost_net(IN_CHANNELS[dataset], CLASS_NUM[dataset])
elif arch.startswith("inception"):
network = inception_v3(pretrained=False)
elif arch == "WRN-28-10-drop":
network = tiny_imagenet_wrn(in_channels=IN_CHANNELS[dataset],depth=28,num_classes=CLASS_NUM[dataset],widen_factor=10, dropRate=0.3)
elif arch == "WRN-40-10-drop":
network = tiny_imagenet_wrn(in_channels=IN_CHANNELS[dataset], depth=40, num_classes=CLASS_NUM[dataset],
widen_factor=10, dropRate=0.3)
elif arch.startswith("vgg"):
network.avgpool = Identity()
network.classifier[0] = nn.Linear(512 * 2 * 2, 4096) # 64 /2**5 = 2
network.classifier[-1] = nn.Linear(4096, num_classes)
elif "pcl_resnet" in arch:
network = PrototypeConformityLossResNet(in_channels=IN_CHANNELS[dataset], depth=pretrained_cifar_model_conf[dataset][arch]["depth"], num_classes=CLASS_NUM[dataset])
elif arch == "DenoiseResNet50":
network = DenoiseResNet50(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)
elif arch == "DenoiseResNet101":
network = DenoiseResNet101(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)
elif arch == "DenoiseResNet152":
network = DenoiseResNet152(in_channels=IN_CHANNELS[dataset], num_classes=CLASS_NUM[dataset], whether_denoising=True)
return network
| [
"torch.nn.Linear",
"torch.no_grad",
"torch.FloatTensor",
"torch.load"
] | 1.7.1 | machanic/TangentAttack | 17c1a8e93f9bbd03e209e8650631af744a0ff6b8 |
1.1 | # -*- coding: utf-8 -*-
import sys
sys.path.insert(0,"../../src2")
import math
import functools
import time
import torch
import numpy as np
from scipy.special import gamma
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import emcee
from source_1d_likelihood_fn import compute_log_likelihood_2
np.random.seed(100)
torch.manual_seed(100)
#%%
def logit_t(x,a=0,b=1):
return torch.log(((x-a)/(b-a))/(1.0-(x-a)/(b-a)))
def sigmoid(x,a=0,b=1):
return (b-a)*1.0/(1.0+np.exp(-x)) + a
def dsigmoid(x,a=0,b=1):
return (b-a)*np.exp(x)/((1+np.exp(x))**2)
def exp(x):
return np.exp(x)
def dexp(x):
return np.exp(x)
def unwarped_logjoint_np(x0,Ts,q0,rho):
ll = compute_log_likelihood_2(x0,Ts,q0,rho)
ll += -np.log(1+(q0/10.0)**2)
ll += -np.log(1+(rho/0.1)**2)
return ll
def logjoint_np(x):
x0,Ts,q0,rho = x[0],x[1],x[2],x[3]
ll = unwarped_logjoint_np(sigmoid(x0),sigmoid(Ts,b=0.4),
exp(q0),exp(rho)) + \
np.log(dsigmoid(x0)) + np.log(dsigmoid(Ts,b=0.4)) + \
np.log(dexp(q0)) + np.log(dexp(rho))
return ll
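# The sigmoid/exp warps map the sampler's unconstrained variables onto their bounded domains;
# the added log-derivative terms are the Jacobian corrections for this change of variables.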
counter=0
def logjoint_emcee(x):
global counter
counter += 1
print(counter)
return logjoint_np(x)
#%%
ndim, nwalkers = 4, 10
p0 = [np.random.rand(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, logjoint_emcee)
sampler.run_mcmc(p0, 10000)
np.savez("testheat_1a_emcee",sampler=sampler)
#%%
| [
"torch.manual_seed",
"torch.log"
] | 1.1 | DFNaiff/Dissertation | 8db72a0e588042a582053625ec58cde6a661f2a9 |
1.1 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..functional import epipolar as E
class PerspectiveTransformerLayer(nn.Module):
def __init__(self, bv_size, pv_size, intrinsics, translate_z = -10.0, rotation_order='xyz', device='cuda:0', dtype=torch.float32):
'''
        `translate_z` is a hyperparameter to be chosen in the range (-Inf, 1.0); the perspective view will be scaled by a factor of roughly (1 - translate_z).
'''
super(PerspectiveTransformerLayer, self).__init__()
self.dtype = dtype
self.dev = torch.device(device) if device else None
self.rot_order = rotation_order
self.bv_size, self.pv_size = bv_size, pv_size
self.register_buffer('intrinsics', self._prepare_intrinsics(intrinsics))
self.register_buffer('inv_intrinsics', torch.inverse(self.intrinsics))
self.register_buffer('n', torch.tensor([[0], [0], [1]], device=self.dev, dtype=self.dtype))
self.register_buffer('tz', torch.tensor([translate_z], device=self.dev, dtype=self.dtype))
self.register_buffer('bv_grid', self._prepare_coord_grid(*bv_size))
bv_pivot, pv_pivot = self._prepare_pivots(bv_size, pv_size, self.inv_intrinsics)
self.register_buffer('bv_pivot', bv_pivot)
self.register_buffer('pv_pivot', pv_pivot)
def _prepare_intrinsics(self, intrinsics):
        if isinstance(intrinsics, list) or isinstance(intrinsics, np.ndarray):
intrinsics = torch.tensor(intrinsics, requires_grad=False, device=self.dev, dtype=self.dtype)
assert isinstance(intrinsics, torch.Tensor)
assert intrinsics.shape == (3, 3)
return intrinsics
def _prepare_pivots(self, bv_size, pv_size, inv_intrinsics):
bv_pivot = torch.tensor([[bv_size[1]/2.0], [bv_size[0]], [1.0]], device=self.dev, dtype=self.dtype)
pv_pivot = torch.tensor([[pv_size[1]/2.0], [pv_size[0]], [1.0]], device=self.dev, dtype=self.dtype)
bv_pivot = inv_intrinsics @ bv_pivot
pv_pivot = inv_intrinsics @ pv_pivot
return bv_pivot, pv_pivot
def _prepare_coord_grid(self, H, W):
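        # build an (H, W, 3, 1) grid of homogeneous pixel coordinates [x, y, 1]^T for the bird's-eye view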
xgrid = torch.arange(W, requires_grad=False, device=self.dev, dtype=self.dtype).repeat(H, 1).view((H, W, 1, 1))
ygrid = torch.arange(H, requires_grad=False, device=self.dev, dtype=self.dtype).unsqueeze_(1).repeat(1, W).view(H, W, 1, 1)
grid = torch.cat((xgrid, ygrid, torch.ones_like(xgrid, device=self.dev, dtype=self.dtype)), dim=-2)
return grid
def forward(self, pv, rx=0.0, ry=0.0, rz=0.0):
'''
REFERENCES:
- Homography: refers to https://en.wikipedia.org/wiki/Homography_(computer_vision)
- Bilinear Interpolation: refers to https://medium.com/@shanlins/spatial-transformer-networks-stn-and-its-implementation-2638d58d41f8
'''
B, C, Hp, Wp, Hb, Wb = *pv.shape, *self.bv_size
# get constrained homography
R = E.torch.make_rotation_matrix(rx, ry, rz, self.rot_order, device=pv.device, dtype=self.dtype)
H = E.torch.make_constrained_homography(R, self.tz, self.intrinsics, self.inv_intrinsics, self.bv_pivot, self.pv_pivot)
# get coordinates on perspective view for each grid: `pv_coord` with shape (B, Hb, Wb, 2, 1)
bv_grid = self.bv_grid.expand(B, Hb, Wb, 3, 1)
pv_coord = torch.matmul(H[:, None, None, :, :], bv_grid)
pv_coord = pv_coord[:, :, :, 0:2, :] / pv_coord[:, :, :, 2:3, :]
# gather pixels acoording to `pv_coord`
x = pv_coord[:,None,:,:,0,0] # with shape (B, 1, Hb, Wb)
y = pv_coord[:,None,:,:,1,0]
mask = (~((x >= 0) & (x < Wp) & (y >= 0) & (y < Hp))).expand(B, C, Hb, Wb)
x0 = x.clamp_(0, Wp-2).to(torch.long)
y0 = y.clamp_(0, Hp-2).to(torch.long)
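        # flattened indices of the four neighbouring source pixels used for bilinear interpolation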
offset_00 = y0 * Wp + x0
offset_01 = offset_00 + 1
offset_10 = offset_00 + Wp
offset_11 = offset_10 + 1
pv = pv.view(B, C, Hp*Wp) # with shape (B, C, Hp*Wp)
pvmap = [
torch.gather(pv, -1, offset_00.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),
torch.gather(pv, -1, offset_01.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),
torch.gather(pv, -1, offset_10.expand(B, C, Hb, Wb).view(B, C, Hb*Wb)),
torch.gather(pv, -1, offset_11.expand(B, C, Hb, Wb).view(B, C, Hb*Wb))] # pv maps: with shape (B, C, Hb*Wb)
# combine pv pixels
x0, x1, y0, y1 = (x - x0.to(self.dtype)), ((x0+1).to(self.dtype) - x), (y - y0.to(self.dtype)), ((y0+1).to(self.dtype) - y)
weights = [(x1 * y1), (x0 * y1), (x1 * y0), (x0 * y0)] # weight : with shape (B, 1, Hb, Wb)
bvmap = sum([w.expand(B, C, Hb, Wb) * p.view(B, C, Hb, Wb) for w, p in zip(weights, pvmap)]) # bvmap with shape (B, C, Hb, Wb)
#__import__('pdb').set_trace()
bvmap[mask] = 0.0
return bvmap
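# A minimal usage sketch (hypothetical sizes and intrinsics):
#   layer = PerspectiveTransformerLayer(bv_size=(256, 256), pv_size=(128, 416),
#                                       intrinsics=[[720.0, 0.0, 208.0], [0.0, 720.0, 64.0], [0.0, 0.0, 1.0]])
#   bev = layer(pv_batch, rx=0.05)  # pv_batch: (B, C, 128, 416) tensor on the configured device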
| [
"torch.device",
"torch.arange",
"torch.inverse",
"torch.tensor",
"torch.ones_like",
"torch.matmul"
] | 1.1.0 | huangyuyao/bevutils | 24e5c4954b17ed58e27697447ab667c65f59b7e0 |