version (string, 24 classes) | code (string, 396-135k chars) | apis (sequence) | full_version (string, 1-6 chars) | repo_name (string, 6-64 chars) | hexsha (string, 40 chars)
---|---|---|---|---|---
1.5 | import abc
from typing import List, Tuple, Dict, Set, Union
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
from ..classifiers import CNN1DFeaturizer, GRUFeaturizer, BasicFeaturizer, TransformerFeaturizer
from .Fasttext1DCNN import Fasttext1DCNNModel
from transformers import AutoModelWithLMHead, AutoTokenizer, AutoModel, DistilBertTokenizer, LongformerTokenizer
from transformers import AlbertModel, AlbertTokenizer, AlbertForSequenceClassification, DistilBertModel, LongformerModel
import torchvision.models as models
from torchnlp.word_to_vector import CharNGram
from torchnlp.word_to_vector import BPEmb
from ...utils import get_device, GaussianNoise, random_word_mask, load_stored_params, ExpandContract, Transformer, PositionalEncoding, LambdaLayer, get_global, \
get_regularization_layers, WordMasking
from ...training import fb_1d_loss_builder
import os
import random
import math
class AlbertClassifer(Fasttext1DCNNModel):
def __init__(self, classifier_dims, num_classes,
gaussian_noise, dropout,
internal_dims, n_layers,
featurizer,
n_tokens_in=64, n_tokens_out=16,
use_as_super=False, **kwargs):
embedding_dims = 768
super(AlbertClassifer, self).__init__(classifier_dims, num_classes, embedding_dims, gaussian_noise, dropout,
internal_dims, n_layers,
                                              featurizer, final_layer_builder,  # NOTE: final_layer_builder is not imported in this snippet; assumed to come from the enclosing module
n_tokens_in, n_tokens_out, True, **kwargs)
        self.word_masking_proba = kwargs.get("word_masking_proba", 0.0)
if not use_as_super:
model = kwargs["model"] if "model" in kwargs else 'albert-base-v2'
global_dir = get_global("models_dir")
model = os.path.join(global_dir, model) if model in os.listdir(global_dir) else model
self.tokenizer = AutoTokenizer.from_pretrained(model)
self.model = AutoModel.from_pretrained(model)
print("Pick stored Model", model, "Model Class = ", type(self.model), "Tokenizer Class = ", type(self.tokenizer))
if featurizer == "cnn":
self.featurizer = CNN1DFeaturizer(n_tokens_in, embedding_dims, n_tokens_out,
classifier_dims, internal_dims, n_layers, gaussian_noise, dropout)
elif featurizer == "gru":
self.featurizer = GRUFeaturizer(n_tokens_in, embedding_dims, n_tokens_out, classifier_dims,
internal_dims, n_layers, gaussian_noise, dropout)
elif featurizer == "basic":
self.featurizer = BasicFeaturizer(n_tokens_in, embedding_dims, n_tokens_out,
classifier_dims,
internal_dims, n_layers, gaussian_noise, dropout)
elif featurizer == "transformer":
                self.attention_drop_proba = kwargs.get("attention_drop_proba", 0.0)
n_encoders = kwargs.pop("n_encoders", n_layers)
n_decoders = kwargs.pop("n_decoders", n_layers)
self.featurizer = TransformerFeaturizer(n_tokens_in, embedding_dims, n_tokens_out,
classifier_dims,
internal_dims, n_encoders, n_decoders,
gaussian_noise, dropout, self.attention_drop_proba)
else:
raise NotImplementedError()
self.final_layer = fb_1d_loss_builder(classifier_dims, n_tokens_out, num_classes, dropout, **kwargs)
if "stored_model" in kwargs:
load_stored_params(self, kwargs["stored_model"])
self.word_masking = WordMasking(tokenizer=self.tokenizer, **kwargs)
self.reg_layers = get_regularization_layers(self)
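    # Tokenise a batch of texts: optionally mask words at random, then batch-encode with padding and
    # truncation to n_tokens_in; returns (input_ids, attention_mask) tensors on the model's device.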
def tokenise(self, texts: List[str]):
tokenizer = self.tokenizer
n_tokens_in = self.n_tokens_in
texts = self.word_masking(texts)
converted_texts = tokenizer.batch_encode_plus(texts, add_special_tokens=True, pad_to_max_length=True, max_length=n_tokens_in, truncation=True)
input_ids, attention_mask = converted_texts["input_ids"], converted_texts["attention_mask"]
return torch.tensor(input_ids).to(self.device), torch.tensor(attention_mask).to(self.device)
def get_word_vectors(self, texts: List[str]):
input_ids, attention_mask = self.tokenise(texts)
outputs = self.model(input_ids, attention_mask=attention_mask)
        last_hidden_states = outputs[0]  # (batch, n_tokens_in, embedding_dims); outputs[1] is the pooled output, unused here
        return last_hidden_states
| ["torch.tensor"] | 1.5.0 | faizanahemad/facebook-hateful-memes | 1f7febf65f5fc4ed4aeb476d5383437f677fbc19 |
1.7 | import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch
import predict # import predict.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
check_requirements, print_mutation, set_logging, one_cycle, colorstr
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel
from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
import json
from PIL import Image
import shutil
from os import path
import sys
sys.path.append(path.dirname( path.dirname( path.abspath(__file__) ) ))
from utils.general import xyxy2xywh
logger = logging.getLogger(__name__)
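# Train a YOLOv5 model with the given hyperparameters (hyp) and options (opt): builds the model and optimizer,
# optionally resumes from a checkpoint, trains with LR warmup, AMP and an EMA copy of the weights, evaluates mAP
# via predict.test(), and saves 'last' and 'best' checkpoints under save_dir/weights.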
def train(hyp, opt, device, tb_writer=None):
logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
save_dir, epochs, batch_size, total_batch_size, weights, rank = \
Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank
# Directories
wdir = save_dir / 'weights'
wdir.mkdir(parents=True, exist_ok=True) # make dir
last = wdir / 'last.pt'
best = wdir / 'best.pt'
results_file = save_dir / 'results.txt'
# Save run settings
with open(save_dir / 'hyp.yaml', 'w') as f:
yaml.safe_dump(hyp, f, sort_keys=False)
with open(save_dir / 'opt.yaml', 'w') as f:
yaml.safe_dump(vars(opt), f, sort_keys=False)
# Configure
# plots = not opt.evolve # create plots
plots = True # create plots
cuda = device.type != 'cpu'
init_seeds(1 + rank)
with open(opt.data) as f:
data_dict = yaml.safe_load(f) # data dict
# Logging- Doing this before checking the dataset. Might update data_dict
loggers = {'wandb': None} # loggers dict
if rank in [-1, 0]:
opt.hyp = hyp # add hyperparameters
run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)
loggers['wandb'] = wandb_logger.wandb
data_dict = wandb_logger.data_dict
if wandb_logger.wandb:
weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming
nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check
is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset
# Model
pretrained = weights.endswith('.pt')
if pretrained:
# with torch_distributed_zero_first(rank):
# weights = attempt_download(weights) # download if not found locally
ckpt = torch.load(weights, map_location=device) # load checkpoint
model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys
state_dict = ckpt['model'].float().state_dict() # to FP32
state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect
model.load_state_dict(state_dict, strict=False) # load
logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report
else:
model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create
with torch_distributed_zero_first(rank):
check_dataset(data_dict) # check
train_path = data_dict['train']
test_path = data_dict['val']
# Freeze
    freeze = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11']  # parameter names to freeze (full or partial)
freeze = ['model.' + number + '.' for number in freeze]
for k, v in model.named_parameters():
v.requires_grad = True # train all layers
        if any(x in k for x in freeze) and opt.fine_tune:
print('freezing %s' % k)
v.requires_grad = False
# Optimizer
nbs = 64 # nominal batch size
accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing
hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay
logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in model.named_modules():
if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
pg2.append(v.bias) # biases
if isinstance(v, nn.BatchNorm2d):
pg0.append(v.weight) # no decay
elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
pg1.append(v.weight) # apply decay
if opt.adam:
optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
if opt.linear_lr:
lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear
else:
lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
# plot_lr_scheduler(optimizer, scheduler, epochs)
# EMA
ema = ModelEMA(model) if rank in [-1, 0] else None
# Resume
start_epoch, best_fitness = 0, 0.0
if pretrained:
# Optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# EMA
if ema and ckpt.get('ema'):
ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
ema.updates = ckpt['updates']
# Results
if ckpt.get('training_results') is not None:
results_file.write_text(ckpt['training_results']) # write results.txt
# Epochs
start_epoch = ckpt['epoch'] + 1
if opt.resume:
assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
if epochs < start_epoch:
logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt, state_dict
# Image sizes
gs = max(int(model.stride.max()), 32) # grid size (max stride)
nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])
imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples
# DP mode
if cuda and rank == -1 and torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
# SyncBatchNorm
if opt.sync_bn and cuda and rank != -1:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
logger.info('Using SyncBatchNorm()')
# Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
world_size=opt.world_size, workers=opt.workers,
image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), task='train', epoch_parts=opt.epoch_parts)
mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class
nb = len(dataloader) # number of batches
assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
# Process 0
if rank in [-1, 0]:
testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader
hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
world_size=opt.world_size, workers=opt.workers,
pad=0.5, prefix=colorstr('val: '))[0]
if not opt.resume:
labels = np.concatenate(dataset.labels, 0)
c = torch.tensor(labels[:, 0]) # classes
# cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency
# model._initialize_biases(cf.to(device))
if plots:
plot_labels(labels, names, save_dir, loggers)
if tb_writer:
tb_writer.add_histogram('classes', c, 0)
# Anchors
if not opt.noautoanchor:
check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
model.half().float() # pre-reduce anchor precision
# DDP mode
if cuda and rank != -1:
model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
# nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
# Model parameters
hyp['box'] *= 3. / nl # scale to layers
hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers
hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl # scale to image size and layers
hyp['label_smoothing'] = opt.label_smoothing
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights
model.names = names
# Start training
t0 = time.time()
nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)
# nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training
maps = np.zeros(nc) # mAP per class
results = (0, 0, 0, 0, 0, 0, 0) # P, R, [email protected], [email protected], val_loss(box, obj, cls)
scheduler.last_epoch = start_epoch - 1 # do not move
scaler = amp.GradScaler(enabled=cuda)
compute_loss = ComputeLoss(model) # init loss class
logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
f'Using {dataloader.num_workers} dataloader workers\n'
f'Logging results to {save_dir}\n'
f'Starting training for {epochs} epochs...')
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if opt.image_weights:
# Generate indices
if rank in [-1, 0]:
cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights
iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights
dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx
# Broadcast if DDP
if rank != -1:
indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
dist.broadcast(indices, 0)
if rank != 0:
dataset.indices = indices.cpu().numpy()
# Update mosaic border
# b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
# dataset.mosaic_border = [b - imgsz, -b] # height, width borders
mloss = torch.zeros(4, device=device) # mean losses
if rank != -1:
dataloader.sampler.set_epoch(epoch)
pbar = enumerate(dataloader)
logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
if rank in [-1, 0]:
pbar = tqdm(pbar, total=nb) # progress bar
optimizer.zero_grad()
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0
# Warmup
if ni <= nw:
xi = [0, nw] # x interp
# model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)
accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
# Multi-scale
if opt.multi_scale:
sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size
sf = sz / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Forward
with amp.autocast(enabled=cuda):
pred = model(imgs) # forward
loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size
if rank != -1:
loss *= opt.world_size # gradient averaged between devices in DDP mode
if opt.quad:
loss *= 4.
# Backward
scaler.scale(loss).backward()
# Optimize
if ni % accumulate == 0:
scaler.step(optimizer) # optimizer.step
scaler.update()
optimizer.zero_grad()
if ema:
ema.update(model)
# Print
if rank in [-1, 0]:
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.6g' * 6) % (
'%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
pbar.set_description(s)
# Plot
if plots and ni < 3:
f = save_dir / f'train_batch{ni}.jpg' # filename
Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
if tb_writer:
tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph
# tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
elif plots and ni == 10 and wandb_logger.wandb:
wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
save_dir.glob('train*.jpg') if x.exists()]})
# end batch ------------------------------------------------------------------------------------------------
# end epoch ----------------------------------------------------------------------------------------------------
# Scheduler
lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard
scheduler.step()
# DDP process 0 or single-GPU
if rank in [-1, 0]:
# mAP
ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
final_epoch = epoch + 1 == epochs
if (epoch+1) % opt.save_period != 0:
wandb_logger.current_epoch = epoch + 1
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb_logger.wandb:
wandb_logger.log({tag: x}) # W&B
wandb_logger.end_epoch()
# Write
with open(results_file, 'a') as f:
f.write(s + '\n') # append metrics, val_loss
else:
if not opt.notest or final_epoch: # Calculate mAP
wandb_logger.current_epoch = epoch + 1
results, maps, times = predict.test(data_dict,
batch_size=batch_size * 2,
imgsz=imgsz_test,
model=ema.ema,
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=is_coco and final_epoch,
verbose=nc < 50,
plots=plots and final_epoch,
wandb_logger=wandb_logger,
compute_loss=compute_loss,
is_coco=is_coco)
# Write
with open(results_file, 'a') as f:
f.write(s + '%10.4g' * 8 % results + '\n') # append metrics, val_loss
# Log
tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.75', 'metrics/mAP_0.5:0.95',
'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss
'x/lr0', 'x/lr1', 'x/lr2'] # params
for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
if tb_writer:
tb_writer.add_scalar(tag, x, epoch) # tensorboard
if wandb_logger.wandb:
wandb_logger.log({tag: x}) # W&B
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, [email protected], [email protected], [email protected]]
if fi > best_fitness:
best_fitness = fi
wandb_logger.end_epoch(best_result=best_fitness == fi)
# Save model
if (not opt.nosave) or (final_epoch and not opt.evolve): # if save
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': results_file.read_text(),
'model': deepcopy(de_parallel(model)).half(),
'ema': deepcopy(ema.ema).half(),
'updates': ema.updates,
'optimizer': optimizer.state_dict(),
'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
# Save last, best and delete
torch.save(ckpt, last)
if best_fitness == fi:
torch.save(ckpt, best)
if wandb_logger.wandb:
if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
wandb_logger.log_model(
last.parent, opt, epoch, fi, best_model=best_fitness == fi)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
if rank in [-1, 0]:
logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\n')
if plots:
plot_results(save_dir=save_dir) # save as results.png
if wandb_logger.wandb:
files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
if (save_dir / f).exists()]})
if not opt.evolve:
if is_coco: # COCO dataset
for m in [last, best] if best.exists() else [last]: # speed, mAP tests
results, _, _ = predict.test(opt.data,
batch_size=batch_size * 2,
imgsz=imgsz_test,
conf_thres=0.001,
iou_thres=0.7,
model=attempt_load(m, device).half(),
single_cls=opt.single_cls,
dataloader=testloader,
save_dir=save_dir,
save_json=True,
plots=False,
is_coco=is_coco)
# Strip optimizers
for f in last, best:
if f.exists():
strip_optimizer(f) # strip optimizers
if wandb_logger.wandb: # Log the stripped model
wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model',
name='run_' + wandb_logger.wandb_run.id + '_model',
aliases=['latest', 'best', 'stripped'])
wandb_logger.finish_run()
else:
dist.destroy_process_group()
torch.cuda.empty_cache()
return results
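# Converts the competition's task03_train labels.json into YOLO format: samples a 20k-image validation split,
# writes one label .txt per image and copies the images into ../drowsy_face, builds a class-rebalanced "diet"
# training subset (keeps cigar/phone and eye_closed-with-mouth images, plus every third mouth_opened-only image)
# under ../drowsy_face_diet, and prints per-class counts for each generated json.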
def data_prepare():
random.seed(100)
names = ['eye_opened', 'eye_closed', 'mouth_opened', 'mouth_closed', 'face', 'phone', 'cigar']
path_train_dir = '/DATA/Final_DATA/task03_train'
new_dir = '../drowsy_face'
# generate raw_train.json, raw_val.json
generate_raw_json = True
    if generate_raw_json:
print('generate raw_train.json, raw_val.json')
if os.path.exists(new_dir):
shutil.rmtree(new_dir)
os.makedirs(new_dir + '/images/train')
os.makedirs(new_dir + '/images/val')
os.makedirs(new_dir + '/labels/train')
os.makedirs(new_dir + '/labels/val')
with open(path_train_dir + '/labels.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
num_data = len(json_anno) # 273224
val_idx = random.sample(list(range(num_data)), 20000)
json_anno_val = []
json_anno_train = []
for idx, json_img in enumerate(tqdm(json_anno)):
if idx in val_idx:
json_anno_val.append(json_img)
else:
json_anno_train.append(json_img)
json_data_val = {}
json_data_val['annotations'] = json_anno_val
json_data_train = {}
json_data_train['annotations'] = json_anno_train
if os.path.isfile(new_dir + '/raw_val.json'):
os.remove(new_dir + '/raw_val.json')
if os.path.isfile(new_dir + '/raw_train.json'):
os.remove(new_dir + '/raw_train.json')
with open(new_dir + '/raw_val.json', 'w') as f_val:
json.dump(json_data_val, f_val)
with open(new_dir + '/raw_train.json', 'w') as f_train:
json.dump(json_data_train, f_train)
# generate drowsy_face/train, drowsy_face/val
generate_drowsy_face = True
    if generate_drowsy_face:
print('generate drowsy_face/train, drowsy_face/val')
with open(new_dir + '/raw_val.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
for json_img in tqdm(json_anno):
img_id = json_img['file_name']
txt_dir = new_dir + '/labels/val/' + img_id.split('.')[0] + '.txt'
img_dir = new_dir + '/images/val/' + img_id
f_txt = open(txt_dir, 'w')
img_ = Image.open(path_train_dir + '/images/' + img_id)
img_size = img_.size
objects_yolo = ''
for img_obj in json_img['objects']:
class_id = str(names.index(img_obj['class']))
img_pos = img_obj['position']
xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]
f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
f_txt.close()
shutil.copy(path_train_dir + '/images/' + img_id, img_dir)
with open(new_dir + '/raw_train.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
for json_img in tqdm(json_anno):
img_id = json_img['file_name']
txt_dir = new_dir + '/labels/train/' + img_id.split('.')[0] + '.txt'
img_dir = new_dir + '/images/train/' + img_id
f_txt = open(txt_dir, 'w')
img_ = Image.open(path_train_dir + '/images/' + img_id)
img_size = img_.size
objects_yolo = ''
for img_obj in json_img['objects']:
class_id = str(names.index(img_obj['class']))
img_pos = img_obj['position']
xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]
f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
f_txt.close()
shutil.copy(path_train_dir + '/images/' + img_id, img_dir)
# generate diet_train.json
generate_diet_json = True
    if generate_diet_json:
print('generate diet_train.json')
json_anno_diet = []
with open(path_train_dir + '/labels.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
fidx = 0
for img_info in tqdm(json_anno):
file_name = img_info['file_name']
cigar_check = 0
phone_check = 0
eye_closed_check = 0
mouth_closed_check = 0
mouth_opened_check = 0
for annotation_info in img_info['objects']:
if annotation_info['class'] == 'cigar':
cigar_check = 1
elif annotation_info['class'] == 'phone':
phone_check = 1
elif annotation_info['class'] == 'eye_closed':
eye_closed_check = 1
elif annotation_info['class'] == 'mouth_closed':
mouth_closed_check = 1
elif annotation_info['class'] == 'mouth_opened':
mouth_opened_check = 1
if cigar_check or phone_check:
json_anno_diet.append(img_info)
elif eye_closed_check and mouth_closed_check:
json_anno_diet.append(img_info)
elif eye_closed_check and mouth_opened_check:
json_anno_diet.append(img_info)
elif mouth_opened_check:
                fidx += 1
if fidx % 3 == 0:
json_anno_diet.append(img_info)
json_data_diet = {}
json_data_diet['annotations'] = json_anno_diet
if os.path.isfile(new_dir + '/diet_train.json'):
os.remove(new_dir + '/diet_train.json')
with open(new_dir + '/diet_train.json', 'w') as f_diet:
json.dump(json_data_diet, f_diet)
# generate drowsy_face_diet/train
generate_drowsy_face_diet = True
    if generate_drowsy_face_diet:
print('generate drowsy_face_diet/train')
new_dir_diet = '../drowsy_face_diet'
if os.path.exists(new_dir_diet):
shutil.rmtree(new_dir_diet)
os.makedirs(new_dir_diet + '/images/train')
os.makedirs(new_dir_diet + '/labels/train')
with open(new_dir + '/diet_train.json') as f:
json_data = json.load(f)
json_anno = json_data["annotations"]
for json_img in tqdm(json_anno):
img_id = json_img['file_name']
txt_dir = new_dir_diet + '/labels/train/' + img_id.split('.')[0] + '.txt'
img_dir = new_dir_diet + '/images/train/' + img_id
f_txt = open(txt_dir, 'w')
img_ = Image.open(path_train_dir + '/images/' + img_id)
img_size = img_.size
objects_yolo = ''
for img_obj in json_img['objects']:
class_id = str(names.index(img_obj['class']))
img_pos = img_obj['position']
xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]
f_txt.write(f"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
f_txt.close()
shutil.copy(path_train_dir + '/images/' + img_id, img_dir)
# count classes
def count_classes(annotations):
class_dict = {
'eye_opened': 0,
'eye_closed': 0,
'mouth_opened': 0,
'mouth_closed': 0,
'face': 0,
'phone': 0,
'cigar': 0
}
for img_info in tqdm(annotations):
for annotation_info in img_info['objects']:
                class_dict[annotation_info['class']] += 1
print(class_dict)
count_jsons = True
    if count_jsons:
print('count classes')
with open(new_dir + '/diet_train.json', 'r') as annotation_file:
annotations = json.load(annotation_file)
annotations = annotations['annotations']
print('diet_train.json')
count_classes(annotations)
with open(new_dir + '/raw_train.json', 'r') as annotation_file:
annotations = json.load(annotation_file)
annotations = annotations['annotations']
print('raw_train.json')
count_classes(annotations)
with open(new_dir + '/raw_val.json', 'r') as annotation_file:
annotations = json.load(annotation_file)
annotations = annotations['annotations']
print('raw_val.json')
count_classes(annotations)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument('--random_seed', type=int, default=0, help='global random seed for reproducibility')
parser.add_argument('--weights', type=str, default='', help='initial weights path')
parser.add_argument('--cfg', type=str, default='models/hub/yolov5l6.yaml', help='model.yaml path')
parser.add_argument('--data', type=str, default='data/drowsy_face.yaml', help='data.yaml path')
parser.add_argument('--hyp', type=str, default='data/hyp.scratch-p6.yaml', help='hyperparameters path')
parser.add_argument('--batch-size', type=int, default=4, help='total batch size for all GPUs')
parser.add_argument('--img-size', nargs='+', type=int, default=[1280, 1280], help='[train, test] image sizes')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
    parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
parser.add_argument('--project', default='runs/train', help='save to project/name')
parser.add_argument('--entity', default=None, help='W&B entity')
parser.add_argument('--name', default='final', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--quad', action='store_true', help='quad dataloader')
parser.add_argument('--linear-lr', action='store_true', help='linear LR')
parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
parser.add_argument('--bbox_interval', type=int, default=300, help='Set bounding-box image logging interval for W&B')
parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
## for baseline training
parser.add_argument('--no_data_prepare', action='store_true')
parser.add_argument('--epochs', type=int, default=300)
    parser.add_argument('--epoch_parts', type=int, default=15, help='epoch_parts setting forwarded to the train dataloader')
parser.add_argument('--save_period', type=int, default=300, help='Log model after every "save_period" epoch')
## for fine-tuning
parser.add_argument('--fine_tune', action='store_true', help='fine_tune')
parser.add_argument('--epochs_tune', type=int, default=50)
    parser.add_argument('--epoch_parts_tune', type=int, default=50, help='epoch_parts setting used during fine-tuning')
parser.add_argument('--save_period_tune', type=int, default=50, help='Log model after every "save_period" epoch')
opt = parser.parse_args()
if not opt.no_data_prepare:
data_prepare()
# Reproducibility
torch.manual_seed(opt.random_seed)
torch.cuda.manual_seed(opt.random_seed)
torch.cuda.manual_seed_all(opt.random_seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(opt.random_seed)
random.seed(opt.random_seed)
# Set DDP variables
opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_requirements(exclude=('pycocotools', 'thop'))
# Resume
wandb_run = check_wandb_resume(opt)
if opt.resume and not wandb_run: # resume an interrupted run
ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path
assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
opt = argparse.Namespace(**yaml.safe_load(f)) # replace
opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \
'', ckpt, True, opt.total_batch_size, *apriori # reinstate
logger.info('Resuming training from %s' % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))
# DDP mode
opt.total_batch_size = opt.batch_size
device = select_device(opt.device, batch_size=opt.batch_size)
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://') # distributed backend
assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
assert not opt.image_weights, '--image-weights argument is not compatible with DDP training'
opt.batch_size = opt.total_batch_size // opt.world_size
# Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.safe_load(f) # load hyps
# Train
logger.info(opt)
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
prefix = colorstr('tensorboard: ')
logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer)
print("### base train completed")
print("### fine-tuning start")
opt.fine_tune = True
opt.weights = opt.save_dir + '/weights/last.pt'
opt.data = 'data/drowsy_face_tuning.yaml'
opt.hyp = 'data/hyp.finetune-simple.yaml'
opt.epochs = opt.epochs_tune
opt.epoch_parts = opt.epoch_parts_tune
opt.save_period = opt.save_period_tune
opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files
assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)
opt.name = 'evolve' if opt.evolve else opt.name
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))
# Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.safe_load(f) # load hyps
# Train
logger.info(opt)
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
prefix = colorstr('tensorboard: ')
logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer)
| ["torch.cuda.manual_seed", "torch.cuda.amp.autocast", "torch.cuda.is_available", "torch.load", "torch.nn.DataParallel", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "torch.distributed.init_process_group", "torch.manual_seed", "torch.tensor", "torch.utils.tensorboard.SummaryWriter", "torch.distributed.broadcast", "torch.zeros", "torch.device", "torch.cuda.manual_seed_all", "torch.save", "torch.optim.SGD", "torch.cuda.device_count", "torch.cuda.set_device", "torch.cuda.empty_cache", "torch.cuda.memory_reserved", "torch.cuda.amp.GradScaler", "torch.distributed.destroy_process_group", "torch.nn.functional.interpolate", "torch.optim.Adam", "torch.optim.lr_scheduler.LambdaLR"] | 1.7.0 | PJunhyuk/2021AICompetition-03 | dbeea7dec3f009f1f1485984dcdfa54eb6b4f75e |
1.6 | import os
import csv
import time
import wandb
import numpy as np
import pandas as pd
import seaborn as sns
import pytorch_lightning as pl
import matplotlib.pyplot as plt
from os.path import join
from pathlib import Path
from pprint import pprint
from config import setSeed, getConfig
from collections import Counter, defaultdict
from main.utils import *
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from customLoader import *
from torchvision.transforms import transforms
from models.CustomVQVAE import VQVAE_PL
from pytorch_lightning.loggers import WandbLogger
from mod.q_functions import parse_arch
from sklearn.cluster import KMeans
class VQVAE(VQVAE_PL):
def __init__(self, conf):
super(VQVAE, self).__init__(conf['data_type'], **conf['vqvae'])
self.experiment = conf['experiment']
self.batch_size = conf['batch_size']
self.lr = conf['lr']
self.split = conf['split']
self.num_clusters = conf['vqvae']['num_embeddings']
self.delay = conf['delay']
self.trajectories = conf['trajectories']
self.trajectories_train, self.trajectories_val = get_train_val_split(self.trajectories, self.split)
self.conf = {
'k_std': conf['k_std'],
'k_mean': conf['k_mean'],
'data_type': conf['data_type']
}
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5,0.5,0.5), (1.0,1.0,1.0))
])
self.test = conf['test']
self.type = self.test['type']
self.shuffle = self.test['shuffle']
self.limit = self.test['limit']
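    # Before the first training step, initialise the VQ-VAE codebook (and its EMA weights)
    # with k-means centroids computed over encoder embeddings of the whole training set.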
def on_train_start(self):
embeddings = []
print("Computing embeddings...")
for batch in self.trainer.train_dataloader:
z_1 = self.model.compute_embedding(batch, self.device)
embeddings.append(z_1.detach().cpu().numpy())
        e = np.concatenate(embeddings)  # stack per-batch embeddings into a single (N, dim) array
print("Computing kmeans...")
kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(e)
kmeans_tensor = torch.from_numpy(kmeans.cluster_centers_).to(self.device)
self.model._vq_vae._embedding.weight = nn.Parameter(kmeans_tensor)
self.model._vq_vae._ema_w = nn.Parameter(kmeans_tensor)
def training_step(self, batch, batch_idx):
loss = self.model(batch, batch_idx, self.logger, "train")
return loss
def validation_step(self, batch, batch_idx):
loss = self.model(batch, batch_idx, self.logger, "val")
return loss
def on_epoch_end(self):
self.model.log_reconstructions(self.trainer.train_dataloader, self.logger)
def configure_optimizers(self):
return torch.optim.Adam(params=self.parameters(), lr=self.lr, weight_decay=1e-5)
def train_dataloader(self):
train_dataset = CustomMinecraftData(self.trajectories_train, transform=self.transform, delay=self.delay, **self.conf)
train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=2)
return train_dataloader
def val_dataloader(self):
val_dataset = CustomMinecraftData(self.trajectories_val, transform=self.transform, delay=self.delay, **self.conf)
val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=2)
return val_dataloader
def _construct_map(self):
construct_map(self)
| ["torch.from_numpy", "torch.nn.Parameter", "torch.utils.data.DataLoader"] | 1.6.0 | imatge-upc/pixelcoordEDL | 353632feed6ac8c93758c1a2a1b7a477e7ff053c |
1.7 | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
class ScalarMix(nn.Module):
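    """
    Computes a parameterised scalar mixture of n_layers tensors:
    mixture = gamma * sum(s_k * tensor_k), where s = softmax(weights).
    Useful for combining the layer representations of models such as ELMo or BERT.
    """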
def __init__(self, n_layers, dropout=0):
super(ScalarMix, self).__init__()
self.n_layers = n_layers
self.weights = nn.Parameter(torch.zeros(n_layers))
self.gamma = nn.Parameter(torch.tensor([1.0]))
self.dropout = nn.Dropout(dropout)
def extra_repr(self):
s = f"n_layers={self.n_layers}"
if self.dropout.p > 0:
s += f", dropout={self.dropout.p}"
return s
def forward(self, tensors):
normed_weights = self.dropout(self.weights.softmax(-1))
weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))
return self.gamma * weighted_sum
| ["torch.zeros", "torch.nn.Dropout", "torch.tensor"] | 1.7.1 | njzr/DadmaTools | b26ad8aa834f642d49bd120bd7cf1fdf40741be1 |
1.0 | import torch
import unittest
import qtorch
from qtorch.quant import block_quantize, fixed_point_quantize, float_quantize
from qtorch import FixedPoint, BlockFloatingPoint, FloatingPoint
class TestDevice(unittest.TestCase):
"""
invariant: cuda and cpp implementation should behave the same
"""
def error(self, cuda_t, cpu_t):
return ((cuda_t.cpu()-cpu_t)**2).sum().item()
def test_fixed_point(self):
for wl, fl in [(5,4), (3,2)]:
for rounding in ["nearest"]:
t_max = 1-(2**(-fl))
to_quantize_cuda = torch.linspace(-t_max, t_max, steps=20, device='cuda')
to_quantize_cpu = to_quantize_cuda.clone().to("cpu")
fixed_quantized_cuda = fixed_point_quantize(to_quantize_cuda, wl=wl, fl=fl, rounding=rounding)
fixed_quantized_cpu = fixed_point_quantize(to_quantize_cpu, wl=wl, fl=fl, rounding=rounding)
mse = self.error(fixed_quantized_cuda, fixed_quantized_cpu)
self.assertTrue(mse<1e-15)
# self.assertTrue(torch.eq(fixed_quantized_cuda.cpu(), fixed_quantized_cpu).all().item())
def test_block_floating_point(self):
for wl in [5, 3]:
for rounding in ["nearest"]:
for dim in [-1, 0, 1]:
t_max = 1-(2**(-4))
to_quantize_cuda = torch.linspace(-t_max, t_max, steps=20, device='cuda')
to_quantize_cpu = to_quantize_cuda.clone().to("cpu")
                    block_quantized_cuda = block_quantize(to_quantize_cuda, wl=wl, dim=dim, rounding=rounding)  # pass dim so the loop actually exercises different block dimensions
                    block_quantized_cpu = block_quantize(to_quantize_cpu, wl=wl, dim=dim, rounding=rounding)
mse = self.error(block_quantized_cuda, block_quantized_cpu)
self.assertTrue(mse<1e-15)
# self.assertTrue(torch.eq(block_quantized_cuda.cpu(), block_quantized_cpu).all().item())
def test_floating_point(self):
for man, exp in [(2, 5), (6, 9)]:
for rounding in ["nearest"]:
to_quantize_cuda = torch.rand(20).cuda()
to_quantize_cpu = to_quantize_cuda.clone().to("cpu")
float_quantized_cuda = float_quantize(to_quantize_cuda, man=man, exp=exp, rounding=rounding)
float_quantized_cpu = float_quantize(to_quantize_cpu, man=man, exp=exp, rounding=rounding)
mse = self.error(float_quantized_cuda, float_quantized_cpu)
self.assertTrue(mse<1e-15)
if __name__ == "__main__":
unittest.main()
| ["torch.rand", "torch.linspace"] | 1.0.0 | drcut/QPyTorch | 63c293178e8ce9e6e5b218dee96536e9c4ad1e5c |
1.1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import pycocotools.mask as mask_utils
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Mask(object):
"""
This class is unfinished and not meant for use yet
It is supposed to contain the mask for an object as
a 2d tensor
"""
def __init__(self, masks, size, mode):
self.masks = masks
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 2
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
flip_idx = list(range(dim)[::-1])
flipped_masks = self.masks.index_select(dim, flip_idx)
return Mask(flipped_masks, self.size, self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped_masks = self.masks[:, box[1] : box[3], box[0] : box[2]]
return Mask(cropped_masks, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
pass
class Polygons(object):
"""
This class holds a set of polygons that represents a single instance
of an object mask. The object can be represented as a set of
polygons
"""
def __init__(self, polygons, size, mode):
# assert isinstance(polygons, list), '{}'.format(polygons)
if isinstance(polygons, list):
polygons = [torch.as_tensor(p, dtype=torch.float32) for p in polygons]
elif isinstance(polygons, Polygons):
polygons = polygons.polygons
self.polygons = polygons
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped_polygons = []
width, height = self.size
if method == FLIP_LEFT_RIGHT:
dim = width
idx = 0
elif method == FLIP_TOP_BOTTOM:
dim = height
idx = 1
for poly in self.polygons:
p = poly.clone()
TO_REMOVE = 1
p[idx::2] = dim - poly[idx::2] - TO_REMOVE
flipped_polygons.append(p)
return Polygons(flipped_polygons, size=self.size, mode=self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
# TODO chck if necessary
w = max(w, 1)
h = max(h, 1)
cropped_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] = p[0::2] - box[0] # .clamp(min=0, max=w)
p[1::2] = p[1::2] - box[1] # .clamp(min=0, max=h)
cropped_polygons.append(p)
return Polygons(cropped_polygons, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
if ratios[0] == ratios[1]:
ratio = ratios[0]
scaled_polys = [p * ratio for p in self.polygons]
return Polygons(scaled_polys, size, mode=self.mode)
ratio_w, ratio_h = ratios
scaled_polygons = []
for poly in self.polygons:
p = poly.clone()
p[0::2] *= ratio_w
p[1::2] *= ratio_h
scaled_polygons.append(p)
return Polygons(scaled_polygons, size=size, mode=self.mode)
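    # Rasterise the polygons into a single binary mask of the image size via COCO RLE encode/merge/decode.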
def convert(self, mode):
width, height = self.size
if mode == "mask":
rles = mask_utils.frPyObjects(
[p.numpy() for p in self.polygons], height, width
)
rle = mask_utils.merge(rles)
mask = mask_utils.decode(rle)
mask = torch.from_numpy(mask)
# TODO add squeeze?
return mask
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_polygons={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={}, ".format(self.size[1])
s += "mode={})".format(self.mode)
return s
class SegmentationMask(object):
"""
This class stores the segmentations for all objects in the image
"""
def __init__(self, polygons, size, mode=None):
"""
Arguments:
polygons: a list of list of lists of numbers. The first
level of the list correspond to individual instances,
the second level to all the polygons that compose the
object, and the third level to the polygon coordinates.
"""
assert isinstance(polygons, list)
self.polygons = [Polygons(p, size, mode) for p in polygons]
self.size = size
self.mode = mode
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented"
)
flipped = []
for polygon in self.polygons:
flipped.append(polygon.transpose(method))
return SegmentationMask(flipped, size=self.size, mode=self.mode)
def crop(self, box):
w, h = box[2] - box[0], box[3] - box[1]
cropped = []
for polygon in self.polygons:
cropped.append(polygon.crop(box))
return SegmentationMask(cropped, size=(w, h), mode=self.mode)
def resize(self, size, *args, **kwargs):
scaled = []
for polygon in self.polygons:
scaled.append(polygon.resize(size, *args, **kwargs))
return SegmentationMask(scaled, size=size, mode=self.mode)
def to(self, *args, **kwargs):
return self
def __getitem__(self, item):
if isinstance(item, (int, slice)):
selected_polygons = [self.polygons[item]]
else:
# advanced indexing on a single dimension
selected_polygons = []
if isinstance(item, torch.Tensor) and item.dtype == torch.uint8:
item = item.nonzero()
item = item.squeeze(1) if item.numel() > 0 else item
item = item.tolist()
for i in item:
selected_polygons.append(self.polygons[i])
return SegmentationMask(selected_polygons, size=self.size, mode=self.mode)
def __len__(self):
return len(self.polygons)
def __iter__(self):
return iter(self.polygons)
def __repr__(self):
s = self.__class__.__name__ + "("
s += "num_instances={}, ".format(len(self.polygons))
s += "image_width={}, ".format(self.size[0])
s += "image_height={})".format(self.size[1])
return s
| ["torch.as_tensor", "torch.from_numpy"] | 1.1.0 | Loranet-Technologies/traffic-analysis | e1e50b6c36b3da6279678c679500a8cf4e62ccef |
1.0 | import copy
import json
import math
import numpy as np
import os
import pathlib
import sklearn.metrics
import torch
import tqdm
import models
#here = pathlib.Path(__file__).resolve().parent
here = pathlib.Path("/dfs/scratch1/ksaab/ncde_results")
def _add_weight_regularisation(loss_fn, regularise_parameters, scaling=0.03):
def new_loss_fn(pred_y, true_y):
total_loss = loss_fn(pred_y, true_y)
for parameter in regularise_parameters.parameters():
if parameter.requires_grad:
total_loss = total_loss + scaling * parameter.norm()
return total_loss
return new_loss_fn
class _SqueezeEnd(torch.nn.Module):
def __init__(self, model):
super(_SqueezeEnd, self).__init__()
self.model = model
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs).squeeze(-1)
def _count_parameters(model):
    """Counts the number of trainable parameters in a model."""
    return sum(param.numel() for param in model.parameters() if param.requires_grad)
class _AttrDict(dict):
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, item):
return self[item]
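# Evaluates a model over a dataloader without tracking gradients: returns mean loss, accuracy, a confusion
# matrix and, for binary problems, AUROC and average precision.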
def _evaluate_metrics(dataloader, model, times, loss_fn, num_classes, device, kwargs):
with torch.no_grad():
total_accuracy = 0
total_confusion = torch.zeros(num_classes, num_classes).numpy() # occurs all too often
total_dataset_size = 0
total_loss = 0
true_y_cpus = []
pred_y_cpus = []
for batch in dataloader:
batch = tuple(b.to(device) for b in batch)
*coeffs, true_y, lengths = batch
batch_size = true_y.size(0)
pred_y = model(times, coeffs, lengths, **kwargs)
if num_classes == 2:
thresholded_y = (pred_y > 0).to(true_y.dtype)
else:
thresholded_y = torch.argmax(pred_y, dim=1)
true_y_cpu = true_y.detach().cpu()
pred_y_cpu = pred_y.detach().cpu()
if num_classes == 2:
# Assume that our datasets aren't so large that this breaks
true_y_cpus.append(true_y_cpu)
pred_y_cpus.append(pred_y_cpu)
thresholded_y_cpu = thresholded_y.detach().cpu()
total_accuracy += (thresholded_y == true_y).sum().to(pred_y.dtype)
total_confusion += sklearn.metrics.confusion_matrix(true_y_cpu, thresholded_y_cpu,
labels=range(num_classes))
total_dataset_size += batch_size
total_loss += loss_fn(pred_y, true_y) * batch_size
total_loss /= total_dataset_size # assume 'mean' reduction in the loss function
total_accuracy /= total_dataset_size
metrics = _AttrDict(accuracy=total_accuracy.item(), confusion=total_confusion, dataset_size=total_dataset_size,
loss=total_loss.item())
if num_classes == 2:
true_y_cpus = torch.cat(true_y_cpus, dim=0)
pred_y_cpus = torch.cat(pred_y_cpus, dim=0)
metrics.auroc = sklearn.metrics.roc_auc_score(true_y_cpus, pred_y_cpus)
metrics.average_precision = sklearn.metrics.average_precision_score(true_y_cpus, pred_y_cpus)
return metrics
class _SuppressAssertions:
def __init__(self, tqdm_range):
self.tqdm_range = tqdm_range
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is AssertionError:
self.tqdm_range.write('Caught AssertionError: ' + str(exc_val))
return True
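# Core optimisation loop: trains with ReduceLROnPlateau scheduling, evaluates every epoch_per_metric epochs,
# keeps a copy of the model with the best validation accuracy, and early-stops after plateau_terminate epochs
# without improvement in training loss or accuracy; the best weights are copied back into `model` at the end.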
def _train_loop(train_dataloader, val_dataloader, model, times, optimizer, loss_fn, max_epochs, num_classes, device,
kwargs, step_mode):
model.train()
best_model = model
best_train_loss = math.inf
best_train_accuracy = 0
best_val_accuracy = 0
best_train_accuracy_epoch = 0
best_train_loss_epoch = 0
history = []
breaking = False
if step_mode:
epoch_per_metric = 10
plateau_terminate = 100
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
else:
epoch_per_metric = 10
plateau_terminate = 50
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=1, mode='max')
tqdm_range = tqdm.tqdm(range(max_epochs))
tqdm_range.write('Starting training for model:\n\n' + str(model) + '\n\n')
for epoch in tqdm_range:
if breaking:
break
for batch in train_dataloader:
batch = tuple(b.to(device) for b in batch)
if breaking:
break
with _SuppressAssertions(tqdm_range):
*train_coeffs, train_y, lengths = batch
pred_y = model(times, train_coeffs, lengths, **kwargs)
loss = loss_fn(pred_y, train_y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
if epoch % epoch_per_metric == 0 or epoch == max_epochs - 1:
model.eval()
train_metrics = _evaluate_metrics(train_dataloader, model, times, loss_fn, num_classes, device, kwargs)
val_metrics = _evaluate_metrics(val_dataloader, model, times, loss_fn, num_classes, device, kwargs)
model.train()
if train_metrics.loss * 1.0001 < best_train_loss:
best_train_loss = train_metrics.loss
best_train_loss_epoch = epoch
if train_metrics.accuracy > best_train_accuracy * 1.001:
best_train_accuracy = train_metrics.accuracy
best_train_accuracy_epoch = epoch
if val_metrics.accuracy > best_val_accuracy:
best_val_accuracy = val_metrics.accuracy
del best_model # so that we don't have three copies of a model simultaneously
best_model = copy.deepcopy(model)
tqdm_range.write('Epoch: {} Train loss: {:.3} Train accuracy: {:.3} Val loss: {:.3} '
'Val accuracy: {:.3}'
''.format(epoch, train_metrics.loss, train_metrics.accuracy, val_metrics.loss,
val_metrics.accuracy))
if step_mode:
scheduler.step(train_metrics.loss)
else:
scheduler.step(val_metrics.accuracy)
history.append(_AttrDict(epoch=epoch, train_metrics=train_metrics, val_metrics=val_metrics))
if epoch > best_train_loss_epoch + plateau_terminate:
tqdm_range.write('Breaking because of no improvement in training loss for {} epochs.'
''.format(plateau_terminate))
breaking = True
if epoch > best_train_accuracy_epoch + plateau_terminate:
tqdm_range.write('Breaking because of no improvement in training accuracy for {} epochs.'
''.format(plateau_terminate))
breaking = True
for parameter, best_parameter in zip(model.parameters(), best_model.parameters()):
parameter.data = best_parameter.data
return history
class _TensorEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, (torch.Tensor, np.ndarray)):
return o.tolist()
else:
super(_TensorEncoder, self).default(o)
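# Serialises a result dict as JSON under results/<name>/<run number> (incrementing the run number),
# after dropping the dataloaders and stringifying the model.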
def _save_results(name, result):
loc = here / 'results' / name
if not os.path.exists(loc):
os.mkdir(loc)
num = -1
for filename in os.listdir(loc):
try:
num = max(num, int(filename))
except ValueError:
pass
result_to_save = result.copy()
del result_to_save['train_dataloader']
del result_to_save['val_dataloader']
del result_to_save['test_dataloader']
result_to_save['model'] = str(result_to_save['model'])
num += 1
with open(loc / str(num), 'w') as f:
json.dump(result_to_save, f, cls=_TensorEncoder)
def main(name, times, train_dataloader, val_dataloader, test_dataloader, device, make_model, num_classes, max_epochs,
lr, kwargs, step_mode, pos_weight=torch.tensor(1)):
times = times.to(device)
if device != 'cpu':
torch.cuda.reset_max_memory_allocated(device)
baseline_memory = torch.cuda.memory_allocated(device)
else:
baseline_memory = None
model, regularise_parameters = make_model()
if num_classes == 2:
model = _SqueezeEnd(model)
loss_fn = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
else:
loss_fn = torch.nn.functional.cross_entropy
loss_fn = _add_weight_regularisation(loss_fn, regularise_parameters)
model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
history = _train_loop(train_dataloader, val_dataloader, model, times, optimizer, loss_fn, max_epochs,
num_classes, device, kwargs, step_mode)
model.eval()
train_metrics = _evaluate_metrics(train_dataloader, model, times, loss_fn, num_classes, device, kwargs)
val_metrics = _evaluate_metrics(val_dataloader, model, times, loss_fn, num_classes, device, kwargs)
test_metrics = _evaluate_metrics(test_dataloader, model, times, loss_fn, num_classes, device, kwargs)
if device != 'cpu':
memory_usage = torch.cuda.max_memory_allocated(device) - baseline_memory
else:
memory_usage = None
result = _AttrDict(times=times,
memory_usage=memory_usage,
baseline_memory=baseline_memory,
num_classes=num_classes,
train_dataloader=train_dataloader,
val_dataloader=val_dataloader,
test_dataloader=test_dataloader,
model=model.to('cpu'),
parameters=_count_parameters(model),
history=history,
train_metrics=train_metrics,
val_metrics=val_metrics,
test_metrics=test_metrics)
if name is not None:
_save_results(name, result)
return result
def make_model(name, input_channels, output_channels, hidden_channels, hidden_hidden_channels, num_hidden_layers,
use_intensity, initial):
if name == 'ncde':
def make_model():
vector_field = models.FinalTanh(input_channels=input_channels, hidden_channels=hidden_channels,
hidden_hidden_channels=hidden_hidden_channels,
num_hidden_layers=num_hidden_layers)
model = models.NeuralCDE(func=vector_field, input_channels=input_channels, hidden_channels=hidden_channels,
output_channels=output_channels, initial=initial)
return model, vector_field
elif name == 'gruode':
def make_model():
vector_field = models.GRU_ODE(input_channels=input_channels, hidden_channels=hidden_channels)
model = models.NeuralCDE(func=vector_field, input_channels=input_channels,
hidden_channels=hidden_channels, output_channels=output_channels, initial=initial)
return model, vector_field
elif name == 'dt':
def make_model():
model = models.GRU_dt(input_channels=input_channels, hidden_channels=hidden_channels,
output_channels=output_channels, use_intensity=use_intensity)
return model, model
elif name == 'decay':
def make_model():
model = models.GRU_D(input_channels=input_channels, hidden_channels=hidden_channels,
output_channels=output_channels, use_intensity=use_intensity)
return model, model
elif name == 'odernn':
def make_model():
model = models.ODERNN(input_channels=input_channels, hidden_channels=hidden_channels,
hidden_hidden_channels=hidden_hidden_channels, num_hidden_layers=num_hidden_layers,
output_channels=output_channels, use_intensity=use_intensity)
return model, model
else:
raise ValueError("Unrecognised model name {}. Valid names are 'ncde', 'gruode', 'dt', 'decay' and 'odernn'."
"".format(name))
return make_model
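# Usage sketch (added): `make_model` returns a zero-argument factory so the
# caller can rebuild a fresh model per run. Channel sizes here are arbitrary.
def _make_model_demo():
    factory = make_model('ncde', input_channels=4, output_channels=1,
                         hidden_channels=32, hidden_hidden_channels=32,
                         num_hidden_layers=3, use_intensity=False, initial=True)
    model, regularise_parameters = factory()
    return model, regularise_parameters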
| [
"torch.zeros",
"torch.cat",
"torch.no_grad",
"torch.cuda.reset_max_memory_allocated",
"torch.cuda.memory_allocated",
"torch.cuda.max_memory_allocated",
"torch.tensor",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.BCEWithLogitsLoss",
"torch.argmax"
] | 1.0.0 | khaledsaab/NeuralCDE | 559d9d6fdb137afd14965725ea4845cf31e9235c |
0.4 | import torch
import numpy as np
from utilities.data_structures.Replay_Buffer import Replay_Buffer
from utilities.Utility_Functions import abstract
@abstract
class HER_Base(object):
"""Contains methods needed to turn an algorithm into a hindsight experience replay (HER) algorithm"""
def __init__(self, buffer_size, batch_size, HER_sample_proportion):
self.HER_memory = Replay_Buffer(buffer_size, batch_size, self.config.seed)
self.ordinary_buffer_batch_size = int(batch_size * (1.0 - HER_sample_proportion))
self.HER_buffer_batch_size = batch_size - self.ordinary_buffer_batch_size
def reset_game(self):
"""Resets the game information so we are ready to play a new episode"""
self.state_dict = self.environment.reset()
self.observation = self.state_dict["observation"]
self.desired_goal = self.state_dict["desired_goal"]
self.achieved_goal = self.state_dict["achieved_goal"]
self.state = self.create_state_from_observation_and_desired_goal(self.observation, self.desired_goal)
self.next_state = None
self.action = None
self.reward = None
self.done = False
self.episode_states = []
self.episode_rewards = []
self.episode_actions = []
self.episode_next_states = []
self.episode_dones = []
self.episode_desired_goals = []
self.episode_achieved_goals = []
self.episode_observations = []
self.episode_next_desired_goals = []
self.episode_next_achieved_goals = []
self.episode_next_observations = []
self.total_episode_score_so_far = 0
def track_changeable_goal_episodes_data(self):
"""Saves the data from the recent episodes in a way compatible with changeable goal environments"""
self.episode_rewards.append(self.reward)
self.episode_actions.append(self.action)
self.episode_dones.append(self.done)
self.episode_states.append(self.state)
self.episode_next_states.append(self.next_state)
self.episode_desired_goals.append(self.state_dict["desired_goal"])
self.episode_achieved_goals.append(self.state_dict["achieved_goal"])
self.episode_observations.append(self.state_dict["observation"])
self.episode_next_desired_goals.append(self.next_state_dict["desired_goal"])
self.episode_next_achieved_goals.append(self.next_state_dict["achieved_goal"])
self.episode_next_observations.append(self.next_state_dict["observation"])
def conduct_action_in_changeable_goal_envs(self, action):
"""Adapts conduct_action from base agent so that can handle changeable goal environments"""
self.next_state_dict, self.reward, self.done, _ = self.environment.step(action)
self.total_episode_score_so_far += self.reward
if self.hyperparameters["clip_rewards"]:
self.reward = max(min(self.reward, 1.0), -1.0)
self.observation = self.next_state_dict["observation"]
self.desired_goal = self.next_state_dict["desired_goal"]
self.achieved_goal = self.next_state_dict["achieved_goal"]
self.next_state = self.create_state_from_observation_and_desired_goal(self.observation, self.desired_goal)
def create_state_from_observation_and_desired_goal(self, observation, desired_goal):
return np.concatenate((observation, desired_goal))
def save_alternative_experience(self):
"""Saves the experiences as if the final state visited in the episode was the goal state"""
new_goal = self.achieved_goal
new_states = [self.create_state_from_observation_and_desired_goal(observation, new_goal) for observation in self.episode_observations]
new_next_states = [self.create_state_from_observation_and_desired_goal(observation, new_goal) for observation in
self.episode_next_observations]
new_rewards = [self.environment.compute_reward(next_achieved_goal, new_goal, None) for next_achieved_goal in self.episode_next_achieved_goals]
if self.hyperparameters["clip_rewards"]:
new_rewards = [max(min(reward, 1.0), -1.0) for reward in new_rewards]
self.HER_memory.add_experience(new_states, self.episode_actions, new_rewards, new_next_states, self.episode_dones)
def sample_from_HER_and_Ordinary_Buffer(self):
"""Samples from the ordinary replay buffer and HER replay buffer according to a proportion specified in config"""
states, actions, rewards, next_states, dones = self.memory.sample(self.ordinary_buffer_batch_size)
HER_states, HER_actions, HER_rewards, HER_next_states, HER_dones = self.HER_memory.sample(self.HER_buffer_batch_size)
states = torch.cat((states, HER_states))
actions = torch.cat((actions, HER_actions))
rewards = torch.cat((rewards, HER_rewards))
next_states = torch.cat((next_states, HER_next_states))
dones = torch.cat((dones, HER_dones))
return states, actions, rewards, next_states, dones
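# Standalone sketch (added; the sparse-reward convention is an assumption):
# the hindsight trick in save_alternative_experience, relabelling an episode
# as if the final achieved goal had been the desired goal all along.
def _hindsight_relabel(observations, achieved_goals):
    new_goal = achieved_goals[-1]
    new_states = [np.concatenate((obs, new_goal)) for obs in observations]
    # assumed sparse rewards: 0 when the (new) goal is achieved, -1 otherwise
    new_rewards = [0.0 if np.allclose(g, new_goal) else -1.0
                   for g in achieved_goals]
    return new_states, new_rewards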
| [
"torch.cat"
] | 0.4.1 | p-christ/Deep-Reinforcement-Learning-PyTorch-Algorithms | 135d3e2e06bbde2868047d738e3fc2d73fd8cc93 |
0.4 | import torch
from torch import optim
from agents.Base_Agent import Base_Agent
from agents.DQN_agents.DDQN import DDQN
class Dueling_DDQN(DDQN):
"""A dueling double DQN agent as described in the paper http://proceedings.mlr.press/v48/wangf16.pdf"""
agent_name = "Dueling DDQN"
def __init__(self, config):
DDQN.__init__(self, config)
self.q_network_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size + 1)
self.q_network_optimizer = optim.Adam(self.q_network_local.parameters(), lr=self.hyperparameters["learning_rate"], eps=1e-4)
self.q_network_target = self.create_NN(input_dim=self.state_size, output_dim=self.action_size + 1)
Base_Agent.copy_model_over(from_model=self.q_network_local, to_model=self.q_network_target)
def pick_action(self, state=None):
"""Uses the local Q network and an epsilon greedy policy to pick an action"""
# PyTorch only accepts mini-batches and not single observations so we have to use unsqueeze to add
# a "fake" dimension to make it a mini-batch rather than a single observation
if state is None: state = self.state
state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)
if len(state.shape) < 2: state = state.unsqueeze(0)
self.q_network_local.eval()
with torch.no_grad():
action_values = self.q_network_local(state)
action_values = action_values[:, :-1] #because we treat the last output element as state-value and rest as advantages
self.q_network_local.train()
action = self.exploration_strategy.perturb_action_for_exploration_purposes({"action_values": action_values,
"turn_off_exploration": self.turn_off_exploration,
"episode_number": self.episode_number})
return action
def compute_q_values_for_next_states(self, next_states):
"""Computes the q_values for next state we will use to create the loss to train the Q network. Double DQN
uses the local index to pick the maximum q_value action and then the target network to calculate the q_value.
The reasoning behind this is that it will help stop the network from overestimating q values"""
max_action_indexes = self.q_network_local(next_states)[:, :-1].detach().argmax(1)
duelling_network_output = self.q_network_target(next_states)
q_values = self.calculate_duelling_q_values(duelling_network_output)
Q_targets_next = q_values.gather(1, max_action_indexes.unsqueeze(1))
return Q_targets_next
def calculate_duelling_q_values(self, duelling_q_network_output):
"""Calculates the q_values using the duelling network architecture. This is equation (9) in the paper
referenced at the top of the class"""
state_value = duelling_q_network_output[:, -1]
avg_advantage = torch.mean(duelling_q_network_output[:, :-1], dim=1)
q_values = state_value.unsqueeze(1) + (duelling_q_network_output[:, :-1] - avg_advantage.unsqueeze(1))
return q_values
def compute_expected_q_values(self, states, actions):
"""Computes the expected q_values we will use to create the loss to train the Q network"""
duelling_network_output = self.q_network_local(states)
q_values = self.calculate_duelling_q_values(duelling_network_output)
Q_expected = q_values.gather(1, actions.long())
return Q_expected
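# Standalone numeric sketch (added): equation (9) from the paper combines a
# scalar state value V with mean-centred advantages A, Q = V + (A - mean(A)).
def _duelling_demo():
    out = torch.randn(5, 4)                  # 3 advantages + 1 state value
    advantages, state_value = out[:, :-1], out[:, -1]
    q_values = state_value.unsqueeze(1) + (
        advantages - advantages.mean(dim=1, keepdim=True))
    return q_values                          # shape (5, 3)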
| [
"torch.no_grad",
"torch.mean",
"torch.from_numpy"
] | 0.4.1 | p-christ/Deep-Reinforcement-Learning-PyTorch-Algorithms | 135d3e2e06bbde2868047d738e3fc2d73fd8cc93 |
1.7 | """
A torch implement for U-Net.
* see: U-Net: Convolutional Networks for Biomedical Image Segmentation
* author: JamzumSum
* create: 2021-1-11
"""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from common.layers import BlurPool, MaxBlurPool2d, Swish
from common.support import SelfInitialed
from misc import CheckpointSupport
from misc.decorators import autoPropertyClass
@autoPropertyClass
class ChannelInference(nn.Module):
ic: int
oc: int
def __init__(self, ic: int, oc: int):
super().__init__()
class ChannelNorm(ChannelInference, nn.GroupNorm):
def __init__(self, ic, channels=16, *args, **kwargs):
nn.GroupNorm.__init__(self, max(1, ic // channels), ic, *args, **kwargs)
ChannelInference.__init__(self, ic, ic)
def norm_layer(norm: str, ndim=2):
return {
"batchnorm": [nn.BatchNorm1d, nn.BatchNorm2d][ndim - 1],
"groupnorm": ChannelNorm,
"none": nn.Identity,
}[norm]
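# Usage sketch (added): norm_layer is a name-keyed factory; the returned class
# is then instantiated with the channel count.
def _norm_layer_demo():
    gn = norm_layer('groupnorm')(64)   # ChannelNorm -> GroupNorm with 64//16 groups
    bn = norm_layer('batchnorm')(64)   # nn.BatchNorm2d(64)
    return gn, bn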
class ParallelLayers(nn.Module):
def __init__(self, layers: list, dim=1):
super().__init__()
for i, layer in enumerate(layers):
self.add_module(f"P{i}", layer)
self.layers = layers
self.dim = dim
def forward(self, X):
r = [f(X) for f in self.layers]
if self.dim is None: return r
return torch.cat(r, dim=self.dim)
@autoPropertyClass
class ConvStack2(ChannelInference):
"""
[N, ic, H, W] -> [N, oc, H, W]
"""
res: bool
def __init__(self, ic, oc, *, res=False, norm="batchnorm", padding_mode='same'):
super().__init__(ic, oc)
# nonlinear = Swish if ic < oc else nn.ReLU
nonlinear = nn.PReLU
bias = norm == "none"
self.pad = {'same': 1, 'none': 0}[padding_mode]
self.CBR = nn.Sequential(
nn.Conv2d(ic, oc, 3, 1, self.pad, bias=bias),
norm_layer(norm)(oc),
nonlinear(),
)
self.CB = nn.Sequential(
nn.Conv2d(oc, oc, 3, 1, self.pad, bias=bias),
norm_layer(norm)(oc)
)
if res:
self.downsample = (
nn.Sequential(nn.Conv2d(ic, oc, 1, bias=bias),
norm_layer(norm)(oc)) if ic != oc else nn.Identity()
)
def forward(self, X):
r = self.CBR(X)
if self.res:
ds = self.downsample(X)
if self.pad == 0: ds = ds[..., 2:-2, 2:-2]
r = ds + self.CB(r)
else:
r = self.CB(r)
return torch.relu(r)
class DownConv(ChannelInference):
"""
[N, C, H, W] -> [N, C, H//2, W//2]
"""
def __init__(self, ic, mode="maxpool", blur=False):
"""
Args:
ic (int): input channel
mode (str, optional): `maxpool`/`avgpool`. Defaults to "maxpool".
            blur (bool, optional): whether to apply a blur kernel before pooling. Defaults to False.
"""
super().__init__(ic, ic)
f = {
("maxpool", False): nn.MaxPool2d,
("avgpool", False): nn.AvgPool2d,
('maxpool', True): partial(MaxBlurPool2d, ic=ic),
('avgpool', True): partial(BlurPool, channels=ic),
}[(mode, blur)]
self.pool = f(kernel_size=2, stride=2)
def forward(self, X):
return self.pool(X)
class UpConv(ChannelInference, nn.Sequential):
"""
[N, C, H, W] -> [N, C//2, H*2, W*2]
"""
def __init__(self, ic, norm="batchnorm", transConv=False):
ChannelInference.__init__(self, ic, ic // 2)
bias = norm == "none"
if transConv:
layers = [nn.ConvTranspose2d(ic, self.oc, 2, 2, bias=False)]
else:
# NOTE: Since 2x2 conv cannot be aligned when the shape is odd,
# 0318: conv here is mainly object to reduce channel size. Hence use a conv1x1 instead.
layers = [
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True),
nn.Conv2d(ic, ic // 2, 1, bias=bias),
]
layers.append(norm_layer(norm)(self.oc))
nn.Sequential.__init__(self, *layers)
def forward(self, X):
return nn.Sequential.forward(self, X)
@autoPropertyClass
class BareUNet(ChannelInference):
"""
[N, ic, H, W] -> [N, fc * 2^level, H, W], [N, fc, H, W]
"""
level: int
fc: int
cps: CheckpointSupport
cat: bool
backbone_only: bool
def __init__(
self,
ic,
level=4,
fc=64,
*,
cps=None,
residual=False,
norm='batchnorm',
transConv=False,
padding_mode='none',
antialias=True,
backbone_only=False,
cat=True,
):
super().__init__(ic, fc * 2 ** level)
uniarg = dict(res=residual, norm=norm, padding_mode=padding_mode)
self.L1 = ConvStack2(ic, fc, **uniarg)
cc = self.L1.oc
for i in range(level):
dsample = DownConv(cc, blur=antialias)
cc = dsample.oc
conv = ConvStack2(cc, cc * 2, **uniarg)
cc = conv.oc
self.add_module(f"D{i + 1}", dsample)
self.add_module(f"L{i + 2}", conv)
if backbone_only: return
for i in range(level):
usample = UpConv(cc, norm=norm, transConv=transConv)
cc = usample.oc
conv = ConvStack2(cc * 2 if self.cat else cc, cc, **uniarg)
cc = conv.oc
self.add_module(f"U{i + 1}", usample)
self.add_module(f"L{i + self.level + 2}", conv)
def add_module(self, name, model):
return nn.Module.add_module(self, name, self.cps(model))
def catoradd(self, X, Y):
"""Crop X. Then cat X & Y or add them.
Args:
X (Tensor): [N, C, H, W]
Y (Tensor): [N, C, H, W]
Returns:
Tensor: [N, 2C, H, W] if cat, else [N, C, H, W]
"""
top = (X.size(-2) - Y.size(-2)) // 2
left = (X.size(-1) - Y.size(-1)) // 2
X = X[..., top:top + Y.size(-2), left:left + Y.size(-1)]
return torch.cat([X, Y], dim=1) if self.cat else X + Y
def _L(self, i) -> ConvStack2:
return self._modules[f"L{i}"]
def _D(self, i) -> DownConv:
return self._modules[f"D{i}"]
def _U(self, i) -> UpConv:
return self._modules[f"U{i}"]
def forward(self, X, expand=True):
"""
X: [N, C, H, W]
O: [N, fc, H, W], [N, oc, H, W]
"""
xn = [self.L1(X)]
L = self.level
for i in range(1, L + 1):
xn.append(
self._L(i + 1)(self._D(i)(xn[-1])) # [N, t * fc, H//t, W//t], t = 2^i
)
if not expand:
return xn[L], None
for i in range(L):
xn.append(
self._L(L + i + 2)(
self.catoradd(
xn[L - i - 1],
self._U(i + 1)(xn[L + i]),
) # [N, t*fc, H//t, W//t], t = 2^(level - i - 1)
)
)
return xn[L], xn[-1]
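# Standalone sketch (added): the centre-crop in catoradd with concrete shapes.
# X is cropped to Y's spatial size before concatenation, which is what lets
# padding_mode='none' (valid convolutions) work in the decoder.
def _catoradd_demo():
    X = torch.randn(1, 8, 68, 68)   # encoder skip feature, slightly larger
    Y = torch.randn(1, 8, 64, 64)   # upsampled decoder feature
    top = (X.size(-2) - Y.size(-2)) // 2
    left = (X.size(-1) - Y.size(-1)) // 2
    X = X[..., top:top + Y.size(-2), left:left + Y.size(-1)]
    return torch.cat([X, Y], dim=1)  # (1, 16, 64, 64)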
@autoPropertyClass
class UNet(BareUNet):
"""Add multiple parallel header along with original segment header.
illustrate:
finalx ---conv-> seg1 (original header)
--tanh--conv--> seg2 (additional header 1)
--tanh--conv--> seg3 (additional header 2)
...
return:
[bottomx, *header_outputs]
e.g. bottomx, seg
bottomx, seg1, add_seg1, add_seg2, ...
"""
oc: int
def __init__(self, ic, oc, level=4, fc=64, *, headeroc=None, **kwargs):
super().__init__(ic, level, fc, **kwargs)
headers = [nn.Sequential(nn.Conv2d(fc, oc, 1), nn.Sigmoid())]
if not self.backbone_only and headeroc:
headers.extend(
nn.Sequential(nn.Tanh(), nn.Conv2d(fc, oc, 1), nn.Sigmoid())
for oc in headeroc
)
if not self.backbone_only:
self.headers = ParallelLayers(headers, None)
@staticmethod
def padback(X, shape):
top = shape[-2] - X.size(-2)
left = shape[-1] - X.size(-1)
return F.pad(X, [left // 2, left - left // 2, top // 2, top - top // 2])
def forward(self, X, expand: bool = True) -> dict:
assert not (expand and self.backbone_only)
bottomx, finalx = super().forward(X, expand)
d = {"bottom": bottomx}
if not expand:
return d
d['seg'] = [self.padback(i, X.shape) for i in self.headers(finalx)]
return d
| [
"torch.cat",
"torch.nn.Identity",
"torch.relu",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"torch.nn.ConvTranspose2d",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.Sequential.__init__",
"torch.nn.functional.pad",
"torch.nn.Sequential.forward"
] | 1.7 | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 |
1.6 | import os
import torch
import argparse
from configs import args
from data import make_data_loader
from model import bulid_MGN_resnet
from processor import inference
def test(args):
print('Start Testing ------')
train_loader, val_loader, class_num, _, _, num_query = make_data_loader(args)
device = torch.device(args.cuda)
model = bulid_MGN_resnet(args)
model.to(device)
model.load_param(args.test_weight)
inference(args, model, val_loader, num_query, device)
if __name__ == '__main__':
test(args) | [
"torch.device"
] | 1.6.0 | WangTaoAs/MGN_ReID | 916244c39b57b8068c34a7bfa1803781193bb554 |
1.10 | # import
from src.project_parameters import ProjectParameters
from src.model import create_model
import torch
from DeepLearningTemplate.data_preparation import parse_transforms, AudioLoader
from DeepLearningTemplate.predict import AudioPredictDataset
from typing import TypeVar, Any
T_co = TypeVar('T_co', covariant=True)
from os.path import isfile
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
# class
class AudioPredictDataset(AudioPredictDataset):
def __init__(self, root, loader, transform) -> None:
super().__init__(root, loader, transform)
def __getitem__(self, index) -> T_co:
sample = super().__getitem__(index)
# convert the range of the sample to 0~1
sample = (sample - sample.min()) / (sample.max() - sample.min())
return sample
class Predict:
def __init__(self, project_parameters) -> None:
self.model = create_model(project_parameters=project_parameters).eval()
if project_parameters.device == 'cuda' and torch.cuda.is_available():
self.model = self.model.cuda()
self.transform = parse_transforms(
transforms_config=project_parameters.transforms_config)['predict']
self.device = project_parameters.device
self.batch_size = project_parameters.batch_size
self.num_workers = project_parameters.num_workers
self.classes = project_parameters.classes
self.loader = AudioLoader(sample_rate=project_parameters.sample_rate)
self.in_chans=project_parameters.in_chans
def predict(self, inputs) -> Any:
result = []
fake_samples = []
if isfile(path=inputs):
# predict the file
sample = self.loader(path=inputs)
in_chans, _ = sample.shape
if in_chans != self.in_chans:
sample = sample.mean(0)
sample = torch.cat(
[sample[None] for idx in range(self.in_chans)])
# the transformed sample dimension is (1, in_chans, freq, time)
sample = self.transform(sample)[None]
# convert the range of the sample to 0~1
sample = (sample - sample.min()) / (sample.max() - sample.min())
if self.device == 'cuda' and torch.cuda.is_available():
sample = sample.cuda()
with torch.no_grad():
score, sample_hat = self.model(sample)
result.append([score.item()])
fake_samples.append(sample_hat.cpu().data.numpy())
else:
# predict the file from folder
dataset = AudioPredictDataset(root=inputs,
loader=self.loader,
transform=self.transform)
pin_memory = True if self.device == 'cuda' and torch.cuda.is_available(
) else False
data_loader = DataLoader(dataset=dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=pin_memory)
with torch.no_grad():
for sample in tqdm(data_loader):
if self.device == 'cuda' and torch.cuda.is_available():
sample = sample.cuda()
score, sample_hat = self.model(sample)
result.append(score.tolist())
fake_samples.append(sample_hat.cpu().data.numpy())
result = np.concatenate(result, 0).reshape(-1, 1)
fake_samples = np.concatenate(fake_samples, 0)
print(', '.join(self.classes))
print(result)
return result, fake_samples
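# Standalone sketch (added): the mono-to-multichannel fix used in `predict`
# above. A (channels, time) waveform is averaged to mono, then stacked up to
# the channel count the model expects.
def _match_channels_demo(sample, in_chans=2):
    if sample.shape[0] != in_chans:
        mono = sample.mean(0)
        sample = torch.cat([mono[None] for _ in range(in_chans)])
    return sample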
if __name__ == '__main__':
# project parameters
project_parameters = ProjectParameters().parse()
# predict file
result = Predict(project_parameters=project_parameters).predict(
inputs=project_parameters.root)
| [
"torch.no_grad",
"torch.cuda.is_available",
"torch.utils.data.DataLoader"
] | 1.10.1 | fastyangmh/AudioGANomaly | d877f050606765b17bb6755bd70277857326b5e1 |
1.5 | #!/usr/bin/env python3
"""Training module for DCGAN."""
import argparse
import logging
logging.root.setLevel(logging.INFO)
import os
from typing import Any, Dict, List, Tuple
import model
import utils
import math
import numpy as np
import torch
import torch.cuda
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torchvision.utils import save_image
# Constants #
RESULTS_DIR = 'results'
NUM_CHANNELS = 3
OPTIMAL_D_SCORE = 0.5
FAKE_LABEL = 0
REAL_LABEL = 1
SOFT_COEFF = 0.25
MIN_LR = 10e-5
MAX_LR = 1.0
CHECKPOINT_FREQ = 500
IMG_SAVE_COEF = 0.98
GAN_ERROR_THRESHOLD = 0.98
GRID_SIZE = 64
LOGGING_FREQ = 10
NUM_FAKES = 500
# Create figures directory
FIGURES_DIR = os.path.join(RESULTS_DIR, 'figures')
MODEL_DIR = os.path.join(RESULTS_DIR, 'model')
os.makedirs(FIGURES_DIR, exist_ok=True)
os.makedirs(MODEL_DIR, exist_ok=True)
# Public Functions #
def parse_args():
"""Parses command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'dataroot',
help='The root of the directory whose image data to process.',
type=str,
)
parser.add_argument(
'name',
help='The base name of this batch of synthesized images, e.g. "metal".',
type=str,
)
parser.add_argument(
'--batch-size',
help='The batch size for batch training.',
type=int,
default=128,
)
parser.add_argument(
'--beta1',
help='The Beta1 parameter for Adam Optimization.',
type=float,
default=0.5,
)
parser.add_argument(
'--beta2',
help='The Beta2 parameter for Adam Optimization.',
type=float,
default=0.999,
)
parser.add_argument(
'--image-size',
help='The size of the images.',
type=int,
default=64,
)
parser.add_argument(
'--learning-rate',
help='The learning rate to apply during parameter updates.',
type=float,
default=0.0002,
)
parser.add_argument(
'--netD-checkpoint',
help='Initializes the Discriminator from the specified checkpoint.',
type=str,
)
parser.add_argument(
'--netG-checkpoint',
help='Initializes the Generator from the specified checkpoint.',
type=str,
)
parser.add_argument(
'--num-epochs',
help='The number of training epochs to run.',
type=int,
default=5,
)
parser.add_argument(
'--num-gpus',
help='The number of GPUs available for training. Use 0 for CPU.',
type=int,
default=1,
)
parser.add_argument(
'--num-trials',
help='The number of trials to use during hyperparameter searching.',
type=int,
default=10,
)
parser.add_argument(
'--num-trial-cpus',
help='The number of CPUs available during hyperparameter searching.',
type=int,
default=1,
)
parser.add_argument(
'--num-trial-gpus',
help='The number of GPUs available during hyperparameter searching.',
type=int,
default=1,
)
parser.add_argument(
'--num-workers',
help='The number of parallel workers for the DataLoader.',
type=int,
default=2,
)
# Perform some basic argument validation
args = parser.parse_args()
if not os.path.exists(args.dataroot) and os.path.isdir(args.dataroot):
raise ValueError(f'{args.dataroot} is not a valid directory.')
return args
def synthesize_training_data(
netG: model.Generator, fixed_noise: torch.Tensor, epoch: int):
"""Saves synthesized images given a latent vector and a generator model."""
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
for s in range(NUM_FAKES):
save_image(
fake[s,:,:,:],
os.path.join(FIGURES_DIR, f'fake_out_{epoch}_{s}.png'))
def load_checkpoint(
model: nn.Module,
optimizer: optim.Optimizer,
filepath: str) -> Tuple[int, float]:
"""Loads model and optimizer state from the provided .model file."""
if not os.path.exists(filepath):
raise ValueError(f'Filepath: {filepath} does not exist!')
logging.info(f'Loading checkpoint: {filepath}...')
checkpoint = torch.load(filepath)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
return (checkpoint['epoch'], checkpoint['loss'])
def save_checkpoint(
model: nn.Module,
optimizer: optim.Optimizer,
epoch: int,
loss: float,
filepath: str):
"""Saves model and optimizer state to a filepath."""
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'loss': loss,
'optimizer_state_dict': optimizer.state_dict(),
}, filepath)
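# Round-trip sketch (added; the filepath is hypothetical): save_checkpoint and
# load_checkpoint are symmetric, so resuming training looks like this.
def _checkpoint_roundtrip_demo(filepath='demo.model'):
    net = nn.Linear(4, 2)
    opt = optim.Adam(net.parameters(), lr=1e-3)
    save_checkpoint(net, opt, epoch=3, loss=0.7, filepath=filepath)
    return load_checkpoint(net, opt, filepath)   # -> (3, 0.7)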
def train(config: Dict[str, Any]) -> Tuple[List[float], List[float], List[torch.Tensor]]:
"""The primary function for DCGAN training.
Note: Per GANHacks, Discriminator training is conducted in *two* separate
batch training sessions: one with all-real data, and one with all-fake data.
    See more at: https://github.com/soumith/ganhacks
Args:
config: A dict with the following parameters:
* netG: The Generator to train.
* netG_checkpoint: An optional .model checkpoint to load from.
* netD: The Discriminator to train.
* netD_checkpoint: An optional .model checkpoint to load from.
* dataloader: The PyTorch DataLoader used to iterate through data.
* device: The device that the models are loaded onto.
* learning_rate: The learning rate to apply during updates.
* num_epochs: The number of training epochs.
* beta1: The Beta1 parameter of Adam optimization.
* beta2: The Beta2 parameter of Adam optimization.
            * pre_epoch: An optional hook for processing prior to the epoch.
* post_epoch: An optional hook for processing post-epoch.
Returns:
A tuple of lists containing the loss of the Generator and the
Discriminator, respectively, from each training iteration, along with
a list of images.
"""
# Set parameters
netG = config['netG']
netD = config['netD']
dataloader = config['dataloader']
device = config['device']
learning_rate = config['learning_rate']
num_epochs = config['num_epochs']
beta1 = config['beta1']
beta2 = config['beta2']
# Retrieve optional configuration parameters
pre_epoch = config.get('pre_epoch')
post_epoch = config.get('post_epoch')
netG_checkpoint = config.get('netG_checkpoint')
netD_checkpoint = config.get('netD_checkpoint')
# Batch of input latent vectors
fixed_noise = torch.randn(
NUM_FAKES, netG.latent_vector_size, 1, 1, device=device)
# Setup loss function and optimizers
lossF = nn.BCELoss()
optD = optim.Adam(netD.parameters(), lr=learning_rate, betas=(beta1, beta2))
optG = optim.Adam(netG.parameters(), lr=learning_rate, betas=(beta1, beta2))
# Load from saved state, if provided
checkpoint_epochs = []
if netG_checkpoint is not None:
G_epoch, _ = load_checkpoint(netG, optG, netG_checkpoint)
checkpoint_epochs.append(G_epoch)
if netD_checkpoint is not None:
D_epoch, _ = load_checkpoint(netD, optD, netD_checkpoint)
checkpoint_epochs.append(D_epoch)
# Dump model configuration
logging.info(f'Generator:\n{netG}')
logging.info(f'Discriminator:\n{netD}')
# Main training loop
img_list = []
G_losses = []
D_losses = []
D_batch_scores = []
iters = 0
logging.info('Starting training...')
epoch = min(checkpoint_epochs) if checkpoint_epochs else 0
while epoch < num_epochs:
logging.info(f'Starting epoch: {epoch}...')
# Call into pre-epoch handler, if present
if pre_epoch is not None:
pre_epoch(
epoch=epoch,
G_losses=G_losses,
D_losses=D_losses,
D_batch_scores=D_batch_scores)
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
## Real data
netD.zero_grad()
## Format batch
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), REAL_LABEL, device=device)
utils.add_label_noise(label, p_flip=0.05)
r_label_soft = (
REAL_LABEL +
(torch.randn((b_size,), device=device)*SOFT_COEFF))
r_label_noisy_soft = torch.mul(label, r_label_soft)
## Forward pass real data through discriminator
output = netD(real_cpu).view(-1)
## Calculate loss on all-real batch; calculate gradients
errD_real = lossF(output, r_label_noisy_soft)
errD_real.backward()
D_x = output.mean().item()
## Fake data
noise = torch.randn(
b_size, netG.latent_vector_size, 1, 1, device=device)
## Generate fake image batch with G
fake = netG(noise)
label.fill_(FAKE_LABEL)
utils.add_label_noise(label, p_flip=0.05)
f_label_noisy_soft = (
label +
torch.abs(torch.randn((b_size,), device=device))*SOFT_COEFF)
## Classify all fake batch with D
output = netD(fake.detach()).view(-1)
## Calculate D's loss on the all-fake batch
errD_fake = lossF(output, f_label_noisy_soft)
## Calculate the gradients for this batch
errD_fake.backward()
D_G_z1 = output.mean().item()
## Add the gradients from the all-real and all-fake batches; Update
errD = errD_real + errD_fake
optD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(REAL_LABEL) # fake labels are real for generator cost
# Since we just updated D, perform another forward pass of all-fake
# batch through D
output = netD(fake).view(-1)
# Calculate G's loss based on this output
errG = lossF(output, label)
# Calculate gradients for G; Update
errG.backward()
D_G_z2 = output.mean().item()
optG.step()
# Save losses for plotting
G_losses.append(errG.item())
D_losses.append(errD.item())
# Save discriminator output
D_batch_scores.append(D_x)
# Output training stats
if i % LOGGING_FREQ == 0:
logging.info(f'[{epoch}/{num_epochs}][{i}/{len(dataloader)}]\t'
f'Loss_D: {errD.item():.4f}\tLoss_G: '
f'{errG.item():.4f}\t'
f'D(x): {D_x:.4f}\tD(G(z)): '
f'{D_G_z1:.4f} / {D_G_z2:.4f}')
# Save checkpoint; dump model and optimizer states along with grid
if ((iters % CHECKPOINT_FREQ == 0) or
((epoch == num_epochs - 1) and (i == len(dataloader) - 1))):
img_list.append(
vutils.make_grid(
fake[0:GRID_SIZE], padding=2, normalize=True))
save_checkpoint(
netG, optG, epoch, errG.item(),
os.path.join(MODEL_DIR, f'modelG_{epoch}.model'))
save_checkpoint(
netD, optD, epoch, errD.item(),
os.path.join(MODEL_DIR, f'modelD_{epoch}.model'))
# If we're sufficiently late into training, and the generator is having
# success fooling the discriminator, synthesize training images
if ((epoch >= math.floor(IMG_SAVE_COEF * num_epochs)) and
(errG.item() <= GAN_ERROR_THRESHOLD)):
                synthesize_training_data(netG, fixed_noise, epoch)
iters += 1
# Call into post-epoch handler, if present
epoch += 1
if post_epoch is not None:
post_epoch(
epoch=epoch,
G_losses=G_losses,
D_losses=D_losses,
avg_D_batch_scores=D_batch_scores)
return (G_losses, D_losses, img_list)
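# Standalone sketch (added): the one-sided label softening used in the
# real/fake batches above. Jittering labels away from hard 0/1 targets keeps
# the discriminator from becoming over-confident (see the GANHacks link above).
def _soft_label_demo(b_size=8, device='cpu'):
    real = torch.full((b_size,), float(REAL_LABEL), device=device)
    real_soft = real * (REAL_LABEL + torch.randn(b_size, device=device) * SOFT_COEFF)
    fake = torch.full((b_size,), float(FAKE_LABEL), device=device)
    fake_soft = fake + torch.abs(torch.randn(b_size, device=device)) * SOFT_COEFF
    return real_soft, fake_soft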
def main():
"""main."""
args = parse_args()
    device = torch.device((
        'cuda:0' if torch.cuda.is_available() and args.num_gpus > 0 else 'cpu'))
logging.info(f'Running with device: {device}')
# Initialize models
netG = model.Generator().to(device)
netD = model.Discriminator().to(device)
if device.type == 'cuda' and args.num_gpus > 1:
netG = nn.DataParallel(netG, list(range(args.num_gpus)))
netD = nn.DataParallel(netD, list(range(args.num_gpus)))
# Apply DCGAN paper weight-reinitialization
# See more: https://arxiv.org/pdf/1511.06434.pdf
netG.apply(utils.dcgan_weights_reinit)
netD.apply(utils.dcgan_weights_reinit)
# Load dataset and resize
dataset = utils.data_synthesis(
os.path.abspath(args.dataroot),
image_size=(args.image_size, args.image_size, NUM_CHANNELS),
custom_transforms=[
transforms.ColorJitter(
brightness=0.05,
contrast=0.05,
saturation=0.05,
hue=0.03,
),
transforms.RandomCrop(size=args.image_size),
transforms.RandomHorizontalFlip(p=0.9),
transforms.RandomVerticalFlip(p=0.9),
transforms.Lambda(lambd=lambda img: img) # Identity transform
]
)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
)
config = {
'netG': netG,
'netG_checkpoint': args.netG_checkpoint,
'netD': netD,
'netD_checkpoint': args.netD_checkpoint,
'dataloader': dataloader,
'device': device,
'learning_rate': args.learning_rate,
'num_epochs': args.num_epochs,
'beta1': args.beta1,
'beta2': args.beta2,
}
logging.info('Beginning training loop...')
G_losses, D_losses, img_list = train(config)
utils.plot_results(
device=device,
dataloader=dataloader,
G_losses=G_losses,
D_losses=D_losses,
img_list=img_list,
name=args.name,
outdir=FIGURES_DIR,
)
if __name__ == '__main__':
main()
| [
"torch.device",
"torch.mul",
"torch.no_grad",
"torch.full",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.load",
"torch.randn"
] | 1.5.0 | Cam2337/RecycleNet-DCGAN | a6691d1e3e03e286192a1791fd323bd2f442ad9f |
1.2 | import torch
import torch.nn as nn
import math
__all__ = ['pyramidnet272']
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def calc_prob(curr_layer, total_layers, p_l):
"""Calculates drop prob depending on the current layer."""
return 1 - (float(curr_layer) / total_layers) * p_l
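# Worked example (added): with p_l = 0.5 the survival probability decays
# linearly with depth; for total_layers = 90:
#   calc_prob(1, 90, 0.5)  ~= 0.994   (first block almost always kept)
#   calc_prob(45, 90, 0.5) == 0.75
#   calc_prob(90, 90, 0.5) == 0.5     (deepest block kept half the time)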
class Bottleneck(nn.Module):
outchannel_ratio = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, prob=1.):
super(Bottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
if stride == 1:
self.conv2 = nn.Conv2d(planes, (planes * 1), kernel_size=3, stride=stride,
padding=1, bias=False)
else:
self.conv2 = nn.Sequential(nn.ZeroPad2d((0, 1, 0, 1)),
nn.Conv2d(planes, (planes * 1), kernel_size=3, stride=stride,
padding=0, bias=False))
self.bn3 = nn.BatchNorm2d((planes * 1))
self.conv3 = nn.Conv2d((planes * 1), planes * Bottleneck.outchannel_ratio, kernel_size=1, bias=False)
self.bn4 = nn.BatchNorm2d(planes * Bottleneck.outchannel_ratio)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.prob = prob
self.padding = None
def forward(self, x):
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn4(out)
# shake drop inference
# we may support shake drop training in a future version
assert not self.training
out = out * self.prob
if self.downsample is not None:
shortcut = self.downsample(x)
featuremap_size = shortcut.size()[2:4]
else:
shortcut = x
featuremap_size = out.size()[2:4]
batch_size = out.size()[0]
residual_channel = out.size()[1]
shortcut_channel = shortcut.size()[1]
if residual_channel != shortcut_channel:
self.padding = torch.zeros(batch_size, residual_channel - shortcut_channel,
featuremap_size[0], featuremap_size[1])
self.padding = self.padding.to(x.device)
out += torch.cat((shortcut, self.padding), 1)
else:
out += shortcut
return out
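# Standalone sketch (added): the zero-padded identity shortcut above, with
# concrete shapes. When the residual branch widens the channels, the shortcut
# is padded with zero channels instead of using a 1x1 projection.
def _zero_pad_shortcut_demo():
    out = torch.randn(2, 24, 8, 8)       # residual branch output
    shortcut = torch.randn(2, 16, 8, 8)  # identity branch
    padding = torch.zeros(2, 24 - 16, 8, 8)
    return out + torch.cat((shortcut, padding), 1)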
class PyramidNet(nn.Module):
def __init__(self, depth, alpha, num_classes):
super(PyramidNet, self).__init__()
self.inplanes = 16
n = int((depth - 2) / 9)
block = Bottleneck
self.addrate = alpha / (3 * n * 1.0)
self.input_featuremap_dim = self.inplanes
self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)
self.featuremap_dim = self.input_featuremap_dim
self.p_l = 0.5
self.layer_num = 1
self.total_layers = n * 3
self.layer1 = self.pyramidal_make_layer(block, n)
self.layer2 = self.pyramidal_make_layer(block, n, stride=2)
self.layer3 = self.pyramidal_make_layer(block, n, stride=2)
self.final_featuremap_dim = self.input_featuremap_dim
self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)
self.relu_final = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(self.final_featuremap_dim, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def pyramidal_make_layer(self, block, block_depth, stride=1):
downsample = None
if stride != 1: # or self.inplanes != int(round(featuremap_dim_1st)) * block.outchannel_ratio:
downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)
layers = []
self.featuremap_dim = self.featuremap_dim + self.addrate
prob = calc_prob(self.layer_num, self.total_layers, self.p_l)
layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample, prob))
self.layer_num += 1
for i in range(1, block_depth):
temp_featuremap_dim = self.featuremap_dim + self.addrate
prob = calc_prob(self.layer_num, self.total_layers, self.p_l)
layers.append(
block(int(round(self.featuremap_dim)) * block.outchannel_ratio, int(round(temp_featuremap_dim)), 1,
prob=prob))
self.layer_num += 1
self.featuremap_dim = temp_featuremap_dim
self.input_featuremap_dim = int(round(self.featuremap_dim)) * block.outchannel_ratio
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.bn_final(x)
x = self.relu_final(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def pyramidnet272(**kwargs):
return PyramidNet(depth=272, alpha=200, **kwargs)
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.ZeroPad2d"
] | 1.2.0 | AllenChen1998/QueryNet | 1ab74d7f4cc9d25af30abe0631581cf7be81a07f |
3 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG),
# acting on behalf of its Max Planck Institute for Intelligent Systems and the
# Max Planck Institute for Biological Cybernetics. All rights reserved.
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights
# on this computer program. You can only use this computer program if you have closed a license agreement
# with MPG or you get the right to use the computer program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and liable to prosecution.
# Contact: [email protected]
#
#
# If you use this code in a research publication please consider citing the following:
#
# Expressive Body Capture: 3D Hands, Face, and Body from a Single Image <https://arxiv.org/abs/1904.05866>
#
#
# Code Developed by:
# Nima Ghorbani <https://nghorbani.github.io/>
#
# 2021.02.12
from typing import List, Dict
from psbody.mesh import Mesh
from body_visualizer.tools.mesh_tools import rotateXYZ
from body_visualizer.mesh.psbody_mesh_cube import points_to_cubes
from body_visualizer.mesh.psbody_mesh_sphere import points_to_spheres
from torch import nn
import torch
from human_body_prior.tools.model_loader import load_model
import numpy as np
from body_visualizer.tools.vis_tools import colors
from human_body_prior.tools.omni_tools import copy2cpu as c2c
from psbody.mesh import MeshViewers
from human_body_prior.tools.omni_tools import log2file
from human_body_prior.models.vposer_model import VPoser
from human_body_prior.tools.omni_tools import flatten_list
def visualize(points, bm_f, mvs, kpts_colors, verbosity=2, logger=None):
    if logger is None: logger = log2file()
def view(opt_objs, body_v, virtual_markers, opt_it):
if verbosity <= 0: return
opt_objs_cpu = {k: c2c(v) for k, v in opt_objs.items()}
total_loss = np.sum([np.sum(v) for k, v in opt_objs_cpu.items()])
message = 'it {} -- [total loss = {:.2e}] - {}'.format(opt_it, total_loss, ' | '.join(['%s = %2.2e' % (k, np.sum(v)) for k, v in opt_objs_cpu.items()]))
logger(message)
if verbosity>1:
bs = body_v.shape[0]
np.random.seed(100)
frame_ids = list(range(bs)) if bs <= len(mvs) else np.random.choice(bs , size=len(mvs), replace=False).tolist()
if bs > len(mvs): message += ' -- [frame_ids: {}]'.format(frame_ids)
for dispId, fId in enumerate(frame_ids): # check for the number of frames in mvs and show a randomly picked number of frames in body if there is more to show than row*cols available
new_body_v = rotateXYZ(body_v[fId], [-90,0,0])
orig_mrk_mesh = points_to_spheres(rotateXYZ(c2c(points[fId]), [-90,0,0]), radius=0.01, color=kpts_colors)
virtual_markers_mesh = points_to_cubes(rotateXYZ(virtual_markers[fId], [-90,0,0]), radius=0.01, color=kpts_colors)
new_body_mesh = Mesh(new_body_v, bm_f, vc=colors['grey'])
# linev = rotateXYZ(np.hstack((c2c(points[fId]), virtual_markers[fId])).reshape((-1, 3)), [-90,0,0])
# linee = np.arange(len(linev)).reshape((-1, 2))
# ll = Lines(v=linev, e=linee)
# ll.vc = (ll.v * 0. + 1) * np.array([0.00, 0.00, 1.00])
# mvs[dispId].set_dynamic_lines([ll])
# orig_mrk_mesh = points_to_spheres(data_pc, radius=0.01, vc=colors['blue'])
mvs[dispId].set_dynamic_meshes([orig_mrk_mesh, virtual_markers_mesh])
mvs[dispId].set_static_meshes([new_body_mesh])
mvs[0].set_titlebar(message)
# if out_dir is not None: mv.save_snapshot(os.path.join(out_dir, '%05d_it_%.5d.png' %(frame_id, opt_it)))
return view
class AdamInClosure():
def __init__(self, var_list, lr, max_iter=100, tolerance_change=1e-5):
self.optimizer = torch.optim.Adam(var_list, lr)
self.max_iter = max_iter
self.tolerance_change = tolerance_change
def step(self, closure):
prev_loss = None
for it in range(self.max_iter):
loss = closure()
self.optimizer.step()
if prev_loss is None:
prev_loss = loss
continue
if torch.isnan(loss):
# breakpoint()
break
            if abs(loss - prev_loss) < self.tolerance_change:
                print('abs(loss - prev_loss) < self.tolerance_change')
                break
            prev_loss = loss
def zero_grad(self):
self.optimizer.zero_grad()
def ik_fit(optimizer, source_kpts_model, static_vars, vp_model, extra_params={}, on_step=None, gstep=0):
data_loss = extra_params.get('data_loss', torch.nn.SmoothL1Loss(reduction='mean'))
# data_loss =
# data_loss = torch.nn.L1Loss(reduction='mean')#change with SmoothL1
def fit(weights, free_vars):
fit.gstep += 1
optimizer.zero_grad()
free_vars['pose_body'] = vp_model.decode(free_vars['poZ_body'])['pose_body'].contiguous().view(-1, 63)
nonan_mask = torch.isnan(free_vars['poZ_body']).sum(-1) == 0
opt_objs = {}
res = source_kpts_model(free_vars)
opt_objs['data'] = data_loss(res['source_kpts'], static_vars['target_kpts'])
opt_objs['betas'] = torch.pow(free_vars['betas'][nonan_mask],2).sum()
opt_objs['poZ_body'] = torch.pow(free_vars['poZ_body'][nonan_mask],2).sum()
opt_objs = {k: opt_objs[k]*v for k, v in weights.items() if k in opt_objs.keys()}
loss_total = torch.sum(torch.stack(list(opt_objs.values())))
# breakpoint()
loss_total.backward()
if on_step is not None:
on_step(opt_objs, c2c(res['body'].v), c2c(res['source_kpts']), fit.gstep)
fit.free_vars = {k:v for k,v in free_vars.items()}# if k in IK_Engine.fields_to_optimize}
# fit.nonan_mask = nonan_mask
fit.final_loss = loss_total
return loss_total
fit.gstep = gstep
fit.final_loss = None
fit.free_vars = {}
# fit.nonan_mask = None
return fit
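# Minimal sketch (added): the closure pattern ik_fit builds. LBFGS (and the
# AdamInClosure wrapper above) re-evaluate the loss through a callable, which
# is why IK_Engine calls optimizer.step(lambda: closure(wts, free_vars)).
def _closure_demo():
    x = torch.nn.Parameter(torch.tensor([2.0]))
    opt = torch.optim.LBFGS([x], max_iter=20)
    def closure():
        opt.zero_grad()
        loss = (x - 1.0).pow(2).sum()
        loss.backward()
        return loss
    opt.step(closure)
    return x   # converges towards 1.0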
class IK_Engine(nn.Module):
def __init__(self,
vposer_expr_dir: str,
data_loss,
optimizer_args: dict={'type':'ADAM'},
stepwise_weights: List[Dict]=[{'data': 10., 'poZ_body': .01, 'betas': .5}],
display_rc: tuple = (2,1),
verbosity: int = 1,
logger=None,
):
'''
:param vposer_expr_dir: The vposer directory that holds the settings and model snapshot
:param data_loss: should be a pytorch callable (source, target) that returns the accumulated loss
:param optimizer_args: arguments for optimizers
:param stepwise_weights: list of dictionaries. each list element defines weights for one full step of optimization
if a weight value is left out, its respective object item will be removed as well. imagine optimizing without data term!
:param display_rc: number of row and columns in case verbosity > 1
:param verbosity: 0: silent, 1: text, 2: text/visual. running 2 over ssh would need extra work
:param logger: an instance of human_body_prior.tools.omni_tools.log2file
'''
super(IK_Engine, self).__init__()
assert isinstance(stepwise_weights, list), ValueError('stepwise_weights should be a list of dictionaries.')
        assert np.all(['data' in l for l in stepwise_weights]), ValueError('The term data should be available in every weight of the annealed optimization steps: {}'.format(stepwise_weights))
self.data_loss = torch.nn.SmoothL1Loss(reduction='mean') if data_loss is None else data_loss
self.stepwise_weights = stepwise_weights
self.verbosity = verbosity
self.optimizer_args = optimizer_args
self.logger = log2file() if logger is None else logger
if verbosity>1:
mvs = MeshViewers(display_rc, keepalive=True)
self.mvs = flatten_list(mvs)
self.mvs[0].set_background_color(colors['white'])
else:
self.mvs=None
self.vp_model, _ = load_model(vposer_expr_dir,
model_code=VPoser,
remove_words_in_model_weights='vp_model.',
disable_grad=True)
def forward(self, source_kpts, target_kpts, initial_body_params={}):
'''
        source_kpts is a function that, given body parameters, computes source key points that should match target key points
Try to reconstruct the bps signature by optimizing the body_poZ
'''
# if self.rt_ps.verbosity > 0: self.logger('Processing {} frames'.format(points.shape[0]))
bs = target_kpts.shape[0]
on_step = visualize(target_kpts,
kpts_colors=source_kpts.kpts_colors,
bm_f=source_kpts.bm_f,
mvs=self.mvs,
verbosity=self.verbosity,
logger=self.logger)
comp_device = target_kpts.device
# comp_device = self.vp_model.named_parameters().__next__()[1].device
if 'pose_body' not in initial_body_params:
initial_body_params['pose_body'] = torch.zeros([bs, 63], device=comp_device, dtype=torch.float, requires_grad=False)
if 'trans' not in initial_body_params:
initial_body_params['trans'] = torch.zeros([bs, 3], device=comp_device, dtype=torch.float, requires_grad=False)
if 'betas' not in initial_body_params:
initial_body_params['betas'] = torch.zeros([bs, 10], device=comp_device, dtype=torch.float, requires_grad=False)
if 'root_orient' not in initial_body_params:
initial_body_params['root_orient'] = torch.zeros([bs, 3], device=comp_device, dtype=torch.float, requires_grad=False)
initial_body_params['poZ_body'] = self.vp_model.encode(initial_body_params['pose_body']).mean
free_vars = {k: torch.nn.Parameter(v.detach(), requires_grad=True) for k,v in initial_body_params.items() if k in ['betas', 'trans', 'poZ_body', 'root_orient']}
static_vars = {
'target_kpts': target_kpts,
# 'trans': initial_body_params['trans'].detach(),
# 'betas': initial_body_params['betas'].detach(),
# 'poZ_body': initial_body_params['poZ_body'].detach()
}
if self.optimizer_args['type'].upper() == 'LBFGS':
optimizer = torch.optim.LBFGS(list(free_vars.values()),
lr=self.optimizer_args.get('lr', 1),
max_iter=self.optimizer_args.get('max_iter', 100),
tolerance_change=self.optimizer_args.get('tolerance_change', 1e-5),
max_eval=self.optimizer_args.get('max_eval', None),
history_size=self.optimizer_args.get('history_size', 100),
line_search_fn='strong_wolfe')
elif self.optimizer_args['type'].upper() == 'ADAM':
optimizer = AdamInClosure(list(free_vars.values()),
lr=self.optimizer_args.get('lr', 1e-3),
max_iter=self.optimizer_args.get('max_iter', 100),
tolerance_change=self.optimizer_args.get('tolerance_change', 1e-5),
)
else:
raise ValueError('optimizer_type not recognized.')
gstep = 0
closure = ik_fit(optimizer,
source_kpts_model=source_kpts,
static_vars=static_vars,
vp_model=self.vp_model,
extra_params={'data_loss': self.data_loss},
on_step=on_step,
gstep=gstep)
# try:
for wts in self.stepwise_weights:
optimizer.step(lambda: closure(wts, free_vars))
free_vars = closure.free_vars
# except:
#
# pass
# if closure.final_loss is None or torch.isnan(closure.final_loss) or torch.any(torch.isnan(free_vars['trans'])):
# if self.verbosity > 0:
# self.logger('NaN observed in the optimization results. you might want to restart the refinment procedure.')
# breakpoint()
# return None
return closure.free_vars#, closure.nonan_mask
| [
"torch.zeros",
"torch.isnan",
"torch.optim.Adam",
"torch.nn.SmoothL1Loss",
"torch.pow"
] | 3 | zephyr-fun/human_body_prior | 35571fe16fddca39553398f6b3eb6d18a23c985b |
0.3 | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from classifier import SimpleClassifier
class _netG(nn.Module):
def __init__(self, args):
super(_netG, self).__init__()
self.ninp = args.ninp
self.nhid = args.nhid
self.nlayers = args.nlayers
self.dropout = args.dropout
self.rnn = getattr(nn, 'LSTM')(self.ninp, self.nhid, self.nlayers, bidirectional=False, dropout=self.dropout, batch_first=True)
self.rnn_type = 'LSTM'
        self.decoder = SimpleClassifier(self.nhid*2, self.nhid*4, args.vocab_size, self.dropout)
self.d = args.dropout
self.beta = 3
self.vocab_size = args.vocab_size
# self.init_weights()
self.w_q = nn.Linear(self.nhid*2, self.nhid)
self.ans_q = nn.Linear(self.nhid, self.nhid)
self.Wa_q = nn.Linear(self.nhid, 1)
self.w_h = nn.Linear(self.nhid*2, self.nhid)
self.ans_h = nn.Linear(self.nhid, self.nhid)
self.Wa_h = nn.Linear(self.nhid, 1)
self.w_i = nn.Linear(self.nhid*2, self.nhid)
self.ans_i = nn.Linear(self.nhid, self.nhid)
self.Wa_i = nn.Linear(self.nhid, 1)
self.concat = nn.Linear(self.nhid*3, self.nhid)
# self.fusion = nn.Linear(self.nhid*2, self.nhid*2)
def init_weights(self):
self.decoder.weight = nn.init.xavier_uniform(self.decoder.weight)
self.decoder.bias.data.fill_(0)
def forward(self, emb, question, history, image, hidden):
ques_length = question.size(1)
his_length = history.size(1)
img_length = image.size(1)
batch_size, ans_length, _ = emb.size()
question = question.contiguous()
seqLogprobs = []
for index in range(ans_length):
input_ans = emb[:, index, :].unsqueeze(1)
output, hidden = self.rnn(input_ans, hidden)
input_ans = output.squeeze(1)
ques_emb = self.w_q(question.view(-1, 2*self.nhid)).view(-1, ques_length, self.nhid)
input_ans_q = self.ans_q(input_ans).view(-1, 1, self.nhid)
atten_emb_q = F.tanh(ques_emb + input_ans_q.expand_as(ques_emb))
ques_atten_weight = F.softmax(self.Wa_q(F.dropout(atten_emb_q, self.d, training=self.training).view(-1, self.nhid)).view(-1, ques_length), 1)
ques_attn_feat = torch.bmm(ques_atten_weight.view(-1, 1, ques_length), ques_emb.view(-1,ques_length, self.nhid))
input_ans_h = self.ans_h(input_ans).view(-1, 1, self.nhid)
his_emb = self.w_h(history.view(-1, 2* self.nhid)).view(-1, his_length, self.nhid)
atten_emb_h = F.tanh(his_emb + input_ans_h.expand_as(his_emb))
his_atten_weight = F.softmax(self.Wa_h(F.dropout(atten_emb_h, self.d, training=self.training).view(-1, self.nhid)).view(-1, his_length), 1)
his_attn_feat = torch.bmm(his_atten_weight.view(-1, 1, his_length), his_emb.view(-1, his_length, self.nhid))
input_ans_i = self.ans_i(input_ans).view(-1, 1, self.nhid)
img_emb = self.w_i(image.view(-1, 2* self.nhid)).view(-1, img_length, self.nhid)
atten_emb_i = F.tanh(img_emb + input_ans_i.expand_as(img_emb))
img_atten_weight = F.softmax(self.Wa_i(F.dropout(atten_emb_i, self.d, training=self.training).view(-1, self.nhid)).view(-1, img_length), 1)
img_attn_feat = torch.bmm(img_atten_weight.view(-1, 1, img_length), img_emb.view(-1, img_length, self.nhid))
concat_feat = torch.cat((ques_attn_feat.view(-1, self.nhid), his_attn_feat.view(-1, self.nhid), img_attn_feat.view(-1, self.nhid)),1)
concat_feat = F.tanh(self.concat(F.dropout(concat_feat, self.d, training=self.training)))
fusion_feat = torch.cat((output.squeeze(1), concat_feat),1)
fusion_feat = F.dropout(fusion_feat, self.d, training=self.training)
decoded = self.decoder(fusion_feat.view(-1, 2*self.nhid))
logprob = F.log_softmax(self.beta * decoded, 1)
seqLogprobs.append(logprob)
return torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1).contiguous(), hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
def sample_beam(self, netW, input, hidden_state, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = input.size(1)
# assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq_all = torch.LongTensor(self.seq_length, batch_size, beam_size).zero_()
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
# copy the hidden state for beam_size time.
state = []
for state_tmp in hidden_state:
state.append(state_tmp[:, k, :].view(1, 1, -1).expand(1, beam_size, self.nhid).clone())
state = tuple(state)
beam_seq = torch.LongTensor(self.seq_length, beam_size).zero_()
beam_seq_logprobs = torch.FloatTensor(self.seq_length, beam_size).zero_()
beam_logprobs_sum = torch.zeros(beam_size) # running sum of logprobs for each beam
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = input.data.resize_(1, beam_size).fill_(self.vocab_size)
xt = netW(Variable(it, requires_grad=False))
else:
"""perform a beam merge. that is,
                    for every previous beam we now have many new possibilities to branch out
we need to resort our beams to maintain the loop invariant of keeping
the top beam_size most likely sequences."""
logprobsf = logprobs.float() # lets go to CPU for more efficiency in indexing operations
ys, ix = torch.sort(logprobsf, 1,
True) # sorted array of logprobs along each previous beam (last true = descending)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 1: # at first time step only the first beam is active
rows = 1
for cc in range(cols): # for each column (word, essentially)
for qq in range(rows): # for each beam expansion
# compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[qq, cc]
if beam_seq[t - 2, qq] == self.vocab_size:
local_logprob.data.fill_(-9999)
candidate_logprob = beam_logprobs_sum[qq] + local_logprob
candidates.append({'c': ix.data[qq, cc], 'q': qq, 'p': candidate_logprob.data[0],
'r': local_logprob.data[0]})
candidates = sorted(candidates, key=lambda x: -x['p'])
# construct new beams
new_state = [_.clone() for _ in state]
if t > 1:
                        # we'll need these as reference when we fork beams around
beam_seq_prev = beam_seq[:t - 1].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t - 1].clone()
for vix in range(beam_size):
v = candidates[vix]
# fork beam index q into index vix
if t > 1:
beam_seq[:t - 1, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t - 1, vix] = beam_seq_logprobs_prev[:, v['q']]
# rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][0, vix] = state[state_ix][0, v['q']] # dimension one is time step
# append new end terminal at the end of this beam
beam_seq[t - 1, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t - 1, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
if v['c'] == self.vocab_size or t == self.seq_length:
# END token special case here, or we reached the end.
# add the beam to a set of done beams
self.done_beams[k].append({'seq': beam_seq[:, vix].clone(),
'logps': beam_seq_logprobs[:, vix].clone(),
'p': beam_logprobs_sum[vix]
})
# encode as vectors
it = beam_seq[t - 1].view(1, -1)
xt = netW(Variable(it.cuda()))
if t >= 1:
state = new_state
output, state = self.rnn(xt, state)
output = F.dropout(output, self.d, training=self.training)
decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
                logprobs = F.log_softmax(self.beta * decoded, 1)
self.done_beams[k] = sorted(self.done_beams[k], key=lambda x: -x['p'])
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
for ii in range(beam_size):
seq_all[:, k, ii] = self.done_beams[k][ii]['seq']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
def sample(self, netW, input, state, opt={}):
sample_max = opt.get('sample_max', 1)
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
seq_length = opt.get('seq_length', 9)
self.seq_length = seq_length
if beam_size > 1:
return self.sample_beam(netW, input, state, opt)
batch_size = input.size(1)
seq = []
seqLogprobs = []
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = input.data
elif sample_max:
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
it = torch.multinomial(prob_prev, 1).cuda()
sampleLogprobs = logprobs.gather(1, Variable(it,
requires_grad=False)) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
xt = netW(Variable(it.view(-1, 1), requires_grad=False))
if t >= 1:
seq.append(it) # seq[t] the input of t+2 time step
seqLogprobs.append(sampleLogprobs.view(-1))
it = torch.unsqueeze(it, 0)
output, state = self.rnn(xt, state)
output = F.dropout(output, self.d, training=self.training)
decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
logprobs = F.log_softmax(self.beta * decoded, 1)
return torch.cat([_.unsqueeze(1) for _ in seq], 1), torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
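    # Hedged usage sketch (caller-side names are assumptions, not this repo's
    # documented API):
    #   seq, logps = decoder.sample(netW, bos_tokens, init_state,
    #                               opt={'sample_max': 0, 'temperature': 0.7})
    # sample_max=1 decodes greedily via argmax; otherwise tokens are drawn
    # from the temperature-scaled softmax distribution.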
| [
"torch.nn.Linear",
"torch.zeros",
"torch.max",
"torch.nn.init.xavier_uniform",
"torch.autograd.Variable",
"torch.nn.functional.dropout",
"torch.FloatTensor",
"torch.nn.functional.log_softmax",
"torch.unsqueeze",
"torch.multinomial",
"torch.LongTensor",
"torch.div",
"torch.exp",
"torch.sort"
] | 0.3.1 | Unmesh-Kumar/DMRM | f1c24049bd527c9dcc5ab6e6727dfa6c8e794c02 |
1.7 | """
This code is from batra-mlp-lab's repository.
https://github.com/batra-mlp-lab/visdial-challenge-starter-pytorch
"""
import torch
from torch import nn
class GenerativeDecoder(nn.Module):
def __init__(self, config, vocabulary):
super().__init__()
self.config = config
self.word_embed = nn.Embedding(
len(vocabulary),
config["word_embedding_size"],
padding_idx=vocabulary.PAD_INDEX,
)
self.answer_rnn = nn.LSTM(
config["word_embedding_size"],
config["lstm_hidden_size"],
config["lstm_num_layers"],
batch_first=True,
dropout=config["lstm_dropout"],
)
self.lstm_to_words = nn.Linear(
self.config["lstm_hidden_size"], len(vocabulary)
)
self.dropout = nn.Dropout(p=config["lstm_dropout"])
self.logsoftmax = nn.LogSoftmax(dim=-1)
def forward(self, encoder_output, batch):
"""Given `encoder_output`, learn to autoregressively predict
ground-truth answer word-by-word during training.
During evaluation, assign log-likelihood scores to all answer options.
Parameters
----------
encoder_output: torch.Tensor
Output from the encoder through its forward pass.
(batch_size, num_rounds, lstm_hidden_size)
"""
if self.training:
ans_in = batch["ans_in"]
batch_size, num_rounds, max_sequence_length = ans_in.size()
ans_in = ans_in.view(batch_size * num_rounds, max_sequence_length)
# shape: (batch_size * num_rounds, max_sequence_length,
# word_embedding_size)
ans_in_embed = self.word_embed(ans_in)
# reshape encoder output to be set as initial hidden state of LSTM.
# shape: (lstm_num_layers, batch_size * num_rounds,
# lstm_hidden_size)
init_hidden = encoder_output.view(1, batch_size * num_rounds, -1)
init_hidden = init_hidden.repeat(
self.config["lstm_num_layers"], 1, 1
)
init_cell = torch.zeros_like(init_hidden)
# shape: (batch_size * num_rounds, max_sequence_length,
# lstm_hidden_size)
ans_out, (hidden, cell) = self.answer_rnn(
ans_in_embed, (init_hidden, init_cell)
)
ans_out = self.dropout(ans_out)
# shape: (batch_size * num_rounds, max_sequence_length,
# vocabulary_size)
ans_word_scores = self.lstm_to_words(ans_out)
return ans_word_scores
else:
ans_in = batch["opt_in"]
batch_size, num_rounds, num_options, max_sequence_length = (
ans_in.size()
)
ans_in = ans_in.view(
batch_size * num_rounds * num_options, max_sequence_length
)
# shape: (batch_size * num_rounds * num_options, max_sequence_length
# word_embedding_size)
ans_in_embed = self.word_embed(ans_in)
# reshape encoder output to be set as initial hidden state of LSTM.
# shape: (lstm_num_layers, batch_size * num_rounds * num_options,
# lstm_hidden_size)
init_hidden = encoder_output.view(batch_size, num_rounds, 1, -1)
init_hidden = init_hidden.repeat(1, 1, num_options, 1)
init_hidden = init_hidden.view(
1, batch_size * num_rounds * num_options, -1
)
init_hidden = init_hidden.repeat(
self.config["lstm_num_layers"], 1, 1
)
init_cell = torch.zeros_like(init_hidden)
# shape: (batch_size * num_rounds * num_options,
# max_sequence_length, lstm_hidden_size)
ans_out, (hidden, cell) = self.answer_rnn(
ans_in_embed, (init_hidden, init_cell)
)
# shape: (batch_size * num_rounds * num_options,
# max_sequence_length, vocabulary_size)
ans_word_scores = self.logsoftmax(self.lstm_to_words(ans_out))
# shape: (batch_size * num_rounds * num_options,
# max_sequence_length)
target_ans_out = batch["opt_out"].view(
batch_size * num_rounds * num_options, -1
)
# shape: (batch_size * num_rounds * num_options,
# max_sequence_length)
ans_word_scores = torch.gather(
ans_word_scores, -1, target_ans_out.unsqueeze(-1)
).squeeze()
            ans_word_scores = (
                ans_word_scores * (target_ans_out > 0).float().cuda()
            )  # mask out padded positions (PAD index 0); hard-coded .cuda() kept from original
ans_scores = torch.sum(ans_word_scores, -1)
ans_scores = ans_scores.view(batch_size, num_rounds, num_options)
return ans_scores
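# Hedged usage sketch (construction of config/vocabulary is an assumption):
#   decoder = GenerativeDecoder(config, vocabulary)
#   out = decoder(encoder_output, batch)
# In training mode `out` holds per-word vocabulary logits for teacher
# forcing; in eval mode it holds summed option log-likelihoods shaped
# (batch_size, num_rounds, num_options).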
| [
"torch.nn.LogSoftmax",
"torch.nn.Dropout",
"torch.nn.LSTM",
"torch.zeros_like",
"torch.sum"
] | 1.7.0 | gicheonkang/sglkt-visdial | b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88 |
1.7 | import torch
def initialize_model_weights(model, initialization="he", lstm_initialization="he"):
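    """Initialize `model` parameters in place.
    Minimal usage sketch (the model construction is an assumption):
        initialize_model_weights(net, initialization="xavier", lstm_initialization="he")
    Biases are zeroed, 1-D weights (e.g. batchnorm) are left untouched, LSTM
    weights follow `lstm_initialization`, and all remaining weights follow
    `initialization`.
    """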
if initialization == "he":
print("kaiming normal initialization.")
elif initialization == "xavier":
print("xavier normal initialization.")
else:
print("default initialization, no changes made.")
    if initialization:
        for name, param in model.named_parameters():
            # Bias params
            if "bias" in name.split(".")[-1]:
                param.data.zero_()
            # Batchnorm weight params
            elif "weight" in name.split(".")[-1] and len(param.size()) == 1:
                continue
            # LSTM weight params
            elif "weight" in name.split(".")[-1] and "lstm" in name:
                if "xavier" in lstm_initialization:
                    torch.nn.init.xavier_normal_(param)
                elif "he" in lstm_initialization:
                    torch.nn.init.kaiming_normal_(param)
            # Other weight params
            elif "weight" in name.split(".")[-1] and "lstm" not in name:
                if "xavier" in initialization:
                    torch.nn.init.xavier_normal_(param)
                elif "he" in initialization:
torch.nn.init.kaiming_normal_(param) | [
"torch.nn.init.kaiming_normal_",
"torch.nn.init.xavier_normal_"
] | 1.7.0 | gicheonkang/sglkt-visdial | b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88 |
1.7 | """
Reasoning Visual Dialog with Sparse Graph Learning and Knowledge Transfer
Gi-Cheon Kang, Junseok Park, Hwaran Lee, Byoung-Tak Zhang, Jin-Hwa Kim
https://arxiv.org/abs/2004.06698
"""
import numpy as np
import torch, math
import torch.nn as nn
import torch.nn.functional as F
from .net_utils import MLP
from torch.autograd import Variable
from torch.nn.utils.weight_norm import weight_norm
class SANet(nn.Module):
def __init__(self, __C):
super(SANet, self).__init__()
self.n_head = 8
self.d_hid = __C['hidden_size']
self.d_hid_head = __C['hidden_size'] // 8
self.gs = GumbelSoftmax(d_in=self.d_hid_head, num_cls=2, dropout=__C['model_dropout'])
self.linear_q = nn.Linear(__C['hidden_size'], __C['hidden_size'])
self.linear_k = nn.Linear(__C['hidden_size'], __C['hidden_size'])
self.fc = nn.Linear(__C['hidden_size'], __C['hidden_size'])
def attention(self, q, k):
logit = q.unsqueeze(2) * k.unsqueeze(3)
logit = logit.transpose(2, 3)
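        # broadcasting gives shape (batch, head, len_q, len_k, d_hid_head);
        # summing the last dim below yields scaled dot-product scores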
attn = logit.sum(-1) / math.sqrt(self.d_hid_head)
binary = self.gs(logit)
attn = attn * binary
attn = F.normalize(attn, p=2, dim=-1)**2
return binary, attn
def forward(self, q, k):
n_batch = q.size(0)
q = self.linear_q(q).view(
n_batch,
-1,
self.n_head,
self.d_hid_head
).transpose(1, 2)
k = self.linear_k(k).view(
n_batch,
-1,
self.n_head,
self.d_hid_head
).transpose(1, 2)
binary, attn = self.attention(q, k)
binary = binary.mean(dim=1)
attn = attn.mean(dim=1)
return binary, attn
class GumbelSoftmax(nn.Module):
'''
Softmax Relaxation for Gumbel Max Trick
'''
def __init__(self, d_in, num_cls, dropout):
super().__init__()
self.linear_g = MLP(
in_size=d_in,
mid_size=d_in//2,
out_size=num_cls,
dropout_r=dropout,
use_relu=True
)
self.logsoftmax = nn.LogSoftmax(dim=-1)
def st_gumbel_softmax(self, x, temperature=0.5):
'''
        Straight-Through Gumbel Softmax
'''
eps = 1e-20
noise = Variable(torch.rand(x.size()).cuda())
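        # the two -log passes below turn uniform samples u into Gumbel(0, 1)
        # noise: g = -log(-log(u + eps) + eps)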
noise.data.add_(eps).log_().neg_()
noise.data.add_(eps).log_().neg_()
y = (x + noise) / temperature
y = F.softmax(y, dim=-1)
shape = y.size()
_, ind = y.max(dim=-1)
y_hard = torch.zeros_like(y).view(-1, shape[-1])
y_hard.scatter_(1, ind.view(-1, 1), 1)
y_hard = y_hard.view(*shape)
y_hard = (y_hard - y).detach() + y
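        # straight-through estimator: the forward pass emits the hard
        # one-hot y_hard while gradients flow through the soft y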
return y_hard
def forward(self, rel):
x = self.linear_g(rel)
x = self.logsoftmax(x)
if self.training:
mask = self.st_gumbel_softmax(x)
else:
_, ind = x.detach().max(4, keepdim=True)
mask = x.detach().clone().zero_().scatter_(4, ind, 1)
mask = mask[:, :, :, :, -1]
return mask
| [
"torch.nn.Linear",
"torch.nn.functional.normalize",
"torch.nn.LogSoftmax",
"torch.nn.functional.softmax",
"torch.zeros_like"
] | 1.7.0 | gicheonkang/sglkt-visdial | b2927e8bc8e45c2d2a2a76fbf75a15f8ecb78b88 |
1.4 | import glob
import itertools
import numpy as np
import os
import pickle
import torch
from collections import defaultdict
# Project imports
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import get_inference_output_dir
def get_clean_results_dict(config_names,
configs_list,
inference_configs_list):
# Level 0 is coco validation set with no corruption, level 10 is open
# images, level 11 is open images ood
image_corruption_levels = [0, 1, 3, 5, 10, 11]
test_dataset_coco = "coco_2017_custom_val"
test_dataset_open_images = "openimages_val"
test_dataset_open_images_odd = "openimages_odd_val"
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
# Initiate dataframe dict
res_dict_clean = defaultdict(lambda: defaultdict(list))
for config_name, config, inference_config_name in zip(
config_names, configs_list, inference_configs_list):
# Setup config
args.config_file = config
args.inference_config = inference_config_name
args.test_dataset = test_dataset_coco
cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
cfg.defrost()
# Read coco dataset results
cfg.ACTUAL_TEST_DATASET = args.test_dataset
for image_corruption_level in image_corruption_levels:
# Build path to gt instances and inference output
args.image_corruption_level = image_corruption_level
if image_corruption_level == 0:
image_corruption_level = 'Val'
elif image_corruption_level == 10:
image_corruption_level = 'OpenIm'
elif image_corruption_level == 11:
image_corruption_level = 'OpenIm OOD'
else:
image_corruption_level = 'C' + str(image_corruption_level)
if 'OpenIm' not in image_corruption_level:
inference_output_dir = get_inference_output_dir(
cfg['OUTPUT_DIR'],
args.test_dataset,
args.inference_config,
args.image_corruption_level)
dictionary_file_name = glob.glob(
os.path.join(
inference_output_dir,
'probabilistic_scoring_res_averaged_*.pkl'))[0]
else:
args.image_corruption_level = 0
args.test_dataset = test_dataset_open_images if image_corruption_level == 'OpenIm' else test_dataset_open_images_odd
inference_output_dir = get_inference_output_dir(
cfg['OUTPUT_DIR'],
args.test_dataset,
args.inference_config,
args.image_corruption_level)
prob_dict_name = 'probabilistic_scoring_res_averaged_*.pkl' if image_corruption_level == 'OpenIm' else 'probabilistic_scoring_res_odd_*.pkl'
dictionary_file_name = glob.glob(
os.path.join(
inference_output_dir,
prob_dict_name))[0]
with open(dictionary_file_name, "rb") as pickle_file:
res_dict = pickle.load(pickle_file)
if image_corruption_level != 'OpenIm OOD':
# True Positives Results
res_dict_clean['True Positives']['Negative Log Likelihood (Classification)'].extend(
res_dict['true_positives_cls_analysis']['ignorance_score_mean'])
res_dict_clean['True Positives']['Brier Score'].extend(
res_dict['true_positives_cls_analysis']['brier_score_mean'])
res_dict_clean['True Positives']['Negative Log Likelihood (Regression)'].extend(
res_dict['true_positives_reg_analysis']['ignorance_score_mean'])
res_dict_clean['True Positives']['Mean Squared Error'].extend(
res_dict['true_positives_reg_analysis']['mean_squared_error'])
res_dict_clean['True Positives']['Energy Score'].extend(
res_dict['true_positives_reg_analysis']['energy_score_mean'])
res_dict_clean['True Positives']['Image Corruption Level'].extend(
[image_corruption_level] *
res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])
res_dict_clean['True Positives']['Method Name'].extend(
[config_name] * res_dict['true_positives_reg_analysis']['energy_score_mean'].shape[0])
# Duplicates Results
res_dict_clean['Duplicates']['Negative Log Likelihood (Classification)'].extend(
res_dict['duplicates_cls_analysis']['ignorance_score_mean'])
res_dict_clean['Duplicates']['Brier Score'].extend(
res_dict['duplicates_cls_analysis']['brier_score_mean'])
res_dict_clean['Duplicates']['Negative Log Likelihood (Regression)'].extend(
res_dict['duplicates_reg_analysis']['ignorance_score_mean'])
res_dict_clean['Duplicates']['Mean Squared Error'].extend(
res_dict['duplicates_reg_analysis']['mean_squared_error'])
res_dict_clean['Duplicates']['Energy Score'].extend(
res_dict['duplicates_reg_analysis']['energy_score_mean'])
res_dict_clean['Duplicates']['Image Corruption Level'].extend(
[image_corruption_level] *
res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])
res_dict_clean['Duplicates']['Method Name'].extend(
[config_name] * res_dict['duplicates_reg_analysis']['energy_score_mean'].shape[0])
# Localization Error Results
res_dict_clean['Localization Errors']['Negative Log Likelihood (Classification)'].extend(
res_dict['localization_errors_cls_analysis']['ignorance_score_mean'])
res_dict_clean['Localization Errors']['Brier Score'].extend(
res_dict['localization_errors_cls_analysis']['brier_score_mean'])
res_dict_clean['Localization Errors']['Negative Log Likelihood (Regression)'].extend(
res_dict['localization_errors_reg_analysis']['ignorance_score_mean'])
res_dict_clean['Localization Errors']['Mean Squared Error'].extend(
res_dict['localization_errors_reg_analysis']['mean_squared_error'])
res_dict_clean['Localization Errors']['Energy Score'].extend(
res_dict['localization_errors_reg_analysis']['energy_score_mean'])
res_dict_clean['Localization Errors']['Image Corruption Level'].extend(
[image_corruption_level] *
res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])
res_dict_clean['Localization Errors']['Method Name'].extend(
[config_name] *
res_dict['localization_errors_reg_analysis']['energy_score_mean'].shape[0])
# False Positives Results
res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].extend(
res_dict['false_positives_cls_analysis']['ignorance_score_mean'])
res_dict_clean['False Positives']['Brier Score'].extend(
res_dict['false_positives_cls_analysis']['brier_score_mean'])
res_dict_clean['False Positives']['Entropy'].extend(
res_dict['false_positives_reg_analysis']['total_entropy_mean'])
res_dict_clean['False Positives']['Image Corruption Level'].extend(
[image_corruption_level] *
res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])
res_dict_clean['False Positives']['Method Name'].extend(
[config_name] *
res_dict['false_positives_reg_analysis']['total_entropy_mean'].shape[0])
else:
# False Positives Results
res_dict_clean['False Positives']['Negative Log Likelihood (Classification)'].append(
res_dict['ignorance_score_mean'])
res_dict_clean['False Positives']['Brier Score'].append(
res_dict['brier_score_mean'])
res_dict_clean['False Positives']['Entropy'].append(
res_dict['total_entropy_mean'])
res_dict_clean['False Positives']['Image Corruption Level'].append(
image_corruption_level)
res_dict_clean['False Positives']['Method Name'].append(
config_name)
return res_dict_clean
def get_mAP_results(config_names,
configs_list,
inference_configs_list):
# Level 0 is coco validation set with no corruption, level 10 is open
# images, level 11 is open images ood
image_corruption_levels = [0, 1, 2, 3, 4, 5, 10]
test_dataset_coco = "coco_2017_custom_val"
test_dataset_open_images = "openimages_val"
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
# Initiate dataframe dict
mAP_results = defaultdict(list)
for config_name, config, inference_config_name in zip(
config_names, configs_list, inference_configs_list):
# Setup config
args.config_file = config
args.inference_config = inference_config_name
args.test_dataset = test_dataset_coco
cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
cfg.defrost()
# Read coco dataset results
cfg.ACTUAL_TEST_DATASET = args.test_dataset
for image_corruption_level in image_corruption_levels:
# Build path to gt instances and inference output
args.image_corruption_level = image_corruption_level
if image_corruption_level == 0:
image_corruption_level = 'Val'
elif image_corruption_level == 10:
image_corruption_level = 'OpenIm'
else:
image_corruption_level = 'C' + str(image_corruption_level)
if 'OpenIm' not in image_corruption_level:
inference_output_dir = get_inference_output_dir(
cfg['OUTPUT_DIR'],
args.test_dataset,
args.inference_config,
args.image_corruption_level)
else:
args.image_corruption_level = 0
args.test_dataset = test_dataset_open_images
inference_output_dir = get_inference_output_dir(
cfg['OUTPUT_DIR'],
args.test_dataset,
args.inference_config,
args.image_corruption_level)
text_file_name = glob.glob(
os.path.join(
inference_output_dir,
'mAP_res.txt'))[0]
with open(text_file_name, "r") as f:
mAP = f.read().strip('][\n').split(', ')[0]
mAP = float(mAP) * 100
mAP_results['Method Name'].append(config_name)
mAP_results['Image Corruption Level'].append(
image_corruption_level)
mAP_results['mAP'].append(mAP)
return mAP_results
def get_matched_results_dicts(config_names,
configs_list,
inference_configs_list,
iou_min=0.1,
iou_correct=0.5):
# Level 0 is coco validation set with no corruption, level 10 is open
# images, level 11 is open images ood
image_corruption_levels = [0, 10, 11]
test_dataset_coco = "coco_2017_custom_val"
test_dataset_open_images = "openimages_val"
test_dataset_open_images_odd = "openimages_odd_val"
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
# Initiate dataframe dict
res_dict_clean = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for config_name, config, inference_config_name in zip(
config_names, configs_list, inference_configs_list):
# Setup config
args.config_file = config
args.inference_config = inference_config_name
args.test_dataset = test_dataset_coco
cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
cfg.defrost()
# Read coco dataset results
cfg.ACTUAL_TEST_DATASET = args.test_dataset
for image_corruption_level in image_corruption_levels:
# Build path to gt instances and inference output
args.image_corruption_level = image_corruption_level
if image_corruption_level == 0:
image_corruption_level = 'Val'
elif image_corruption_level == 10:
image_corruption_level = 'OpenIm'
elif image_corruption_level == 11:
image_corruption_level = 'OpenIm OOD'
else:
image_corruption_level = 'C' + str(image_corruption_level)
if 'OpenIm' not in image_corruption_level:
inference_output_dir = get_inference_output_dir(
cfg['OUTPUT_DIR'],
args.test_dataset,
args.inference_config,
args.image_corruption_level)
# Get matched results by either generating them or loading from
# file.
dictionary_file_name = glob.glob(
os.path.join(
inference_output_dir,
"matched_results_{}_{}_*.pth".format(
iou_min,
iou_correct)))[0]
matched_results = torch.load(
dictionary_file_name, map_location='cuda')
elif image_corruption_level == 'OpenIm':
args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images  # always 'OpenIm' in this branch
inference_output_dir = get_inference_output_dir(
cfg['OUTPUT_DIR'],
args.test_dataset,
args.inference_config,
args.image_corruption_level)
dictionary_file_name = glob.glob(
os.path.join(
inference_output_dir,
"matched_results_{}_{}_*.pth".format(
iou_min,
iou_correct)))[0]
matched_results = torch.load(
dictionary_file_name, map_location='cuda')
else:
args.image_corruption_level = 0
                args.test_dataset = test_dataset_open_images_odd  # 'OpenIm OOD' branch
inference_output_dir = get_inference_output_dir(
cfg['OUTPUT_DIR'],
args.test_dataset,
args.inference_config,
args.image_corruption_level)
dictionary_file_name = glob.glob(
os.path.join(
inference_output_dir,
"preprocessed_predicted_instances_odd_*.pth"))[0]
preprocessed_predicted_instances = torch.load(
dictionary_file_name, map_location='cuda')
predicted_boxes = preprocessed_predicted_instances['predicted_boxes']
predicted_cov_mats = preprocessed_predicted_instances['predicted_covar_mats']
predicted_cls_probs = preprocessed_predicted_instances['predicted_cls_probs']
predicted_boxes = list(itertools.chain.from_iterable(
[predicted_boxes[key] for key in predicted_boxes.keys()]))
predicted_cov_mats = list(itertools.chain.from_iterable(
[predicted_cov_mats[key] for key in predicted_cov_mats.keys()]))
predicted_cls_probs = list(itertools.chain.from_iterable(
[predicted_cls_probs[key] for key in predicted_cls_probs.keys()]))
predicted_boxes = torch.stack(
predicted_boxes, 1).transpose(
0, 1)
predicted_cov_mats = torch.stack(
predicted_cov_mats, 1).transpose(0, 1)
predicted_cls_probs = torch.stack(
predicted_cls_probs,
1).transpose(
0,
1)
matched_results = {
'predicted_box_means': predicted_boxes,
'predicted_box_covariances': predicted_cov_mats,
'predicted_cls_probs': predicted_cls_probs}
if image_corruption_level != 'OpenIm OOD':
all_results_means = torch.cat(
(matched_results['true_positives']['predicted_box_means'],
matched_results['localization_errors']['predicted_box_means'],
matched_results['duplicates']['predicted_box_means'],
matched_results['false_positives']['predicted_box_means']))
all_results_covs = torch.cat(
(matched_results['true_positives']['predicted_box_covariances'],
matched_results['localization_errors']['predicted_box_covariances'],
matched_results['duplicates']['predicted_box_covariances'],
matched_results['false_positives']['predicted_box_covariances']))
all_gt_means = torch.cat(
(matched_results['true_positives']['gt_box_means'],
matched_results['localization_errors']['gt_box_means'],
matched_results['duplicates']['gt_box_means'],
matched_results['false_positives']['predicted_box_means']*np.NaN))
predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
all_results_means.to('cpu'),
all_results_covs.to('cpu') +
1e-2 *
torch.eye(all_results_covs.shape[2]).to('cpu'))
predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
'cuda')
predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
'cuda')
predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
'cuda')
predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
'cuda')
predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
'cuda')
all_entropy = predicted_multivariate_normal_dists.entropy()
all_log_prob = -predicted_multivariate_normal_dists.log_prob(all_gt_means)
# Energy Score.
sample_set = predicted_multivariate_normal_dists.sample((3,)).to('cuda')
sample_set_1 = sample_set[:-1]
sample_set_2 = sample_set[1:]
energy_score = torch.norm(
(sample_set_1 - all_gt_means),
dim=2).mean(0) - 0.5 * torch.norm(
(sample_set_1 - sample_set_2),
dim=2).mean(0)
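                # Monte-Carlo energy score with 3 samples:
                # ES ≈ mean ||X - y|| - 0.5 * mean ||X - X'||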
mse_loss = torch.nn.MSELoss(reduction='none')
mse = mse_loss(all_gt_means, all_results_means).mean(1)
res_dict_clean[config_name][image_corruption_level]['Entropy'].extend(
all_entropy.cpu().numpy())
res_dict_clean[config_name][image_corruption_level]['MSE'].extend(
mse.cpu().numpy())
res_dict_clean[config_name][image_corruption_level]['NLL'].extend(
all_log_prob.cpu().numpy())
res_dict_clean[config_name][image_corruption_level]['ED'].extend(
energy_score.cpu().numpy())
res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.cat(
(matched_results['true_positives']['iou_with_ground_truth'],
matched_results['localization_errors']['iou_with_ground_truth'][:, 0],
matched_results['duplicates']['iou_with_ground_truth'],
torch.zeros(
matched_results['false_positives']['predicted_box_means'].shape[0]).to('cuda')*np.NaN)).cpu().numpy())
predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
matched_results['false_positives']['predicted_box_means'].to('cpu'),
matched_results['false_positives']['predicted_box_covariances'].to('cpu') +
1e-2 *
torch.eye(matched_results['false_positives']['predicted_box_covariances'].shape[2]).to('cpu'))
predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
'cuda')
predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
'cuda')
predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
'cuda')
predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
'cuda')
predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
'cuda')
FP_Entropy = predicted_multivariate_normal_dists.entropy()
res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(
FP_Entropy.cpu().numpy())
predicted_cat_dists_fp = matched_results['false_positives']['predicted_cls_probs']
if predicted_cat_dists_fp.shape[1] == 80:
predicted_cat_dists_fp, _ = predicted_cat_dists_fp.max(dim=1)
predicted_cat_dists_fp = 1-predicted_cat_dists_fp
predicted_categorical_dists = torch.distributions.Bernoulli(
probs=predicted_cat_dists_fp)
else:
predicted_categorical_dists = torch.distributions.Categorical(
probs=matched_results['false_positives']['predicted_cls_probs'])
all_pred_ent = predicted_categorical_dists.entropy()
res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(
all_pred_ent.cpu().numpy())
if image_corruption_level == 'OpenIm':
res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(
torch.cat(
(matched_results['true_positives']['is_truncated'],
matched_results['localization_errors']['is_truncated'],
matched_results['duplicates']['is_truncated'],
torch.full((
matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(
torch.cat(
(matched_results['true_positives']['is_occluded'],
matched_results['localization_errors']['is_occluded'],
matched_results['duplicates']['is_occluded'],
torch.full((
matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
else:
res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(
torch.cat(
(torch.full((
matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,
torch.full((
matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,
dtype=torch.float32).to('cuda'),
torch.full((
matched_results['duplicates']['predicted_box_means'].shape[0],), -1,
dtype=torch.float32).to('cuda'),
torch.full((
matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(
torch.cat(
(torch.full((
matched_results['true_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN,
torch.full((
matched_results['localization_errors']['predicted_box_means'].shape[0],), -1,
dtype=torch.float32).to('cuda')*np.NaN,
torch.full((
matched_results['duplicates']['predicted_box_means'].shape[0],), -1,
dtype=torch.float32).to('cuda')*np.NaN,
torch.full((
matched_results['false_positives']['predicted_box_means'].shape[0],), -1, dtype=torch.float32).to('cuda')*np.NaN)).cpu().numpy())
else:
predicted_multivariate_normal_dists = torch.distributions.multivariate_normal.MultivariateNormal(
matched_results['predicted_box_means'].to('cpu'),
matched_results['predicted_box_covariances'].to('cpu') +
1e-2 *
torch.eye(matched_results['predicted_box_covariances'].shape[2]).to('cpu'))
predicted_multivariate_normal_dists.loc = predicted_multivariate_normal_dists.loc.to(
'cuda')
predicted_multivariate_normal_dists.scale_tril = predicted_multivariate_normal_dists.scale_tril.to(
'cuda')
predicted_multivariate_normal_dists._unbroadcasted_scale_tril = predicted_multivariate_normal_dists._unbroadcasted_scale_tril.to(
'cuda')
predicted_multivariate_normal_dists.covariance_matrix = predicted_multivariate_normal_dists.covariance_matrix.to(
'cuda')
predicted_multivariate_normal_dists.precision_matrix = predicted_multivariate_normal_dists.precision_matrix.to(
'cuda')
all_entropy = predicted_multivariate_normal_dists.entropy()
res_dict_clean[config_name][image_corruption_level]['FP_Entropy'].extend(
all_entropy.cpu().numpy())
res_dict_clean[config_name][image_corruption_level]['IOU With GT'].extend(torch.zeros(
matched_results['predicted_box_means'].shape[0]).cpu().numpy())
res_dict_clean[config_name][image_corruption_level]['Truncated'].extend(torch.full((
matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)
res_dict_clean[config_name][image_corruption_level]['Occluded'].extend(torch.full((
matched_results['predicted_box_means'].shape[0],), -1, dtype=torch.float32).cpu().numpy()*np.NaN)
all_results_cat = matched_results['predicted_cls_probs']
if all_results_cat.shape[1] == 80:
predicted_cat_dists_fp, _ = all_results_cat.max(dim=1)
predicted_cat_dists_fp = 1-predicted_cat_dists_fp
predicted_categorical_dists = torch.distributions.Bernoulli(
probs=predicted_cat_dists_fp)
else:
predicted_categorical_dists = torch.distributions.Categorical(
probs=all_results_cat)
all_pred_ent = predicted_categorical_dists.entropy()
res_dict_clean[config_name][image_corruption_level]['Cat_Entropy'].extend(
all_pred_ent.cpu().numpy())
return res_dict_clean
def mean_reject_outliers(x, outlierConstant=1.5):
a = np.array(x)
upper_quartile = np.percentile(a, 75)
lower_quartile = np.percentile(a, 25)
IQR = (upper_quartile - lower_quartile) * outlierConstant
quartileSet = (lower_quartile - IQR, upper_quartile + IQR)
result = a[np.where((a >= quartileSet[0]) & (a <= quartileSet[1]))]
return np.nanmean(result)
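# Quick sanity check (numpy's default linear percentile interpolation assumed):
#   mean_reject_outliers([0.9, 1.0, 1.1, 50.0]) -> 1.0, since 50.0 falls
#   outside upper_quartile + 1.5 * IQR and is rejected before averaging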
| [
"torch.zeros",
"torch.cat",
"torch.distributions.Categorical",
"torch.nn.MSELoss",
"torch.stack",
"torch.norm",
"torch.distributions.Bernoulli",
"torch.eye",
"torch.full",
"torch.load"
] | 1.4.0 | jskhu/probdet-1 | b8bda3bd7cdd573aa9f70a62453d147664211af6 |
1.3 | import os
import skimage
import torch
from warnings import warn
from .model_io import get_model
from .transform import process_aug_dict
from .datagen import InferenceTiler
from ..raster.image import stitch_images
from ..utils.core import get_data_paths
class Inferer(object):
"""Object for training `solaris` models using PyTorch or Keras."""
def __init__(self, config, custom_model_dict=None):
self.config = config
self.batch_size = self.config['batch_size']
self.framework = self.config['nn_framework']
self.model_name = self.config['model_name']
# check if the model was trained as part of the same pipeline; if so,
# use the output from that. If not, use the pre-trained model directly.
print("Inferer config", self.config)
if self.config['train']:
warn('Because the configuration specifies both training and '
'inference, solaris is switching the model weights path '
'to the training output path.')
self.model_path = self.config['training']['model_dest_path']
if custom_model_dict is not None:
custom_model_dict['weight_path'] = self.config[
'training']['model_dest_path']
else:
self.model_path = self.config.get('model_path', None)
self.model = get_model(self.model_name, self.framework,
self.model_path, pretrained=True,
custom_model_dict=custom_model_dict)
self.window_step_x = self.config['inference'].get('window_step_size_x',
None)
self.window_step_y = self.config['inference'].get('window_step_size_y',
None)
if self.window_step_x is None:
self.window_step_x = self.config['data_specs']['width']
if self.window_step_y is None:
self.window_step_y = self.config['data_specs']['height']
self.stitching_method = self.config['inference'].get(
'stitching_method', 'average')
self.output_dir = self.config['inference']['output_dir']
if not os.path.isdir(self.output_dir):
os.makedirs(self.output_dir)
def __call__(self, infer_df=None):
"""Run inference.
Arguments
---------
infer_df : :class:`pandas.DataFrame` or `str`
A :class:`pandas.DataFrame` with a column, ``'image'``, specifying
paths to images for inference. Alternatively, `infer_df` can be a
path to a CSV file containing the same information. Defaults to
``None``, in which case the file path specified in the Inferer's
configuration dict is used.
"""
if infer_df is None:
infer_df = get_infer_df(self.config)
inf_tiler = InferenceTiler(
self.framework,
width=self.config['data_specs']['width'],
height=self.config['data_specs']['height'],
x_step=self.window_step_x,
y_step=self.window_step_y,
augmentations=process_aug_dict(
self.config['inference_augmentation'])
)
# check if final image was already processed...if so, assume the whole batch finished
fin = len(infer_df['image'])
im_path = infer_df['image'][fin-1]
outpath = os.path.join(self.output_dir, os.path.split(im_path)[1])
print("Checking for last %s" % outpath )
if os.path.exists(outpath):
print("file exists %s. assuming entire batch finished." % outpath )
return
for idx, im_path in enumerate(infer_df['image']):
print("processing %d/%d, %s" % (idx,len(infer_df['image']), im_path ) )
outpath = os.path.join(self.output_dir, os.path.split(im_path)[1])
if os.path.exists(outpath):
print("file exists %s" % outpath )
continue
inf_input, idx_refs, (
src_im_height, src_im_width) = inf_tiler(im_path)
if self.framework == 'keras':
subarr_preds = self.model.predict(inf_input,
batch_size=self.batch_size)
elif self.framework in ['torch', 'pytorch']:
with torch.no_grad():
self.model.eval()
if torch.cuda.is_available():
device = torch.device('cuda')
self.model = self.model.cuda()
else:
device = torch.device('cpu')
inf_input = torch.from_numpy(inf_input).float().to(device)
# add additional input data, if applicable
if self.config['data_specs'].get('additional_inputs',
None) is not None:
inf_input = [inf_input]
for i in self.config['data_specs']['additional_inputs']:
inf_input.append(
infer_df[i].iloc[idx].to(device))
subarr_preds = self.model(inf_input)
subarr_preds = subarr_preds.cpu().data.numpy()
stitched_result = stitch_images(subarr_preds,
idx_refs=idx_refs,
out_width=src_im_width,
out_height=src_im_height,
method=self.stitching_method)
skimage.io.imsave(os.path.join(self.output_dir,
os.path.split(im_path)[1]),
stitched_result)
def get_infer_df(config):
"""Get the inference df based on the contents of ``config`` .
This function uses the logic described in the documentation for the config
file to determine where to find images to be used for inference.
See the docs and the comments in solaris/data/config_skeleton.yml for
details.
Arguments
---------
config : dict
The loaded configuration dict for model training and/or inference.
Returns
-------
infer_df : :class:`dict`
:class:`dict` containing at least one column: ``'image'`` . The values
in this column correspond to the path to filenames to perform inference
on.
"""
infer_df = get_data_paths(config['inference_data_csv'], infer=True)
return infer_df
| [
"torch.device",
"torch.no_grad",
"torch.cuda.is_available",
"torch.from_numpy"
] | 1.3.1 | fractalsproject/solaris | ac1facb1daa661ddf6ab1ff13dba36ff88ef1c0f |
1.5 | import collections
import logging
import torch
import copy
import random
import numpy as np
from sklearn.svm import NuSVR
from sklearn.linear_model import BayesianRidge
from sklearn.ensemble import RandomForestRegressor
import time
from sklearn.model_selection import cross_val_score, train_test_split
from scipy import stats
from naslib.optimizers.core.metaclasses import MetaOptimizer
from naslib.search_spaces.core.query_metrics import Metric
from naslib.search_spaces.nasbench201.graph import NasBench201SearchSpace
from naslib.utils.utils import AttrDict, count_parameters_in_MB
from naslib.utils.logging import log_every_n_seconds
logger = logging.getLogger(__name__)
def loguniform(low=0, high=1, size=None):
return np.exp(np.random.uniform(np.log(low), np.log(high), size))
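# e.g. loguniform(1e-5, 10, 1000) draws 1000 values spread uniformly in
# log-space over [1e-5, 10]; used below to sample SVR's C and gamma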
class LS_SVR(MetaOptimizer):
# training the models is not implemented
using_step_function = False
def __init__(self, config,
metric=Metric.VAL_ACCURACY,
all_curve=True,
model_name='svr',
best_hyper=None,
n_hypers=1000):
super().__init__()
self.n_hypers = n_hypers
self.all_curve = all_curve
self.model_name = model_name
self.best_hyper = best_hyper
self.name = 'ls-svr'
self.metric=metric
self.info = []
self.y_train = []
self.fidelity = config.search.single_fidelity
if config.search_space == 'nasbench101':
self.extrapolation = config.search.fidelity
self.top_n_percent = 0.2
elif config.search_space in ['nasbench201', 'nasbench211']:
self.extrapolation = config.search.fidelity // 2
self.top_n_percent = 0.5
elif config.search_space == 'darts':
self.extrapolation = config.search.fidelity // 2
self.top_n_percent = 0.2
elif config.search_space == 'nlp':
self.extrapolation = config.search.fidelity
self.top_n_percent = 0.2
else:
            raise NotImplementedError('{} is not implemented yet'.format(config.search_space))
self.train_svr = True
self.config = config
self.epochs = config.search.epochs
self.performance_metric = metric
self.dataset = config.dataset
self.num_init = config.search.num_init
self.nbhd = []
self.chosen = None
self.best_arch = None
self.history = torch.nn.ModuleList()
def adapt_search_space(self, search_space, scope=None, dataset_api=None):
assert search_space.QUERYABLE, "Local search is currently only implemented for benchmarks."
self.search_space = search_space.clone()
self.scope = scope if scope else search_space.OPTIMIZER_SCOPE
self.dataset_api = dataset_api
def collate_inputs(self, VC_all_archs_list, AP_all_archs_list):
"""
Args:
VC_all_archs_list: a list of validation accuracy curves for all archs
AP_all_archs_list: a list of architecture features for all archs
Returns:
            X: a collated array of all input information used by the extrapolation model
"""
VC = np.vstack(VC_all_archs_list) # dimension: n_archs x n_epochs
DVC = np.diff(VC, n=1, axis=1)
DDVC = np.diff(DVC, n=1, axis=1)
mVC = np.mean(VC, axis=1)[:, None]
stdVC = np.std(VC, axis=1)[:, None]
mDVC = np.mean(DVC, axis=1)[:, None]
stdDVC = np.std(DVC, axis=1)[:, None]
mDDVC = np.mean(DDVC, axis=1)[:, None]
stdDDVC = np.std(DDVC, axis=1)[:, None]
if self.all_curve:
TS_list = [VC, DVC, DDVC, mVC, stdVC]
else:
TS_list = [mVC, stdVC, mDVC, stdDVC, mDDVC, stdDDVC]
if self.metric == Metric.TRAIN_LOSS:
sumVC = np.sum(VC, axis=1)[:, None]
TS_list += [sumVC]
TS = np.hstack(TS_list)
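        # for curves of length T with all_curve=True this gives
        # T + (T-1) + (T-2) + 2 features per arch (one more for TRAIN_LOSS)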
if len(AP_all_archs_list) != 0:
AP = np.vstack(AP_all_archs_list)
X = np.hstack([AP, TS])
else:
X = TS
return X
def get_data_reqs(self):
"""
Returns a dictionary with info about whether the predictor needs
extra info to train/query.
"""
reqs = {'requires_partial_lc':True,
'metric':self.metric,
'requires_hyperparameters':True,
'hyperparams':['flops', 'latency', 'params']
}
return reqs
def prepare_data(self, info):
# todo: this can be added at the top of collate_inputs
val_acc_curve = []
arch_params = []
for i in range(len(info)):
acc_metric = info[i]
val_acc_curve.append(acc_metric)
return self.collate_inputs(val_acc_curve, arch_params)
def fit(self, ytrain, info, learn_hyper=True):
# prepare training data
xtrain_data = self.prepare_data(info) # dimension: n_archs x n_epochs
y_train = np.array(ytrain)
# learn hyperparameters of the extrapolator by cross validation
if self.best_hyper is None or learn_hyper:
# specify model hyper-parameters
if self.model_name == 'svr':
C = loguniform(1e-5, 10, self.n_hypers)
nu = np.random.uniform(0, 1, self.n_hypers)
gamma = loguniform(1e-5, 10, self.n_hypers)
hyper = np.vstack([C, nu, gamma]).T
elif self.model_name == 'blr':
alpha_1 = np.random.uniform(1e-7, 1e-5, self.n_hypers)
alpha_2 = np.random.uniform(1e-7, 1e-5, self.n_hypers)
lambda_1 = np.random.uniform(1e-7, 1e-5, self.n_hypers)
lambda_2 = np.random.uniform(1e-7, 1e-5, self.n_hypers)
hyper = np.vstack([alpha_1, alpha_2, lambda_1, lambda_2]).T
elif self.model_name == 'rf':
n_trees = np.random.randint(10, 800, self.n_hypers)
frac_feature = np.random.uniform(0.1, 0.5, self.n_hypers)
hyper = np.vstack([n_trees, frac_feature]).T
print(f'start CV on {self.model_name}')
mean_score_list = []
t_start = time.time()
for i in range(self.n_hypers):
# define model
if self.model_name == 'svr':
model = NuSVR(C=hyper[i, 0], nu=hyper[i, 1], gamma=hyper[i, 2], kernel='rbf')
# model = SVR(C=hyper[i, 0], nu=hyper[i, 1], gamma= ,kernel='linear')
elif self.model_name == 'blr':
model = BayesianRidge(alpha_1=hyper[i, 0], alpha_2=hyper[i, 1],
lambda_1=hyper[i, 2], lambda_2=hyper[i, 3])
elif self.model_name == 'rf':
model = RandomForestRegressor(n_estimators=int(hyper[i, 0]), max_features=hyper[i, 1])
# perform cross validation to learn the best hyper value
scores = cross_val_score(model, xtrain_data, y_train, cv=3)
mean_scores = np.mean(scores)
mean_score_list.append(mean_scores)
                # print(f'hyper={hyper[i]}, score={mean_scores}')
t_end = time.time()
best_hyper_idx = np.argmax(mean_score_list)
best_hyper = hyper[best_hyper_idx]
max_score = np.max(mean_score_list)
time_taken = t_end - t_start
print(f'{self.model_name}'
f'best_hyper={best_hyper}, score={max_score}, time={time_taken}')
self.best_hyper = best_hyper
# fit the extrapolator with the best hyperparameters to the training data
if self.model_name == 'svr':
best_model = NuSVR(C=self.best_hyper[0], nu=self.best_hyper[1], gamma=self.best_hyper[2], kernel='rbf')
# model = SVR(C=hyper[i, 0], nu=hyper[i, 1], gamma= ,kernel='linear')
elif self.model_name == 'blr':
best_model = BayesianRidge(alpha_1=self.best_hyper[0], alpha_2=self.best_hyper[1],
lambda_1=self.best_hyper[2], lambda_2=self.best_hyper[3])
elif self.model_name == 'rf':
best_model = RandomForestRegressor(n_estimators=int(self.best_hyper[0]), max_features=self.best_hyper[1])
best_model.fit(xtrain_data, y_train)
self.best_model = best_model
def query(self, info):
data = self.prepare_data(info)
pred_on_test_set = self.best_model.predict(data)
return pred_on_test_set
def new_epoch(self, epoch):
if epoch < self.num_init:
# randomly sample initial architectures
model = torch.nn.Module() # hacky way to get arch and accuracy checkpointable
model.arch = self.search_space.clone()
model.arch.sample_random_architecture(dataset_api=self.dataset_api)
model.epoch = model.arch.get_max_epochs()
model.full_lc = model.arch.query(self.performance_metric,
self.dataset,
epoch=model.epoch+1,
dataset_api=self.dataset_api,
full_lc=True)
model.accuracy = model.full_lc[-1]
self.info.append(model.full_lc[:self.fidelity])
self.y_train.append(model.full_lc[self.extrapolation])
if not self.best_arch or model.accuracy > self.best_arch.accuracy:
self.best_arch = model
self._update_history(model)
else:
if len(self.nbhd) == 0 and self.chosen and self.best_arch.accuracy <= self.chosen.accuracy:
logger.info('Reached local minimum. Starting from new random architecture.')
model = torch.nn.Module() # hacky way to get arch and accuracy checkpointable
model.arch = self.search_space.clone()
model.arch.sample_random_architecture(dataset_api=self.dataset_api)
model.epoch = model.arch.get_max_epochs()
model.full_lc = model.arch.query(self.performance_metric,
self.dataset,
epoch=model.epoch + 1,
dataset_api=self.dataset_api,
full_lc=True)
model.accuracy = model.full_lc[-1]
self.info.append(model.full_lc[:self.fidelity])
self.y_train.append(model.full_lc[self.extrapolation])
self.train_svr = True
self.chosen = model
self.best_arch = model
self.nbhd = self.chosen.arch.get_nbhd(dataset_api=self.dataset_api)
else:
if len(self.nbhd) == 0:
logger.info('Start a new iteration. Pick the best architecture and evaluate its neighbors.')
if self.train_svr:
self.fit(self.y_train, self.info)
self.train_svr = False
self.chosen = self.best_arch
self.nbhd = self.chosen.arch.get_nbhd(dataset_api=self.dataset_api)
model = self.nbhd.pop()
model.epoch = self.fidelity
model.partial_lc = model.arch.query(self.performance_metric,
self.dataset,
epoch=model.epoch,
dataset_api=self.dataset_api,
full_lc=True)
model.accuracy = model.partial_lc[-1]
prediction = self.query(np.array(model.partial_lc).reshape(1, -1))
topk = np.sort(np.array(self.y_train))[-int(len(self.y_train) * self.top_n_percent):]
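                # only architectures whose extrapolated accuracy reaches the
                # current top-n percent of observed targets get full training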
if prediction > min(topk):
model.epoch = model.arch.get_max_epochs()
model.full_lc = model.arch.query(self.performance_metric,
self.dataset,
epoch=model.epoch+1,
dataset_api=self.dataset_api,
full_lc=True)
self.info.append(model.full_lc[:self.fidelity])
self.y_train.append(model.full_lc[self.extrapolation])
self.train_svr = True
model.accuracy = model.full_lc[-1]
if model.accuracy > self.best_arch.accuracy:
self.best_arch = model
logger.info('Found new best architecture.')
self._update_history(model)
def _update_history(self, child):
self.history.append(child)
def train_statistics(self):
best_arch, best_arch_epoch = self.get_final_architecture()
latest_arch, latest_arch_epoch = self.get_latest_architecture()
return (
best_arch.query(Metric.TRAIN_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch-1),
best_arch.query(Metric.VAL_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch),
best_arch.query(Metric.TEST_ACCURACY, self.dataset, dataset_api=self.dataset_api, epoch=best_arch_epoch),
latest_arch.query(Metric.TRAIN_TIME, self.dataset, dataset_api=self.dataset_api, epoch=latest_arch_epoch),
)
def test_statistics(self):
best_arch, epoch = self.get_final_architecture()
return best_arch.query(Metric.RAW, self.dataset, dataset_api=self.dataset_api, epoch=epoch)
def get_final_architecture(self):
# Returns the sampled architecture with the lowest validation error.
best_arch = max(self.history, key=lambda x: x.accuracy)
return best_arch.arch, best_arch.epoch
def get_latest_architecture(self):
# Returns the architecture from the most recent epoch
latest_arch = self.history[-1]
return latest_arch.arch, latest_arch.epoch
def get_op_optimizer(self):
raise NotImplementedError()
def get_checkpointables(self):
return {'model': self.history}
def get_model_size(self):
return count_parameters_in_MB(self.history) | [
"torch.nn.ModuleList",
"torch.nn.Module"
] | 1.5.0 | automl/nas-bench-x11 | ebf64ce3c30cc2ad0909508b5e25652011179956 |
1.4 | import os
import gym
import torch
import pprint
import argparse
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from tianshou.utils import BasicLogger
from tianshou.env import DummyVectorEnv
from tianshou.utils.net.common import Net
from tianshou.data import Collector, VectorReplayBuffer
from tianshou.utils.net.discrete import Actor, Critic
from tianshou.policy import A2CPolicy, ImitationPolicy
from tianshou.trainer import onpolicy_trainer, offpolicy_trainer
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='CartPole-v0')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--buffer-size', type=int, default=20000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--il-lr', type=float, default=1e-3)
parser.add_argument('--gamma', type=float, default=0.9)
parser.add_argument('--epoch', type=int, default=10)
parser.add_argument('--step-per-epoch', type=int, default=50000)
parser.add_argument('--il-step-per-epoch', type=int, default=1000)
parser.add_argument('--episode-per-collect', type=int, default=16)
parser.add_argument('--step-per-collect', type=int, default=16)
parser.add_argument('--update-per-step', type=float, default=1 / 16)
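    # update_per_step=1/16 with step_per_collect=16 works out to roughly one
    # gradient update per collect cycle in the off-policy imitation stage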
parser.add_argument('--repeat-per-collect', type=int, default=1)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--hidden-sizes', type=int,
nargs='*', default=[64, 64])
parser.add_argument('--imitation-hidden-sizes', type=int,
nargs='*', default=[128])
parser.add_argument('--training-num', type=int, default=16)
parser.add_argument('--test-num', type=int, default=100)
parser.add_argument('--logdir', type=str, default='log')
parser.add_argument('--render', type=float, default=0.)
parser.add_argument(
'--device', type=str,
default='cuda' if torch.cuda.is_available() else 'cpu')
# a2c special
parser.add_argument('--vf-coef', type=float, default=0.5)
parser.add_argument('--ent-coef', type=float, default=0.0)
parser.add_argument('--max-grad-norm', type=float, default=None)
parser.add_argument('--gae-lambda', type=float, default=1.)
parser.add_argument('--rew-norm', action="store_true", default=False)
args = parser.parse_known_args()[0]
return args
def test_a2c_with_il(args=get_args()):
torch.set_num_threads(1) # for poor CPU
env = gym.make(args.task)
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.action_space.shape or env.action_space.n
# you can also use tianshou.env.SubprocVectorEnv
# train_envs = gym.make(args.task)
train_envs = DummyVectorEnv(
[lambda: gym.make(args.task) for _ in range(args.training_num)])
# test_envs = gym.make(args.task)
test_envs = DummyVectorEnv(
[lambda: gym.make(args.task) for _ in range(args.test_num)])
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_envs.seed(args.seed)
test_envs.seed(args.seed)
# model
net = Net(args.state_shape, hidden_sizes=args.hidden_sizes,
device=args.device)
actor = Actor(net, args.action_shape, device=args.device).to(args.device)
critic = Critic(net, device=args.device).to(args.device)
optim = torch.optim.Adam(set(
actor.parameters()).union(critic.parameters()), lr=args.lr)
dist = torch.distributions.Categorical
policy = A2CPolicy(
actor, critic, optim, dist,
discount_factor=args.gamma, gae_lambda=args.gae_lambda,
vf_coef=args.vf_coef, ent_coef=args.ent_coef,
max_grad_norm=args.max_grad_norm, reward_normalization=args.rew_norm,
action_space=env.action_space)
# collector
train_collector = Collector(
policy, train_envs,
VectorReplayBuffer(args.buffer_size, len(train_envs)),
exploration_noise=True)
test_collector = Collector(policy, test_envs)
# log
log_path = os.path.join(args.logdir, args.task, 'a2c')
writer = SummaryWriter(log_path)
logger = BasicLogger(writer)
def save_fn(policy):
torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))
def stop_fn(mean_rewards):
return mean_rewards >= env.spec.reward_threshold
# trainer
result = onpolicy_trainer(
policy, train_collector, test_collector, args.epoch,
args.step_per_epoch, args.repeat_per_collect, args.test_num, args.batch_size,
episode_per_collect=args.episode_per_collect, stop_fn=stop_fn, save_fn=save_fn,
logger=logger)
assert stop_fn(result['best_reward'])
if __name__ == '__main__':
pprint.pprint(result)
# Let's watch its performance!
env = gym.make(args.task)
policy.eval()
collector = Collector(policy, env)
result = collector.collect(n_episode=1, render=args.render)
rews, lens = result["rews"], result["lens"]
print(f"Final reward: {rews.mean()}, length: {lens.mean()}")
policy.eval()
# here we define an imitation collector with a trivial policy
if args.task == 'CartPole-v0':
env.spec.reward_threshold = 190 # lower the goal
net = Net(args.state_shape, hidden_sizes=args.hidden_sizes,
device=args.device)
net = Actor(net, args.action_shape, device=args.device).to(args.device)
optim = torch.optim.Adam(net.parameters(), lr=args.il_lr)
il_policy = ImitationPolicy(net, optim, mode='discrete')
il_test_collector = Collector(
il_policy,
DummyVectorEnv([lambda: gym.make(args.task) for _ in range(args.test_num)])
)
train_collector.reset()
result = offpolicy_trainer(
il_policy, train_collector, il_test_collector, args.epoch,
args.il_step_per_epoch, args.step_per_collect, args.test_num,
args.batch_size, stop_fn=stop_fn, save_fn=save_fn, logger=logger)
assert stop_fn(result['best_reward'])
if __name__ == '__main__':
pprint.pprint(result)
# Let's watch its performance!
env = gym.make(args.task)
il_policy.eval()
collector = Collector(il_policy, env)
result = collector.collect(n_episode=1, render=args.render)
rews, lens = result["rews"], result["lens"]
print(f"Final reward: {rews.mean()}, length: {lens.mean()}")
if __name__ == '__main__':
test_a2c_with_il()
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"torch.set_num_threads"
] | 1.4.0 | ultmaster/tianshou | 3ac67d9974b6bd3e3d7feac7738ca6de33b317c7 |
1.4 | #!/usr/bin/env python3
import os
import gym
import torch
import datetime
import argparse
import numpy as np
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.tensorboard import SummaryWriter
from torch.distributions import Independent, Normal
from tianshou.policy import PGPolicy
from tianshou.utils import BasicLogger
from tianshou.env import SubprocVectorEnv
from tianshou.utils.net.common import Net
from tianshou.trainer import onpolicy_trainer
from tianshou.utils.net.continuous import ActorProb
from tianshou.data import Collector, ReplayBuffer, VectorReplayBuffer
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='HalfCheetah-v3')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--buffer-size', type=int, default=4096)
parser.add_argument('--hidden-sizes', type=int, nargs='*', default=[64, 64])
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--epoch', type=int, default=100)
parser.add_argument('--step-per-epoch', type=int, default=30000)
parser.add_argument('--step-per-collect', type=int, default=2048)
parser.add_argument('--repeat-per-collect', type=int, default=1)
    # batch-size >> step-per-collect means calculating all data in one single forward pass.
parser.add_argument('--batch-size', type=int, default=99999)
parser.add_argument('--training-num', type=int, default=64)
parser.add_argument('--test-num', type=int, default=10)
parser.add_argument('--logdir', type=str, default='log')
parser.add_argument('--render', type=float, default=0.)
parser.add_argument(
'--device', type=str,
default='cuda' if torch.cuda.is_available() else 'cpu')
parser.add_argument('--resume-path', type=str, default=None)
# reinforce special
parser.add_argument('--rew-norm', type=int, default=True)
# "clip" option also works well.
parser.add_argument('--action-bound-method', type=str, default="tanh")
parser.add_argument('--lr-decay', type=int, default=True)
return parser.parse_args()
def test_reinforce(args=get_args()):
env = gym.make(args.task)
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.action_space.shape or env.action_space.n
args.max_action = env.action_space.high[0]
print("Observations shape:", args.state_shape)
print("Actions shape:", args.action_shape)
print("Action range:", np.min(env.action_space.low),
np.max(env.action_space.high))
# train_envs = gym.make(args.task)
train_envs = SubprocVectorEnv(
[lambda: gym.make(args.task) for _ in range(args.training_num)],
norm_obs=True)
# test_envs = gym.make(args.task)
test_envs = SubprocVectorEnv(
[lambda: gym.make(args.task) for _ in range(args.test_num)],
norm_obs=True, obs_rms=train_envs.obs_rms, update_obs_rms=False)
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_envs.seed(args.seed)
test_envs.seed(args.seed)
# model
net_a = Net(args.state_shape, hidden_sizes=args.hidden_sizes,
activation=nn.Tanh, device=args.device)
actor = ActorProb(net_a, args.action_shape, max_action=args.max_action,
unbounded=True, device=args.device).to(args.device)
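    # sigma_param is the (state-independent) log-std of the Gaussian policy;
    # -0.5 gives a moderate initial exploration std (exp(-0.5) ~= 0.61)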
torch.nn.init.constant_(actor.sigma_param, -0.5)
for m in actor.modules():
if isinstance(m, torch.nn.Linear):
# orthogonal initialization
torch.nn.init.orthogonal_(m.weight, gain=np.sqrt(2))
torch.nn.init.zeros_(m.bias)
    # do last policy layer scaling: this makes initial actions have (close to)
    # zero mean and std, which helps boost performance;
    # see https://arxiv.org/abs/2006.05990, Fig. 24 for details
for m in actor.mu.modules():
if isinstance(m, torch.nn.Linear):
torch.nn.init.zeros_(m.bias)
m.weight.data.copy_(0.01 * m.weight.data)
optim = torch.optim.Adam(actor.parameters(), lr=args.lr)
lr_scheduler = None
if args.lr_decay:
# decay learning rate to 0 linearly
max_update_num = np.ceil(
args.step_per_epoch / args.step_per_collect) * args.epoch
lr_scheduler = LambdaLR(
optim, lr_lambda=lambda epoch: 1 - epoch / max_update_num)
def dist(*logits):
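        # wrap Normal in Independent so log-probs sum over the action dimension
        # (i.e., a diagonal Gaussian policy)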
return Independent(Normal(*logits), 1)
policy = PGPolicy(actor, optim, dist, discount_factor=args.gamma,
reward_normalization=args.rew_norm, action_scaling=True,
action_bound_method=args.action_bound_method,
lr_scheduler=lr_scheduler, action_space=env.action_space)
# collector
if args.training_num > 1:
buffer = VectorReplayBuffer(args.buffer_size, len(train_envs))
else:
buffer = ReplayBuffer(args.buffer_size)
train_collector = Collector(policy, train_envs, buffer, exploration_noise=True)
test_collector = Collector(policy, test_envs)
# log
t0 = datetime.datetime.now().strftime("%m%d_%H%M%S")
log_file = f'seed_{args.seed}_{t0}-{args.task.replace("-", "_")}_reinforce'
log_path = os.path.join(args.logdir, args.task, 'reinforce', log_file)
writer = SummaryWriter(log_path)
writer.add_text("args", str(args))
logger = BasicLogger(writer, update_interval=10)
def save_fn(policy):
torch.save(policy.state_dict(), os.path.join(log_path, 'policy.pth'))
# trainer
result = onpolicy_trainer(
policy, train_collector, test_collector, args.epoch, args.step_per_epoch,
args.repeat_per_collect, args.test_num, args.batch_size,
step_per_collect=args.step_per_collect, save_fn=save_fn, logger=logger,
test_in_train=False)
# Let's watch its performance!
policy.eval()
test_envs.seed(args.seed)
test_collector.reset()
result = test_collector.collect(n_episode=args.test_num, render=args.render)
print(f'Final reward: {result["rews"].mean()}, length: {result["lens"].mean()}')
if __name__ == '__main__':
test_reinforce()
| [
"torch.nn.init.constant_",
"torch.distributions.Normal",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.init.zeros_",
"torch.utils.tensorboard.SummaryWriter"
] | 1.4.0 | ultmaster/tianshou | 3ac67d9974b6bd3e3d7feac7738ca6de33b317c7 |
1.4 | import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torch.utils.data
import torch.utils.data.distributed
import numpy as np
import pdb
kernel_size = 5
class Discriminator(nn.Module):
"""
Known to work well as a GAN discriminator
"""
def __init__(self, num_classes=1, args=None):
super().__init__()
#self.embed_size = 1
#s0 = self.s0 = args.smallest_res
nf = self.nf = 64 #args.ndf
#nf_max = self.nf_max = args.ndf_max
# Submodules
nlayers = 1
self.nf0 = nf * 2**nlayers
blocks = [
ResnetBlock(nf, nf),
ResnetBlock(nf, nf),
#ResnetBlock(nf, nf),
]
for i in range(nlayers):
nf0 = nf * 2**i
nf1 = nf * 2**(i+1)
blocks += [
#nn.AvgPool2d(2, stride=2, padding=0),
nn.MaxPool2d(4, stride=4, padding=0),
ResnetBlock(nf0, nf1),
ResnetBlock(nf1, nf1),
#ResnetBlock(nf1, nf1),
]
# Initial up-channeling conv
self.conv_img = nn.Conv2d(3, 1*nf, kernel_size=kernel_size, padding=kernel_size//2)
self.resnet = nn.Sequential(*blocks)
# Final stage is standard avg-pool followed by linear
self.pool_max = nn.MaxPool2d(4, stride=4, padding=0)
self.pool = nn.AdaptiveAvgPool2d((1,1))
self.fc = nn.Linear(self.nf0, num_classes)
self.norm = nn.InstanceNorm2d(3, affine=False, eps=0.0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
batch_size = x.size(0)
out = x
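        # instance-normalize the 3 input channels before the up-channeling conv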
out = self.norm(out)
#pdb.set_trace()
out = self.conv_img(out)
out = self.resnet(out)
out = self.pool_max(out)
out = self.pool(out)
out = out.view(batch_size, self.nf0)
out = self.fc(actvn(out))
return out
class ResnetBlock(nn.Module):
def __init__(self, fin, fout, fhidden=None):
super().__init__()
# Attributes
self.learned_shortcut = (fin != fout)
self.fin = fin
self.fout = fout
if fhidden is None:
self.fhidden = min(fin, fout)
else:
self.fhidden = fhidden
# Submodules
self.norm_0 = nn.GroupNorm(self.fin//32, self.fin)
self.conv_0 = nn.Conv2d(self.fin, self.fhidden,
kernel_size, stride=1, padding=kernel_size//2, bias=False)
self.norm_1 = nn.GroupNorm(self.fhidden//32, self.fhidden)
self.conv_1 = nn.Conv2d(self.fhidden, self.fout,
kernel_size, stride=1, padding=kernel_size//2, bias=False)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(self.fin, self.fout,
1, stride=1, padding=0, bias=False)
def forward(self, x):
x_s = self._shortcut(x)
dx = self.conv_0(actvn(self.norm_0(x)))
dx = self.conv_1(actvn(self.norm_1(dx)))
out = x_s + dx
return out
def _shortcut(self, x):
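        # project with a 1x1 conv on the skip path only when input/output channel counts differ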
if self.learned_shortcut:
x_s = self.conv_s(x)
else:
x_s = x
return x_s
def actvn(x):
return F.relu(x)
#return F.leaky_relu(x, 2e-1)
| [
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.init.kaiming_normal_",
"torch.nn.GroupNorm",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"torch.nn.functional.relu"
] | 1.4.0 | sbhadra2020/fastMRI | a2b25fed53621c5d5c648993af13971b2d365fc3 |
1.7 | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multiple choice model."""
import torch
from megatron import get_args, print_rank_last
from megatron import mpu
from megatron.model.bert_model import bert_attention_mask_func, bert_extended_attention_mask, bert_position_ids
from megatron.model.language_model import get_language_model
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
class MultipleChoiceBase(MegatronModule):
def __init__(self, num_tokentypes=2):
super(MultipleChoiceBase, self).__init__(share_word_embeddings=False)
args = get_args()
init_method = init_method_normal(args.init_method_std)
self.language_model, self._language_model_key = get_language_model(
attention_mask_func=bert_attention_mask_func,
num_tokentypes=num_tokentypes,
add_pooler=True,
init_method=init_method,
scaled_init_method=scaled_init_method_normal(args.init_method_std,
args.num_layers))
# Multi-choice head.
if mpu.is_pipeline_last_stage():
self.multichoice_dropout = torch.nn.Dropout(args.hidden_dropout)
self.multichoice_head = get_linear_layer(args.hidden_size, 1,
init_method)
self._multichoice_head_key = 'multichoice_head'
def forward(self, model_input, attention_mask, tokentype_ids=None):
# [batch, choices, sequence] --> [batch * choices, sequence] -->
# transformer --> [batch, choices] --> softmax
# Ensure the shape is [batch-size, choices, sequence]
assert len(attention_mask.shape) == 3
num_choices = attention_mask.shape[1]
# Reshape and treat choice dimension the same as batch.
attention_mask = attention_mask.view(-1, attention_mask.size(-1))
extended_attention_mask = bert_extended_attention_mask(attention_mask)
kwargs = {}
if mpu.is_pipeline_first_stage():
input_ids = model_input
# Do the same as attention_mask for input_ids, tokentype_ids
assert len(input_ids.shape) == 3
assert len(tokentype_ids.shape) == 3
input_ids = input_ids.view(-1, input_ids.size(-1))
tokentype_ids = tokentype_ids.view(-1, tokentype_ids.size(-1))
position_ids = bert_position_ids(input_ids)
args = [input_ids, position_ids, extended_attention_mask]
kwargs['tokentype_ids'] = tokentype_ids
else:
args = [model_input, extended_attention_mask]
lm_output = self.language_model(*args, **kwargs)
if mpu.is_pipeline_last_stage():
_, pooled_output = lm_output
multichoice_output = self.multichoice_dropout(pooled_output)
multichoice_logits = self.multichoice_head(multichoice_output)
# Reshape back to separate choices.
multichoice_logits = multichoice_logits.view(-1, num_choices)
return multichoice_logits
return lm_output
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(
destination, prefix, keep_vars)
if mpu.is_pipeline_last_stage():
state_dict_[self._multichoice_head_key] \
= self.multichoice_head.state_dict(
destination, prefix, keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if mpu.is_pipeline_last_stage():
if self._multichoice_head_key in state_dict:
self.multichoice_head.load_state_dict(
state_dict[self._multichoice_head_key], strict=strict)
else:
print_rank_last('***WARNING*** could not find {} in the checkpoint, '
'initializing to random'.format(
self._multichoice_head_key))
class MultipleChoice(MultipleChoiceBase):
def __init__(self, num_tokentypes=2):
super(MultipleChoice, self).__init__(
num_tokentypes=num_tokentypes)
def forward(self, input_ids, attention_mask,
tokentype_ids=None):
return super(MultipleChoice, self).forward(
input_ids,
attention_mask,
tokentype_ids=tokentype_ids)
class MultipleChoiceFirstStage(MultipleChoiceBase):
def __init__(self, num_tokentypes=2):
super(MultipleChoiceFirstStage, self).__init__(
num_tokentypes=num_tokentypes)
def forward(self, input_ids, attention_mask,
tokentype_ids=None):
return super(MultipleChoiceFirstStage, self).forward(
input_ids,
attention_mask,
tokentype_ids=tokentype_ids)
class MultipleChoiceIntermediateStage(MultipleChoiceBase):
def __init__(self, num_tokentypes=2):
super(MultipleChoiceIntermediateStage, self).__init__(
num_tokentypes=num_tokentypes)
def forward(self, hidden_state, attention_mask):
return super(MultipleChoiceIntermediateStage, self).forward(
hidden_state,
attention_mask)
class MultipleChoiceLastStage(MultipleChoiceBase):
def __init__(self, num_tokentypes=2):
super(MultipleChoiceLastStage, self).__init__(
num_tokentypes=num_tokentypes)
def forward(self, hidden_state, attention_mask):
return super(MultipleChoiceLastStage, self).forward(
hidden_state,
attention_mask)
| [
"torch.nn.Dropout"
] | 1.7.0 | thu-pacman/AIPerf-MoE | fda4f381b4b974721b187cece968dd7bc96a81f4 |
1.7 | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron global variables."""
import os
import sys
import time
import torch
from megatron.tokenizer import build_tokenizer
from .arguments import parse_args
from .microbatches import build_num_microbatches_calculator
_GLOBAL_ARGS = None
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
_GLOBAL_TOKENIZER = None
_GLOBAL_TENSORBOARD_WRITER = None
_GLOBAL_ADLR_AUTORESUME = None
_GLOBAL_TIMERS = None
def get_args():
"""Return arguments."""
_ensure_var_is_initialized(_GLOBAL_ARGS, 'args')
return _GLOBAL_ARGS
def get_num_microbatches():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get()
def get_current_global_batch_size():
return _GLOBAL_NUM_MICROBATCHES_CALCULATOR.get_current_global_batch_size()
def update_num_microbatches(consumed_samples, consistency_check=True):
_GLOBAL_NUM_MICROBATCHES_CALCULATOR.update(consumed_samples,
consistency_check)
def get_tokenizer():
"""Return tokenizer."""
_ensure_var_is_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
return _GLOBAL_TOKENIZER
def get_tensorboard_writer():
"""Return tensorboard writer. It can be None so no need
to check if it is initialized."""
return _GLOBAL_TENSORBOARD_WRITER
def get_adlr_autoresume():
"""ADLR autoresume object. It can be None so no need
to check if it is initialized."""
return _GLOBAL_ADLR_AUTORESUME
def get_timers():
"""Return timers."""
_ensure_var_is_initialized(_GLOBAL_TIMERS, 'timers')
return _GLOBAL_TIMERS
def set_global_variables(extra_args_provider=None, args_defaults={},
ignore_unknown_args=False):
"""Set args, tokenizer, tensorboard-writer, adlr-autoresume, and timers."""
args = _parse_args(extra_args_provider=extra_args_provider,
defaults=args_defaults,
ignore_unknown_args=ignore_unknown_args)
_build_num_microbatches_calculator(args)
_ = _build_tokenizer(args)
_set_tensorboard_writer(args)
_set_adlr_autoresume(args)
_set_timers()
def _parse_args(extra_args_provider=None, defaults={},
ignore_unknown_args=False):
"""Parse entire arguments."""
global _GLOBAL_ARGS
_ensure_var_is_not_initialized(_GLOBAL_ARGS, 'args')
_GLOBAL_ARGS = parse_args(extra_args_provider=extra_args_provider,
defaults=defaults,
ignore_unknown_args=ignore_unknown_args)
return _GLOBAL_ARGS
def _build_num_microbatches_calculator(args):
global _GLOBAL_NUM_MICROBATCHES_CALCULATOR
_ensure_var_is_not_initialized(_GLOBAL_NUM_MICROBATCHES_CALCULATOR,
'num microbatches calculator')
_GLOBAL_NUM_MICROBATCHES_CALCULATOR = build_num_microbatches_calculator(
args)
def _build_tokenizer(args):
"""Initialize tokenizer."""
global _GLOBAL_TOKENIZER
_ensure_var_is_not_initialized(_GLOBAL_TOKENIZER, 'tokenizer')
_GLOBAL_TOKENIZER = build_tokenizer(args)
return _GLOBAL_TOKENIZER
def rebuild_tokenizer(args):
global _GLOBAL_TOKENIZER
_GLOBAL_TOKENIZER = None
return _build_tokenizer(args)
def _set_tensorboard_writer(args):
"""Set tensorboard writer."""
global _GLOBAL_TENSORBOARD_WRITER
_ensure_var_is_not_initialized(_GLOBAL_TENSORBOARD_WRITER,
'tensorboard writer')
if hasattr(args, 'tensorboard_dir') and \
            args.tensorboard_dir and args.rank == (args.world_size - 1):
try:
from torch.utils.tensorboard import SummaryWriter
print('> setting tensorboard ...')
_GLOBAL_TENSORBOARD_WRITER = SummaryWriter(
log_dir=args.tensorboard_dir)
except ModuleNotFoundError:
print('WARNING: TensorBoard writing requested but is not '
'available (are you using PyTorch 1.1.0 or later?), '
'no TensorBoard logs will be written.', flush=True)
def _set_adlr_autoresume(args):
"""Initialize ADLR autoresume."""
global _GLOBAL_ADLR_AUTORESUME
_ensure_var_is_not_initialized(_GLOBAL_ADLR_AUTORESUME, 'adlr autoresume')
if args.adlr_autoresume:
if args.rank == 0:
print('enabling autoresume ...', flush=True)
sys.path.append(os.environ.get('SUBMIT_SCRIPTS', '.'))
try:
from userlib.auto_resume import AutoResume
except BaseException:
print('ADLR autoresume is not available, exiting ...')
sys.exit()
_GLOBAL_ADLR_AUTORESUME = AutoResume
def _set_timers():
"""Initialize timers."""
global _GLOBAL_TIMERS
_ensure_var_is_not_initialized(_GLOBAL_TIMERS, 'timers')
_GLOBAL_TIMERS = Timers()
def _ensure_var_is_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is not None, '{} is not initialized.'.format(name)
def _ensure_var_is_not_initialized(var, name):
"""Make sure the input variable is not None."""
assert var is None, '{} is already initialized.'.format(name)
class _Timer:
"""Timer."""
def __init__(self, name):
self.name_ = name
self.elapsed_ = 0.0
self.started_ = False
self.start_time = time.time()
def start(self):
"""Start the timer."""
assert not self.started_, 'timer has already been started'
torch.cuda.synchronize()
self.start_time = time.time()
self.started_ = True
def stop(self):
"""Stop the timer."""
assert self.started_, 'timer is not started'
torch.cuda.synchronize()
self.elapsed_ += (time.time() - self.start_time)
self.started_ = False
def reset(self):
"""Reset timer."""
self.elapsed_ = 0.0
self.started_ = False
def elapsed(self, reset=True):
"""Calculate the elapsed time."""
started_ = self.started_
        # If timing is in progress, end it first.
if self.started_:
self.stop()
# Get the elapsed time.
elapsed_ = self.elapsed_
# Reset the elapsed time
if reset:
self.reset()
# If timing was in progress, set it back.
if started_:
self.start()
return elapsed_
class Timers:
"""Group of timers."""
def __init__(self):
self.timers = {}
def __call__(self, name):
if name not in self.timers:
self.timers[name] = _Timer(name)
return self.timers[name]
def write(self, names, writer, iteration, normalizer=1.0, reset=False):
"""Write timers to a tensorboard writer"""
# currently when using add_scalars,
# torch.utils.add_scalars makes each timer its own run, which
        # pollutes the runs list, so we just add each as a scalar
assert normalizer > 0.0
for name in names:
value = self.timers[name].elapsed(reset=reset) / normalizer
writer.add_scalar(name + '-time', value, iteration)
def log(self, names, normalizer=1.0, reset=True):
"""Log a group of timers."""
assert normalizer > 0.0
string = 'time (ms)'
for name in names:
elapsed_time = self.timers[name].elapsed(
reset=reset) * 1000.0 / normalizer
string += ' | {}: {:.2f}'.format(name, elapsed_time)
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == (
torch.distributed.get_world_size() - 1):
print(string, flush=True)
else:
print(string, flush=True)
| [
"torch.distributed.get_world_size",
"torch.cuda.synchronize",
"torch.distributed.is_initialized",
"torch.distributed.get_rank",
"torch.utils.tensorboard.SummaryWriter"
] | 1.7.0 | thu-pacman/AIPerf-MoE | fda4f381b4b974721b187cece968dd7bc96a81f4 |
1.6 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pickle
from typing import List, Optional
from unittest import mock
import cloudpickle
import numpy as np
import pytest
import torch
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.helpers.simple_models import ClassificationModel
_logger = logging.getLogger(__name__)
class EarlyStoppingTestRestore(EarlyStopping):
    # this class has to be defined outside the test function, otherwise we get a pickle error
def __init__(self, expected_state, *args, **kwargs):
super().__init__(*args, **kwargs)
self.expected_state = expected_state
# cache the state for each epoch
self.saved_states = []
def on_train_start(self, trainer, pl_module):
if self.expected_state:
assert self.on_save_checkpoint(trainer, pl_module, {}) == self.expected_state
def on_train_epoch_end(self, trainer, pl_module):
super().on_train_epoch_end(trainer, pl_module)
self.saved_states.append(self.on_save_checkpoint(trainer, pl_module, {}).copy())
def test_resume_early_stopping_from_checkpoint(tmpdir):
"""
Prevent regressions to bugs:
https://github.com/PyTorchLightning/pytorch-lightning/issues/1464
https://github.com/PyTorchLightning/pytorch-lightning/issues/1463
"""
seed_everything(42)
model = ClassificationModel()
dm = ClassifDataModule()
checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, monitor="train_loss", save_top_k=1)
early_stop_callback = EarlyStoppingTestRestore(None, monitor="train_loss")
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[early_stop_callback, checkpoint_callback],
num_sanity_val_steps=0,
max_epochs=4,
)
trainer.fit(model, datamodule=dm)
assert len(early_stop_callback.saved_states) == 4
checkpoint_filepath = checkpoint_callback.kth_best_model_path
# ensure state is persisted properly
checkpoint = torch.load(checkpoint_filepath)
# the checkpoint saves "epoch + 1"
early_stop_callback_state = early_stop_callback.saved_states[checkpoint["epoch"] - 1]
assert 4 == len(early_stop_callback.saved_states)
assert checkpoint["callbacks"]["EarlyStoppingTestRestore"] == early_stop_callback_state
# ensure state is reloaded properly (assertion in the callback)
early_stop_callback = EarlyStoppingTestRestore(early_stop_callback_state, monitor="train_loss")
new_trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
resume_from_checkpoint=checkpoint_filepath,
callbacks=[early_stop_callback],
)
with pytest.raises(MisconfigurationException, match=r"You restored a checkpoint with current_epoch"):
new_trainer.fit(model)
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
def test_early_stopping_no_extraneous_invocations(tmpdir):
"""Test to ensure that callback methods aren't being invoked outside of the callback handler."""
model = ClassificationModel()
dm = ClassifDataModule()
early_stop_callback = EarlyStopping(monitor="train_loss")
expected_count = 4
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[early_stop_callback],
limit_train_batches=4,
limit_val_batches=4,
max_epochs=expected_count,
)
trainer.fit(model, datamodule=dm)
assert trainer.early_stopping_callback == early_stop_callback
assert trainer.early_stopping_callbacks == [early_stop_callback]
assert len(trainer.dev_debugger.early_stopping_history) == expected_count
@pytest.mark.parametrize(
"loss_values, patience, expected_stop_epoch",
[([6, 5, 5, 5, 5, 5], 3, 4), ([6, 5, 4, 4, 3, 3], 1, 3), ([6, 5, 6, 5, 5, 5], 3, 4)],
)
def test_early_stopping_patience(tmpdir, loss_values: list, patience: int, expected_stop_epoch: int):
"""Test to ensure that early stopping is not triggered before patience is exhausted."""
class ModelOverrideValidationReturn(BoringModel):
validation_return_values = torch.tensor(loss_values)
def validation_epoch_end(self, outputs):
loss = self.validation_return_values[self.current_epoch]
self.log("test_val_loss", loss)
model = ModelOverrideValidationReturn()
early_stop_callback = EarlyStopping(monitor="test_val_loss", patience=patience, verbose=True)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[early_stop_callback],
val_check_interval=1.0,
num_sanity_val_steps=0,
max_epochs=10,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
assert trainer.current_epoch == expected_stop_epoch
@pytest.mark.parametrize("validation_step_none", [True, False])
@pytest.mark.parametrize(
"loss_values, patience, expected_stop_epoch",
[([6, 5, 5, 5, 5, 5], 3, 4), ([6, 5, 4, 4, 3, 3], 1, 3), ([6, 5, 6, 5, 5, 5], 3, 4)],
)
def test_early_stopping_patience_train(
tmpdir, validation_step_none: bool, loss_values: list, patience: int, expected_stop_epoch: int
):
"""Test to ensure that early stopping is not triggered before patience is exhausted."""
class ModelOverrideTrainReturn(BoringModel):
train_return_values = torch.tensor(loss_values)
def training_epoch_end(self, outputs):
loss = self.train_return_values[self.current_epoch]
self.log("train_loss", loss)
model = ModelOverrideTrainReturn()
if validation_step_none:
model.validation_step = None
early_stop_callback = EarlyStopping(
monitor="train_loss", patience=patience, verbose=True, check_on_train_epoch_end=True
)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[early_stop_callback],
num_sanity_val_steps=0,
max_epochs=10,
progress_bar_refresh_rate=0,
)
trainer.fit(model)
assert trainer.current_epoch == expected_stop_epoch
def test_pickling(tmpdir):
early_stopping = EarlyStopping()
early_stopping_pickled = pickle.dumps(early_stopping)
early_stopping_loaded = pickle.loads(early_stopping_pickled)
assert vars(early_stopping) == vars(early_stopping_loaded)
early_stopping_pickled = cloudpickle.dumps(early_stopping)
early_stopping_loaded = cloudpickle.loads(early_stopping_pickled)
assert vars(early_stopping) == vars(early_stopping_loaded)
def test_early_stopping_no_val_step(tmpdir):
"""Test that early stopping callback falls back to training metrics when no validation defined."""
model = ClassificationModel()
dm = ClassifDataModule()
model.validation_step = None
model.val_dataloader = None
stopping = EarlyStopping(monitor="train_loss", min_delta=0.1, patience=0, check_on_train_epoch_end=True)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[stopping], overfit_batches=0.20, max_epochs=10)
trainer.fit(model, datamodule=dm)
assert trainer.state.finished, f"Training failed with {trainer.state}"
assert trainer.current_epoch < trainer.max_epochs - 1
@pytest.mark.parametrize(
"stopping_threshold,divergence_theshold,losses,expected_epoch",
[
(None, None, [8, 4, 2, 3, 4, 5, 8, 10], 5),
(2.9, None, [9, 8, 7, 6, 5, 6, 4, 3, 2, 1], 8),
(None, 15.9, [9, 4, 2, 16, 32, 64], 3),
],
)
def test_early_stopping_thresholds(tmpdir, stopping_threshold, divergence_threshold, losses, expected_epoch):
class CurrentModel(BoringModel):
def validation_epoch_end(self, outputs):
val_loss = losses[self.current_epoch]
self.log("abc", val_loss)
model = CurrentModel()
early_stopping = EarlyStopping(
monitor="abc", stopping_threshold=stopping_threshold, divergence_threshold=divergence_theshold
)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[early_stopping], overfit_batches=0.20, max_epochs=20)
trainer.fit(model)
assert trainer.current_epoch == expected_epoch, "early_stopping failed"
@pytest.mark.parametrize("stop_value", [torch.tensor(np.inf), torch.tensor(np.nan)])
def test_early_stopping_on_non_finite_monitor(tmpdir, stop_value):
losses = [4, 3, stop_value, 2, 1]
expected_stop_epoch = 2
class CurrentModel(BoringModel):
def validation_epoch_end(self, outputs):
val_loss = losses[self.current_epoch]
self.log("val_loss", val_loss)
model = CurrentModel()
early_stopping = EarlyStopping(monitor="val_loss", check_finite=True)
trainer = Trainer(default_root_dir=tmpdir, callbacks=[early_stopping], overfit_batches=0.20, max_epochs=10)
trainer.fit(model)
assert trainer.current_epoch == expected_stop_epoch
assert early_stopping.stopped_epoch == expected_stop_epoch
@pytest.mark.parametrize("step_freeze, min_steps, min_epochs", [(5, 1, 1), (5, 1, 3), (3, 15, 1)])
def test_min_steps_override_early_stopping_functionality(tmpdir, step_freeze: int, min_steps: int, min_epochs: int):
"""Excepted Behaviour:
IF `min_steps` was set to a higher value than the `trainer.global_step` when `early_stopping` is being triggered,
THEN the trainer should continue until reaching `trainer.global_step` == `min_steps`, and stop.
IF `min_epochs` resulted in a higher number of steps than the `trainer.global_step`
when `early_stopping` is being triggered,
THEN the trainer should continue until reaching
`trainer.global_step` == `min_epochs * len(train_dataloader)`, and stop.
This test validate this expected behaviour
IF both `min_epochs` and `min_steps` are provided and higher than the `trainer.global_step`
when `early_stopping` is being triggered,
THEN the highest between `min_epochs * len(train_dataloader)` and `min_steps` would be reached.
Caveat: IF min_steps is divisible by len(train_dataloader), then it will do min_steps + len(train_dataloader)
This test validate those expected behaviours
"""
_logger.disabled = True
original_loss_value = 10
limit_train_batches = 3
patience = 3
class Model(BoringModel):
def __init__(self, step_freeze):
super().__init__()
self._step_freeze = step_freeze
self._loss_value = 10.0
self._eps = 1e-1
self._count_decrease = 0
self._values = []
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
return {"test_val_loss": self._loss_value}
def validation_epoch_end(self, outputs):
_mean = np.mean([x["test_val_loss"] for x in outputs])
if self.trainer.global_step <= self._step_freeze:
self._count_decrease += 1
self._loss_value -= self._eps
self._values.append(_mean)
self.log("test_val_loss", _mean)
model = Model(step_freeze)
model.training_step_end = None
model.test_dataloader = None
early_stop_callback = EarlyStopping(monitor="test_val_loss", patience=patience, verbose=True)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=[early_stop_callback],
limit_train_batches=limit_train_batches,
limit_val_batches=2,
min_steps=min_steps,
min_epochs=min_epochs,
)
trainer.fit(model)
# Make sure loss was properly decreased
assert abs(original_loss_value - (model._count_decrease) * model._eps - model._loss_value) < 1e-6
pos_diff = (np.diff(model._values) == 0).nonzero()[0][0]
# Compute when the latest validation epoch end happened
latest_validation_epoch_end = (pos_diff // limit_train_batches) * limit_train_batches
if pos_diff % limit_train_batches == 0:
latest_validation_epoch_end += limit_train_batches
# Compute early stopping latest step
by_early_stopping = latest_validation_epoch_end + (1 + limit_train_batches) * patience
# Compute min_epochs latest step
by_min_epochs = min_epochs * limit_train_batches
# Make sure the trainer stops for the max of all minimum requirements
assert trainer.global_step == max(min_steps, by_early_stopping, by_min_epochs), (
trainer.global_step,
max(min_steps, by_early_stopping, by_min_epochs),
step_freeze,
min_steps,
min_epochs,
)
_logger.disabled = False
def test_early_stopping_mode_options():
with pytest.raises(MisconfigurationException, match="`mode` can be .* got unknown_option"):
EarlyStopping(mode="unknown_option")
class EarlyStoppingModel(BoringModel):
def __init__(self, expected_end_epoch: int, early_stop_on_train: bool):
super().__init__()
self.expected_end_epoch = expected_end_epoch
self.early_stop_on_train = early_stop_on_train
def _epoch_end(self) -> None:
losses = [8, 4, 2, 3, 4, 5, 8, 10]
loss = losses[self.current_epoch]
self.log("abc", torch.tensor(loss))
self.log("cba", torch.tensor(0))
def training_epoch_end(self, outputs):
if not self.early_stop_on_train:
return
self._epoch_end()
def validation_epoch_end(self, outputs):
if self.early_stop_on_train:
return
self._epoch_end()
def on_train_end(self) -> None:
assert self.trainer.current_epoch == self.expected_end_epoch, "Early Stopping Failed"
_ES_CHECK = dict(check_on_train_epoch_end=True)
_ES_CHECK_P3 = dict(patience=3, check_on_train_epoch_end=True)
_NO_WIN = dict(marks=RunIf(skip_windows=True))
@pytest.mark.parametrize(
"callbacks, expected_stop_epoch, check_on_train_epoch_end, accelerator, num_processes",
[
([EarlyStopping("abc"), EarlyStopping("cba", patience=3)], 3, False, None, 1),
([EarlyStopping("cba", patience=3), EarlyStopping("abc")], 3, False, None, 1),
pytest.param([EarlyStopping("abc"), EarlyStopping("cba", patience=3)], 3, False, "ddp_cpu", 2, **_NO_WIN),
pytest.param([EarlyStopping("cba", patience=3), EarlyStopping("abc")], 3, False, "ddp_cpu", 2, **_NO_WIN),
([EarlyStopping("abc", **_ES_CHECK), EarlyStopping("cba", **_ES_CHECK_P3)], 3, True, None, 1),
([EarlyStopping("cba", **_ES_CHECK_P3), EarlyStopping("abc", **_ES_CHECK)], 3, True, None, 1),
pytest.param(
[EarlyStopping("abc", **_ES_CHECK), EarlyStopping("cba", **_ES_CHECK_P3)], 3, True, "ddp_cpu", 2, **_NO_WIN
),
pytest.param(
[EarlyStopping("cba", **_ES_CHECK_P3), EarlyStopping("abc", **_ES_CHECK)], 3, True, "ddp_cpu", 2, **_NO_WIN
),
],
)
def test_multiple_early_stopping_callbacks(
tmpdir,
callbacks: List[EarlyStopping],
expected_stop_epoch: int,
check_on_train_epoch_end: bool,
accelerator: Optional[str],
num_processes: int,
):
"""Ensure when using multiple early stopping callbacks we stop if any signals we should stop."""
model = EarlyStoppingModel(expected_stop_epoch, check_on_train_epoch_end)
trainer = Trainer(
default_root_dir=tmpdir,
callbacks=callbacks,
overfit_batches=0.20,
max_epochs=20,
accelerator=accelerator,
num_processes=num_processes,
)
trainer.fit(model)
| [
"torch.tensor",
"torch.load"
] | 1.6 | Aiden-Jeon/pytorch-lightning | 963c26764682fa4cf64c93c5a7572ae0040e9c32 |
1.4 | from collections import namedtuple
from functools import partial
import pytest
import torch
from skimage.metrics import structural_similarity
from pytorch_lightning.metrics.functional import ssim
from pytorch_lightning.metrics.regression import SSIM
from tests.metrics.utils import BATCH_SIZE, MetricTester, NUM_BATCHES
torch.manual_seed(42)
Input = namedtuple('Input', ["preds", "target", "multichannel"])
_inputs = []
for size, channel, coef, multichannel, dtype in [
(12, 3, 0.9, True, torch.float),
(13, 1, 0.8, False, torch.float32),
(14, 1, 0.7, False, torch.double),
(15, 3, 0.6, True, torch.float64),
]:
preds = torch.rand(NUM_BATCHES, BATCH_SIZE, channel, size, size, dtype=dtype)
_inputs.append(Input(
preds=preds,
target=preds * coef,
multichannel=multichannel,
))
def _sk_metric(preds, target, data_range, multichannel):
c, h, w = preds.shape[-3:]
sk_preds = preds.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()
sk_target = target.view(-1, c, h, w).permute(0, 2, 3, 1).numpy()
if not multichannel:
sk_preds = sk_preds[:, :, :, 0]
sk_target = sk_target[:, :, :, 0]
return structural_similarity(
sk_target,
sk_preds,
data_range=data_range,
multichannel=multichannel,
gaussian_weights=True,
win_size=11,
sigma=1.5,
use_sample_covariance=False
)
@pytest.mark.parametrize(
"preds, target, multichannel",
[(i.preds, i.target, i.multichannel) for i in _inputs],
)
class TestSSIM(MetricTester):
atol = 6e-5
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
def test_ssim(self, preds, target, multichannel, ddp, dist_sync_on_step):
self.run_class_metric_test(
ddp,
preds,
target,
SSIM,
partial(_sk_metric, data_range=1.0, multichannel=multichannel),
metric_args={"data_range": 1.0},
dist_sync_on_step=dist_sync_on_step,
)
def test_ssim_functional(self, preds, target, multichannel):
self.run_functional_metric_test(
preds,
target,
ssim,
partial(_sk_metric, data_range=1.0, multichannel=multichannel),
metric_args={"data_range": 1.0},
)
@pytest.mark.parametrize(
['pred', 'target', 'kernel', 'sigma'],
[
pytest.param([1, 16, 16], [1, 16, 16], [11, 11], [1.5, 1.5]), # len(shape)
pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5]), # len(kernel), len(sigma)
pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5, 1.5]), # len(kernel), len(sigma)
pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11], [1.5]), # len(kernel), len(sigma)
pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, 1.5]), # invalid kernel input
pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 10], [1.5, 1.5]), # invalid kernel input
pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, -11], [1.5, 1.5]), # invalid kernel input
pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 11], [1.5, 0]), # invalid sigma input
pytest.param([1, 1, 16, 16], [1, 1, 16, 16], [11, 0], [1.5, -1.5]), # invalid sigma input
],
)
def test_ssim_invalid_inputs(pred, target, kernel, sigma):
pred_t = torch.rand(pred)
target_t = torch.rand(target, dtype=torch.float64)
with pytest.raises(TypeError):
ssim(pred_t, target_t)
pred = torch.rand(pred)
target = torch.rand(target)
with pytest.raises(ValueError):
ssim(pred, target, kernel, sigma)
| [
"torch.manual_seed",
"torch.rand"
] | 1.4 | prajakta0111/pytorch-lightning | 3df02b880a6d145ff0aca24ea429c12c0d8f1181 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Sequence, Tuple, Union
import torch
from pytorch_lightning.metrics.functional.precision_recall_curve import (
_binary_clf_curve,
_precision_recall_curve_update,
)
def _roc_update(
preds: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
pos_label: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor, int, int]:
return _precision_recall_curve_update(preds, target, num_classes, pos_label)
def _roc_compute(
preds: torch.Tensor,
target: torch.Tensor,
num_classes: int,
pos_label: int,
sample_weights: Optional[Sequence] = None,
) -> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[List[torch.Tensor], List[torch.Tensor],
List[torch.Tensor]]]:
if num_classes == 1:
fps, tps, thresholds = _binary_clf_curve(
preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label
)
# Add an extra threshold position
# to make sure that the curve starts at (0, 0)
tps = torch.cat([torch.zeros(1, dtype=tps.dtype, device=tps.device), tps])
fps = torch.cat([torch.zeros(1, dtype=fps.dtype, device=fps.device), fps])
thresholds = torch.cat([thresholds[0][None] + 1, thresholds])
if fps[-1] <= 0:
raise ValueError("No negative samples in targets, false positive value should be meaningless")
fpr = fps / fps[-1]
if tps[-1] <= 0:
raise ValueError("No positive samples in targets, true positive value should be meaningless")
tpr = tps / tps[-1]
return fpr, tpr, thresholds
# Recursively call per class
fpr, tpr, thresholds = [], [], []
for c in range(num_classes):
preds_c = preds[:, c]
res = roc(
preds=preds_c,
target=target,
num_classes=1,
pos_label=c,
sample_weights=sample_weights,
)
fpr.append(res[0])
tpr.append(res[1])
thresholds.append(res[2])
return fpr, tpr, thresholds
def roc(
preds: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
pos_label: Optional[int] = None,
sample_weights: Optional[Sequence] = None,
) -> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[List[torch.Tensor], List[torch.Tensor],
List[torch.Tensor]]]:
"""
Computes the Receiver Operating Characteristic (ROC).
Args:
preds: predictions from model (logits or probabilities)
target: ground truth values
        num_classes: integer with number of classes. Not necessary to provide
            for binary problems.
        pos_label: integer determining the positive class. Default is ``None``,
            which for binary problems translates to 1. For multiclass problems
            this argument should not be set, as we iteratively change it in the
            range [0, num_classes-1]
sample_weights: sample weights for each data point
Returns:
3-element tuple containing
fpr:
tensor with false positive rates.
If multiclass, this is a list of such tensors, one for each class.
tpr:
tensor with true positive rates.
If multiclass, this is a list of such tensors, one for each class.
thresholds:
            thresholds used for computing false and true positive rates
Example (binary case):
>>> from pytorch_lightning.metrics.functional import roc
>>> pred = torch.tensor([0, 1, 2, 3])
>>> target = torch.tensor([0, 1, 1, 1])
>>> fpr, tpr, thresholds = roc(pred, target, pos_label=1)
>>> fpr
tensor([0., 0., 0., 0., 1.])
>>> tpr
tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
>>> thresholds
tensor([4, 3, 2, 1, 0])
Example (multiclass case):
>>> from pytorch_lightning.metrics.functional import roc
>>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05],
... [0.05, 0.05, 0.05, 0.75]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> fpr, tpr, thresholds = roc(pred, target, num_classes=4)
>>> fpr
[tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
>>> tpr
[tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([1.7500, 0.7500, 0.0500]),
tensor([1.7500, 0.7500, 0.0500]),
tensor([1.7500, 0.7500, 0.0500]),
tensor([1.7500, 0.7500, 0.0500])]
"""
preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes, pos_label)
return _roc_compute(preds, target, num_classes, pos_label, sample_weights)
| [
"torch.zeros",
"torch.cat"
] | 1.4 | prajakta0111/pytorch-lightning | 3df02b880a6d145ff0aca24ea429c12c0d8f1181 |
1.4 | import pytest
import torch
from pytorch_lightning.metrics.utils import class_reduce, reduce
def test_reduce():
start_tensor = torch.rand(50, 40, 30)
assert torch.allclose(reduce(start_tensor, 'elementwise_mean'), torch.mean(start_tensor))
assert torch.allclose(reduce(start_tensor, 'sum'), torch.sum(start_tensor))
assert torch.allclose(reduce(start_tensor, 'none'), start_tensor)
with pytest.raises(ValueError):
reduce(start_tensor, 'error_reduction')
def test_class_reduce():
num = torch.randint(1, 10, (100, )).float()
denom = torch.randint(10, 20, (100, )).float()
weights = torch.randint(1, 100, (100, )).float()
assert torch.allclose(class_reduce(num, denom, weights, 'micro'), torch.sum(num) / torch.sum(denom))
assert torch.allclose(class_reduce(num, denom, weights, 'macro'), torch.mean(num / denom))
assert torch.allclose(
class_reduce(num, denom, weights, 'weighted'), torch.sum(num / denom * (weights / torch.sum(weights)))
)
assert torch.allclose(class_reduce(num, denom, weights, 'none'), num / denom)
| [
"torch.rand",
"torch.randint",
"torch.mean",
"torch.sum"
] | 1.4 | prajakta0111/pytorch-lightning | 3df02b880a6d145ff0aca24ea429c12c0d8f1181 |
1.4 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import pytest
import torch
from torch import nn
from pytorch_lightning import Trainer
from pytorch_lightning.trainer.states import TrainerState
from pytorch_lightning.utilities import _TPU_AVAILABLE
from tests.helpers.boring_model import BoringModel
from tests.helpers.utils import pl_multi_process_test
class WeightSharingModule(BoringModel):
def __init__(self):
super().__init__()
self.layer_1 = nn.Linear(32, 10, bias=False)
self.layer_2 = nn.Linear(10, 32, bias=False)
self.layer_3 = nn.Linear(32, 10, bias=False)
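        # tie layer_3's weight to layer_1's so the tests can check that weight
        # sharing survives moving the model to the target device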
self.layer_3.weight = self.layer_1.weight
def forward(self, x):
x = self.layer_1(x)
x = self.layer_2(x)
x = self.layer_3(x)
return x
@pytest.mark.skipif(not _TPU_AVAILABLE, reason="test requires TPU machine")
@pl_multi_process_test
def test_resume_training_on_cpu(tmpdir):
""" Checks if training can be resumed from a saved checkpoint on CPU"""
# Train a model on TPU
model = BoringModel()
trainer = Trainer(
checkpoint_callback=True,
max_epochs=1,
tpu_cores=8,
)
trainer.fit(model)
model_path = trainer.checkpoint_callback.best_model_path
# Verify saved Tensors are on CPU
ckpt = torch.load(model_path)
weight_tensor = list(ckpt["state_dict"].values())[0]
assert weight_tensor.device == torch.device("cpu")
# Verify that training is resumed on CPU
trainer = Trainer(
resume_from_checkpoint=model_path,
checkpoint_callback=True,
max_epochs=1,
default_root_dir=tmpdir,
)
trainer.fit(model)
assert trainer.state == TrainerState.FINISHED, f"Training failed with {trainer.state}"
@pytest.mark.skipif(not _TPU_AVAILABLE, reason="test requires TPU machine")
@pl_multi_process_test
def test_if_test_works_after_train(tmpdir):
""" Ensure that .test() works after .fit() """
# Train a model on TPU
model = BoringModel()
trainer = Trainer(max_epochs=1, tpu_cores=8, default_root_dir=tmpdir, fast_dev_run=True)
trainer.fit(model)
assert len(trainer.test(model)) == 1
@pytest.mark.skipif(not _TPU_AVAILABLE, reason="test requires TPU machine")
@pl_multi_process_test
def test_weight_tying_warning(tmpdir, capsys=None):
"""
Ensure a warning is thrown if model parameter lengths do not match
    after moving to the device.
"""
model = WeightSharingModule()
trainer = Trainer(checkpoint_callback=True, max_epochs=1, tpu_cores=1)
with pytest.warns(UserWarning, match=r'The model layers do not match after moving to the target device.'):
result = trainer.fit(model)
assert result
@pytest.mark.skipif(not _TPU_AVAILABLE, reason="test requires TPU machine")
@pl_multi_process_test
def test_if_weights_tied(tmpdir, capsys=None):
"""
Test if weights are properly tied on `on_post_move_to_device`.
Ensure no warning for parameter mismatch is thrown.
"""
class Model(WeightSharingModule):
def on_post_move_to_device(self):
self.layer_3.weight = self.layer_1.weight
model = Model()
trainer = Trainer(checkpoint_callback=True, max_epochs=1, tpu_cores=1)
with pytest.warns(UserWarning) as warnings:
result = trainer.fit(model)
assert result
assert not list(filter(lambda x: 'The model layers do not match' in str(x), warnings.list))
assert len(trainer.test(model)) == 1
| [
"torch.nn.Linear",
"torch.device",
"torch.load"
] | 1.4 | prajakta0111/pytorch-lightning | 3df02b880a6d145ff0aca24ea429c12c0d8f1181 |
1.0 | # merges the output of the main transfer_model script
import torch
from pathlib import Path
import pickle
from scipy.spatial.transform import Rotation as R
KEYS = [
"transl",
"global_orient",
"body_pose",
"betas",
"left_hand_pose",
"right_hand_pose",
"jaw_pose",
"leye_pose",
"reye_pose",
"expression",
"vertices",
"joints",
"full_pose",
"v_shaped",
"faces"
]
IGNORED_KEYS = [
"vertices",
"faces",
"v_shaped"
]
def aggregate_rotmats(x):
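    # stack batched rotation matrices and convert them to flattened axis-angle vectors (via scipy)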
x = torch.cat(x, dim=0).detach().numpy()
s = x.shape[:-2]
x = R.from_matrix(x.reshape(-1, 3, 3)).as_rotvec()
x = x.reshape(s[0], -1)
return x
aggregate_function = {k: lambda x: torch.cat(x, 0).detach().numpy() for k in KEYS}
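# betas are per-subject shape parameters, so average them across frames instead of concatenating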
aggregate_function["betas"] = lambda x: torch.cat(x, 0).mean(0).detach().numpy()
for k in ["global_orient", "body_pose", "left_hand_pose", "right_hand_pose", "jaw_pose", "full_pose"]:
aggregate_function[k] = aggregate_rotmats
def merge(output_dir, gender):
output_dir = Path(output_dir)
assert output_dir.exists()
assert output_dir.is_dir()
# get list of all pkl files in output_dir with fixed length numeral names
pkl_files = [f for f in output_dir.glob("*.pkl") if f.stem != "merged"]
pkl_files = [f for f in sorted(pkl_files, key=lambda x: int(x.stem))]
assert "merged.pkl" not in [f.name for f in pkl_files]
merged = {}
# iterate over keys and put all values in lists
keys = set(KEYS) - set(IGNORED_KEYS)
for k in keys:
merged[k] = []
for pkl_file in pkl_files:
with open(pkl_file, "rb") as f:
data = pickle.load(f)
for k in keys:
if k in data:
merged[k].append(data[k])
b = torch.cat(merged["betas"], 0)
print("betas:")
for mu, sigma in zip(b.mean(0), b.std(0)):
print(" {:.3f} +/- {:.3f}".format(mu, sigma))
# aggregate all values
for k in keys:
merged[k] = aggregate_function[k](merged[k])
# add gender
merged["gender"] = gender
# save merged data to same output_dir
with open(output_dir / "merged.pkl", "wb") as f:
pickle.dump(merged, f)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Merge output of transfer_model script')
parser.add_argument('output_dir', type=str, help='output directory of transfer_model script')
parser.add_argument('--gender', type=str, choices=['male', 'female', 'neutral'], help='gender of actor in motion sequence')
args = parser.parse_args()
merge(args.output_dir, args.gender)
| [
"torch.cat"
] | 1.0.1 | gngdb/smplx | ba72def2038712784458a91f94371de6550d7e65 |
1.6 | import torch
from ..distances import CosineSimilarity
from ..utils import common_functions as c_f
from .generic_pair_loss import GenericPairLoss
class NTXentLoss(GenericPairLoss):
def __init__(self, temperature=0.07, **kwargs):
super().__init__(mat_based_loss=False, **kwargs)
self.temperature = temperature
self.add_to_recordable_attributes(list_of_names=["temperature"], is_stat=False)
def _compute_loss(self, pos_pairs, neg_pairs, indices_tuple):
a1, p, a2, _ = indices_tuple
if len(a1) > 0 and len(a2) > 0:
dtype = neg_pairs.dtype
# if dealing with actual distances, use negative distances
if not self.distance.is_inverted:
pos_pairs = -pos_pairs
neg_pairs = -neg_pairs
pos_pairs = pos_pairs.unsqueeze(1) / self.temperature
neg_pairs = neg_pairs / self.temperature
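            # keep, for each positive pair, only the negatives that share the same anchor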
n_per_p = c_f.to_dtype(a2.unsqueeze(0) == a1.unsqueeze(1), dtype=dtype)
neg_pairs = neg_pairs * n_per_p
neg_pairs[n_per_p == 0] = c_f.neg_inf(dtype)
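            # subtract the row-wise max before exponentiating for numerical stability (log-sum-exp trick)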
max_val = torch.max(pos_pairs, torch.max(neg_pairs, dim=1, keepdim=True)[0])
numerator = torch.exp(pos_pairs - max_val).squeeze(1)
denominator = torch.sum(torch.exp(neg_pairs - max_val), dim=1) + numerator
log_exp = torch.log((numerator / denominator) + c_f.small_val(dtype))
return {
"loss": {
"losses": -log_exp,
"indices": (a1, p),
"reduction_type": "pos_pair",
}
}
return self.zero_losses()
def get_default_distance(self):
return CosineSimilarity()
| [
"torch.exp",
"torch.max"
] | 1.6.0 | keshav47/pytorch-metric-learning | 501e4cb5e56c56d09413c98a93039669abc2232b |
1.9 | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import os
class Linear_QNet(nn.Module):
def __init__(self, input_size, hidden_size, hidden_size2, output_size):
# feed forward neural network
super().__init__()
self.linear1 = nn.Linear(input_size, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size2)  # fixed: takes the first hidden layer's output, not the raw input
self.linear3 = nn.Linear(hidden_size2, output_size)
def forward(self, x):
x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))  # activation between hidden layers; two stacked linears would otherwise collapse into one
x = self.linear3(x)
return x
def save(self, state_dict, file_name="model.pth"):
model_folder_path = "./models"
if not os.path.exists(model_folder_path):
os.makedirs(model_folder_path)
file_name = os.path.join(model_folder_path,file_name)
torch.save(state_dict, file_name)
def load(self, file_name="model.pth"):
model_folder_path = "./models"
file_name = os.path.join(model_folder_path,file_name)
checkpoint = torch.load(file_name)
self.load_state_dict(checkpoint['state_dict'])
class QTrainer():
def __init__(self,model, lr, gamma):
self.lr = lr
self.gamma = gamma
self.model = model
self.optimizer = optim.Adam(model.parameters(), lr=self.lr)
self.criterion = nn.MSELoss()
def train_step(self, state, action, reward, next_state, done):
        state = torch.tensor(state, dtype=torch.float)
        next_state = torch.tensor(next_state, dtype=torch.float)
        action = torch.tensor(action, dtype=torch.long)
        reward = torch.tensor(reward, dtype=torch.float)
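        # a single transition arrives unbatched; add a batch dimension so shapes match batched training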
if len(state.shape) == 1:
state = torch.unsqueeze(state, 0)
next_state = torch.unsqueeze(next_state, 0)
action = torch.unsqueeze(action, 0)
reward = torch.unsqueeze(reward, 0)
done = (done, )
        # 1. predict Q values for the current state
pred = self.model(state)
# 2. Q_new = r + gamma * max(next_predicted_q_value) -> only if not done
# pred.clone()
# preds[argmax(action)] = Q_new
target = pred.clone()
for idx in range(len(done)):
Q_new = reward[idx]
if not done[idx]:
Q_new = reward[idx] + self.gamma * torch.max(self.model(next_state[idx]))
target[idx][torch.argmax(action[idx]).item()] = Q_new
self.optimizer.zero_grad()
loss = self.criterion(target,pred)
loss.backward()
self.optimizer.step()
| [
"torch.nn.Linear",
"torch.nn.MSELoss",
"torch.save",
"torch.unsqueeze",
"torch.tensor",
"torch.load",
"torch.argmax"
] | 1.9.0 | Chicco94/breakout-Q-learning | dfb7c1d18c4472f21828f1163641817b6f44d726 |
0.4 | from torch.utils.data import Subset
from PIL import Image
from torchvision.datasets import MNIST
from base.torchvision_dataset import TorchvisionDataset
from .preprocessing import get_target_label_idx, global_contrast_normalization
import torchvision.transforms as transforms
MNIST.resources = [
('https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873'),
('https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432'),
('https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3'),
('https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c')
]
class MNIST_Dataset(TorchvisionDataset):
def __init__(self, root: str, normal_class=0):
super().__init__(root)
self.n_classes = 2 # 0: normal, 1: outlier
self.normal_classes = tuple([normal_class])
self.outlier_classes = list(range(0, 10))
self.outlier_classes.remove(normal_class)
# Pre-computed min and max values (after applying GCN) from train data per class
min_max = [(-0.8826567065619495, 9.001545489292527),
(-0.6661464580883915, 20.108062262467364),
(-0.7820454743183202, 11.665100841080346),
(-0.7645772083211267, 12.895051191467457),
(-0.7253923114302238, 12.683235701611533),
(-0.7698501867861425, 13.103278415430502),
(-0.778418217980696, 10.457837397569108),
(-0.7129780970522351, 12.057777597673047),
(-0.8280402650205075, 10.581538445782988),
(-0.7369959242164307, 10.697039838804978)]
# MNIST preprocessing: GCN (with L1 norm) and min-max feature scaling to [0,1]
transform = transforms.Compose([transforms.ToTensor(),
transforms.Lambda(lambda x: global_contrast_normalization(x, scale='l1')),
transforms.Normalize([min_max[normal_class][0]],
[min_max[normal_class][1] - min_max[normal_class][0]])])
target_transform = transforms.Lambda(lambda x: int(x in self.outlier_classes))
train_set = MyMNIST(root=self.root, train=True, download=True,
transform=transform, target_transform=target_transform)
# Subset train_set to normal class
train_idx_normal = get_target_label_idx(train_set.train_labels.clone().data.cpu().numpy(), self.normal_classes)
self.train_set = Subset(train_set, train_idx_normal)
self.test_set = MyMNIST(root=self.root, train=False, download=True,
transform=transform, target_transform=target_transform)
class MyMNIST(MNIST):
"""Torchvision MNIST class with patch of __getitem__ method to also return the index of a data sample."""
def __init__(self, *args, **kwargs):
super(MyMNIST, self).__init__(*args, **kwargs)
def __getitem__(self, index):
"""Override the original method of the MNIST class.
Args:
index (int): Index
Returns:
triple: (image, target, index) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, index # only line changed
| [
"torch.utils.data.Subset"
] | 0.4.1 | Pangoraw/Deep-SVDD-PyTorch | 806f7099cea2013a87ebb32f30a6f4c9595ebbeb |
1.7 | import torch
import torch.nn as nn
from torch.nn import Module as Module
import timm
from torch.nn import Parameter
import torch.nn.functional as F
class AngleSimpleLinear(nn.Module):
"""Computes cos of angles between input vectors and weights vectors"""
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
# create proxy weights
self.weight = Parameter(torch.Tensor(in_features, out_features))
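        # initialize each class proxy (column) as a random unit-norm vector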
self.weight.data.normal_().renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
# cos_theta = F.cosine_similarity(x, self.weight.reshape(1, 20, -1), dim=2)
cos_theta = F.normalize(x.view(x.shape[0], -1), dim=1).mm(F.normalize(self.weight, p=2, dim=0))
return cos_theta.clamp(-1, 1)
def get_centers(self):
return torch.t(self.weight)
class TimmModelsWrapper(Module):
def __init__(self,
model_name,
num_classes,
pretrained=False,
ml_decoder_used=False,
                 asl_use=False):
super().__init__()
self.pretrained = pretrained
        self.is_mobilenet = model_name in ["mobilenetv3_large_100_miil_in21k", "mobilenetv3_large_100_miil"]
self.num_classes = num_classes
self.model = timm.create_model(model_name,
pretrained=pretrained,
num_classes=self.num_classes)
self.num_head_features = self.model.num_features
self.num_features = (self.model.conv_head.in_channels if self.is_mobilenet
else self.model.num_features)
if ml_decoder_used:
self.model.global_pool = torch.nn.Identity()
self.model.classifier = torch.nn.Identity()
else:
if asl_use:
self.model.act2 = nn.PReLU()
self.model.classifier = AngleSimpleLinear(self.num_head_features, self.num_classes)
else:
self.model.classifier = self.model.get_classifier()
def forward(self, x):
return self.model(x)
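# --- Hedged usage sketch (not part of the original file) ---
# Shows the two head configurations side by side; the model name and
# num_classes are arbitrary examples, no pretrained weights are downloaded,
# and this assumes a timm MobileNetV3-style model exposing act2/classifier.
if __name__ == "__main__":
    plain = TimmModelsWrapper("mobilenetv3_large_100", num_classes=10)
    asl = TimmModelsWrapper("mobilenetv3_large_100", num_classes=10, asl_use=True)
    x = torch.randn(2, 3, 224, 224)
    print(plain(x).shape)  # raw logits: torch.Size([2, 10])
    print(asl(x).shape)    # cosine scores clamped to [-1, 1]: torch.Size([2, 10])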
| [
"torch.nn.functional.normalize",
"torch.nn.Identity",
"torch.nn.PReLU",
"torch.t",
"torch.Tensor"
] | 1.7 | kprokofi/ML_Decoder | c01c50e0165e607afbebd8d615708ef9c084dd5b |
1.2 | import logging
from typing import Any, Dict, List, Optional
import torch
from torch.nn.functional import nll_loss, binary_cross_entropy_with_logits, sigmoid
from allennlp.common.checks import check_dimensions_match
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.models.reading_comprehension.util import get_best_span
from allennlp.modules import Highway
from allennlp.modules import Seq2SeqEncoder, SimilarityFunction, TimeDistributed, TextFieldEmbedder
from allennlp.modules.matrix_attention.legacy_matrix_attention import LegacyMatrixAttention
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import BooleanAccuracy, CategoricalAccuracy, SquadEmAndF1
logger = logging.getLogger(__name__)
@Model.register("bidaf")
class BidirectionalAttentionFlow(Model):
"""
This class implements Minjoon Seo's `Bidirectional Attention Flow model
<https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/7586b7cca1deba124af80609327395e613a20e9d>`_
for answering reading comprehension questions (ICLR 2017).
The basic layout is pretty simple: encode words as a combination of word embeddings and a
character-level encoder, pass the word representations through a bi-LSTM/GRU, use a matrix of
attentions to put question information into the passage word representations (this is the only
part that is at all non-standard), pass this through another few layers of bi-LSTMs/GRUs, and
do a softmax over span start and span end.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``question`` and ``passage`` ``TextFields`` we get as input to the model.
num_highway_layers : ``int``
The number of highway layers to use in between embedding the input and passing it through
the phrase layer.
phrase_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between embedding tokens
and doing the bidirectional attention.
similarity_function : ``SimilarityFunction``
The similarity function that we will use when comparing encoded passage and question
representations.
modeling_layer : ``Seq2SeqEncoder``
The encoder (with its own internal stacking) that we will use in between the bidirectional
attention and predicting span start and end.
span_end_encoder : ``Seq2SeqEncoder``
The encoder that we will use to incorporate span start predictions into the passage state
before predicting span end.
dropout : ``float``, optional (default=0.2)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
mask_lstms : ``bool``, optional (default=True)
If ``False``, we will skip passing the mask to the LSTM layers. This gives a ~2x speedup,
with only a slight performance decrease, if any. We haven't experimented much with this
yet, but have confirmed that we still get very similar performance with much faster
training times. We still use the mask for all softmaxes, but avoid the shuffling that's
required when using masking with pytorch LSTMs.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
num_highway_layers: int,
phrase_layer: Seq2SeqEncoder,
similarity_function: SimilarityFunction,
modeling_layer: Seq2SeqEncoder,
span_end_encoder: Seq2SeqEncoder,
dropout: float = 0.2,
mask_lstms: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None,
) -> None:
super().__init__(vocab, regularizer)
self._text_field_embedder = text_field_embedder
self._highway_layer = TimeDistributed(
Highway(text_field_embedder.get_output_dim(), num_highway_layers)
)
self._phrase_layer = phrase_layer
self._matrix_attention = LegacyMatrixAttention(similarity_function)
self._modeling_layer = modeling_layer
self._span_end_encoder = span_end_encoder
encoding_dim = phrase_layer.get_output_dim()
modeling_dim = modeling_layer.get_output_dim()
span_start_input_dim = encoding_dim * 4 + modeling_dim
self._span_start_predictor = TimeDistributed(torch.nn.Linear(span_start_input_dim, 1))
span_end_encoding_dim = span_end_encoder.get_output_dim()
span_end_input_dim = encoding_dim * 4 + span_end_encoding_dim
self._span_end_predictor = TimeDistributed(torch.nn.Linear(span_end_input_dim, 1))
# Bidaf has lots of layer dimensions which need to match up - these aren't necessarily
# obvious from the configuration files, so we check here.
check_dimensions_match(
modeling_layer.get_input_dim(),
4 * encoding_dim,
"modeling layer input dim",
"4 * encoding dim",
)
check_dimensions_match(
text_field_embedder.get_output_dim(),
phrase_layer.get_input_dim(),
"text field embedder output dim",
"phrase layer input dim",
)
check_dimensions_match(
span_end_encoder.get_input_dim(),
4 * encoding_dim + 3 * modeling_dim,
"span end encoder input dim",
"4 * encoding dim + 3 * modeling dim",
)
self._accuracy = BooleanAccuracy()
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._mask_lstms = mask_lstms
initializer(self)
def forward( # type: ignore
self,
question: Dict[str, torch.LongTensor],
passage: Dict[str, torch.LongTensor],
answer: torch.BoolTensor = None,
metadata: List[Dict[str, Any]] = None,
) -> Dict[str, torch.Tensor]:
"""
Parameters
----------
question : Dict[str, torch.LongTensor]
From a ``TextField``.
passage : Dict[str, torch.LongTensor]
From a ``TextField``. The model assumes that this passage contains the answer to the
question, and predicts the beginning and ending positions of the answer within the
passage.
        answer : ``torch.BoolTensor``, optional
            A binary target per instance indicating whether the passage answers the
            question. If this is given, we compute a binary cross-entropy loss that
            gets included in the output dictionary.
        metadata : ``List[Dict[str, Any]]``, optional
            If present, this should contain the question tokens, passage tokens, original passage
            text, and token offsets into the passage for each instance in the batch. The length
            of this list should be the batch size, and each dictionary should have the keys
            ``question_tokens``, ``passage_tokens``, ``original_passage``, and ``token_offsets``.
        Returns
        -------
        An output dictionary consisting of:
        passage_question_attention : torch.FloatTensor
            A tensor of shape ``(batch_size, passage_length, question_length)`` giving the
            attention of each passage token over the question tokens.
        prediction_bool_logits : torch.FloatTensor
            A tensor of shape ``(batch_size,)`` holding the span-start logit max-pooled over
            unmasked passage positions, used as an unnormalized answerability score.
        loss : torch.FloatTensor, optional
            A scalar binary cross-entropy loss to be optimised, included when ``answer``
            is given.
"""
embedded_question = self._highway_layer(self._text_field_embedder(question))
embedded_passage = self._highway_layer(self._text_field_embedder(passage))
batch_size = embedded_question.size(0)
passage_length = embedded_passage.size(1)
question_mask = util.get_text_field_mask(question).float()
passage_mask = util.get_text_field_mask(passage).float()
question_lstm_mask = question_mask if self._mask_lstms else None
passage_lstm_mask = passage_mask if self._mask_lstms else None
encoded_question = self._dropout(self._phrase_layer(embedded_question, question_lstm_mask))
encoded_passage = self._dropout(self._phrase_layer(embedded_passage, passage_lstm_mask))
encoding_dim = encoded_question.size(-1)
# Shape: (batch_size, passage_length, question_length)
passage_question_similarity = self._matrix_attention(encoded_passage, encoded_question)
# Shape: (batch_size, passage_length, question_length)
passage_question_attention = util.masked_softmax(passage_question_similarity, question_mask)
# Shape: (batch_size, passage_length, encoding_dim)
passage_question_vectors = util.weighted_sum(encoded_question, passage_question_attention)
# We replace masked values with something really negative here, so they don't affect the
# max below.
masked_similarity = util.replace_masked_values(
passage_question_similarity, question_mask.unsqueeze(1), -1e7
)
# Shape: (batch_size, passage_length)
question_passage_similarity = masked_similarity.max(dim=-1)[0].squeeze(-1)
# Shape: (batch_size, passage_length)
question_passage_attention = util.masked_softmax(question_passage_similarity, passage_mask)
# Shape: (batch_size, encoding_dim)
question_passage_vector = util.weighted_sum(encoded_passage, question_passage_attention)
# Shape: (batch_size, passage_length, encoding_dim)
tiled_question_passage_vector = question_passage_vector.unsqueeze(1).expand(
batch_size, passage_length, encoding_dim
)
# Shape: (batch_size, passage_length, encoding_dim * 4)
final_merged_passage = torch.cat(
[
encoded_passage,
passage_question_vectors,
encoded_passage * passage_question_vectors,
encoded_passage * tiled_question_passage_vector,
],
dim=-1,
)
modeled_passage = self._dropout(
self._modeling_layer(final_merged_passage, passage_lstm_mask)
)
modeling_dim = modeled_passage.size(-1)
# Shape: (batch_size, passage_length, encoding_dim * 4 + modeling_dim))
span_start_input = self._dropout(torch.cat([final_merged_passage, modeled_passage], dim=-1))
# Shape: (batch_size, passage_length)
span_start_logits = self._span_start_predictor(span_start_input).squeeze(-1)
# Shape: (batch_size, passage_length)
prediction_bool_logits = util.masked_max(span_start_logits, passage_mask, dim=1)
output_dict = {
"passage_question_attention": passage_question_attention,
"prediction_bool_logits": prediction_bool_logits
}
# Compute the loss for training.
if answer is not None:
loss = binary_cross_entropy_with_logits(
prediction_bool_logits, answer
)
threshold = 0.5
prediction_bool_logits = torch.where(torch.sigmoid(prediction_bool_logits) > threshold,
torch.ones_like(prediction_bool_logits), torch.zeros_like(prediction_bool_logits))
self._accuracy(prediction_bool_logits, answer)
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {
"acc": self._accuracy.get_metric(reset)
}
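# --- Hedged illustration (not part of the original file) ---
# A pure-torch sketch of the pooling this forward pass performs: per-token
# span-start logits are max-pooled over the unmasked passage positions
# (the util.masked_max call) and scored with binary cross-entropy. All
# shapes and values below are invented.
if __name__ == "__main__":
    span_start_logits = torch.randn(2, 5)              # (batch, passage_length)
    passage_mask = torch.tensor([[1., 1., 1., 0., 0.],
                                 [1., 1., 1., 1., 1.]])
    masked = span_start_logits.masked_fill(passage_mask == 0, -1e7)
    prediction_bool_logits = masked.max(dim=1)[0]      # one answerability logit per instance
    answer = torch.tensor([1., 0.])
    loss = binary_cross_entropy_with_logits(prediction_bool_logits, answer)
    print(prediction_bool_logits, loss.item())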
| [
"torch.nn.Linear",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.cat",
"torch.nn.Dropout",
"torch.sigmoid",
"torch.ones_like",
"torch.zeros_like"
] | 1.2.0 | flyinslowly/flyinnlp | 328e45d2952da6cdebbc1cccbb6a0aa9972859df |
1.6 | import os
import argparse
import multiprocessing
from pathlib import Path
from PIL import Image
import torch
from torchvision import models, transforms
from torch.utils.data import DataLoader, Dataset
from byol_pytorch import BYOL
import pytorch_lightning as pl
# test model, a resnet 50
resnet = models.resnet50(pretrained=True)
# arguments
parser = argparse.ArgumentParser(description='byol-lightning-test')
parser.add_argument('--image_folder', type=str, required=True,
                    help='path to your folder of images for self-supervised learning')
args = parser.parse_args()
# constants
BATCH_SIZE = 32
EPOCHS = 1000
LR = 3e-4
NUM_GPUS = 2
IMAGE_SIZE = 256
IMAGE_EXTS = ['.jpg', '.png', '.jpeg']
NUM_WORKERS = multiprocessing.cpu_count()
# pytorch lightning module
class SelfSupervisedLearner(pl.LightningModule):
def __init__(self, net, **kwargs):
super().__init__()
self.learner = BYOL(net, **kwargs)
def forward(self, images):
return self.learner(images)
def training_step(self, images, _):
loss = self.forward(images)
return {'loss': loss}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=LR)
def on_before_zero_grad(self, _):
self.learner.update_moving_average()
# images dataset
def expand_greyscale(t):
return t.expand(3, -1, -1)
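# --- Added note (not part of the original file) ---
# expand_greyscale broadcasts a 1-channel tensor to three channels so greyscale
# inputs match the RGB shape the ResNet expects: (1, H, W) -> (3, H, W), as a
# view rather than a copy. Since __getitem__ already calls img.convert('RGB'),
# expand(3, -1, -1) is a harmless no-op on the resulting 3-channel tensors.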
class ImagesDataset(Dataset):
def __init__(self, folder, image_size):
super().__init__()
self.folder = folder
self.paths = []
for path in Path(f'{folder}').glob('**/*'):
_, ext = os.path.splitext(path)
if ext.lower() in IMAGE_EXTS:
self.paths.append(path)
print(f'{len(self.paths)} images found')
self.transform = transforms.Compose([
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Lambda(expand_greyscale)
])
def __len__(self):
return len(self.paths)
def __getitem__(self, index):
path = self.paths[index]
img = Image.open(path)
img = img.convert('RGB')
return self.transform(img)
# main
if __name__ == '__main__':
ds = ImagesDataset(args.image_folder, IMAGE_SIZE)
train_loader = DataLoader(ds, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS, shuffle=True)
model = SelfSupervisedLearner(
resnet,
image_size = IMAGE_SIZE,
hidden_layer = 'avgpool',
projection_size = 256,
projection_hidden_size = 4096,
moving_average_decay = 0.99
)
trainer = pl.Trainer(gpus=NUM_GPUS, max_epochs=EPOCHS)
trainer.fit(model, train_loader)
| [
"torch.utils.data.DataLoader"
] | 1.6 | kshen6/byol-pytorch | 49e303adf89ed3d990025262fd30226a00a98d45 |
1.6 | import torch
from mimic.evaluation.divergence_measures.kl_div import calc_entropy_gauss
from mimic.evaluation.divergence_measures.kl_div import calc_kl_divergence
from mimic.evaluation.divergence_measures.kl_div import calc_kl_divergence_lb_gauss_mixture
from mimic.evaluation.divergence_measures.kl_div import calc_kl_divergence_ub_gauss_mixture
from mimic.utils.utils import reweight_weights
def poe(mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
T = 1. / var
pd_mu = torch.sum(mu * T, dim=0) / torch.sum(T, dim=0)
pd_var = 1. / torch.sum(T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar
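# --- Added note (not part of the original file) ---
# Closed-form product of Gaussian experts: with precisions T_i = 1/var_i, the
# product density is Gaussian with variance 1/sum_i(T_i) and mean
# sum_i(mu_i * T_i) / sum_i(T_i), i.e. a precision-weighted average of the
# expert means, computed here across dim 0 (the expert dimension).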
def alpha_poe(alpha, mu, logvar, eps=1e-8):
var = torch.exp(logvar) + eps
# precision of i-th Gaussian expert at point x
    if var.dim() == 3:
        alpha_expanded = alpha.unsqueeze(-1).unsqueeze(-1)
    elif var.dim() == 4:
        alpha_expanded = alpha.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
    else:
        raise ValueError("alpha_poe expects var with 3 or 4 dims, got {}".format(var.dim()))
T = 1 / var
pd_var = 1. / torch.sum(alpha_expanded * T, dim=0)
pd_mu = pd_var * torch.sum(alpha_expanded * mu * T, dim=0)
pd_logvar = torch.log(pd_var)
return pd_mu, pd_logvar
def calc_alphaJSD_modalities_mixture(m1_mu, m1_logvar, m2_mu, m2_logvar, flags):
klds = torch.zeros(2)
entropies_mixture = torch.zeros(2)
w_modalities = torch.Tensor(flags.alpha_modalities[1:])
if flags.cuda:
w_modalities = w_modalities.cuda()
klds = klds.cuda()
entropies_mixture = entropies_mixture.cuda()
w_modalities = reweight_weights(w_modalities)
mus = [m1_mu, m2_mu]
logvars = [m1_logvar, m2_logvar]
for k in range(len(mus)):
ent = calc_entropy_gauss(flags, logvars[k], norm_value=flags.batch_size)
# print('entropy: ' + str(ent))
# print('lb: ' )
kld_lb = calc_kl_divergence_lb_gauss_mixture(flags, k, mus[k], logvars[k], mus, logvars,
norm_value=flags.batch_size)
print('kld_lb: ' + str(kld_lb))
# print('ub: ')
kld_ub = calc_kl_divergence_ub_gauss_mixture(flags, k, mus[k], logvars[k], mus, logvars, ent,
norm_value=flags.batch_size)
print('kld_ub: ' + str(kld_ub))
# kld_mean = (kld_lb+kld_ub)/2
entropies_mixture[k] = ent.clone()
klds[k] = 0.5 * (kld_lb + kld_ub)
# klds[k] = kld_ub
summed_klds = (w_modalities * klds).sum()
# print('summed klds: ' + str(summed_klds))
return summed_klds, klds, entropies_mixture
def calc_alphaJSD_modalities(flags, mus, logvars, weights, normalization=None):
num_mods = mus.shape[0]
num_samples = mus.shape[1]
alpha_mu, alpha_logvar = alpha_poe(weights, mus, logvars)
if normalization is not None:
klds = torch.zeros(num_mods)
else:
klds = torch.zeros(num_mods, num_samples)
klds = klds.to(flags.device)
for k in range(0, num_mods):
kld = calc_kl_divergence(mus[k, :, :], logvars[k, :, :], alpha_mu,
alpha_logvar, norm_value=normalization)
if normalization is not None:
klds[k] = kld
else:
klds[k, :] = kld
if normalization is None:
weights = weights.unsqueeze(1).repeat(1, num_samples)
group_div = (weights * klds).sum(dim=0)
return group_div, klds, [alpha_mu, alpha_logvar]
def calc_group_divergence_moe(flags, mus, logvars, weights, normalization=None):
num_mods = mus.shape[0]
# num_samples is the batch size
num_samples = mus.shape[1]
if normalization is not None:
klds = torch.zeros(num_mods)
else:
klds = torch.zeros(num_mods, num_samples)
klds = klds.to(flags.device)
weights = weights.to(flags.device)
for k in range(num_mods):
kld_ind = calc_kl_divergence(mus[k, :, :], logvars[k, :, :],
norm_value=normalization)
if normalization is not None:
klds[k] = kld_ind
else:
klds[k, :] = kld_ind
if normalization is None:
weights = weights.unsqueeze(1).repeat(1, num_samples)
group_div = (weights * klds).sum(dim=0)
return group_div, klds
def calc_group_divergence_poe(flags, mus, logvars, norm=None):
num_mods = mus.shape[0]
poe_mu, poe_logvar = poe(mus, logvars)
kld_poe = calc_kl_divergence(poe_mu, poe_logvar, norm_value=norm)
klds = torch.zeros(num_mods).to(flags.device)
for k in range(num_mods):
kld_ind = calc_kl_divergence(mus[k, :, :], logvars[k, :, :],
norm_value=norm)
klds[k] = kld_ind
return kld_poe, klds, [poe_mu, poe_logvar]
def calc_modality_divergence(m1_mu, m1_logvar, m2_mu, m2_logvar, flags):
if flags.modality_poe:
return calc_kl_divergence(
m1_mu, m1_logvar, m2_mu, m2_logvar, norm_value=flags.batch_size
).sum()
uniform_mu = torch.zeros(m1_mu.shape)
uniform_logvar = torch.zeros(m1_logvar.shape)
klds = torch.zeros(3, 3)
klds_modonly = torch.zeros(2, 2)
if flags.cuda:
klds = klds.cuda()
klds_modonly = klds_modonly.cuda()
uniform_mu = uniform_mu.cuda()
uniform_logvar = uniform_logvar.cuda()
mus = [uniform_mu, m1_mu, m2_mu]
logvars = [uniform_logvar, m1_logvar, m2_logvar]
for i in range(1, len(mus)): # CAREFUL: index starts from one, not zero
for j in range(len(mus)):
kld = calc_kl_divergence(mus[i], logvars[i], mus[j], logvars[j], norm_value=flags.batch_size)
klds[i, j] = kld
if i >= 1 and j >= 1:
klds_modonly[i - 1, j - 1] = kld
klds = klds.sum() / (len(mus) * (len(mus) - 1))
klds_modonly = klds_modonly.sum() / ((len(mus) - 1) * (len(mus) - 1))
return [klds, klds_modonly]
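# --- Hedged smoke test (not part of the original file) ---
# Sanity-checks poe() on two unit-variance experts with means 0 and 1: the
# product should have mean 0.5 everywhere and variance 1/2 (logvar ~ -0.6931).
if __name__ == "__main__":
    mus = torch.stack([torch.zeros(4, 3), torch.ones(4, 3)])  # (n_experts, batch, dim)
    logvars = torch.zeros(2, 4, 3)
    pd_mu, pd_logvar = poe(mus, logvars)
    print(pd_mu[0, 0].item(), pd_logvar[0, 0].item())  # ~0.5, ~-0.6931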
| [
"torch.zeros",
"torch.log",
"torch.Tensor",
"torch.exp",
"torch.sum"
] | 1.6.0 | Jimmy2027/MoPoE-MIMIC | d167719b0dc7ba002b7421eb82a83e47d2437795 |
1.8 | import os
from os.path import exists, join
from pathlib import Path
from typing import Optional, Tuple
from pytorch_lightning import LightningDataModule
from torch.utils.data import ConcatDataset, DataLoader, Dataset, random_split
from src.datamodules.datasets.musdb import MusdbTrainDataset, MusdbValidDataset
class MusdbDataModule(LightningDataModule):
"""
LightningDataModule for Musdb18-HQ dataset.
A DataModule implements 5 key methods:
- prepare_data (things to do on 1 GPU/TPU, not on every GPU/TPU in distributed mode)
- setup (things to do on every accelerator in distributed mode)
- train_dataloader (the training dataloader)
- val_dataloader (the validation dataloader(s))
- test_dataloader (the test dataloader(s))
This allows you to share a full dataset without explaining how to download,
split, transform and process the data
Read the docs:
https://pytorch-lightning.readthedocs.io/en/latest/extensions/datamodules.html
"""
def __init__(
self,
data_dir: str,
aug_params,
target_name: str,
overlap: int,
hop_length: int,
dim_t: int,
sample_rate: int,
batch_size: int,
num_workers: int,
pin_memory: bool,
external_datasets,
**kwargs,
):
super().__init__()
self.data_dir = Path(data_dir)
self.target_name = target_name
self.aug_params = aug_params
self.external_datasets = external_datasets
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
# audio-related
self.hop_length = hop_length
self.sample_rate = sample_rate
# derived
self.chunk_size = hop_length * (dim_t - 1)
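        # e.g. hop_length=1024 and dim_t=256 (illustrative values, not from the
        # config) give a chunk of 1024 * 255 = 261120 samples, which an STFT
        # with that hop length maps back to exactly dim_t frames.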
self.overlap = overlap
self.data_train: Optional[Dataset] = None
self.data_val: Optional[Dataset] = None
self.data_test: Optional[Dataset] = None
trainset_path = self.data_dir.joinpath('train')
validset_path = self.data_dir.joinpath('valid')
# create validation split
if not exists(validset_path):
from shutil import move
os.mkdir(validset_path)
for track in kwargs['validation_set']:
if trainset_path.joinpath(track).exists():
move(trainset_path.joinpath(track), validset_path.joinpath(track))
else:
valid_files = os.listdir(validset_path)
assert set(valid_files) == set(kwargs['validation_set'])
def setup(self, stage: Optional[str] = None):
"""Load data. Set variables: self.data_train, self.data_val, self.data_test."""
self.data_train = MusdbTrainDataset(self.data_dir,
self.chunk_size,
self.target_name,
self.aug_params,
self.external_datasets)
self.data_val = MusdbValidDataset(self.data_dir,
self.chunk_size,
self.target_name,
self.overlap,
self.batch_size)
def train_dataloader(self):
return DataLoader(
dataset=self.data_train,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=True,
)
def val_dataloader(self):
return DataLoader(
dataset=self.data_val,
batch_size=1,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
shuffle=False,
        )
| [
"torch.utils.data.DataLoader"
] | 1.8.1 | kuielab/mdx-net | bbd46dbf2ceb26c3fbfbe412b5159bce2366c9c0 |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import numpy as np
import pytest
import torch
from sklearn.metrics import precision_score, recall_score
from torch import Tensor
from torchmetrics.classification import Precision, Recall
from torchmetrics.utilities import apply_to_collection
from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_7
from torchmetrics.wrappers.bootstrapping import BootStrapper, _bootstrap_sampler
_preds = torch.randint(10, (10, 32))
_target = torch.randint(10, (10, 32))
class TestBootStrapper(BootStrapper):
"""For testing purpose, we subclass the bootstrapper class so we can get the exact permutation the class is
creating."""
def update(self, *args) -> None:
self.out = []
for idx in range(self.num_bootstraps):
size = len(args[0])
sample_idx = _bootstrap_sampler(size, sampling_strategy=self.sampling_strategy)
new_args = apply_to_collection(args, Tensor, torch.index_select, dim=0, index=sample_idx)
self.metrics[idx].update(*new_args)
self.out.append(new_args)
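# --- Added note (not part of the original file) ---
# Each bootstrap replicate re-samples the mini-batch along dim 0:
# apply_to_collection maps torch.index_select over every Tensor in `args`, so
# preds and target stay aligned under the same sampled indices.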
def _sample_checker(old_samples, new_samples, op: operator, threshold: int):
found_one = False
for os in old_samples:
cond = op(os, new_samples)
if cond.sum() > threshold:
found_one = True
break
return found_one
@pytest.mark.parametrize("sampling_strategy", ["poisson", "multinomial"])
def test_bootstrap_sampler(sampling_strategy):
"""make sure that the bootstrap sampler works as intended."""
old_samples = torch.randn(10, 2)
# make sure that the new samples are only made up of old samples
idx = _bootstrap_sampler(10, sampling_strategy=sampling_strategy)
new_samples = old_samples[idx]
for ns in new_samples:
assert ns in old_samples
found_one = _sample_checker(old_samples, new_samples, operator.eq, 2)
assert found_one, "resampling did not work because no samples were sampled twice"
found_zero = _sample_checker(old_samples, new_samples, operator.ne, 0)
    assert found_zero, "resampling did not work because all samples were at least sampled once"
@pytest.mark.parametrize("sampling_strategy", ["poisson", "multinomial"])
@pytest.mark.parametrize(
"metric, sk_metric", [[Precision(average="micro"), precision_score], [Recall(average="micro"), recall_score]]
)
def test_bootstrap(sampling_strategy, metric, sk_metric):
"""Test that the different bootstraps gets updated as we expected and that the compute method works."""
_kwargs = {"base_metric": metric, "mean": True, "std": True, "raw": True, "sampling_strategy": sampling_strategy}
if _TORCH_GREATER_EQUAL_1_7:
_kwargs.update(dict(quantile=torch.tensor([0.05, 0.95])))
bootstrapper = TestBootStrapper(**_kwargs)
collected_preds = [[] for _ in range(10)]
collected_target = [[] for _ in range(10)]
for p, t in zip(_preds, _target):
bootstrapper.update(p, t)
for i, o in enumerate(bootstrapper.out):
collected_preds[i].append(o[0])
collected_target[i].append(o[1])
collected_preds = [torch.cat(cp) for cp in collected_preds]
collected_target = [torch.cat(ct) for ct in collected_target]
sk_scores = [sk_metric(ct, cp, average="micro") for ct, cp in zip(collected_target, collected_preds)]
output = bootstrapper.compute()
    # quantile only available for pytorch v1.7 and onward
if _TORCH_GREATER_EQUAL_1_7:
assert np.allclose(output["quantile"][0], np.quantile(sk_scores, 0.05))
assert np.allclose(output["quantile"][1], np.quantile(sk_scores, 0.95))
assert np.allclose(output["mean"], np.mean(sk_scores))
assert np.allclose(output["std"], np.std(sk_scores, ddof=1))
assert np.allclose(output["raw"], sk_scores)
| [
"torch.cat",
"torch.randint",
"torch.tensor",
"torch.randn"
] | 1.3.1 | gagan3012/metrics | 5a2388ccaa97cc3608b1fa28879f77436434a6d6 |
1.8 | import scipy
from torch.nn import Dropout, CrossEntropyLoss
from code.abstract.abstract_torch_module import AbstractTorchModule
import torch
import numpy as np
from code.gnns.qa_gnn import QaGNN
from code.utils.evaluation.choice_model_output import ChoiceModelOutput
from code.utils.torch_utils.xavier_linear import XavierLinear
class QAModel(AbstractTorchModule):
n_edge_types = 4
def __init__(self, configuration):
AbstractTorchModule.__init__(self)
self.layers = configuration["model_parameters"]["gnn_layers"]
self.configuration = configuration
self.max_nodes = configuration["task"]["max_nodes"]
self.max_query_size = configuration["task"]["max_query_size"]
self.max_candidates = configuration["task"]["max_candidates"]
embedding_input_dim = 300
self.gcn = QaGNN(dim=512,
n_layers=self.layers,
n_relations=self.n_edge_types,
share_parameters=True)
self.node_compress_mlp = torch.nn.Sequential(XavierLinear(embedding_input_dim, 256),
torch.nn.Tanh(),
torch.nn.Dropout(p=0.2))
self.node_mlp = torch.nn.Sequential(XavierLinear(512, 1024),
torch.nn.Tanh(),
torch.nn.Dropout(p=0.2),
XavierLinear(1024, 512),
torch.nn.Tanh(),
torch.nn.Dropout(p=0.2))
# self.lstm = LSTM(3072, 256, 2, batch_first=True, bidirectional=True)
self.lstm1 = torch.nn.LSTM(embedding_input_dim, 256, num_layers=1, batch_first=True, bidirectional=True,
dropout=0)
self.lstm2 = torch.nn.LSTM(512, 128, num_layers=1, batch_first=True, bidirectional=True, dropout=0)
self.query_dropout = Dropout(p=0.2)
self.second_mlp = torch.nn.Sequential(XavierLinear(768, 128),
torch.nn.Tanh(),
XavierLinear(128, 1),
torch.nn.Dropout(p=0.2))
self.loss = CrossEntropyLoss(reduction="none")
def forward(self, batch):
processed_batch = self.process_batch(batch)
this_batch_max_nodes = max(processed_batch["nodes_length_mb"])
normalized_batch_adj_mats = torch.FloatTensor(processed_batch["adj_mb"]).to(self.device)[:, :,
:this_batch_max_nodes, :this_batch_max_nodes]
query = torch.FloatTensor(processed_batch["query_mb"]).to(self.device).view(len(batch), self.max_query_size, -1)
query_lengths = torch.LongTensor(processed_batch["query_length_mb"]).to(self.device)
packed_representation = torch.nn.utils.rnn.pack_padded_sequence(query, query_lengths.cpu(),
batch_first=True, enforce_sorted=False)
lstm1_output, _ = self.lstm1(packed_representation)
_, (query_lasthidden, _) = self.lstm2(lstm1_output)
final_output = query_lasthidden.transpose(1, 0).reshape(len(batch), -1)
final_output = self.query_dropout(final_output)
query_to_node = final_output.unsqueeze(1).repeat(1, this_batch_max_nodes, 1)
nodes = torch.FloatTensor(processed_batch["nodes_mb"]).to(self.device).view(len(batch), self.max_nodes, -1)[:,
:this_batch_max_nodes, :]
node_lengths = torch.LongTensor(processed_batch["nodes_length_mb"]).to(self.device)
        node_mask = (torch.arange(this_batch_max_nodes, dtype=torch.long).to(self.device)
                     .expand(node_lengths.shape[0], this_batch_max_nodes)
                     < node_lengths.unsqueeze(1))
node_mask = node_mask.unsqueeze(-1).float()
nodes *= node_mask
query_to_node *= node_mask
nodes = self.node_compress_mlp(nodes)
nodes = torch.cat([query_to_node, nodes], -1)
nodes = self.node_mlp(nodes)
vertex_embeddings = self.gcn(nodes, normalized_batch_adj_mats, mask=node_mask)
vertex_embeddings = vertex_embeddings.view(len(batch), this_batch_max_nodes, -1)
final_vertex_embeddings = torch.cat([query_to_node, vertex_embeddings], -1)
final_vertex_embeddings = self.second_mlp(final_vertex_embeddings)
final_vertex_embeddings *= node_mask
bmask = torch.FloatTensor(processed_batch["bmask_mb"]).to(self.device)[:, :, :this_batch_max_nodes]
final_vertex_embeddings = final_vertex_embeddings.squeeze(-1).unsqueeze(1)
candidate_embeddings = bmask * final_vertex_embeddings
cand_unconnected = candidate_embeddings == 0
cand_n_connections = (1 - cand_unconnected.float()).sum(dim=-1)
cand_connected = torch.min(cand_n_connections, torch.ones_like(cand_n_connections))
candidate_embeddings = torch.where(cand_unconnected, torch.ones_like(candidate_embeddings) * -1e8,
candidate_embeddings)
candidate_embeddings, _ = torch.max(candidate_embeddings, dim=-1)
answers = torch.LongTensor(processed_batch["answer_positions_mb"]).to(self.device)
gold_candidate_connected = cand_connected[torch.arange(cand_connected.size(0)), answers]
# This is a bit hacky, might want to refactor.
# We only see negative targets at test time when the answer is not a mention, so we could actually skip
# computing the loss entirely in those cases.
loss_targets = torch.max(answers, torch.zeros_like(answers))
loss = (self.loss(candidate_embeddings, loss_targets) * gold_candidate_connected).mean()
scores = torch.softmax(candidate_embeddings, dim=-1).detach().cpu().numpy()
predictions = []
for i, example in enumerate(batch):
example_scores = scores[i]
example_gold = example["answer_position"]
example_output = ChoiceModelOutput(example_scores, example_gold)
predictions.append(example_output)
return loss, predictions
def get_gnn(self):
return self.gcn
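    # --- Hedged configuration sketch (not part of the original file) ---
    # The constructor only reads the keys below; all values are illustrative.
    # configuration = {
    #     "model_parameters": {"gnn_layers": 3},
    #     "task": {"max_nodes": 500, "max_query_size": 25, "max_candidates": 80},
    # }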
def process_batch(self, data_mb):
answers_mb = [d["answer_position"] for d in data_mb]
id_mb = [d['id'] for d in data_mb]
candidates_orig_mb = [d['candidates_orig'] for d in data_mb]
candidates_orig_mb2 = [d['candidates_orig2'] for d in data_mb]
candidates_mb = [d['candidates'] for d in data_mb]
nodes_mb = np.array([np.pad(np.array([c.mean(0) for c in d['nodes_glove']]),
((0, self.max_nodes - len(d['nodes_candidates_id'])), (0, 0)),
mode='constant')
for d in data_mb])
query_mb = np.stack([np.pad(d['query_glove'],
((0, self.max_query_size - d['query_glove'].shape[0]), (0, 0)),
mode='constant')
for d in data_mb], 0)
nodes_length_mb = np.stack([len(d['nodes_candidates_id']) for d in data_mb], 0)
query_length_mb = np.stack([d['query_glove'].shape[0] for d in data_mb], 0)
adj_mb = []
for d in data_mb:
adj_ = []
if len(d['edges_in']) == 0:
adj_.append(np.zeros((self.max_nodes, self.max_nodes)))
else:
adj = scipy.sparse.coo_matrix((np.ones(len(d['edges_in'])), np.array(d['edges_in']).T),
shape=(self.max_nodes, self.max_nodes)).toarray()
adj_.append(adj)
if len(d['edges_out']) == 0:
adj_.append(np.zeros((self.max_nodes, self.max_nodes)))
else:
adj = scipy.sparse.coo_matrix((np.ones(len(d['edges_out'])), np.array(d['edges_out']).T),
shape=(self.max_nodes, self.max_nodes)).toarray()
adj_.append(adj)
if len(d['edges_coref']) == 0:
adj_.append(np.zeros((self.max_nodes, self.max_nodes)))
else:
adj = scipy.sparse.coo_matrix((np.ones(len(d['edges_coref'])), np.array(d['edges_coref']).T),
shape=(self.max_nodes, self.max_nodes)).toarray()
adj_.append(adj)
adj = np.pad(np.ones((len(d['nodes_candidates_id']), len(d['nodes_candidates_id']))),
((0, self.max_nodes - len(d['nodes_candidates_id'])),
(0, self.max_nodes - len(d['nodes_candidates_id']))), mode='constant') \
- adj_[0] - adj_[1] - adj_[2] - np.pad(np.eye(len(d['nodes_candidates_id'])),
((0, self.max_nodes - len(d['nodes_candidates_id'])),
(0, self.max_nodes - len(d['nodes_candidates_id']))),
mode='constant')
adj_.append(np.clip(adj, 0, 1))
adj = np.stack(adj_, 0)
d_ = adj.sum(-1)
d_[np.nonzero(d_)] **= -1
adj = adj * np.expand_dims(d_, -1)
adj_mb.append(adj)
adj_mb = np.array(adj_mb)
bmask_mb = np.array([np.pad(np.array([i == np.array(d['nodes_candidates_id'])
for i in range(len(d['candidates']))]),
((0, self.max_candidates - len(d['candidates'])),
(0, self.max_nodes - len(d['nodes_candidates_id']))), mode='constant')
for d in data_mb])
return {'id_mb': id_mb, 'nodes_mb': nodes_mb, 'nodes_length_mb': nodes_length_mb,
'query_mb': query_mb, 'query_length_mb': query_length_mb, 'bmask_mb': bmask_mb,
'adj_mb': adj_mb, 'candidates_mb': candidates_mb, 'candidates_orig_mb': candidates_orig_mb,
                'candidates_orig_mb2': candidates_orig_mb2, "answer_positions_mb": answers_mb}
| [
"torch.cat",
"torch.nn.LSTM",
"torch.LongTensor",
"torch.nn.CrossEntropyLoss",
"torch.FloatTensor",
"torch.zeros_like",
"torch.max",
"torch.nn.Tanh",
"torch.nn.Dropout",
"torch.arange",
"torch.softmax",
"torch.ones_like"
] | 1.8.1 | S-Eggers/GraphMask | 9e431a541279801ec46a5b38ed57b2033f795240 |
1.6 | import sys
import os
sys.path.append(os.path.abspath(".."))
import cv2
import math
import torch
import kornia
import numpy as np
import torch.nn as nn
from numpy.fft import fft2, ifft2, fftshift
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms, utils
import matplotlib.pyplot as plt
from util.utils import *
from log_polar.log_polar import *
def phase_corr(a, b, device, logbase, trans=False):
# a: template; b: source
# imshow(a.squeeze(0).float())
G_a = torch.rfft(a, 2, onesided=False)
G_b = torch.rfft(b, 2, onesided=False)
eps = 1e-15
real_a = G_a[:, :, :, 0]
real_b = G_b[:, :, :, 0]
imag_a = G_a[:, :, :, 1]
imag_b = G_b[:, :, :, 1]
# compute a * b.conjugate; shape=[B,H,W,C]
R = torch.FloatTensor(G_a.shape[0], G_a.shape[1], G_a.shape[2],
2).to(device)
R[:, :, :, 0] = real_a * real_b + imag_a * imag_b
R[:, :, :, 1] = real_a * imag_b - real_b * imag_a
r0 = torch.sqrt(real_a**2 + imag_a**2 + eps) * torch.sqrt(real_b**2 +
imag_b**2 + eps)
R[:, :, :, 0] = R[:, :, :, 0].clone() / (r0 + eps).to(device)
R[:, :, :, 1] = R[:, :, :, 1].clone() / (r0 + eps).to(device)
r = torch.ifft(R, 2)
r_real = r[:, :, :, 0]
r_imag = r[:, :, :, 1]
r = torch.sqrt(r_real**2 + r_imag**2 + eps)
r = fftshift2d(r)
if trans:
        r[:, 0:60, :] = 0.
        r[:, G_a.shape[1] - 60:G_a.shape[1], :] = 0.
        r[:, :, 0:60] = 0.
        # width slice: was G_a.shape[1], which is only correct for square inputs
        r[:, :, G_a.shape[2] - 60:G_a.shape[2]] = 0.
# imshow(r[0,:,:])
# plt.show()
angle_resize_out_tensor = torch.sum(r.clone(), 2, keepdim=False)
scale_reszie_out_tensor = torch.sum(r.clone(), 1, keepdim=False)
# get the argmax of the angle and the scale
angle_out_tensor = torch.argmax(angle_resize_out_tensor.clone().detach(),
dim=-1)
scale_out_tensor = torch.argmax(scale_reszie_out_tensor.clone().detach(),
dim=-1)
if not trans:
angle_out_tensor = angle_out_tensor * 180.00 / r.shape[1]
for batch_num in range(angle_out_tensor.shape[0]):
if angle_out_tensor[batch_num].item() > 90:
angle_out_tensor[batch_num] -= 90.00
else:
angle_out_tensor[batch_num] += 90.00
logbase = logbase.to(device)
# sca_f = scale_out_tensor.clone() * 256 / r.shape[2] - 256 // 2
scale_out_tensor = 1 / torch.pow(
logbase, scale_out_tensor.clone()) #logbase ** sca_f
return scale_out_tensor, angle_out_tensor, r, logbase
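# --- Hedged smoke test (not part of the original file) ---
# Runs phase_corr on a template and a circularly shifted copy (trans=True),
# assuming a torch version that still provides torch.rfft/torch.ifft (<=1.7)
# and that fftshift2d is importable from util.utils. Values are illustrative.
if __name__ == "__main__":
    dev = torch.device("cpu")
    a = torch.rand(1, 256, 256)
    b = torch.roll(a, shifts=(4, 7), dims=(1, 2))
    scale_idx, angle_idx, corr, base = phase_corr(a, b, dev, torch.tensor(2.0), trans=True)
    print(corr.shape, scale_idx, angle_idx)  # the correlation peak encodes the shift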
def highpass(shape):
"""Return highpass filter to be multiplied with fourier transform."""
i1 = torch.cos(torch.linspace(-np.pi / 2.0, np.pi / 2.0, shape[0]))
i2 = torch.cos(torch.linspace(-np.pi / 2.0, np.pi / 2.0, shape[1]))
x = torch.einsum('i,j->ij', i1, i2)
return (1.0 - x) * (1.0 - x)
def logpolar_filter(shape):
"""
Make a radial cosine filter for the logpolar transform.
This filter suppresses low frequencies and completely removes
the zero freq.
"""
yy = np.linspace(-np.pi / 2., np.pi / 2., shape[0])[:, np.newaxis]
xx = np.linspace(-np.pi / 2., np.pi / 2., shape[1])[np.newaxis, :]
    # Suppressing low spatial frequencies is a must when using log-polar
# transform. The scale stuff is poorly reflected with low freqs.
rads = np.sqrt(yy**2 + xx**2)
filt = 1.0 - np.cos(rads)**2
# vvv This doesn't really matter, very high freqs are not too usable anyway
filt[np.abs(rads) > np.pi / 2] = 1
filt = torch.from_numpy(filt)
return filt
class LogPolar(nn.Module):
def __init__(self, out_size, device):
super(LogPolar, self).__init__()
self.out_size = out_size
self.device = device
def forward(self, input):
return polar_transformer(input, self.out_size, self.device)
class PhaseCorr(nn.Module):
def __init__(self, device, logbase, trans=False):
super(PhaseCorr, self).__init__()
self.device = device
self.logbase = logbase
self.trans = trans
def forward(self, template, source):
return phase_corr(template,
source,
self.device,
self.logbase,
trans=self.trans)
| [
"torch.rfft",
"torch.sqrt",
"torch.einsum",
"torch.FloatTensor",
"torch.linspace",
"torch.from_numpy",
"torch.ifft"
] | 1.6.0 | wrld/PRoGAN | ef28d4b91b76dd7f9e7d466b007826491bce5080 |
1.8 | import logging
import warnings
from collections.abc import Iterable as IterableClass
from functools import partial
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, TypeVar, Union
import numpy as np
import pandas as pd
import torch
from anndata import AnnData
from scvi import REGISTRY_KEYS
from scvi._compat import Literal
from scvi._utils import _doc_params
from scvi.data._utils import _check_nonnegative_integers
from scvi.data.anndata import AnnDataManager
from scvi.data.anndata.fields import (
CategoricalJointObsField,
CategoricalObsField,
LayerField,
NumericalJointObsField,
ProteinObsmField,
)
from scvi.dataloaders import DataSplitter
from scvi.model._utils import (
_get_batch_code_from_category,
_init_library_size,
cite_seq_raw_counts_properties,
)
from scvi.model.base._utils import _de_core
from scvi.module import TOTALVAE
from scvi.train import AdversarialTrainingPlan, TrainRunner
from scvi.utils._docstrings import doc_differential_expression, setup_anndata_dsp
from .base import ArchesMixin, BaseModelClass, RNASeqMixin, VAEMixin
logger = logging.getLogger(__name__)
Number = TypeVar("Number", int, float)
class TOTALVI(RNASeqMixin, VAEMixin, ArchesMixin, BaseModelClass):
"""
total Variational Inference [GayosoSteier21]_.
Parameters
----------
adata
AnnData object that has been registered via :meth:`~scvi.model.TOTALVI.setup_anndata`.
n_latent
Dimensionality of the latent space.
gene_dispersion
One of the following:
* ``'gene'`` - genes_dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - genes_dispersion can differ between different batches
* ``'gene-label'`` - genes_dispersion can differ between different labels
protein_dispersion
One of the following:
* ``'protein'`` - protein_dispersion parameter is constant per protein across cells
* ``'protein-batch'`` - protein_dispersion can differ between different batches NOT TESTED
* ``'protein-label'`` - protein_dispersion can differ between different labels NOT TESTED
gene_likelihood
One of:
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
latent_distribution
One of:
* ``'normal'`` - Normal distribution
* ``'ln'`` - Logistic normal distribution (Normal(0, I) transformed by softmax)
empirical_protein_background_prior
Set the initialization of protein background prior empirically. This option fits a GMM for each of
100 cells per batch and averages the distributions. Note that even with this option set to `True`,
this only initializes a parameter that is learned during inference. If `False`, randomly initializes.
The default (`None`), sets this to `True` if greater than 10 proteins are used.
override_missing_proteins
If `True`, will not treat proteins with all 0 expression in a particular batch as missing.
**model_kwargs
Keyword args for :class:`~scvi.module.TOTALVAE`
Examples
--------
>>> adata = anndata.read_h5ad(path_to_anndata)
>>> scvi.model.TOTALVI.setup_anndata(adata, batch_key="batch", protein_expression_obsm_key="protein_expression")
>>> vae = scvi.model.TOTALVI(adata)
>>> vae.train()
>>> adata.obsm["X_totalVI"] = vae.get_latent_representation()
Notes
-----
See further usage examples in the following tutorials:
1. :doc:`/tutorials/notebooks/totalVI`
2. :doc:`/tutorials/notebooks/cite_scrna_integration_w_totalVI`
3. :doc:`/tutorials/notebooks/scarches_scvi_tools`
"""
def __init__(
self,
adata: AnnData,
n_latent: int = 20,
gene_dispersion: Literal[
"gene", "gene-batch", "gene-label", "gene-cell"
] = "gene",
protein_dispersion: Literal[
"protein", "protein-batch", "protein-label"
] = "protein",
gene_likelihood: Literal["zinb", "nb"] = "nb",
latent_distribution: Literal["normal", "ln"] = "normal",
empirical_protein_background_prior: Optional[bool] = None,
override_missing_proteins: bool = False,
**model_kwargs,
):
super(TOTALVI, self).__init__(adata)
self.protein_state_registry = self.adata_manager.get_state_registry(
REGISTRY_KEYS.PROTEIN_EXP_KEY
)
if (
ProteinObsmField.PROTEIN_BATCH_MASK in self.protein_state_registry
and not override_missing_proteins
):
batch_mask = self.protein_state_registry.protein_batch_mask
msg = (
"Some proteins have all 0 counts in some batches. "
+ "These proteins will be treated as missing measurements; however, "
+ "this can occur due to experimental design/biology. "
+ "Reinitialize the model with `override_missing_proteins=True`,"
+ "to override this behavior."
)
warnings.warn(msg, UserWarning)
self._use_adversarial_classifier = True
else:
batch_mask = None
self._use_adversarial_classifier = False
emp_prior = (
empirical_protein_background_prior
if empirical_protein_background_prior is not None
else (self.summary_stats.n_proteins > 10)
)
if emp_prior:
prior_mean, prior_scale = self._get_totalvi_protein_priors(adata)
else:
prior_mean, prior_scale = None, None
n_cats_per_cov = (
self.adata_manager.get_state_registry(REGISTRY_KEYS.CAT_COVS_KEY)[
CategoricalJointObsField.N_CATS_PER_KEY
]
if REGISTRY_KEYS.CAT_COVS_KEY in self.adata_manager.data_registry
else None
)
n_batch = self.summary_stats.n_batch
library_log_means, library_log_vars = _init_library_size(
self.adata_manager, n_batch
)
self.module = TOTALVAE(
n_input_genes=self.summary_stats.n_vars,
n_input_proteins=self.summary_stats.n_proteins,
n_batch=n_batch,
n_latent=n_latent,
n_continuous_cov=self.summary_stats.get("n_extra_continuous_covs", 0),
n_cats_per_cov=n_cats_per_cov,
gene_dispersion=gene_dispersion,
protein_dispersion=protein_dispersion,
gene_likelihood=gene_likelihood,
latent_distribution=latent_distribution,
protein_batch_mask=batch_mask,
protein_background_prior_mean=prior_mean,
protein_background_prior_scale=prior_scale,
library_log_means=library_log_means,
library_log_vars=library_log_vars,
**model_kwargs,
)
self._model_summary_string = (
"TotalVI Model with the following params: \nn_latent: {}, "
"gene_dispersion: {}, protein_dispersion: {}, gene_likelihood: {}, latent_distribution: {}"
).format(
n_latent,
gene_dispersion,
protein_dispersion,
gene_likelihood,
latent_distribution,
)
self.init_params_ = self._get_init_params(locals())
def train(
self,
max_epochs: Optional[int] = 400,
lr: float = 4e-3,
use_gpu: Optional[Union[str, int, bool]] = None,
train_size: float = 0.9,
validation_size: Optional[float] = None,
batch_size: int = 256,
early_stopping: bool = True,
check_val_every_n_epoch: Optional[int] = None,
reduce_lr_on_plateau: bool = True,
n_steps_kl_warmup: Union[int, None] = None,
n_epochs_kl_warmup: Union[int, None] = None,
adversarial_classifier: Optional[bool] = None,
plan_kwargs: Optional[dict] = None,
**kwargs,
):
"""
Trains the model using amortized variational inference.
Parameters
----------
max_epochs
Number of passes through the dataset.
lr
Learning rate for optimization.
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
train_size
Size of training set in the range [0.0, 1.0].
validation_size
Size of the test set. If `None`, defaults to 1 - `train_size`. If
`train_size + validation_size < 1`, the remaining cells belong to a test set.
batch_size
Minibatch size to use during training.
early_stopping
Whether to perform early stopping with respect to the validation set.
check_val_every_n_epoch
Check val every n train epochs. By default, val is not checked, unless `early_stopping` is `True`
or `reduce_lr_on_plateau` is `True`. If either of the latter conditions are met, val is checked
every epoch.
reduce_lr_on_plateau
Reduce learning rate on plateau of validation metric (default is ELBO).
n_steps_kl_warmup
Number of training steps (minibatches) to scale weight on KL divergences from 0 to 1.
Only activated when `n_epochs_kl_warmup` is set to None. If `None`, defaults
to `floor(0.75 * adata.n_obs)`.
n_epochs_kl_warmup
Number of epochs to scale weight on KL divergences from 0 to 1.
Overrides `n_steps_kl_warmup` when both are not `None`.
adversarial_classifier
Whether to use adversarial classifier in the latent space. This helps mixing when
            there are missing proteins in any of the batches. Defaults to `True` if missing
            proteins are detected.
plan_kwargs
Keyword args for :class:`~scvi.train.AdversarialTrainingPlan`. Keyword arguments passed to
`train()` will overwrite values present in `plan_kwargs`, when appropriate.
**kwargs
Other keyword args for :class:`~scvi.train.Trainer`.
"""
if adversarial_classifier is None:
adversarial_classifier = self._use_adversarial_classifier
n_steps_kl_warmup = (
n_steps_kl_warmup
if n_steps_kl_warmup is not None
else int(0.75 * self.adata.n_obs)
)
if reduce_lr_on_plateau:
check_val_every_n_epoch = 1
update_dict = {
"lr": lr,
"adversarial_classifier": adversarial_classifier,
"reduce_lr_on_plateau": reduce_lr_on_plateau,
"n_epochs_kl_warmup": n_epochs_kl_warmup,
"n_steps_kl_warmup": n_steps_kl_warmup,
"check_val_every_n_epoch": check_val_every_n_epoch,
}
if plan_kwargs is not None:
plan_kwargs.update(update_dict)
else:
plan_kwargs = update_dict
if max_epochs is None:
n_cells = self.adata.n_obs
max_epochs = np.min([round((20000 / n_cells) * 400), 400])
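            # e.g. 10,000 cells -> min(round(800), 400) = 400 epochs, while
            # 100,000 cells -> min(80, 400) = 80 (illustrative cell counts).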
plan_kwargs = plan_kwargs if isinstance(plan_kwargs, dict) else dict()
data_splitter = DataSplitter(
self.adata_manager,
train_size=train_size,
validation_size=validation_size,
batch_size=batch_size,
use_gpu=use_gpu,
)
training_plan = AdversarialTrainingPlan(self.module, **plan_kwargs)
runner = TrainRunner(
self,
training_plan=training_plan,
data_splitter=data_splitter,
max_epochs=max_epochs,
use_gpu=use_gpu,
early_stopping=early_stopping,
**kwargs,
)
return runner()
@torch.no_grad()
def get_latent_library_size(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
give_mean: bool = True,
batch_size: Optional[int] = None,
) -> np.ndarray:
r"""
Returns the latent library size for each cell.
This is denoted as :math:`\ell_n` in the totalVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
give_mean
Return the mean or a sample from the posterior distribution.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
self._check_if_trained(warn=False)
adata = self._validate_anndata(adata)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
libraries = []
for tensors in post:
inference_inputs = self.module._get_inference_input(tensors)
outputs = self.module.inference(**inference_inputs)
if give_mean:
ql_m = outputs["ql_m"]
ql_v = outputs["ql_v"]
library = torch.exp(ql_m + 0.5 * ql_v)
else:
library = outputs["library_gene"]
libraries += [library.cpu()]
return torch.cat(libraries).numpy()
@torch.no_grad()
def get_normalized_expression(
self,
adata=None,
indices=None,
n_samples_overall: Optional[int] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
gene_list: Optional[Sequence[str]] = None,
protein_list: Optional[Sequence[str]] = None,
library_size: Optional[Union[float, Literal["latent"]]] = 1,
n_samples: int = 1,
sample_protein_mixing: bool = False,
scale_protein: bool = False,
include_protein_background: bool = False,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
) -> Tuple[Union[np.ndarray, pd.DataFrame], Union[np.ndarray, pd.DataFrame]]:
r"""
Returns the normalized gene expression and protein expression.
This is denoted as :math:`\rho_n` in the totalVI paper for genes, and TODO
for proteins, :math:`(1-\pi_{nt})\alpha_{nt}\beta_{nt}`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples_overall
Number of samples to use in total
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- List[int], then average over batches in list
gene_list
Return frequencies of expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
protein_list
Return protein expression for a subset of genes.
This can save memory when working with large datasets and few genes are
of interest.
library_size
Scale the expression frequencies to a common library size.
This allows gene expression levels to be interpreted on a common scale of relevant
magnitude.
n_samples
Get sample scale from multiple samples.
sample_protein_mixing
Sample mixing bernoulli, setting background to zero
scale_protein
Make protein expression sum to 1
include_protein_background
Include background component for protein expression
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a `np.ndarray` instead of a `pd.DataFrame`. Includes gene
names as columns. If either n_samples=1 or return_mean=True, defaults to False.
Otherwise, it defaults to True.
Returns
-------
- **gene_normalized_expression** - normalized expression for RNA
- **protein_normalized_expression** - normalized expression for proteins
If ``n_samples`` > 1 and ``return_mean`` is False, then the shape is ``(samples, cells, genes)``.
Otherwise, shape is ``(cells, genes)``. Return type is ``pd.DataFrame`` unless ``return_numpy`` is True.
"""
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata)
if indices is None:
indices = np.arange(adata.n_obs)
if n_samples_overall is not None:
indices = np.random.choice(indices, n_samples_overall)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = adata.var_names
gene_mask = [True if gene in gene_list else False for gene in all_genes]
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [True if p in protein_list else False for p in all_proteins]
if indices is None:
indices = np.arange(adata.n_obs)
if n_samples > 1 and return_mean is False:
if return_numpy is False:
warnings.warn(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(adata_manager, transform_batch)
scale_list_gene = []
scale_list_pro = []
for tensors in post:
x = tensors[REGISTRY_KEYS.X_KEY]
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
px_scale = torch.zeros_like(x)
py_scale = torch.zeros_like(y)
if n_samples > 1:
px_scale = torch.stack(n_samples * [px_scale])
py_scale = torch.stack(n_samples * [py_scale])
for b in transform_batch:
generative_kwargs = dict(transform_batch=b)
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
if library_size == "latent":
px_scale += generative_outputs["px_"]["rate"].cpu()
else:
px_scale += generative_outputs["px_"]["scale"].cpu()
px_scale = px_scale[..., gene_mask]
py_ = generative_outputs["py_"]
# probability of background
protein_mixing = 1 / (1 + torch.exp(-py_["mixing"].cpu()))
if sample_protein_mixing is True:
protein_mixing = torch.distributions.Bernoulli(
protein_mixing
).sample()
protein_val = py_["rate_fore"].cpu() * (1 - protein_mixing)
if include_protein_background is True:
protein_val += py_["rate_back"].cpu() * protein_mixing
if scale_protein is True:
protein_val = torch.nn.functional.normalize(
protein_val, p=1, dim=-1
)
protein_val = protein_val[..., protein_mask]
py_scale += protein_val
px_scale /= len(transform_batch)
py_scale /= len(transform_batch)
scale_list_gene.append(px_scale)
scale_list_pro.append(py_scale)
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
scale_list_gene = torch.cat(scale_list_gene, dim=1)
scale_list_pro = torch.cat(scale_list_pro, dim=1)
# (cells, features, samples)
scale_list_gene = scale_list_gene.permute(1, 2, 0)
scale_list_pro = scale_list_pro.permute(1, 2, 0)
else:
scale_list_gene = torch.cat(scale_list_gene, dim=0)
scale_list_pro = torch.cat(scale_list_pro, dim=0)
if return_mean is True and n_samples > 1:
scale_list_gene = torch.mean(scale_list_gene, dim=-1)
scale_list_pro = torch.mean(scale_list_pro, dim=-1)
scale_list_gene = scale_list_gene.cpu().numpy()
scale_list_pro = scale_list_pro.cpu().numpy()
if return_numpy is None or return_numpy is False:
gene_df = pd.DataFrame(
scale_list_gene,
columns=adata.var_names[gene_mask],
index=adata.obs_names[indices],
)
protein_names = self.protein_state_registry.column_names
pro_df = pd.DataFrame(
scale_list_pro,
columns=protein_names[protein_mask],
index=adata.obs_names[indices],
)
return gene_df, pro_df
else:
return scale_list_gene, scale_list_pro
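    # --- Hedged usage sketch (not part of the original file) ---
    # rna, protein = model.get_normalized_expression(
    #     adata, n_samples=25, return_mean=True, transform_batch=["batch0", "batch1"]
    # )  # illustrative batch names; returns (cells x genes) and (cells x proteins) DataFrames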
@torch.no_grad()
def get_protein_foreground_probability(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
protein_list: Optional[Sequence[str]] = None,
n_samples: int = 1,
batch_size: Optional[int] = None,
return_mean: bool = True,
return_numpy: Optional[bool] = None,
):
r"""
Returns the foreground probability for proteins.
This is denoted as :math:`(1 - \pi_{nt})` in the totalVI paper.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
transform_batch
Batch to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- List[int], then average over batches in list
protein_list
Return foreground probabilities for a subset of proteins.
This can save memory when working with large datasets and only a few
proteins are of interest.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
return_mean
Whether to return the mean of the samples.
return_numpy
Return a :class:`~numpy.ndarray` instead of a :class:`~pandas.DataFrame`. DataFrame includes
gene names as columns. If either `n_samples=1` or `return_mean=True`, defaults to `False`.
Otherwise, it defaults to `True`.
Returns
-------
- **foreground_probability** - probability foreground for each protein
If `n_samples` > 1 and `return_mean` is False, then the shape is `(samples, cells, proteins)`.
Otherwise, shape is `(cells, proteins)`. In this case, return type is :class:`~pandas.DataFrame` unless `return_numpy` is True.
"""
adata = self._validate_anndata(adata)
post = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [p in protein_list for p in all_proteins]
if n_samples > 1 and return_mean is False:
if return_numpy is False:
warnings.warn(
"return_numpy must be True if n_samples > 1 and return_mean is False, returning np.ndarray"
)
return_numpy = True
if indices is None:
indices = np.arange(adata.n_obs)
py_mixings = []
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(
self.adata_manager, transform_batch
)
for tensors in post:
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
py_mixing = torch.zeros_like(y[..., protein_mask])
if n_samples > 1:
py_mixing = torch.stack(n_samples * [py_mixing])
for b in transform_batch:
generative_kwargs = dict(transform_batch=b)
inference_kwargs = dict(n_samples=n_samples)
_, generative_outputs = self.module.forward(
tensors=tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
py_mixing += torch.sigmoid(generative_outputs["py_"]["mixing"])[
..., protein_mask
].cpu()
py_mixing /= len(transform_batch)
py_mixings += [py_mixing]
if n_samples > 1:
# concatenate along batch dimension -> result shape = (samples, cells, features)
py_mixings = torch.cat(py_mixings, dim=1)
# (cells, features, samples)
py_mixings = py_mixings.permute(1, 2, 0)
else:
py_mixings = torch.cat(py_mixings, dim=0)
if return_mean is True and n_samples > 1:
py_mixings = torch.mean(py_mixings, dim=-1)
py_mixings = py_mixings.cpu().numpy()
if return_numpy is True:
return 1 - py_mixings
else:
pro_names = self.protein_state_registry.column_names
foreground_prob = pd.DataFrame(
1 - py_mixings,
columns=pro_names[protein_mask],
index=adata.obs_names[indices],
)
return foreground_prob
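# Illustrative usage sketch, assuming a trained instance `model` of this class
# and an AnnData `adata` registered via `setup_anndata` (variable names are ours):
#     prob_fg = model.get_protein_foreground_probability(adata, n_samples=10)
#     confident_fg = prob_fg > 0.9  # per-cell, per-protein foreground calls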
def _expression_for_de(
self,
adata=None,
indices=None,
n_samples_overall=None,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
scale_protein=False,
batch_size: Optional[int] = None,
sample_protein_mixing=False,
include_protein_background=False,
protein_prior_count=0.5,
):
rna, protein = self.get_normalized_expression(
adata=adata,
indices=indices,
n_samples_overall=n_samples_overall,
transform_batch=transform_batch,
return_numpy=True,
n_samples=1,
batch_size=batch_size,
scale_protein=scale_protein,
sample_protein_mixing=sample_protein_mixing,
include_protein_background=include_protein_background,
)
protein += protein_prior_count
joint = np.concatenate([rna, protein], axis=1)
return joint
@_doc_params(
doc_differential_expression=doc_differential_expression,
)
def differential_expression(
self,
adata: Optional[AnnData] = None,
groupby: Optional[str] = None,
group1: Optional[Iterable[str]] = None,
group2: Optional[str] = None,
idx1: Optional[Union[Sequence[int], Sequence[bool], str]] = None,
idx2: Optional[Union[Sequence[int], Sequence[bool], str]] = None,
mode: Literal["vanilla", "change"] = "change",
delta: float = 0.25,
batch_size: Optional[int] = None,
all_stats: bool = True,
batch_correction: bool = False,
batchid1: Optional[Iterable[str]] = None,
batchid2: Optional[Iterable[str]] = None,
fdr_target: float = 0.05,
silent: bool = False,
protein_prior_count: float = 0.1,
scale_protein: bool = False,
sample_protein_mixing: bool = False,
include_protein_background: bool = False,
**kwargs,
) -> pd.DataFrame:
r"""
A unified method for differential expression analysis.
Implements `"vanilla"` DE [Lopez18]_ and `"change"` mode DE [Boyeau19]_.
Parameters
----------
{doc_differential_expression}
protein_prior_count
Prior count added to protein expression before LFC computation
scale_protein
Force protein values to sum to one in every single cell (post-hoc normalization)
sample_protein_mixing
Sample the protein mixture component, i.e., use the parameter to sample a Bernoulli
that determines if expression is from foreground/background.
include_protein_background
Include the protein background component as part of the protein expression
**kwargs
Keyword args for :meth:`scvi.model.base.DifferentialComputation.get_bayes_factors`
Returns
-------
Differential expression DataFrame.
"""
adata = self._validate_anndata(adata)
model_fn = partial(
self._expression_for_de,
scale_protein=scale_protein,
sample_protein_mixing=sample_protein_mixing,
include_protein_background=include_protein_background,
protein_prior_count=protein_prior_count,
batch_size=batch_size,
)
col_names = np.concatenate(
[
np.asarray(adata.var_names),
self.protein_state_registry.column_names,
]
)
result = _de_core(
self.get_anndata_manager(adata, required=True),
model_fn,
groupby,
group1,
group2,
idx1,
idx2,
all_stats,
cite_seq_raw_counts_properties,
col_names,
mode,
batchid1,
batchid2,
delta,
batch_correction,
fdr_target,
silent,
**kwargs,
)
return result
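# Illustrative usage sketch, assuming `adata.obs` carries a categorical
# column "cell_type" with groups "A" and "B" (hypothetical names):
#     de_df = model.differential_expression(adata, groupby="cell_type", group1="A", group2="B")
# The resulting frame covers genes and proteins jointly (see `col_names` above).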
@torch.no_grad()
def posterior_predictive_sample(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: int = 1,
batch_size: Optional[int] = None,
gene_list: Optional[Sequence[str]] = None,
protein_list: Optional[Sequence[str]] = None,
) -> np.ndarray:
r"""
Generate observation samples from the posterior predictive distribution.
The posterior predictive distribution is written as :math:`p(\hat{x}, \hat{y} \mid x, y)`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of required samples for each cell
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
gene_list
Names of genes of interest
protein_list
Names of proteins of interest
Returns
-------
x_new : :class:`~numpy.ndarray`
array with shape (n_cells, n_genes + n_proteins), or (n_cells, n_genes + n_proteins, n_samples) when n_samples > 1
"""
if self.module.gene_likelihood not in ["nb"]:
raise ValueError("Invalid gene_likelihood")
adata = self._validate_anndata(adata)
if gene_list is None:
gene_mask = slice(None)
else:
all_genes = adata.var_names
gene_mask = [gene in gene_list for gene in all_genes]
if protein_list is None:
protein_mask = slice(None)
else:
all_proteins = self.protein_state_registry.column_names
protein_mask = [p in protein_list for p in all_proteins]
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
scdl_list = []
for tensors in scdl:
rna_sample, protein_sample = self.module.sample(
tensors, n_samples=n_samples
)
rna_sample = rna_sample[..., gene_mask]
protein_sample = protein_sample[..., protein_mask]
data = torch.cat([rna_sample, protein_sample], dim=-1).numpy()
scdl_list += [data]
if n_samples > 1:
scdl_list[-1] = np.transpose(scdl_list[-1], (1, 2, 0))
scdl_list = np.concatenate(scdl_list, axis=0)
return scdl_list
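# Illustrative usage sketch (gene name is hypothetical); with n_samples > 1 the
# samples land on the last axis, matching the transpose above:
#     x_new = model.posterior_predictive_sample(adata, n_samples=5, gene_list=["CD3D"])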
@torch.no_grad()
def _get_denoised_samples(
self,
adata=None,
indices=None,
n_samples: int = 25,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[int] = None,
) -> np.ndarray:
"""
Return samples from an adjusted posterior predictive.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
indices of `adata` to use
n_samples
How many samples per cell
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution
transform_batch
int of which batch to condition on for all cells
"""
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
scdl_list = []
for tensors in scdl:
x = tensors[REGISTRY_KEYS.X_KEY]
y = tensors[REGISTRY_KEYS.PROTEIN_EXP_KEY]
generative_kwargs = dict(transform_batch=transform_batch)
inference_kwargs = dict(n_samples=n_samples)
with torch.no_grad():
inference_outputs, generative_outputs = self.module.forward(
tensors,
inference_kwargs=inference_kwargs,
generative_kwargs=generative_kwargs,
compute_loss=False,
)
px_ = generative_outputs["px_"]
py_ = generative_outputs["py_"]
device = px_["r"].device
pi = 1 / (1 + torch.exp(-py_["mixing"]))
mixing_sample = torch.distributions.Bernoulli(pi).sample()
protein_rate = py_["rate_fore"]
rate = torch.cat((rna_size_factor * px_["scale"], protein_rate), dim=-1)
if len(px_["r"].size()) == 2:
px_dispersion = px_["r"]
else:
px_dispersion = torch.ones_like(x).to(device) * px_["r"]
if len(py_["r"].size()) == 2:
py_dispersion = py_["r"]
else:
py_dispersion = torch.ones_like(y).to(device) * py_["r"]
dispersion = torch.cat((px_dispersion, py_dispersion), dim=-1)
# This gamma is really l*w using scVI manuscript notation
p = rate / (rate + dispersion)
r = dispersion
l_train = torch.distributions.Gamma(r, (1 - p) / p).sample()
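# With concentration r and rate (1 - p) / p, this Gamma has mean
# r * p / (1 - p) = rate, so l_train matches the NB mean in expectation.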
data = l_train.cpu().numpy()
# make background 0
data[:, :, self.adata.shape[1] :] = (
data[:, :, self.adata.shape[1] :] * (1 - mixing_sample).cpu().numpy()
)
scdl_list += [data]
scdl_list[-1] = np.transpose(scdl_list[-1], (1, 2, 0))
return np.concatenate(scdl_list, axis=0)
@torch.no_grad()
def get_feature_correlation_matrix(
self,
adata=None,
indices=None,
n_samples: int = 10,
batch_size: int = 64,
rna_size_factor: int = 1000,
transform_batch: Optional[Sequence[Union[Number, str]]] = None,
correlation_type: Literal["spearman", "pearson"] = "spearman",
log_transform: bool = False,
) -> pd.DataFrame:
"""
Generate a feature-feature correlation matrix over genes and proteins using scvi uncertainty and expression.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
rna_size_factor
size factor for RNA prior to sampling gamma distribution
transform_batch
Batches to condition on.
If transform_batch is:
- None, then real observed batch is used
- int, then batch transform_batch is used
- list of int, then values are averaged over provided batches.
correlation_type
One of "pearson", "spearman".
log_transform
Whether to log transform denoised values prior to correlation calculation.
Returns
-------
Correlation matrix of the concatenated gene and protein features
"""
from scipy.stats import spearmanr
adata = self._validate_anndata(adata)
if not isinstance(transform_batch, IterableClass):
transform_batch = [transform_batch]
transform_batch = _get_batch_code_from_category(
self.get_anndata_manager(adata, required=True), transform_batch
)
corr_mats = []
for b in transform_batch:
denoised_data = self._get_denoised_samples(
n_samples=n_samples,
batch_size=batch_size,
rna_size_factor=rna_size_factor,
transform_batch=b,
)
flattened = np.zeros(
(denoised_data.shape[0] * n_samples, denoised_data.shape[1])
)
for i in range(n_samples):
flattened[
denoised_data.shape[0] * (i) : denoised_data.shape[0] * (i + 1)
] = denoised_data[:, :, i]
if log_transform is True:
flattened[:, : self.n_genes] = np.log(
flattened[:, : self.n_genes] + 1e-8
)
flattened[:, self.n_genes :] = np.log1p(flattened[:, self.n_genes :])
if correlation_type == "pearson":
corr_matrix = np.corrcoef(flattened, rowvar=False)
else:
corr_matrix, _ = spearmanr(flattened, axis=0)
corr_mats.append(corr_matrix)
corr_matrix = np.mean(np.stack(corr_mats), axis=0)
var_names = adata.var_names
names = np.concatenate(
[
np.asarray(var_names),
self.protein_state_registry.column_names,
]
)
return pd.DataFrame(corr_matrix, index=names, columns=names)
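# Illustrative usage sketch (arguments are examples, not prescriptive):
#     corr = model.get_feature_correlation_matrix(adata, n_samples=10, correlation_type="spearman")
#     corr.shape  # (n_genes + n_proteins, n_genes + n_proteins)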
@torch.no_grad()
def get_likelihood_parameters(
self,
adata: Optional[AnnData] = None,
indices: Optional[Sequence[int]] = None,
n_samples: Optional[int] = 1,
give_mean: Optional[bool] = False,
batch_size: Optional[int] = None,
) -> Dict[str, np.ndarray]:
r"""
Estimates for the parameters of the likelihood :math:`p(x, y \mid z)`.
Parameters
----------
adata
AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
AnnData object used to initialize the model.
indices
Indices of cells in adata to use. If `None`, all cells are used.
n_samples
Number of posterior samples to use for estimation.
give_mean
Return expected value of parameters or samples
batch_size
Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.
"""
raise NotImplementedError
def _validate_anndata(
self, adata: Optional[AnnData] = None, copy_if_view: bool = True
):
adata = super()._validate_anndata(adata=adata, copy_if_view=copy_if_view)
error_msg = "Number of {} in anndata different from when setup_anndata was run. Please rerun setup_anndata."
if REGISTRY_KEYS.PROTEIN_EXP_KEY in self.adata_manager.data_registry.keys():
pro_exp = self.get_from_registry(adata, REGISTRY_KEYS.PROTEIN_EXP_KEY)
if self.summary_stats.n_proteins != pro_exp.shape[1]:
raise ValueError(error_msg.format("proteins"))
is_nonneg_int = _check_nonnegative_integers(pro_exp)
if not is_nonneg_int:
warnings.warn(
"Make sure the registered protein expression in anndata contains unnormalized count data."
)
else:
raise ValueError("No protein data found, please setup or transfer anndata")
return adata
def _get_totalvi_protein_priors(self, adata, n_cells=100):
"""Compute an empirical prior for protein background."""
import warnings
from sklearn.exceptions import ConvergenceWarning
from sklearn.mixture import GaussianMixture
warnings.filterwarnings("error")
logger.info("Computing empirical prior initialization for protein background.")
adata = self._validate_anndata(adata)
adata_manager = self.get_anndata_manager(adata)
pro_exp = adata_manager.get_from_registry(REGISTRY_KEYS.PROTEIN_EXP_KEY)
pro_exp = pro_exp.to_numpy() if isinstance(pro_exp, pd.DataFrame) else pro_exp
batch_mask = adata_manager.get_state_registry(
REGISTRY_KEYS.PROTEIN_EXP_KEY
).get(ProteinObsmField.PROTEIN_BATCH_MASK)
batch = adata_manager.get_from_registry(REGISTRY_KEYS.BATCH_KEY).ravel()
cats = adata_manager.get_state_registry(REGISTRY_KEYS.BATCH_KEY)[
CategoricalObsField.CATEGORICAL_MAPPING_KEY
]
codes = np.arange(len(cats))
batch_avg_mus, batch_avg_scales = [], []
for b in np.unique(codes):
# can happen during online updates
# the values of these batches will not be used
num_in_batch = np.sum(batch == b)
if num_in_batch == 0:
batch_avg_mus.append(0)
batch_avg_scales.append(1)
continue
batch_pro_exp = pro_exp[batch == b]
# non missing
if batch_mask is not None:
batch_pro_exp = batch_pro_exp[:, batch_mask[b]]
if batch_pro_exp.shape[1] < 5:
logger.debug(
f"Batch {b} has too few proteins to set prior, setting randomly."
)
batch_avg_mus.append(0.0)
batch_avg_scales.append(0.05)
continue
# a batch is missing because it's in the reference but not query data
# for scarches case, these values will be replaced by original state dict
if batch_pro_exp.shape[0] == 0:
batch_avg_mus.append(0.0)
batch_avg_scales.append(0.05)
continue
cells = np.random.choice(np.arange(batch_pro_exp.shape[0]), size=n_cells)
batch_pro_exp = batch_pro_exp[cells]
gmm = GaussianMixture(n_components=2)
mus, scales = [], []
# fit per cell GMM
for c in batch_pro_exp:
try:
gmm.fit(np.log1p(c.reshape(-1, 1)))
# when cell is all 0
except ConvergenceWarning:
mus.append(0)
scales.append(0.05)
continue
means = gmm.means_.ravel()
sorted_fg_bg = np.argsort(means)
mu = means[sorted_fg_bg].ravel()[0]
covariances = gmm.covariances_[sorted_fg_bg].ravel()[0]
scale = np.sqrt(covariances)
mus.append(mu)
scales.append(scale)
# average distribution over cells
batch_avg_mu = np.mean(mus)
batch_avg_scale = np.sqrt(np.sum(np.square(scales)) / (n_cells**2))
batch_avg_mus.append(batch_avg_mu)
batch_avg_scales.append(batch_avg_scale)
# repeat prior for each protein
batch_avg_mus = np.array(batch_avg_mus, dtype=np.float32).reshape(1, -1)
batch_avg_scales = np.array(batch_avg_scales, dtype=np.float32).reshape(1, -1)
batch_avg_mus = np.tile(batch_avg_mus, (pro_exp.shape[1], 1))
batch_avg_scales = np.tile(batch_avg_scales, (pro_exp.shape[1], 1))
warnings.resetwarnings()
return batch_avg_mus, batch_avg_scales
@torch.no_grad()
def get_protein_background_mean(self, adata, indices, batch_size):
adata = self._validate_anndata(adata)
scdl = self._make_data_loader(
adata=adata, indices=indices, batch_size=batch_size
)
background_mean = []
for tensors in scdl:
_, inference_outputs, _ = self.module.forward(tensors)
b_mean = inference_outputs["py_"]["rate_back"]
background_mean += [b_mean.cpu().numpy()]
return np.concatenate(background_mean)
@classmethod
@setup_anndata_dsp.dedent
def setup_anndata(
cls,
adata: AnnData,
protein_expression_obsm_key: str,
protein_names_uns_key: Optional[str] = None,
batch_key: Optional[str] = None,
layer: Optional[str] = None,
categorical_covariate_keys: Optional[List[str]] = None,
continuous_covariate_keys: Optional[List[str]] = None,
**kwargs,
) -> Optional[AnnData]:
"""
%(summary)s.
Parameters
----------
%(param_adata)s
protein_expression_obsm_key
key in `adata.obsm` for protein expression data.
protein_names_uns_key
key in `adata.uns` for protein names. If None, will use the column names of `adata.obsm[protein_expression_obsm_key]`
if it is a DataFrame, else will assign sequential names to proteins.
%(param_batch_key)s
%(param_layer)s
%(param_cat_cov_keys)s
%(param_cont_cov_keys)s
%(param_copy)s
Returns
-------
%(returns)s
"""
setup_method_args = cls._get_setup_method_args(**locals())
batch_field = CategoricalObsField(REGISTRY_KEYS.BATCH_KEY, batch_key)
anndata_fields = [
LayerField(REGISTRY_KEYS.X_KEY, layer, is_count_data=True),
CategoricalObsField(
REGISTRY_KEYS.LABELS_KEY, None
), # Default labels field for compatibility with TOTALVAE
batch_field,
CategoricalJointObsField(
REGISTRY_KEYS.CAT_COVS_KEY, categorical_covariate_keys
),
NumericalJointObsField(
REGISTRY_KEYS.CONT_COVS_KEY, continuous_covariate_keys
),
ProteinObsmField(
REGISTRY_KEYS.PROTEIN_EXP_KEY,
protein_expression_obsm_key,
use_batch_mask=True,
batch_key=batch_field.attr_key,
colnames_uns_key=protein_names_uns_key,
is_count_data=True,
),
]
adata_manager = AnnDataManager(
fields=anndata_fields, setup_method_args=setup_method_args
)
adata_manager.register_fields(adata, **kwargs)
cls.register_manager(adata_manager)
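# Illustrative end-to-end sketch, assuming this class is scvi-tools' TOTALVI and
# protein counts live in `adata.obsm["protein_expression"]` (key name is ours):
#     TOTALVI.setup_anndata(adata, protein_expression_obsm_key="protein_expression", batch_key="batch")
#     model = TOTALVI(adata)
#     model.train()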
| [
"torch.cat",
"torch.stack",
"torch.distributions.Bernoulli",
"torch.exp",
"torch.sigmoid",
"torch.zeros_like",
"torch.nn.functional.normalize",
"torch.distributions.Gamma",
"torch.no_grad",
"torch.ones_like",
"torch.mean"
] | 1.8.0 | Semih-Kurt/scvi-tools | 1bea2af8cc99e11d55a6925f09d978de5f6994fb |
1.8 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import numpy as np
from nni.compression.pytorch.utils.counter import count_flops_params
from mobilenet import MobileNet
from mobilenet_v2 import MobileNetV2
def create_model(model_type=None, n_classes=120, input_size=224, checkpoint=None, pretrained=False, width_mult=1.):
if model_type == 'mobilenet_v1':
model = MobileNet(n_class=n_classes, profile='normal')
elif model_type == 'mobilenet_v2':
model = MobileNetV2(n_class=n_classes, input_size=input_size, width_mult=width_mult)
elif model_type == 'mobilenet_v2_torchhub':
model = torch.hub.load('pytorch/vision:v0.8.1', 'mobilenet_v2', pretrained=pretrained)
# model = torch.hub.load('pytorch/vision:v0.10.0', 'mobilenet_v2', pretrained=pretrained)
feature_size = model.classifier[1].weight.data.size()[1]
replace_classifier = torch.nn.Linear(feature_size, n_classes)
model.classifier[1] = replace_classifier
elif model_type is None:
model = None
else:
raise RuntimeError('Unknown model_type.')
if checkpoint is not None:
model.load_state_dict(torch.load(checkpoint))
return model
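# Illustrative usage sketch (checkpoint path is hypothetical):
#     model = create_model(model_type='mobilenet_v2', n_classes=120, width_mult=0.5)
#     model = create_model(model_type='mobilenet_v1', checkpoint='./ckpt/mobilenet_v1.pth')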
class TrainDataset(Dataset):
def __init__(self, npy_dir):
self.root_dir = npy_dir
self.case_names = [self.root_dir + '/' + x for x in os.listdir(self.root_dir)]
transform_set = [transforms.Lambda(lambda x: x),
transforms.RandomRotation(30),
# transforms.RandomPerspective(),
transforms.ColorJitter(),
transforms.RandomHorizontalFlip(p=1)]
self.transform = transforms.RandomChoice(transform_set)
# self.transform = transforms.AutoAugment(transforms.AutoAugmentPolicy.IMAGENET)
def __len__(self):
return len(self.case_names)
def __getitem__(self, index):
instance = np.load(self.case_names[index], allow_pickle=True).item()
x = instance['input'].transpose(2, 0, 1) # (C, H, W)
x = torch.from_numpy(x).type(torch.float)#.type(torch.uint8) # convert to Tensor to use torchvision.transforms
x = self.transform(x)
return x, instance['label']
class EvalDataset(Dataset):
def __init__(self, npy_dir):
self.root_dir = npy_dir
self.case_names = [self.root_dir + '/' + x for x in os.listdir(self.root_dir)]
def __len__(self):
return len(self.case_names)
def __getitem__(self, index):
instance = np.load(self.case_names[index], allow_pickle=True).item()
x = instance['input'].transpose(2, 0, 1)
x = torch.from_numpy(x).type(torch.float) #.type(torch.uint8)
return x, instance['label']
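# Illustrative usage sketch wiring the datasets into loaders (paths are ours):
#     from torch.utils.data import DataLoader
#     train_loader = DataLoader(TrainDataset('./data/train'), batch_size=32, shuffle=True)
#     eval_loader = DataLoader(EvalDataset('./data/eval'), batch_size=32, shuffle=False)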
def count_flops(model, log=None):
dummy_input = torch.rand([1, 3, 256, 256])
flops, params, results = count_flops_params(model, dummy_input)
print(f"FLOPs: {flops}, params: {params}")
if log is not None:
log.write(f"FLOPs: {flops}, params: {params}\n")
return flops, params
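# Illustrative usage sketch combining the helpers above:
#     model = create_model(model_type='mobilenet_v2_torchhub', pretrained=True)
#     flops, params = count_flops(model)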
| [
"torch.nn.Linear",
"torch.rand",
"torch.from_numpy",
"torch.load",
"torch.hub.load"
] | 1.8.1 | xiaowu0162/mobilenet_compression | a04fa087ac84b0918fb49ef77bf8439d02cbcf1f |
1.4 | import time
from collections import OrderedDict
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader
import logging
from tqdm import tqdm
from neuralprophet import configure
from neuralprophet import time_net
from neuralprophet import time_dataset
from neuralprophet import df_utils
from neuralprophet import utils
from neuralprophet.plot_forecast import plot, plot_components
from neuralprophet.plot_model_parameters import plot_parameters
from neuralprophet import metrics
log = logging.getLogger("NP.forecaster")
METRICS = {
"mae": metrics.MAE,
"mse": metrics.MSE,
"rmse": metrics.RMSE,
}
class NeuralProphet:
"""NeuralProphet forecaster.
A simple yet powerful forecaster that models:
Trend, seasonality, events, holidays, auto-regression, lagged covariates, and future-known regressors.
Can be regularized and configured to model nonlinear relationships.
Parameters
----------
COMMENT
Trend Config
COMMENT
growth : {'off' or 'linear'}, default 'linear'
Set use of trend growth type.
Options:
* ``off``: no trend.
* (default) ``linear``: fits a piece-wise linear trend with ``n_changepoints + 1`` segments
* ``discontinuous``: For advanced users only - not a conventional trend,
allows arbitrary jumps at each trend changepoint
changepoints : {list of str, list of np.datetimes or np.array of np.datetimes}, optional
Manually set dates at which to include potential changepoints.
Note
----
Does not accept ``np.array`` of ``np.str``. If not specified, potential changepoints are selected automatically.
n_changepoints : int
Number of potential trend changepoints to include.
Note
----
Changepoints are selected uniformly from the first ``changepoints_range`` proportion of the history.
Ignored if manual ``changepoints`` list is supplied.
changepoints_range : float
Proportion of history in which trend changepoints will be estimated.
e.g. set to 0.8 to allow changepoints only in the first 80% of training data.
Ignored if manual ``changepoints`` list is supplied.
trend_reg : float, optional
Parameter modulating the flexibility of the automatic changepoint selection.
Note
----
Large values (~1-100) will limit the variability of changepoints.
Small values (~0.001-1.0) will allow changepoints to change faster.
default: 0 will fully fit a trend to each segment.
trend_reg_threshold : bool, optional
Allowance for trend to change without regularization.
Options
* ``True``: Automatically set to a value that leads to a smooth trend.
* (default) ``False``: All changes in changepoints are regularized
COMMENT
Seasonality Config
COMMENT
yearly_seasonality : bool, int
Fit yearly seasonality.
Options
* ``True`` or ``False``
* ``auto``: set automatically
* ``value``: number of Fourier/linear terms to generate
weekly_seasonality : bool, int
Fit weekly seasonality.
Options
* ``True`` or ``False``
* ``auto``: set automatically
* ``value``: number of Fourier/linear terms to generate
daily_seasonality : bool, int
Fit daily seasonality.
Options
* ``True`` or ``False``
* ``auto``: set automatically
* ``value``: number of Fourier/linear terms to generate
seasonality_mode : str
Specifies mode of seasonality
Options
* (default) ``additive``
* ``multiplicative``
seasonality_reg : float, optional
Parameter modulating the strength of the seasonality model.
Note
----
Smaller values (~0.1-1) allow the model to fit larger seasonal fluctuations,
larger values (~1-100) dampen the seasonality.
default: None, no regularization
COMMENT
AR Config
COMMENT
n_lags : int
Previous time series steps to include in auto-regression. Aka AR-order
ar_reg : float, optional
how much sparsity to induce in the AR-coefficients
Note
----
Large values (~1-100) will limit the number of nonzero coefficients dramatically.
Small values (~0.001-1.0) will allow more non-zero coefficients.
default: 0 no regularization of coefficients.
COMMENT
Model Config
COMMENT
n_forecasts : int
Number of steps ahead of prediction time step to forecast.
num_hidden_layers : int, optional
number of hidden layers to include in AR-Net (defaults to 0)
d_hidden : int, optional
dimension of hidden layers of the AR-Net. Ignored if ``num_hidden_layers`` == 0.
COMMENT
Train Config
COMMENT
learning_rate : float
Maximum learning rate setting for 1cycle policy scheduler.
Note
----
Default ``None``: Automatically sets the ``learning_rate`` based on a learning rate range test.
For manual user input, (try values ~0.001-10).
epochs : int
Number of epochs (complete iterations over dataset) to train model.
Note
----
Default ``None``: Automatically sets the number of epochs based on dataset size.
For best results also leave batch_size to None. For manual values, try ~5-500.
batch_size : int
Number of samples per mini-batch.
If not provided, ``batch_size`` is approximated based on dataset size.
For manual values, try ~8-1024.
For best results also leave ``epochs`` to ``None``.
newer_samples_weight: float, default 2.0
Sets factor by which the model fit is skewed towards more recent observations.
Controls the factor by which final samples are weighted more compared to initial samples.
Applies a positional weighting to each sample's loss value.
e.g. ``newer_samples_weight = 2``: final samples are weighted twice as much as initial samples.
newer_samples_start: float, default 0.0
Sets beginning of 'newer' samples as fraction of training data.
Throughout the range of 'newer' samples, the weight is increased
from ``1.0/newer_samples_weight`` initially to 1.0 at the end,
in a monotonously increasing function (cosine from pi to 2*pi).
loss_func : str, torch.nn.functional.loss
Type of loss to use:
Options
* (default) ``Huber``: Huber loss function
* ``MSE``: Mean Squared Error loss function
* ``MAE``: Mean Absolute Error loss function
* ``torch.nn.functional.loss.``: loss or callable for custom loss, e.g. L1-Loss
Examples
--------
>>> from neuralprophet import NeuralProphet
>>> import torch
>>> import torch.nn as nn
>>> m = NeuralProphet(loss_func=torch.nn.L1Loss)
collect_metrics : list of str, bool
Set metrics to compute.
Valid: [``mae``, ``rmse``, ``mse``]
Options
* (default) ``True``: [``mae``, ``rmse``]
* ``False``: No metrics
COMMENT
Missing Data
COMMENT
impute_missing : bool
whether to automatically impute missing dates/values
Note
----
imputation follows a linear method up to 10 missing values, more are filled with trend.
COMMENT
Data Normalization
COMMENT
normalize : str
Type of normalization to apply to the time series.
Options
* ``off`` bypasses data normalization
* (default, binary timeseries) ``minmax`` scales the minimum value to 0.0 and the maximum value to 1.0
* ``standardize`` zero-centers and divides by the standard deviation
* (default) ``soft`` scales the minimum value to 0.0 and the 95th quantile to 1.0
* ``soft1`` scales the minimum value to 0.1 and the 90th quantile to 0.9
global_normalization : bool
Activation of global normalization
Options
* ``True``: dict of dataframes is used as global_time_normalization
* (default) ``False``: local normalization
global_time_normalization : bool
Specifies global time normalization
Options
* (default) ``True``: only valid in case of global modeling local normalization
* ``False``: set time data_params locally
unknown_data_normalization : bool
Specifies unknown data normalization
Options
* ``True``: test data is normalized with global data params even if trained with local data params (global modeling with local normalization)
* (default) ``False``: no global modeling with local normalization
"""
def __init__(
self,
growth="linear",
changepoints=None,
n_changepoints=10,
changepoints_range=0.9,
trend_reg=0,
trend_reg_threshold=False,
yearly_seasonality="auto",
weekly_seasonality="auto",
daily_seasonality="auto",
seasonality_mode="additive",
seasonality_reg=0,
n_forecasts=1,
n_lags=0,
num_hidden_layers=0,
d_hidden=None,
ar_reg=None,
learning_rate=None,
epochs=None,
batch_size=None,
loss_func="Huber",
optimizer="AdamW",
newer_samples_weight=2,
newer_samples_start=0.0,
impute_missing=True,
collect_metrics=True,
normalize="auto",
global_normalization=False,
global_time_normalization=True,
unknown_data_normalization=False,
):
kwargs = locals()
# General
self.name = "NeuralProphet"
self.n_forecasts = n_forecasts
# Data Normalization settings
self.config_normalization = configure.Normalization(
normalize=normalize,
global_normalization=global_normalization,
global_time_normalization=global_time_normalization,
unknown_data_normalization=unknown_data_normalization,
)
# Missing Data Preprocessing
self.impute_missing = impute_missing
self.impute_limit_linear = 5
self.impute_rolling = 20
# Training
self.config_train = configure.from_kwargs(configure.Train, kwargs)
if collect_metrics is None:
collect_metrics = []
elif collect_metrics is True:
collect_metrics = ["mae", "rmse"]
elif isinstance(collect_metrics, str):
if not collect_metrics.lower() in METRICS.keys():
raise ValueError("Received unsupported argument for collect_metrics.")
collect_metrics = [collect_metrics]
elif isinstance(collect_metrics, list):
if not all([m.lower() in METRICS.keys() for m in collect_metrics]):
raise ValueError("Received unsupported argument for collect_metrics.")
elif collect_metrics is not False:
raise ValueError("Received unsupported argument for collect_metrics.")
self.metrics = None
if isinstance(collect_metrics, list):
self.metrics = metrics.MetricsCollection(
metrics=[metrics.LossMetric(self.config_train.loss_func)]
+ [METRICS[m.lower()]() for m in collect_metrics],
value_metrics=[metrics.ValueMetric("RegLoss")],
)
# AR
self.config_ar = configure.from_kwargs(configure.AR, kwargs)
self.n_lags = self.config_ar.n_lags
self.max_lags = self.n_lags
# Model
self.config_model = configure.from_kwargs(configure.Model, kwargs)
# Trend
self.config_trend = configure.from_kwargs(configure.Trend, kwargs)
# Seasonality
self.season_config = configure.AllSeason(
mode=seasonality_mode,
reg_lambda=seasonality_reg,
yearly_arg=yearly_seasonality,
weekly_arg=weekly_seasonality,
daily_arg=daily_seasonality,
)
self.config_train.reg_lambda_season = self.season_config.reg_lambda
# Events
self.events_config = None
self.country_holidays_config = None
# Extra Regressors
self.config_covar = None
self.regressors_config = None
# set during fit()
self.data_freq = None
# Set during _train()
self.fitted = False
self.data_params = None
self.optimizer = None
self.scheduler = None
self.model = None
# set during prediction
self.future_periods = None
# later set by user (optional)
self.highlight_forecast_step_n = None
self.true_ar_weights = None
def add_lagged_regressor(
self,
names,
n_lags="auto",
regularization=None,
normalize="auto",
):
"""Add a covariate or list of covariate time series as additional lagged regressors to be used for fitting and predicting.
The dataframe passed to ``fit`` and ``predict`` will have the column with the specified name to be used as
lagged regressor. When normalize=True, the covariate will be normalized unless it is binary.
Parameters
----------
names : string or list
name of the regressor/list of regressors.
n_lags : int
previous regressor time steps to use as input in the predictor (covar order)
if ``auto``, time steps will be equivalent to the AR order (default)
if ``scalar``, all the regressors will only use last known value as input
regularization : float
optional scale for regularization strength
normalize : bool
optional, specify whether this regressor will be normalized prior to fitting.
if ``auto``, binary regressors will not be normalized.
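Examples
--------
Illustrative call, assuming the fitting dataframe also carries a
``temperature`` column (a name of our choosing):
>>> m = NeuralProphet(n_lags=7)
>>> m = m.add_lagged_regressor("temperature", n_lags="auto")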
"""
if n_lags == 0 or n_lags is None:
n_lags = 0
log.warning(
"Please, set n_lags to a value greater than 0 or to the options 'scalar' or 'auto'. No lags will be added to regressors when n_lags = 0 or n_lags is None"
)
if n_lags == "auto":
if self.n_lags is not None and self.n_lags > 0:
n_lags = self.n_lags
log.info(
"n_lags = 'auto', number of lags for regressor is set to Autoregression number of lags ({})".format(
self.n_lags
)
)
else:
n_lags = 1
log.info(
"n_lags = 'auto', but there is no lags for Autoregression. Number of lags for regressor is automatically set to 1"
)
if n_lags == "scalar":
n_lags = 1
log.info("n_lags = 'scalar', number of lags for regressor is set to 1")
only_last_value = n_lags <= 1
if self.fitted:
raise Exception("Regressors must be added prior to model fitting.")
if not isinstance(names, list):
names = [names]
for name in names:
self._validate_column_name(name)
if self.config_covar is None:
self.config_covar = OrderedDict({})
self.config_covar[name] = configure.Covar(
reg_lambda=regularization, normalize=normalize, as_scalar=only_last_value, n_lags=n_lags
)
return self
def add_future_regressor(self, name, regularization=None, normalize="auto", mode="additive"):
"""Add a regressor as lagged covariate with order 1 (scalar) or as known in advance (also scalar).
The dataframe passed to :meth:`fit` and :meth:`predict` will have a column with the specified name to be used as
a regressor. When normalize=True, the regressor will be normalized unless it is binary.
Note
----
Future Regressors have to be known for the entire forecast horizon, e.g. ``n_forecasts`` into the future.
Parameters
----------
name : string
name of the regressor.
regularization : float
optional scale for regularization strength
normalize : bool
optional, specify whether this regressor will be normalized prior to fitting.
Note
----
if ``auto``, binary regressors will not be normalized.
mode : str
``additive`` (default) or ``multiplicative``.
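Examples
--------
Illustrative call; ``temperature`` stands in for any future-known column:
>>> m = NeuralProphet()
>>> m = m.add_future_regressor("temperature", mode="additive")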
"""
if self.fitted:
raise Exception("Regressors must be added prior to model fitting.")
if regularization is not None:
if regularization < 0:
raise ValueError("regularization must be >= 0")
if regularization == 0:
regularization = None
self._validate_column_name(name)
if self.regressors_config is None:
self.regressors_config = {}
self.regressors_config[name] = configure.Regressor(reg_lambda=regularization, normalize=normalize, mode=mode)
return self
def add_events(self, events, lower_window=0, upper_window=0, regularization=None, mode="additive"):
"""
Add user specified events and their corresponding lower, upper windows and the
regularization parameters into the NeuralProphet object
Parameters
----------
events : str, list
name or list of names of user specified events
lower_window : int
the lower window for the events in the list of events
upper_window : int
the upper window for the events in the list of events
regularization : float
optional scale for regularization strength
mode : str
``additive`` (default) or ``multiplicative``.
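Examples
--------
Illustrative call mirroring the events used elsewhere in this module:
>>> m = NeuralProphet()
>>> m = m.add_events(["playoff", "superbowl"], lower_window=-1, upper_window=1)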
"""
if self.fitted:
raise Exception("Events must be added prior to model fitting.")
if self.events_config is None:
self.events_config = OrderedDict({})
if regularization is not None:
if regularization < 0:
raise ValueError("regularization must be >= 0")
if regularization == 0:
regularization = None
if not isinstance(events, list):
events = [events]
for event_name in events:
self._validate_column_name(event_name)
self.events_config[event_name] = configure.Event(
lower_window=lower_window, upper_window=upper_window, reg_lambda=regularization, mode=mode
)
return self
def add_country_holidays(self, country_name, lower_window=0, upper_window=0, regularization=None, mode="additive"):
"""
Add a country into the NeuralProphet object to include country specific holidays
and create the corresponding configs such as lower, upper windows and the regularization
parameters
Parameters
----------
country_name : string
name of the country
lower_window : int
the lower window for all the country holidays
upper_window : int
the upper window for all the country holidays
regularization : float
optional scale for regularization strength
mode : str
``additive`` (default) or ``multiplicative``.
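Examples
--------
Illustrative call (country name is an example):
>>> m = NeuralProphet()
>>> m = m.add_country_holidays("US", lower_window=-1, upper_window=1)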
"""
if self.fitted:
raise Exception("Country must be specified prior to model fitting.")
if regularization is not None:
if regularization < 0:
raise ValueError("regularization must be >= 0")
if regularization == 0:
regularization = None
self.country_holidays_config = configure.Holidays(
country=country_name,
lower_window=lower_window,
upper_window=upper_window,
reg_lambda=regularization,
mode=mode,
)
self.country_holidays_config.init_holidays()
return self
def add_seasonality(self, name, period, fourier_order):
"""Add a seasonal component with specified period, number of Fourier components, and regularization.
Increasing the number of Fourier components allows the seasonality to change more quickly
(at risk of overfitting).
Note: regularization and mode (additive/multiplicative) are set in the main init.
Parameters
----------
name : string
name of the seasonality component.
period : float
number of days in one period.
fourier_order : int
number of Fourier components to use.
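Examples
--------
Adding a roughly monthly component (period and order here are illustrative):
>>> m = NeuralProphet()
>>> m = m.add_seasonality(name="monthly", period=30.5, fourier_order=5)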
"""
if self.fitted:
raise Exception("Seasonality must be added prior to model fitting.")
if name in ["daily", "weekly", "yearly"]:
log.error("Please use inbuilt daily, weekly, or yearly seasonality or set another name.")
# Do not Allow overwriting built-in seasonalities
self._validate_column_name(name, seasons=True)
if fourier_order <= 0:
raise ValueError("Fourier Order must be > 0")
self.season_config.append(name=name, period=period, resolution=fourier_order, arg="custom")
return self
def fit(self, df, freq="auto", validation_df=None, progress="bar", minimal=False):
"""Train, and potentially evaluate model.
Parameters
----------
df : pd.DataFrame, dict
containing column ``ds``, ``y`` with all data
freq : str
Data step sizes. Frequency of data recording,
Note
----
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.
validation_df : pd.DataFrame, dict
if provided, model with performance will be evaluated after each training epoch over this data.
progress : str
Method of progress display
Options
* (default) ``bar`` display updating progress bar (tqdm)
* ``print`` print out progress (fallback option)
* ``plot`` plot a live updating graph of the training loss, requires [live] install or livelossplot package installed.
* ``plot-all`` extended to all recorded metrics.
minimal : bool
whether to train without any printouts or metrics collection
Returns
-------
pd.DataFrame
metrics with training and potentially evaluation metrics
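Examples
--------
A minimal sketch, assuming ``df`` has columns ``ds`` and ``y``:
>>> m = NeuralProphet()
>>> df_train, df_val = m.split_df(df, valid_p=0.2)
>>> metrics = m.fit(df_train, freq="D", validation_df=df_val)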
"""
df_dict, _ = df_utils.prep_copy_df_dict(df)
if self.fitted is True:
log.error("Model has already been fitted. Re-fitting may break or produce different results.")
self.max_lags = df_utils.get_max_num_lags(self.config_covar, self.n_lags)
if self.max_lags == 0 and self.n_forecasts > 1:
self.n_forecasts = 1
log.warning(
"Changing n_forecasts to 1. Without lags, the forecast can be "
"computed for any future time, independent of lagged values"
)
df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=True)
self.data_freq = df_utils.infer_frequency(df_dict, n_lags=self.max_lags, freq=freq)
df_dict = self._handle_missing_data(df_dict, freq=self.data_freq)
if validation_df is not None and (self.metrics is None or minimal):
log.warning("Ignoring validation_df because no metrics set or minimal training set.")
validation_df = None
if validation_df is None:
if minimal:
self._train_minimal(df_dict, progress_bar=progress == "bar")
metrics_df = None
else:
metrics_df = self._train(df_dict, progress=progress)
else:
df_val_dict, _ = df_utils.prep_copy_df_dict(validation_df)
df_val_dict = self._check_dataframe(df_val_dict, check_y=False, exogenous=False)
df_val_dict = self._handle_missing_data(df_val_dict, freq=self.data_freq)
metrics_df = self._train(df_dict, df_val_dict=df_val_dict, progress=progress)
self.fitted = True
return metrics_df
def predict(self, df, decompose=True, raw=False):
"""Runs the model to make predictions.
Expects all data needed to be present in dataframe.
If you are predicting into the unknown future and need to add future regressors or events,
please prepare data with make_future_dataframe.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with data
decompose : bool
whether to add individual components of forecast to the dataframe
raw : bool
specifies raw data
Options
* (default) ``False``: returns forecasts sorted by target (highlighting forecast age)
* ``True``: return the raw forecasts sorted by forecast start date
Returns
-------
pd.DataFrame
dependent on ``raw``
Note
----
``raw == True``: columns ``ds``, ``y``, and [``step<i>``] where step<i> refers to the i-step-ahead
prediction *made at* this row's datetime, e.g. step3 is the prediction for 3 steps into the future,
predicted using information up to (excluding) this datetime.
``raw == False``: columns ``ds``, ``y``, ``trend`` and [``yhat<i>``] where yhat<i> refers to
the i-step-ahead prediction for this row's datetime,
e.g. yhat3 is the prediction for this datetime, predicted 3 steps ago, "3 steps old".
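Examples
--------
A minimal sketch for forecasting into the unknown future (horizon is illustrative):
>>> future = m.make_future_dataframe(df, periods=30)
>>> forecast = m.predict(future)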
"""
if raw:
log.warning("Raw forecasts are incompatible with plotting utilities")
if self.fitted is False:
raise ValueError("Model has not been fitted. Predictions will be random.")
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
# to get all forecasteable values with df given, maybe extend into future:
df_dict, periods_added = self._maybe_extend_df(df_dict)
df_dict = self._prepare_dataframe_to_predict(df_dict)
# normalize
df_dict = self._normalize(df_dict)
for key, df_i in df_dict.items():
dates, predicted, components = self._predict_raw(df_i, key, include_components=decompose)
if raw:
fcst = self._convert_raw_predictions_to_raw_df(dates, predicted, components)
if periods_added[key] > 0:
fcst = fcst[:-1]
else:
fcst = self._reshape_raw_predictions_to_forecst_df(df_i, predicted, components)
if periods_added[key] > 0:
fcst = fcst[: -periods_added[key]]
df_dict[key] = fcst
df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)
return df
def test(self, df):
"""Evaluate model on holdout data.
Parameters
----------
df : pd.DataFrame,dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with holdout data
Returns
-------
pd.DataFrame
evaluation metrics
"""
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
if self.fitted is False:
log.warning("Model has not been fitted. Test results will be random.")
df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=True)
_ = df_utils.infer_frequency(df_dict, n_lags=self.max_lags, freq=self.data_freq)
df_dict = self._handle_missing_data(df_dict, freq=self.data_freq)
loader = self._init_val_loader(df_dict)
val_metrics_df = self._evaluate(loader)
if not self.config_normalization.global_normalization:
log.warning("Note that the metrics are displayed in normalized scale because of local normalization.")
return val_metrics_df
def split_df(self, df, freq="auto", valid_p=0.2, local_split=False):
"""Splits timeseries df into train and validation sets.
Prevents leakage of targets. Sharing/Overbleed of inputs can be configured.
Also performs basic data checks and fills in missing data.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
freq : str
data step sizes. Frequency of data recording,
Note
----
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.
valid_p : float
fraction of data to use for holdout validation set, targets will still never be shared.
local_split : bool
Each dataframe will be split according to valid_p locally (in case of dict of dataframes).
Returns
-------
tuple of two pd.DataFrames
training data
validation data
See Also
--------
crossvalidation_split_df : Splits timeseries data in k folds for crossvalidation.
double_crossvalidation_split_df : Splits timeseries data in two sets of k folds for crossvalidation on training and testing data.
Examples
--------
>>> df1 = pd.DataFrame({'ds': pd.date_range(start='2022-12-01', periods=5,
... freq='D'), 'y': [9.59, 8.52, 8.18, 8.07, 7.89]})
>>> df2 = pd.DataFrame({'ds': pd.date_range(start='2022-12-09', periods=5,
... freq='D'), 'y': [8.71, 8.09, 7.84, 7.65, 8.02]})
>>> df3 = pd.DataFrame({'ds': pd.date_range(start='2022-12-09', periods=5,
... freq='D'), 'y': [7.67, 7.64, 7.55, 8.25, 8.3]})
>>> df3
ds y
0 2022-12-09 7.67
1 2022-12-10 7.64
2 2022-12-11 7.55
3 2022-12-12 8.25
4 2022-12-13 8.30
One can define a dict with many time series.
>>> df_dict = {'data1': df1, 'data2': df2, 'data3': df3}
You can split a single dataframe.
>>> (df_train, df_val) = m.split_df(df3, valid_p=0.2)
>>> df_train
ds y
0 2022-12-09 7.67
1 2022-12-10 7.64
2 2022-12-11 7.55
3 2022-12-12 8.25
>>> df_val
ds y
0 2022-12-13 8.3
You can also use a dict of dataframes (especially useful for global modeling), which will account for the time range of the whole group of time series as default.
>>> (df_dict_train, df_dict_val) = m.split_df(df_dict, valid_p=0.2)
>>> df_dict_train
{'data1': ds y
0 2022-12-01 9.59
1 2022-12-02 8.52
2 2022-12-03 8.18
3 2022-12-04 8.07
4 2022-12-05 7.89,
'data2': ds y
0 2022-12-09 8.71
1 2022-12-10 8.09
2 2022-12-11 7.84,
'data3': ds y
0 2022-12-09 7.67
1 2022-12-10 7.64
2 2022-12-11 7.55}
>>> df_dict_val
{'data2': ds y
0 2022-12-12 7.65
1 2022-12-13 8.02,
'data3': ds y
0 2022-12-12 8.25
1 2022-12-13 8.30}
In some applications, splitting each time series locally may be helpful. In this case, one should set `local_split` to True.
>>> (df_dict_train, df_dict_val) = m.split_df(df_dict, valid_p=0.2,
... local_split=True)
>>> df_dict_train
{'data1': ds y
0 2022-12-01 9.59
1 2022-12-02 8.52
2 2022-12-03 8.18
3 2022-12-04 8.07,
'data2': ds y
0 2022-12-09 8.71
1 2022-12-10 8.09
2 2022-12-11 7.84
3 2022-12-12 7.65,
'data3': ds y
0 2022-12-09 7.67
1 2022-12-10 7.64
2 2022-12-11 7.55
3 2022-12-12 8.25}
>>> df_dict_val
{'data1': ds y
0 2022-12-05 7.89,
'data2': ds y
0 2022-12-13 8.02,
'data3': ds y
0 2022-12-13 8.3}
"""
df, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df = self._check_dataframe(df, check_y=False, exogenous=False)
freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq)
df = self._handle_missing_data(df, freq=freq, predicting=False)
df_train, df_val = df_utils.split_df(
df,
n_lags=self.max_lags,
n_forecasts=self.n_forecasts,
valid_p=valid_p,
inputs_overbleed=True,
local_split=local_split,
)
df_train = df_utils.maybe_get_single_df_from_df_dict(df_train, received_unnamed_df)
df_val = df_utils.maybe_get_single_df_from_df_dict(df_val, received_unnamed_df)
return df_train, df_val
def crossvalidation_split_df(
self, df, freq="auto", k=5, fold_pct=0.1, fold_overlap_pct=0.5, global_model_cv_type="None"
):
"""Splits timeseries data in k folds for crossvalidation.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
freq : str
data step sizes. Frequency of data recording,
Note
----
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.
k : int
number of CV folds
fold_pct : float
percentage of overall samples to be in each fold
fold_overlap_pct : float
percentage of overlap between the validation folds.
global_model_cv_type : str
Type of crossvalidation to apply to the dict of time series.
Options
``global-time`` (default) crossvalidation is performed according to a time stamp threshold.
``local`` each episode will be crossvalidated locally (may cause time leakage among different episodes)
``intersect`` only the time intersection of all the episodes will be considered. A considerable amount of data may not be used. However, this approach guarantees an equal number of train/test samples for each episode.
Returns
-------
list of k tuples [(df_train, df_val), ...]
training data
validation data
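Examples
--------
A minimal sketch, assuming ``df`` has columns ``ds`` and ``y``:
>>> folds = m.crossvalidation_split_df(df, freq="D", k=5, fold_pct=0.1,
... fold_overlap_pct=0.5)
>>> df_train0, df_val0 = folds[0]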
"""
df, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df = self._check_dataframe(df, check_y=False, exogenous=False)
freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq)
df = self._handle_missing_data(df, freq=freq, predicting=False)
folds = df_utils.crossvalidation_split_df(
df,
n_lags=self.max_lags,
n_forecasts=self.n_forecasts,
k=k,
fold_pct=fold_pct,
fold_overlap_pct=fold_overlap_pct,
global_model_cv_type=global_model_cv_type,
)
return folds
def double_crossvalidation_split_df(self, df, freq="auto", k=5, valid_pct=0.10, test_pct=0.10):
"""Splits timeseries data in two sets of k folds for crossvalidation on training and testing data.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
freq : str
data step sizes. Frequency of data recording,
Note
----
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.
k : int
number of CV folds
valid_pct : float
percentage of overall samples to be in validation
test_pct : float
percentage of overall samples to be in test
Returns
-------
tuple of k tuples [(folds_val, folds_test), …]
elements same as :meth:`crossvalidation_split_df` returns
"""
if isinstance(df, dict):
raise NotImplementedError("Double crossvalidation not implemented for multiple dataframes")
df = df.copy(deep=True)
df = self._check_dataframe(df, check_y=False, exogenous=False)
freq = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=freq)
df = self._handle_missing_data(df, freq=freq, predicting=False)
folds_val, folds_test = df_utils.double_crossvalidation_split_df(
df,
n_lags=self.max_lags,
n_forecasts=self.n_forecasts,
k=k,
valid_pct=valid_pct,
test_pct=test_pct,
)
return folds_val, folds_test
def create_df_with_events(self, df, events_df):
"""
Create a concatenated dataframe with the time series data along with the events data expanded.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
events_df : dict, pd.DataFrame
containing column ``ds`` and ``event``
Returns
-------
dict, pd.DataFrame
columns ``y``, ``ds`` and other user specified events
"""
if self.events_config is None:
raise Exception(
"The events configs should be added to the NeuralProphet object (add_events fn)"
"before creating the data with events features"
)
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df_dict = self._check_dataframe(df_dict, check_y=True, exogenous=False)
if isinstance(events_df, pd.DataFrame):
events_df_i = events_df.copy(deep=True)
for df_name, df_i in df_dict.items():
if isinstance(events_df, dict):
events_df_i = events_df[df_name].copy(deep=True)
for name in events_df_i["event"].unique():
assert name in self.events_config
df_out = df_utils.convert_events_to_features(
df_i,
events_config=self.events_config,
events_df=events_df_i,
)
df_dict[df_name] = df_out.reset_index(drop=True)
df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)
return df
def make_future_dataframe(self, df, events_df=None, regressors_df=None, periods=None, n_historic_predictions=False):
"""
Extends dataframe a number of periods (time steps) into the future.
Only use if you predict into the *unknown* future.
New timestamps are added to the historic dataframe, with the 'y' column being NaN, as it remains to be predicted.
Further, the given future events and regressors are added to the periods new timestamps.
The returned dataframe will include historic data needed to additionally produce `n_historic_predictions`,
for which there are historic observations of the series 'y'.
Parameters
----------
df: pd.DataFrame
History to date. DataFrame containing all columns up to present
events_df : pd.DataFrame
Future event occurrences corresponding to `periods` steps into future.
Contains columns ``ds`` and ``event``. The event column contains the name of the event.
regressors_df : pd.DataFrame
Future regressor values corresponding to `periods` steps into future.
Contains column ``ds`` and one column for each of the external regressors.
periods : int
number of steps to extend the DataFrame into the future
n_historic_predictions : bool, int
Includes historic data needed to predict `n_historic_predictions` timesteps,
for which there are historic observations of the series 'y'.
False: drop historic data except for needed inputs to predict future.
True: include entire history.
Returns
-------
pd.DataFrame
input df with ``ds`` extended into future, ``y`` set to None,
with future events and regressors added.
Examples
--------
>>> from neuralprophet import NeuralProphet
>>> m = NeuralProphet()
>>> # set the model to expect these events
>>> m = m.add_events(["playoff", "superbowl"])
>>> # create the data df with events
>>> history_df = m.create_df_with_events(df, events_df)
>>> metrics = m.fit(history_df, freq="D")
>>> # forecast with events known ahead
>>> future = m.make_future_dataframe(
>>> history_df, events_df, periods=365, n_historic_predictions=180
>>> )
>>> # get 180 past and 365 future predictions.
>>> forecast = m.predict(df=future)
"""
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df_dict_events, received_unnamed_events_df = df_utils.prep_copy_df_dict(events_df)
df_dict_regressors, received_unnamed_regressors_df = df_utils.prep_copy_df_dict(regressors_df)
if received_unnamed_events_df:
df_dict_events = {key: df_dict_events["__df__"] for key in df_dict.keys()}
elif df_dict_events is None:
df_dict_events = {key: None for key in df_dict.keys()}
else:
df_utils.compare_dict_keys(df_dict, df_dict_events, "dataframes", "events")
if received_unnamed_regressors_df:
df_dict_regressors = {key: df_dict_regressors["__df__"] for key in df_dict.keys()}
elif df_dict_regressors is None:
df_dict_regressors = {key: None for key in df_dict.keys()}
else:
df_utils.compare_dict_keys(df_dict, df_dict_regressors, "dataframes", "regressors")
df_future_dataframe = {}
for key in df_dict.keys():
df_future_dataframe[key] = self._make_future_dataframe(
df=df_dict[key],
events_df=df_dict_events[key],
regressors_df=df_dict_regressors[key],
periods=periods,
n_historic_predictions=n_historic_predictions,
)
df_future = df_utils.maybe_get_single_df_from_df_dict(df_future_dataframe, received_unnamed_df)
return df_future
def predict_trend(self, df):
"""Predict only trend component of the model.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
Returns
-------
pd.DataFrame, dict
trend on prediction dates.
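        Examples
        --------
        A minimal sketch (assuming a fitted model ``m`` and a history dataframe ``df``):
        >>> metrics = m.fit(df, freq="D")
        >>> df_trend = m.predict_trend(df)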
"""
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df_dict = self._check_dataframe(df_dict, check_y=False, exogenous=False)
df_dict = self._normalize(df_dict)
for df_name, df in df_dict.items():
t = torch.from_numpy(np.expand_dims(df["t"].values, 1))
trend = self.model.trend(t).squeeze().detach().numpy()
data_params = self.config_normalization.get_data_params(df_name)
trend = trend * data_params["y"].scale + data_params["y"].shift
df_dict[df_name] = pd.DataFrame({"ds": df["ds"], "trend": trend})
df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)
return df
def predict_seasonal_components(self, df):
"""Predict seasonality components
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing columns ``ds``, ``y`` with all data
Returns
-------
pd.DataFrame, dict
seasonal components with columns of name <seasonality component name>
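        Examples
        --------
        A minimal sketch (assuming a fitted model ``m`` and a history dataframe ``df``):
        >>> metrics = m.fit(df, freq="D")
        >>> df_seasonal = m.predict_seasonal_components(df)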
"""
df_dict, received_unnamed_df = df_utils.prep_copy_df_dict(df)
df_dict = self._check_dataframe(df_dict, check_y=False, exogenous=False)
df_dict = self._normalize(df_dict)
for df_name, df in df_dict.items():
dataset = time_dataset.TimeDataset(
df,
name=df_name,
season_config=self.season_config,
# n_lags=0,
# n_forecasts=1,
predict_mode=True,
)
loader = DataLoader(dataset, batch_size=min(4096, len(df)), shuffle=False, drop_last=False)
predicted = {}
for name in self.season_config.periods:
predicted[name] = list()
for inputs, _, _ in loader:
for name in self.season_config.periods:
features = inputs["seasonalities"][name]
y_season = torch.squeeze(self.model.seasonality(features=features, name=name))
predicted[name].append(y_season.data.numpy())
for name in self.season_config.periods:
predicted[name] = np.concatenate(predicted[name])
if self.season_config.mode == "additive":
data_params = self.config_normalization.get_data_params(df_name)
predicted[name] = predicted[name] * data_params["y"].scale
df_dict[df_name] = pd.DataFrame({"ds": df["ds"], **predicted})
df = df_utils.maybe_get_single_df_from_df_dict(df_dict, received_unnamed_df)
return df
def set_true_ar_for_eval(self, true_ar_weights):
"""Configures model to evaluate closeness of AR weights to true weights.
Parameters
----------
true_ar_weights : np.array
true AR-parameters, if known.
"""
self.true_ar_weights = true_ar_weights
def highlight_nth_step_ahead_of_each_forecast(self, step_number=None):
"""Set which forecast step to focus on for metrics evaluation and plotting.
Parameters
----------
step_number : int
i-th step ahead forecast to use for statistics and plotting.
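        Examples
        --------
        A minimal sketch (hypothetical values):
        >>> m = NeuralProphet(n_forecasts=7)
        >>> m = m.highlight_nth_step_ahead_of_each_forecast(step_number=7)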
"""
if step_number is not None:
assert step_number <= self.n_forecasts
self.highlight_forecast_step_n = step_number
return self
def plot(self, fcst, ax=None, xlabel="ds", ylabel="y", figsize=(10, 6)):
"""Plot the NeuralProphet forecast, including history.
Parameters
----------
fcst : pd.DataFrame
output of self.predict.
ax : matplotlib axes
optional, matplotlib axes on which to plot.
xlabel : string
label name on X-axis
ylabel : string
label name on Y-axis
figsize : tuple
width, height in inches. default: (10, 6)
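        Examples
        --------
        A minimal sketch (assuming a fitted model ``m`` and a future dataframe ``future``):
        >>> forecast = m.predict(df=future)
        >>> fig = m.plot(forecast)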
"""
if isinstance(fcst, dict):
log.error("Received more than one DataFrame. Use a for loop for many dataframes.")
if self.max_lags > 0:
num_forecasts = sum(fcst["yhat1"].notna())
if num_forecasts < self.n_forecasts:
log.warning(
"Too few forecasts to plot a line per forecast step." "Plotting a line per forecast origin instead."
)
return self.plot_last_forecast(
fcst,
ax=ax,
xlabel=xlabel,
ylabel=ylabel,
figsize=figsize,
include_previous_forecasts=num_forecasts - 1,
plot_history_data=True,
)
return plot(
fcst=fcst,
ax=ax,
xlabel=xlabel,
ylabel=ylabel,
figsize=figsize,
highlight_forecast=self.highlight_forecast_step_n,
)
def plot_last_forecast(
self,
fcst,
ax=None,
xlabel="ds",
ylabel="y",
figsize=(10, 6),
include_previous_forecasts=0,
plot_history_data=None,
):
"""Plot the NeuralProphet forecast, including history.
Parameters
----------
fcst : pd.DataFrame
output of self.predict.
ax : matplotlib axes
Optional, matplotlib axes on which to plot.
xlabel : str
label name on X-axis
ylabel : str
            label name on Y-axis
figsize : tuple
width, height in inches. default: (10, 6)
include_previous_forecasts : int
number of previous forecasts to include in plot
plot_history_data : bool
specifies plot of historical data
Returns
-------
matplotlib.axes.Axes
plot of NeuralProphet forecasting
"""
if self.max_lags == 0:
raise ValueError("Use the standard plot function for models without lags.")
if isinstance(fcst, dict):
log.error("Received more than one DataFrame. Use a for loop for many dataframes.")
if plot_history_data is None:
fcst = fcst[-(include_previous_forecasts + self.n_forecasts + self.max_lags) :]
elif plot_history_data is False:
fcst = fcst[-(include_previous_forecasts + self.n_forecasts) :]
elif plot_history_data is True:
fcst = fcst
fcst = utils.fcst_df_to_last_forecast(fcst, n_last=1 + include_previous_forecasts)
return plot(
fcst=fcst,
ax=ax,
xlabel=xlabel,
ylabel=ylabel,
figsize=figsize,
highlight_forecast=self.highlight_forecast_step_n,
line_per_origin=True,
)
def plot_components(self, fcst, figsize=None, residuals=False):
"""Plot the NeuralProphet forecast components.
Parameters
----------
fcst : pd.DataFrame
output of self.predict
figsize : tuple
width, height in inches.
Note
----
None (default): automatic (10, 3 * npanel)
Returns
-------
matplotlib.axes.Axes
plot of NeuralProphet components
"""
if isinstance(fcst, dict):
log.error("Receiced more than one DataFrame. Use a for loop for many dataframes.")
return plot_components(
m=self,
fcst=fcst,
figsize=figsize,
forecast_in_focus=self.highlight_forecast_step_n,
residuals=residuals,
)
def plot_parameters(self, weekly_start=0, yearly_start=0, figsize=None, df_name=None):
"""Plot the NeuralProphet forecast components.
Parameters
----------
weekly_start : int
specifying the start day of the weekly seasonality plot.
Note
----
0 (default) starts the week on Sunday. 1 shifts by 1 day to Monday, and so on.
yearly_start : int
specifying the start day of the yearly seasonality plot.
Note
----
0 (default) starts the year on Jan 1. 1 shifts by 1 day to Jan 2, and so on.
df_name : str
name of dataframe to refer to data params from original keys of train dataframes (used for local normalization in global modeling)
figsize : tuple
width, height in inches.
Note
----
None (default): automatic (10, 3 * npanel)
Returns
-------
matplotlib.axes.Axes
plot of NeuralProphet forecasting
"""
return plot_parameters(
m=self,
forecast_in_focus=self.highlight_forecast_step_n,
weekly_start=weekly_start,
yearly_start=yearly_start,
figsize=figsize,
df_name=df_name,
)
def _init_model(self):
"""Build Pytorch model with configured hyperparamters.
Returns
-------
TimeNet model
"""
self.model = time_net.TimeNet(
config_trend=self.config_trend,
config_season=self.season_config,
config_covar=self.config_covar,
config_regressors=self.regressors_config,
config_events=self.events_config,
config_holidays=self.country_holidays_config,
n_forecasts=self.n_forecasts,
n_lags=self.n_lags,
num_hidden_layers=self.config_model.num_hidden_layers,
d_hidden=self.config_model.d_hidden,
)
log.debug(self.model)
return self.model
def _create_dataset(self, df_dict, predict_mode):
"""Construct dataset from dataframe.
(Configured Hyperparameters can be overridden by explicitly supplying them.
Useful to predict a single model component.)
Parameters
----------
        df_dict : dict
            dict of pd.DataFrames containing the original and normalized columns ``ds``, ``y``, ``t``, ``y_scaled``
predict_mode : bool
specifies predict mode
Options
* ``False``: includes target values.
* ``True``: does not include targets but includes entire dataset as input
Returns
-------
        GlobalTimeDataset
"""
return time_dataset.GlobalTimeDataset(
df_dict,
predict_mode=predict_mode,
n_lags=self.n_lags,
n_forecasts=self.n_forecasts,
season_config=self.season_config,
events_config=self.events_config,
country_holidays_config=self.country_holidays_config,
covar_config=self.config_covar,
regressors_config=self.regressors_config,
)
def __handle_missing_data(self, df, freq, predicting):
"""Checks, auto-imputes and normalizes new data
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
freq : str
data step sizes. Frequency of data recording,
Note
----
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.
predicting : bool
when no lags, allow NA values in ``y`` of forecast series or ``y`` to miss completely
Returns
-------
pd.DataFrame
preprocessed dataframe
"""
if self.max_lags == 0 and not predicting:
# we can drop rows with NA in y
sum_na = sum(df["y"].isna())
if sum_na > 0:
df = df[df["y"].notna()]
log.info("dropped {} NAN row in 'y'".format(sum_na))
# add missing dates for autoregression modelling
if self.max_lags > 0:
df, missing_dates = df_utils.add_missing_dates_nan(df, freq=freq)
if missing_dates > 0:
if self.impute_missing:
log.info("{} missing dates added.".format(missing_dates))
else:
raise ValueError(
"{} missing dates found. Please preprocess data manually or set impute_missing to True.".format(
missing_dates
)
)
if self.regressors_config is not None:
# if future regressors, check that they are not nan at end, else drop
# we ignore missing events, as those will be filled in with zeros.
reg_nan_at_end = 0
for col in self.regressors_config.keys():
col_nan_at_end = 0
while len(df) > col_nan_at_end and df[col].isnull().iloc[-(1 + col_nan_at_end)]:
col_nan_at_end += 1
reg_nan_at_end = max(reg_nan_at_end, col_nan_at_end)
if reg_nan_at_end > 0:
# drop rows at end due to missing future regressors
df = df[:-reg_nan_at_end]
log.info("Dropped {} rows at end due to missing future regressor values.".format(reg_nan_at_end))
df_end_to_append = None
nan_at_end = 0
while len(df) > nan_at_end and df["y"].isnull().iloc[-(1 + nan_at_end)]:
nan_at_end += 1
if nan_at_end > 0:
if predicting:
# allow nans at end - will re-add at end
                if self.n_forecasts > 1 and self.n_forecasts < nan_at_end:
                    # check that not more than n_forecasts nans, else drop surplus
                    df = df[: -(nan_at_end - self.n_forecasts)]
                    log.info(
                        "Detected y to have more NaN values than n_forecasts can predict. "
                        "Dropped {} rows at end.".format(nan_at_end - self.n_forecasts)
                    )
                    # correct new length:
                    nan_at_end = self.n_forecasts
df_end_to_append = df[-nan_at_end:]
df = df[:-nan_at_end]
else:
# training - drop nans at end
df = df[:-nan_at_end]
log.info(
"Dropped {} consecutive nans at end. "
"Training data can only be imputed up to last observation.".format(nan_at_end)
)
# impute missing values
data_columns = []
if self.max_lags > 0:
data_columns.append("y")
if self.config_covar is not None:
data_columns.extend(self.config_covar.keys())
if self.regressors_config is not None:
data_columns.extend(self.regressors_config.keys())
if self.events_config is not None:
data_columns.extend(self.events_config.keys())
for column in data_columns:
sum_na = sum(df[column].isnull())
if sum_na > 0:
if self.impute_missing:
# use 0 substitution for holidays and events missing values
if self.events_config is not None and column in self.events_config.keys():
df[column].fillna(0, inplace=True)
remaining_na = 0
else:
df.loc[:, column], remaining_na = df_utils.fill_linear_then_rolling_avg(
df[column],
limit_linear=self.impute_limit_linear,
rolling=self.impute_rolling,
)
log.info("{} NaN values in column {} were auto-imputed.".format(sum_na - remaining_na, column))
if remaining_na > 0:
raise ValueError(
"More than {} consecutive missing values encountered in column {}. "
"{} NA remain. Please preprocess data manually.".format(
2 * self.impute_limit_linear + self.impute_rolling, column, remaining_na
)
)
else: # fail because set to not impute missing
raise ValueError(
"Missing values found. " "Please preprocess data manually or set impute_missing to True."
)
if df_end_to_append is not None:
df = df.append(df_end_to_append)
return df
def _handle_missing_data(self, df, freq, predicting=False):
"""Checks, auto-imputes and normalizes new data
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
freq : str
data step sizes. Frequency of data recording,
Note
----
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.
        predicting : bool
            when no lags, allow NA values in ``y`` of forecast series or ``y`` to miss completely
Returns
-------
        pd.DataFrame, dict
            preprocessed dataframe or dict of dataframes
"""
df_is_dict = True
if isinstance(df, pd.DataFrame):
df_is_dict = False
df = {"__df__": df}
elif not isinstance(df, dict):
raise ValueError("Please insert valid df type (i.e. pd.DataFrame, dict)")
df_handled_missing_dict = {}
for key in df:
df_handled_missing_dict[key] = self.__handle_missing_data(df[key], freq, predicting)
if not df_is_dict:
df_handled_missing_dict = df_handled_missing_dict["__df__"]
return df_handled_missing_dict
def _check_dataframe(self, df, check_y=True, exogenous=True):
"""Performs basic data sanity checks and ordering
Prepare dataframe for fitting or predicting.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
check_y : bool
if df must have series values
Note
----
set to True if training or predicting with autoregression
exogenous : bool
whether to check covariates, regressors and events column names
Returns
-------
pd.DataFrame
checked dataframe
"""
df_is_dict = True
if isinstance(df, pd.DataFrame):
df_is_dict = False
df = {"__df__": df}
elif not isinstance(df, dict):
raise ValueError("Please insert valid df type (i.e. pd.DataFrame, dict)")
checked_df = {}
for key, df_i in df.items():
checked_df[key] = df_utils.check_single_dataframe(
df=df_i,
check_y=check_y,
covariates=self.config_covar if exogenous else None,
regressors=self.regressors_config if exogenous else None,
events=self.events_config if exogenous else None,
)
if not df_is_dict:
checked_df = checked_df["__df__"]
return checked_df
def _validate_column_name(self, name, events=True, seasons=True, regressors=True, covariates=True):
"""Validates the name of a seasonality, event, or regressor.
Parameters
----------
name : str
name of seasonality, event or regressor
events : bool
check if name already used for event
seasons : bool
check if name already used for seasonality
        regressors : bool
            check if name already used for regressor
        covariates : bool
            check if name already used for covariate
"""
reserved_names = [
"trend",
"additive_terms",
"daily",
"weekly",
"yearly",
"events",
"holidays",
"zeros",
"extra_regressors_additive",
"yhat",
"extra_regressors_multiplicative",
"multiplicative_terms",
]
rn_l = [n + "_lower" for n in reserved_names]
rn_u = [n + "_upper" for n in reserved_names]
reserved_names.extend(rn_l)
reserved_names.extend(rn_u)
reserved_names.extend(["ds", "y", "cap", "floor", "y_scaled", "cap_scaled"])
if name in reserved_names:
raise ValueError("Name {name!r} is reserved.".format(name=name))
if events and self.events_config is not None:
if name in self.events_config.keys():
raise ValueError("Name {name!r} already used for an event.".format(name=name))
if events and self.country_holidays_config is not None:
if name in self.country_holidays_config.holiday_names:
raise ValueError(
"Name {name!r} is a holiday name in {country_holidays}.".format(
name=name, country_holidays=self.country_holidays_config.country
)
)
if seasons and self.season_config is not None:
if name in self.season_config.periods:
raise ValueError("Name {name!r} already used for a seasonality.".format(name=name))
if covariates and self.config_covar is not None:
if name in self.config_covar:
raise ValueError("Name {name!r} already used for an added covariate.".format(name=name))
if regressors and self.regressors_config is not None:
if name in self.regressors_config.keys():
raise ValueError("Name {name!r} already used for an added regressor.".format(name=name))
def _normalize(self, df_dict):
"""Apply data scales.
Applies data scaling factors to df using data_params.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
Returns
-------
df_dict: dict of pd.DataFrame, normalized
"""
for df_name, df_i in df_dict.items():
data_params = self.config_normalization.get_data_params(df_name)
df_dict[df_name] = df_utils.normalize(df_i, data_params)
return df_dict
def _init_train_loader(self, df_dict):
"""Executes data preparation steps and initiates training procedure.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
Returns
-------
torch DataLoader
"""
if not isinstance(df_dict, dict):
raise ValueError("df_dict must be a dict of pd.DataFrames.")
# if not self.fitted:
self.config_normalization.init_data_params(
df_dict=df_dict,
covariates_config=self.config_covar,
regressor_config=self.regressors_config,
events_config=self.events_config,
)
df_dict = self._normalize(df_dict)
# if not self.fitted:
if self.config_trend.changepoints is not None:
# scale user-specified changepoint times
self.config_trend.changepoints = self._normalize(
{"__df__": pd.DataFrame({"ds": pd.Series(self.config_trend.changepoints)})}
)["__df__"]["t"].values
df_merged, _ = df_utils.join_dataframes(df_dict)
df_merged = df_merged.sort_values("ds")
df_merged.drop_duplicates(inplace=True, keep="first", subset=["ds"])
self.season_config = utils.set_auto_seasonalities(df_merged, season_config=self.season_config)
if self.country_holidays_config is not None:
self.country_holidays_config.init_holidays(df_merged)
dataset = self._create_dataset(df_dict, predict_mode=False) # needs to be called after set_auto_seasonalities
self.config_train.set_auto_batch_epoch(n_data=len(dataset))
loader = DataLoader(dataset, batch_size=self.config_train.batch_size, shuffle=True)
# if not self.fitted:
self.model = self._init_model() # needs to be called after set_auto_seasonalities
if self.config_train.learning_rate is None:
self.config_train.learning_rate = self.config_train.find_learning_rate(self.model, dataset)
log.info("lr-range-test selected learning rate: {:.2E}".format(self.config_train.learning_rate))
self.optimizer = self.config_train.get_optimizer(self.model.parameters())
self.scheduler = self.config_train.get_scheduler(self.optimizer, steps_per_epoch=len(loader))
return loader
def _init_val_loader(self, df_dict):
"""Executes data preparation steps and initiates evaluation procedure.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
Returns
-------
torch DataLoader
"""
df_dict = self._normalize(df_dict)
dataset = self._create_dataset(df_dict, predict_mode=False)
loader = DataLoader(dataset, batch_size=min(1024, len(dataset)), shuffle=False, drop_last=False)
return loader
def _get_time_based_sample_weight(self, t):
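        """Compute per-sample loss weights that increase with sample time.

        Weights ramp along a half-cosine from ``1/newer_samples_weight`` for the
        oldest samples up to ``1.0`` for the newest, starting at ``newer_samples_start``.
        """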
weight = torch.ones_like(t)
if self.config_train.newer_samples_weight > 1.0:
end_w = self.config_train.newer_samples_weight
start_t = self.config_train.newer_samples_start
time = (t.detach() - start_t) / (1.0 - start_t)
time = torch.maximum(torch.zeros_like(time), time)
time = torch.minimum(torch.ones_like(time), time) # time = 0 to 1
time = np.pi * (time - 1.0) # time = -pi to 0
time = 0.5 * torch.cos(time) + 0.5 # time = 0 to 1
# scales end to be end weight times bigger than start weight
# with end weight being 1.0
weight = (1.0 + time * (end_w - 1.0)) / end_w
return weight
def _train_epoch(self, e, loader):
"""Make one complete iteration over all samples in dataloader and update model after each batch.
Parameters
----------
e : int
current epoch number
loader : torch DataLoader
Training Dataloader
"""
self.model.train()
for i, (inputs, targets, meta) in enumerate(loader):
# Run forward calculation
predicted = self.model.forward(inputs)
# Compute loss. no reduction.
loss = self.config_train.loss_func(predicted, targets)
# Weigh newer samples more.
loss = loss * self._get_time_based_sample_weight(t=inputs["time"])
loss = loss.mean()
# Regularize.
            loss, reg_loss = self._add_batch_regularizations(loss, e, i / float(len(loader)))
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.scheduler.step()
if self.metrics is not None:
self.metrics.update(
predicted=predicted.detach(), target=targets.detach(), values={"Loss": loss, "RegLoss": reg_loss}
)
if self.metrics is not None:
return self.metrics.compute(save=True)
else:
return None
    def _add_batch_regularizations(self, loss, e, iter_progress):
        """Add regularization terms to the loss, if applicable
Parameters
----------
loss : torch.Tensor, scalar
current batch loss
e : int
current epoch number
iter_progress : float
this epoch's progress of iterating over dataset [0, 1]
Returns
-------
loss, reg_loss
"""
delay_weight = self.config_train.get_reg_delay_weight(e, iter_progress)
reg_loss = torch.zeros(1, dtype=torch.float, requires_grad=False)
if delay_weight > 0:
# Add regularization of AR weights - sparsify
if self.max_lags > 0 and self.config_ar.reg_lambda is not None:
reg_ar = self.config_ar.regularize(self.model.ar_weights)
reg_ar = torch.sum(reg_ar).squeeze() / self.n_forecasts
reg_loss += self.config_ar.reg_lambda * reg_ar
# Regularize trend to be smoother/sparse
l_trend = self.config_trend.trend_reg
if self.config_trend.n_changepoints > 0 and l_trend is not None and l_trend > 0:
reg_trend = utils.reg_func_trend(
weights=self.model.get_trend_deltas,
threshold=self.config_train.trend_reg_threshold,
)
reg_loss += l_trend * reg_trend
# Regularize seasonality: sparsify fourier term coefficients
l_season = self.config_train.reg_lambda_season
if self.model.season_dims is not None and l_season is not None and l_season > 0:
for name in self.model.season_params.keys():
reg_season = utils.reg_func_season(self.model.season_params[name])
reg_loss += l_season * reg_season
# Regularize events: sparsify events features coefficients
if self.events_config is not None or self.country_holidays_config is not None:
reg_events_loss = utils.reg_func_events(self.events_config, self.country_holidays_config, self.model)
reg_loss += reg_events_loss
# Regularize regressors: sparsify regressor features coefficients
if self.regressors_config is not None:
reg_regressor_loss = utils.reg_func_regressors(self.regressors_config, self.model)
reg_loss += reg_regressor_loss
reg_loss = delay_weight * reg_loss
loss = loss + reg_loss
return loss, reg_loss
def _evaluate_epoch(self, loader, val_metrics):
"""Evaluates model performance.
Parameters
----------
loader : torch DataLoader
instantiated Validation Dataloader (with TimeDataset)
val_metrics : MetricsCollection
            validation metrics to be computed.
Returns
-------
dict with evaluation metrics
"""
with torch.no_grad():
self.model.eval()
for inputs, targets, meta in loader:
predicted = self.model.forward(inputs)
val_metrics.update(predicted=predicted.detach(), target=targets.detach())
val_metrics = val_metrics.compute(save=True)
return val_metrics
def _train(self, df_dict, df_val_dict=None, progress="bar"):
"""Execute model training procedure for a configured number of epochs.
Parameters
----------
df_dict : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
df_val_dict : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with validation data
progress : str
Method of progress display.
Options
* (default) ``bar`` display updating progress bar (tqdm)
* ``print`` print out progress (fallback option)
* ``plot`` plot a live updating graph of the training loss, requires [live] install or livelossplot package installed.
* ``plot-all`` "plot" extended to all recorded metrics.
Returns
-------
pd.DataFrame
metrics
"""
# parse progress arg
progress_bar = False
progress_print = False
plot_live_loss = False
plot_live_all_metrics = False
if progress.lower() == "bar":
progress_bar = True
elif progress.lower() == "print":
progress_print = True
elif progress.lower() == "plot":
plot_live_loss = True
elif progress.lower() in ["plot-all", "plotall", "plot all"]:
plot_live_loss = True
plot_live_all_metrics = True
elif not progress.lower() == "none":
raise ValueError("received unexpected value for progress {}".format(progress))
if self.metrics is None:
log.info("No progress prints or plots possible because metrics are deactivated.")
if df_val_dict is not None:
log.warning("Ignoring supplied df_val as no metrics are specified.")
if plot_live_loss or plot_live_all_metrics:
log.warning("Can not plot live loss as no metrics are specified.")
progress_bar = True
if progress_print:
log.warning("Can not print progress as no metrics are specified.")
return self._train_minimal(df_dict, progress_bar=progress_bar)
# set up data loader
loader = self._init_train_loader(df_dict)
# set up Metrics
if self.highlight_forecast_step_n is not None:
self.metrics.add_specific_target(target_pos=self.highlight_forecast_step_n - 1)
if not self.config_normalization.global_normalization:
log.warning("When Global modeling with local normalization, metrics are displayed in normalized scale.")
else:
if not self.config_normalization.normalize == "off":
self.metrics.set_shift_scale(
(
self.config_normalization.global_data_params["y"].shift,
self.config_normalization.global_data_params["y"].scale,
)
)
validate = df_val_dict is not None
if validate:
val_loader = self._init_val_loader(df_val_dict)
val_metrics = metrics.MetricsCollection([m.new() for m in self.metrics.batch_metrics])
# set up printing and plotting
if plot_live_loss:
try:
from livelossplot import PlotLosses
live_out = ["MatplotlibPlot"]
if not progress_bar:
live_out.append("ExtremaPrinter")
live_loss = PlotLosses(outputs=live_out)
plot_live_loss = True
            except Exception:
                log.warning(
                    "To plot live loss, please install neuralprophet[live]. "
                    "Using pip: 'pip install neuralprophet[live]' "
                    "or install the missing package manually: 'pip install livelossplot'",
                    exc_info=True,
                )
plot_live_loss = False
progress_bar = True
if progress_bar:
training_loop = tqdm(
range(self.config_train.epochs),
total=self.config_train.epochs,
leave=log.getEffectiveLevel() <= 20,
)
else:
training_loop = range(self.config_train.epochs)
start = time.time()
# run training loop
for e in training_loop:
metrics_live = OrderedDict({})
self.metrics.reset()
if validate:
val_metrics.reset()
# run epoch
epoch_metrics = self._train_epoch(e, loader)
# collect metrics
if validate:
val_epoch_metrics = self._evaluate_epoch(val_loader, val_metrics)
print_val_epoch_metrics = {k + "_val": v for k, v in val_epoch_metrics.items()}
else:
val_epoch_metrics = None
print_val_epoch_metrics = OrderedDict({})
# print metrics
if progress_bar:
training_loop.set_description(f"Epoch[{(e+1)}/{self.config_train.epochs}]")
training_loop.set_postfix(ordered_dict=epoch_metrics, **print_val_epoch_metrics)
elif progress_print:
metrics_string = utils.print_epoch_metrics(epoch_metrics, e=e, val_metrics=val_epoch_metrics)
if e == 0:
log.info(metrics_string.splitlines()[0])
log.info(metrics_string.splitlines()[1])
else:
log.info(metrics_string.splitlines()[1])
# plot metrics
if plot_live_loss:
metrics_train = list(epoch_metrics)
metrics_live["log-{}".format(metrics_train[0])] = np.log(epoch_metrics[metrics_train[0]])
if plot_live_all_metrics and len(metrics_train) > 1:
for i in range(1, len(metrics_train)):
metrics_live["{}".format(metrics_train[i])] = epoch_metrics[metrics_train[i]]
if validate:
metrics_val = list(val_epoch_metrics)
metrics_live["val_log-{}".format(metrics_val[0])] = np.log(val_epoch_metrics[metrics_val[0]])
if plot_live_all_metrics and len(metrics_val) > 1:
for i in range(1, len(metrics_val)):
metrics_live["val_{}".format(metrics_val[i])] = val_epoch_metrics[metrics_val[i]]
live_loss.update(metrics_live)
if e % (1 + self.config_train.epochs // 20) == 0 or e + 1 == self.config_train.epochs:
live_loss.send()
# return metrics as df
log.debug("Train Time: {:8.3f}".format(time.time() - start))
log.debug("Total Batches: {}".format(self.metrics.total_updates))
metrics_df = self.metrics.get_stored_as_df()
if validate:
metrics_df_val = val_metrics.get_stored_as_df()
for col in metrics_df_val.columns:
metrics_df["{}_val".format(col)] = metrics_df_val[col]
return metrics_df
def _train_minimal(self, df_dict, progress_bar=False):
"""Execute minimal model training procedure for a configured number of epochs.
Parameters
----------
df_dict : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
Returns
-------
None
"""
loader = self._init_train_loader(df_dict)
if progress_bar:
training_loop = tqdm(
range(self.config_train.epochs),
total=self.config_train.epochs,
leave=log.getEffectiveLevel() <= 20,
)
else:
training_loop = range(self.config_train.epochs)
for e in training_loop:
if progress_bar:
training_loop.set_description(f"Epoch[{(e+1)}/{self.config_train.epochs}]")
_ = self._train_epoch(e, loader)
def _eval_true_ar(self):
assert self.max_lags > 0
if self.highlight_forecast_step_n is None:
if self.max_lags > 1:
raise ValueError("Please define forecast_lag for sTPE computation")
forecast_pos = 1
else:
forecast_pos = self.highlight_forecast_step_n
weights = self.model.ar_weights.detach().numpy()
weights = weights[forecast_pos - 1, :][::-1]
sTPE = utils.symmetric_total_percentage_error(self.true_ar_weights, weights)
log.info("AR parameters: ", self.true_ar_weights, "\n", "Model weights: ", weights)
return sTPE
def _evaluate(self, loader):
"""Evaluates model performance.
Parameters
----------
loader : torch DataLoader
instantiated Validation Dataloader (with TimeDataset)
Returns
-------
pd.DataFrame
evaluation metrics
"""
val_metrics = metrics.MetricsCollection([m.new() for m in self.metrics.batch_metrics])
if self.highlight_forecast_step_n is not None:
val_metrics.add_specific_target(target_pos=self.highlight_forecast_step_n - 1)
## Run
val_metrics_dict = self._evaluate_epoch(loader, val_metrics)
if self.true_ar_weights is not None:
val_metrics_dict["sTPE"] = self._eval_true_ar()
log.info("Validation metrics: {}".format(utils.print_epoch_metrics(val_metrics_dict)))
val_metrics_df = val_metrics.get_stored_as_df()
return val_metrics_df
def _make_future_dataframe(self, df, events_df, regressors_df, periods, n_historic_predictions):
if periods == 0 and n_historic_predictions is True:
log.warning(
"Not extending df into future as no periods specified." "You can call predict directly instead."
)
df = df.copy(deep=True)
_ = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=self.data_freq)
last_date = pd.to_datetime(df["ds"].copy(deep=True).dropna()).sort_values().max()
if events_df is not None:
events_df = events_df.copy(deep=True).reset_index(drop=True)
if regressors_df is not None:
regressors_df = regressors_df.copy(deep=True).reset_index(drop=True)
if periods is None:
periods = 1 if self.max_lags == 0 else self.n_forecasts
else:
assert periods >= 0
if isinstance(n_historic_predictions, bool):
if n_historic_predictions:
n_historic_predictions = len(df) - self.max_lags
else:
n_historic_predictions = 0
elif not isinstance(n_historic_predictions, int):
log.error("non-integer value for n_historic_predictions set to zero.")
n_historic_predictions = 0
if periods == 0 and n_historic_predictions == 0:
raise ValueError("Set either history or future to contain more than zero values.")
# check for external regressors known in future
if self.regressors_config is not None and periods > 0:
if regressors_df is None:
raise ValueError("Future values of all user specified regressors not provided")
else:
for regressor in self.regressors_config.keys():
if regressor not in regressors_df.columns:
raise ValueError("Future values of user specified regressor {} not provided".format(regressor))
if len(df) < self.max_lags:
raise ValueError("Insufficient data for a prediction")
elif len(df) < self.max_lags + n_historic_predictions:
log.warning(
"Insufficient data for {} historic forecasts, reduced to {}.".format(
n_historic_predictions, len(df) - self.max_lags
)
)
n_historic_predictions = len(df) - self.max_lags
if (n_historic_predictions + self.max_lags) == 0:
df = pd.DataFrame(columns=df.columns)
else:
df = df[-(self.max_lags + n_historic_predictions) :]
if len(df) > 0:
if len(df.columns) == 1 and "ds" in df:
assert self.max_lags == 0
df = self._check_dataframe(df, check_y=False, exogenous=False)
else:
df = self._check_dataframe(df, check_y=self.max_lags > 0, exogenous=True)
# future data
# check for external events known in future
if self.events_config is not None and periods > 0 and events_df is None:
log.warning(
"Future values not supplied for user specified events. "
"All events being treated as not occurring in future"
)
if self.max_lags > 0:
if periods > 0 and periods != self.n_forecasts:
periods = self.n_forecasts
log.warning(
"Number of forecast steps is defined by n_forecasts. " "Adjusted to {}.".format(self.n_forecasts)
)
if periods > 0:
future_df = df_utils.make_future_df(
df_columns=df.columns,
last_date=last_date,
periods=periods,
freq=self.data_freq,
events_config=self.events_config,
events_df=events_df,
regressor_config=self.regressors_config,
regressors_df=regressors_df,
)
if len(df) > 0:
df = df.append(future_df)
else:
df = future_df
df.reset_index(drop=True, inplace=True)
return df
def _get_maybe_extend_periods(self, df):
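        """Determine how many periods to extend ``df`` by so all forecastable values are covered, given trailing NaNs in ``y``."""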
periods_add = 0
nan_at_end = 0
while len(df) > nan_at_end and df["y"].isnull().iloc[-(1 + nan_at_end)]:
nan_at_end += 1
if self.max_lags > 0:
if self.regressors_config is None:
# if dataframe has already been extended into future,
# don't extend beyond n_forecasts.
periods_add = max(0, self.n_forecasts - nan_at_end)
else:
# can not extend as we lack future regressor values.
periods_add = 0
return periods_add
def _maybe_extend_df(self, df_dict):
periods_add = {}
for df_name, df in df_dict.items():
_ = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=self.data_freq)
# to get all forecasteable values with df given, maybe extend into future:
periods_add[df_name] = self._get_maybe_extend_periods(df)
if periods_add[df_name] > 0:
# This does not include future regressors or events.
# periods should be 0 if those are configured.
last_date = pd.to_datetime(df["ds"].copy(deep=True)).sort_values().max()
future_df = df_utils.make_future_df(
df_columns=df.columns,
last_date=last_date,
periods=periods_add[df_name],
freq=self.data_freq,
)
df = df.append(future_df)
df.reset_index(drop=True, inplace=True)
df_dict[df_name] = df
return df_dict, periods_add
def _prepare_dataframe_to_predict(self, df_dict):
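        """Copy, validate and impute each dataframe in ``df_dict`` to make it ready for prediction."""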
for df_name, df in df_dict.items():
df = df.copy(deep=True)
_ = df_utils.infer_frequency(df, n_lags=self.max_lags, freq=self.data_freq)
# check if received pre-processed df
if "y_scaled" in df.columns or "t" in df.columns:
raise ValueError(
"DataFrame has already been normalized. " "Please provide raw dataframe or future dataframe."
)
# Checks
if len(df) == 0 or len(df) < self.max_lags:
raise ValueError("Insufficient data to make predictions.")
if len(df.columns) == 1 and "ds" in df:
if self.max_lags != 0:
raise ValueError("only datestamps provided but y values needed for auto-regression.")
df = self._check_dataframe(df, check_y=False, exogenous=False)
else:
df = self._check_dataframe(df, check_y=self.max_lags > 0, exogenous=False)
# fill in missing nans except for nans at end
df = self._handle_missing_data(df, freq=self.data_freq, predicting=True)
df.reset_index(drop=True, inplace=True)
df_dict[df_name] = df
return df_dict
def _predict_raw(self, df, df_name, include_components=False):
"""Runs the model to make predictions.
Predictions are returned in raw vector format without decomposition.
Predictions are given on a forecast origin basis, not on a target basis.
Parameters
----------
df : pd.DataFrame, dict
dataframe or dict of dataframes containing column ``ds``, ``y`` with all data
df_name : str
name of the data params from which the current dataframe refers to (only in case of local_normalization)
include_components : bool
whether to return individual components of forecast
Returns
-------
pd.Series
timestamps referring to the start of the predictions.
np.array
array containing the forecasts
dict[np.array]
            Dictionary of components containing an array of each component's contribution to the forecast
"""
if isinstance(df, dict):
raise ValueError("Receiced more than one DataFrame. Use a for loop for many dataframes.")
if "y_scaled" not in df.columns or "t" not in df.columns:
raise ValueError("Received unprepared dataframe to predict. " "Please call predict_dataframe_to_predict.")
dataset = self._create_dataset(df_dict={df_name: df}, predict_mode=True)
loader = DataLoader(dataset, batch_size=min(1024, len(df)), shuffle=False, drop_last=False)
if self.n_forecasts > 1:
dates = df["ds"].iloc[self.max_lags : -self.n_forecasts + 1]
else:
dates = df["ds"].iloc[self.max_lags :]
predicted_vectors = list()
component_vectors = None
with torch.no_grad():
self.model.eval()
for inputs, _, _ in loader:
predicted = self.model.forward(inputs)
predicted_vectors.append(predicted.detach().numpy())
if include_components:
components = self.model.compute_components(inputs)
if component_vectors is None:
component_vectors = {name: [value.detach().numpy()] for name, value in components.items()}
else:
for name, value in components.items():
component_vectors[name].append(value.detach().numpy())
predicted = np.concatenate(predicted_vectors)
data_params = self.config_normalization.get_data_params(df_name)
scale_y, shift_y = data_params["y"].scale, data_params["y"].shift
predicted = predicted * scale_y + shift_y
if include_components:
components = {name: np.concatenate(value) for name, value in component_vectors.items()}
for name, value in components.items():
if "multiplicative" in name:
continue
elif "event_" in name:
event_name = name.split("_")[1]
if self.events_config is not None and event_name in self.events_config:
if self.events_config[event_name].mode == "multiplicative":
continue
elif (
self.country_holidays_config is not None
and event_name in self.country_holidays_config.holiday_names
):
if self.country_holidays_config.mode == "multiplicative":
continue
elif "season" in name and self.season_config.mode == "multiplicative":
continue
# scale additive components
components[name] = value * scale_y
if "trend" in name:
components[name] += shift_y
else:
components = None
return dates, predicted, components
def _convert_raw_predictions_to_raw_df(self, dates, predicted, components=None):
"""Turns forecast-origin-wise predictions into forecast-target-wise predictions.
Parameters
----------
dates : pd.Series
timestamps referring to the start of the predictions.
predicted : np.array
Array containing the forecasts
components : dict[np.array]
            Dictionary of components containing an array of each component's contribution to the forecast
Returns
-------
pd. DataFrame
columns ``ds``, ``y``, and [``step<i>``]
Note
----
where step<i> refers to the i-step-ahead prediction *made at* this row's datetime.
e.g. the first forecast step0 is the prediction for this timestamp,
the step1 is for the timestamp after, ...
... step3 is the prediction for 3 steps into the future,
predicted using information up to (excluding) this datetime.
"""
if isinstance(dates, dict):
raise ValueError("Receiced more than one DataFrame. Use a for loop for many dataframes.")
predicted_names = ["step{}".format(i) for i in range(self.n_forecasts)]
all_data = predicted
all_names = predicted_names
if components is not None:
for comp_name, comp_data in components.items():
all_data = np.concatenate((all_data, comp_data), 1)
all_names += ["{}{}".format(comp_name, i) for i in range(self.n_forecasts)]
df_raw = pd.DataFrame(data=all_data, columns=all_names)
df_raw.insert(0, "ds", dates.values)
return df_raw
def _reshape_raw_predictions_to_forecst_df(self, df, predicted, components):
"""Turns forecast-origin-wise predictions into forecast-target-wise predictions.
Parameters
----------
df : pd.DataFrame
input dataframe
predicted : np.array
Array containing the forecasts
components : dict[np.array]
            Dictionary of components containing an array of each component's contribution to the forecast
Returns
-------
pd.DataFrame
columns ``ds``, ``y``, ``trend`` and [``yhat<i>``]
Note
----
where yhat<i> refers to the i-step-ahead prediction for this row's datetime.
e.g. yhat3 is the prediction for this datetime, predicted 3 steps ago, "3 steps old".
"""
if isinstance(df, dict):
raise ValueError("Receiced more than one DataFrame. Use a for loop for many dataframes.")
cols = ["ds", "y"] # cols to keep from df
df_forecast = pd.concat((df[cols],), axis=1)
# create a line for each forecast_lag
# 'yhat<i>' is the forecast for 'y' at 'ds' from i steps ago.
for forecast_lag in range(1, self.n_forecasts + 1):
forecast = predicted[:, forecast_lag - 1]
pad_before = self.max_lags + forecast_lag - 1
pad_after = self.n_forecasts - forecast_lag
yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after))
df_forecast["yhat{}".format(forecast_lag)] = yhat
df_forecast["residual{}".format(forecast_lag)] = yhat - df_forecast["y"]
if components is None:
return df_forecast
# else add components
lagged_components = [
"ar",
]
if self.config_covar is not None:
for name in self.config_covar.keys():
lagged_components.append("lagged_regressor_{}".format(name))
for comp in lagged_components:
if comp in components:
for forecast_lag in range(1, self.n_forecasts + 1):
forecast = components[comp][:, forecast_lag - 1]
pad_before = self.max_lags + forecast_lag - 1
pad_after = self.n_forecasts - forecast_lag
yhat = np.concatenate(([None] * pad_before, forecast, [None] * pad_after))
df_forecast["{}{}".format(comp, forecast_lag)] = yhat
# only for non-lagged components
for comp in components:
if comp not in lagged_components:
forecast_0 = components[comp][0, :]
forecast_rest = components[comp][1:, self.n_forecasts - 1]
yhat = np.concatenate(([None] * self.max_lags, forecast_0, forecast_rest))
df_forecast[comp] = yhat
return df_forecast
| [
"torch.zeros",
"torch.cos",
"torch.no_grad",
"torch.utils.data.DataLoader",
"torch.ones_like",
"torch.zeros_like",
"torch.sum"
] | 1.4.0 | ssattari/neural_prophet | e121234d2f64d2b81f9c53f52b30d21a2cf1c6e0 |
1.4 | #!/usr/bin/env python
import logging
import os
import sys
from typing import Union
import fire
import pyarrow
from sentence_transformers.models import Pooling, Transformer
from smart_open import open
from tqdm import tqdm
from sentence_transformers import SentenceTransformer, losses
import torch
from torch.utils.data import DataLoader
from experiments import basic_logger_config
from experiments.environment import get_env
from experiments.sentence_transformers.dataset import DocumentPairSentencesDataset
from experiments.sentence_transformers.nearest_neighbors_evaluator import NearestNeighborsEvaluator
from experiments.utils import get_local_hf_dataset_path
from datasets import load_dataset, Dataset
from hf_datasets.paperswithcode_aspects import get_test_split, get_train_split
logging.basicConfig(**basic_logger_config)
logger = logging.getLogger(__name__)
env = get_env()
def train(
model_name_or_path: str,
hf_dataset: str,
aspect: str,
fold: Union[int, str],
output_path: str,
train_epochs: int = 3,
train_batch_size: int = 25,
eval_batch_size: int = 32,
evaluation_steps: int = 5000,
train_on_test: bool = False,
loss: str = 'multiple_negatives_ranking',
override: bool = False):
"""
# $MODEL_NAME $HF_DATASET $ASPECT $FOLD $OUTPUT_DIR --train_epochs=3 --train_batch_size=$TRAIN_BATCH_SIZE --eval_batch_size=$EVAL_BATCH_SIZE
Run with:
$ export CUDA_VISIBLE_DEVICES=1
    $ ./sentence_transformer_cli.py train scibert-scivocab-uncased paperswithcode_task_docs $ASPECT 1 ./output/st_scibert/1 --train_epochs=3 --train_batch_size=25 --eval_batch_size=32
:param loss: Training loss function (choices: multiple_negatives_ranking, cosine)
:param train_on_test: If True, joint training on train and test set (validation disabled)
:param aspect:
:param evaluation_steps:
:param train_epochs:
:param model_name_or_path:
:param hf_dataset:
:param fold:
:param output_path:
:param train_batch_size:
:param eval_batch_size:
:param override:
:return:
"""
top_ks = [5,10,25,50]
# cuda_device = -1
# hf_dataset = 'paperswithcode_task_docs'
# model_name_or_path = 'scibert-scivocab-uncased'
# fold = 1
    max_token_length = 336  # see pwc_token_stats.ipynb
nlp_cache_dir = './data/nlp_cache'
# train_batch_size = 25
# eval_batch_size = 32
# override = False
# output_path = './output/pwc_task_st/1/sci-bert'
# output_path = os.path.join(output_path, str(fold), model_name_or_path) # output/1/sci-bert
if os.path.exists(output_path) and not override:
logger.error(f'Stop. Output path exists already: {output_path}')
sys.exit(1)
# if cuda_device >= 0:
# os.environ["CUDA_VISIBLE_DEVICES"] = str(cuda_device)
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Model path from env
if not os.path.exists(model_name_or_path) and os.path.exists(
os.path.join(env['bert_dir'], model_name_or_path)):
model_name_or_path = os.path.join(env['bert_dir'], model_name_or_path)
word_embedding_model = Transformer(model_name_or_path, max_seq_length=max_token_length)
pooling_model = Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
# tokenizer = BertTokenizer.from_pretrained(model_name_or_path)
# dataset
docs_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),
name='docs',
cache_dir=nlp_cache_dir,
split='docs')
train_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),
name='relations',
cache_dir=nlp_cache_dir,
split=get_train_split(aspect, fold))
test_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),
name='relations',
cache_dir=nlp_cache_dir,
split=get_test_split(aspect, fold))
# filter for positive labels only
train_ds = train_ds.filter(lambda row: row['label'] == 'y')
logger.info(f'After filtering: {len(train_ds):,}')
# joint training on train and test?
if train_on_test:
#
# import pyarrow
# from datasets.arrow_dataset import Dataset
#
# full_ds_table = pyarrow.concat_tables([train_ds.data, test_ds.data])
# full_ds = Dataset(arrow_table=full_ds_table)
raise NotImplementedError('TODO Evaluator')
else:
        # standard training on the train split only
train_sds = DocumentPairSentencesDataset(docs_ds, train_ds, model, max_length=max_token_length, forced_length=0)
train_sds.tokenize_all_docs()
evaluator = NearestNeighborsEvaluator(model, docs_ds, test_ds, top_ks=top_ks, batch_size=eval_batch_size, show_progress_bar=True)
if loss == 'cosine':
train_loss = losses.CosineSimilarityLoss(model)
elif loss == 'multiple_negatives_ranking':
# A nice advantage of MultipleNegativesRankingLoss is that it only requires positive pairs
# https://github.com/UKPLab/sentence-transformers/tree/master/examples/training/quora_duplicate_questions
train_loss = losses.MultipleNegativesRankingLoss(model)
else:
raise ValueError(f'Unsupported loss function: {loss}')
train_dl = DataLoader(train_sds, shuffle=True, batch_size=train_batch_size)
# Training
model.fit(
train_objectives=[(train_dl, train_loss)],
epochs=train_epochs, # try 1-4
warmup_steps=100,
evaluator=evaluator,
evaluation_steps=evaluation_steps, # increase to 5000 (full dataset => 20k steps)
output_path=output_path,
output_path_ignore_not_empty=True
)
logger.info('Training done')
def build_vectors(
st_output_path: str,
hf_dataset: str,
aspect: str,
fold: Union[int, str],
include_all_docs: bool = False,
override: bool = False
):
"""
:param override:
:param include_all_docs: Generate also vectors for samples from training data
:param st_output_path: Path to Sentence Transformer model
:param hf_dataset: Huggingface dataset path or name
:param aspect:
:param fold:
:return:
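    Run with (a sketch, analogous to ``train``; all arguments are placeholders):
    $ ./sentence_transformer_cli.py build_vectors $ST_OUTPUT_PATH $HF_DATASET $ASPECT $FOLD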
"""
    max_token_length = 336  # see pwc_token_stats.ipynb
nlp_cache_dir = './data/nlp_cache'
out_fn = 'pwc_id2vec__all_docs.w2v.txt' if include_all_docs else 'pwc_id2vec.w2v.txt'
out_fp = os.path.join(st_output_path, out_fn)
if not os.path.exists(st_output_path):
logger.error(f'Sentence Transformer directory does not exist: {st_output_path}')
return
if os.path.exists(out_fp) and not override:
logger.error(f'Output path exists already and override is disabled: {out_fp}')
return
# Inference for best model
best_model = SentenceTransformer(st_output_path)
best_model.get_sentence_embedding_dimension()
test_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),
name='relations',
cache_dir=nlp_cache_dir,
split=get_test_split(aspect, fold))
docs_ds = load_dataset(get_local_hf_dataset_path(hf_dataset),
name='docs',
cache_dir=nlp_cache_dir,
split='docs')
test_sds = DocumentPairSentencesDataset(docs_ds, test_ds, best_model)
if include_all_docs:
# use all document ids
input_paper_ids = set(docs_ds['paper_id'])
logger.info(f'All documents in corpus: {len(input_paper_ids):,}')
else:
# generate vectors from unique test documents only
input_paper_ids = set(test_ds['from_paper_id']).union(set(test_ds['to_paper_id']))
with open(out_fp, 'w') as f:
# header
f.write(f'{len(input_paper_ids)} {best_model.get_sentence_embedding_dimension()}\n')
# body
for paper_id in tqdm(input_paper_ids, desc='Inference'):
vec = [str(v) for v in best_model.encode(test_sds.get_text_from_doc(paper_id), show_progress_bar=False)]
assert len(vec) == best_model.get_sentence_embedding_dimension()
vec_str = ' '.join(vec)
line = f'{paper_id} {vec_str}\n'
f.write(line)
# break
logger.info(f'Encoded {len(input_paper_ids):,} into {out_fp}')
if __name__ == '__main__':
fire.Fire()
sys.exit(0)
| [
"torch.utils.data.DataLoader"
] | 1.4.0 | malteos/aspect-document-embeddings | 0836ea54a9192dbc2b01bb212c7521668bb398af |
0.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class RCNN(nn.Module):
def __init__(self, vocab_size, embed_dim, output_dim, hidden_dim, num_layers, dropout, weight):
super(RCNN, self).__init__()
self.embedding = nn.Embedding(vocab_size, embed_dim)
self.embedding.weight.data.copy_(torch.from_numpy(weight))
self.lstm = nn.LSTM(embed_dim, hidden_dim, num_layers=num_layers, bidirectional=True, dropout=dropout)
self.linear = nn.Linear(2 * hidden_dim + embed_dim, hidden_dim)
self.fc = nn.Linear(hidden_dim, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text):
# input = input.permute(1, 0, 2)
embeds = self.embedding(text)
embeds = embeds.permute(1, 0, 2)
# embeds = self.dropout(embeds)
# self.lstm.flatten_parameters()
output, (hidden, _) = self.lstm(embeds)
output = torch.cat((output, embeds), 2)
output = output.permute(1, 0, 2)
output = self.linear(output).permute(0, 2, 1)
pool = F.max_pool1d(output, output.size(2)).squeeze(2)
# hidden = self.dropout(hidden)
# pool = self.dropout(pool)
# output = self.fc(hidden.squeeze(0))
output = self.fc(pool)
return output
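
if __name__ == '__main__':
    # A minimal smoke test (a sketch: sizes and the random `weight` matrix are
    # hypothetical stand-ins for a real pretrained embedding matrix).
    import numpy as np
    model = RCNN(vocab_size=100, embed_dim=50, output_dim=2, hidden_dim=64,
                 num_layers=2, dropout=0.5, weight=np.random.rand(100, 50))
    token_ids = torch.randint(0, 100, (8, 20))  # (batch_size, seq_len)
    logits = model(token_ids)
    print(logits.shape)  # expected: torch.Size([8, 2])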
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.LSTM",
"torch.from_numpy",
"torch.nn.Embedding"
] | 0.4.0 | czhongyu/information-extraction | 6cf9905bed5ee9c33706854cd6ceae04194aa5e4 |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Callable, List, Type, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from monai.networks.layers.factories import Conv, Norm, Pool
__all__ = ["ResNet", "resnet10", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnet200"]
def get_inplanes():
return [64, 128, 256, 512]
def get_avgpool():
return [(0), (1), (1, 1), (1, 1, 1)]
def get_conv1(conv1_t_size: int, conv1_t_stride: int):
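    """Return per-``spatial_dims`` kernel sizes, strides and paddings for the first convolution.

    Each returned tuple is indexed by ``spatial_dims`` (1, 2 or 3); index 0 is an unused placeholder.
    """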
return (
[(0), (conv1_t_size), (conv1_t_size, 7), (conv1_t_size, 7, 7)],
[(0), (conv1_t_stride), (conv1_t_stride, 2), (conv1_t_stride, 2, 2)],
[(0), (conv1_t_size // 2), (conv1_t_size // 2, 3), (conv1_t_size // 2, 3, 3)],
)
class ResNetBlock(nn.Module):
expansion = 1
def __init__(
self,
in_planes: int,
planes: int,
spatial_dims: int = 3,
stride: int = 1,
downsample: Union[nn.Module, partial, None] = None,
) -> None:
"""
Args:
in_planes: number of input channels.
planes: number of output channels.
spatial_dims: number of spatial dimensions of the input image.
stride: stride to use for first conv layer.
downsample: which downsample layer to use.
"""
super(ResNetBlock, self).__init__()
conv_type: Callable = Conv[Conv.CONV, spatial_dims]
norm_type: Callable = Norm[Norm.BATCH, spatial_dims]
self.conv1 = conv_type(in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
self.bn1 = norm_type(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False)
self.bn2 = norm_type(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: torch.Tensor) -> torch.Tensor:
residual = x
out: torch.Tensor = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNetBottleneck(nn.Module):
expansion = 4
def __init__(
self,
in_planes: int,
planes: int,
spatial_dims: int = 3,
stride: int = 1,
downsample: Union[nn.Module, partial, None] = None,
) -> None:
"""
Args:
in_planes: number of input channels.
planes: number of output channels (taking expansion into account).
spatial_dims: number of spatial dimensions of the input image.
stride: stride to use for second conv layer.
downsample: which downsample layer to use.
"""
super(ResNetBottleneck, self).__init__()
conv_type: Callable = Conv[Conv.CONV, spatial_dims]
norm_type: Callable = Norm[Norm.BATCH, spatial_dims]
self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = norm_type(planes)
self.conv2 = conv_type(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = norm_type(planes)
self.conv3 = conv_type(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = norm_type(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: torch.Tensor) -> torch.Tensor:
residual = x
out: torch.Tensor = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
"""
ResNet based on: `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`_
and `Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet? <https://arxiv.org/pdf/1711.09577.pdf>`_.
Adapted from `<https://github.com/kenshohara/3D-ResNets-PyTorch/tree/master/models>`_.
Args:
block: which ResNet block to use, either Basic or Bottleneck.
layers: how many layers to use.
block_inplanes: determine the size of planes at each step. Also tuneable with widen_factor.
spatial_dims: number of spatial dimensions of the input image.
n_input_channels: number of input channels for first convolutional layer.
conv1_t_size: size of first convolution layer, determines kernel and padding.
conv1_t_stride: stride of first convolution layer.
no_max_pool: bool argument to determine if to use maxpool layer.
shortcut_type: which downsample block to use.
widen_factor: widen output for each layer.
n_classes: number of output (classifications)
"""
def __init__(
self,
block: Type[Union[ResNetBlock, ResNetBottleneck]],
layers: List[int],
block_inplanes: List[int],
spatial_dims: int = 3,
n_input_channels: int = 3,
conv1_t_size: int = 7,
conv1_t_stride: int = 1,
no_max_pool: bool = False,
shortcut_type: str = "B",
widen_factor: float = 1.0,
n_classes: int = 400,
feed_forward: bool = True,
) -> None:
super(ResNet, self).__init__()
conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]
norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims]
avgp_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d]] = Pool[
Pool.ADAPTIVEAVG, spatial_dims
]
block_avgpool = get_avgpool()
        conv1_kernel, conv1_stride, conv1_padding = get_conv1(conv1_t_size, conv1_t_stride)
block_inplanes = [int(x * widen_factor) for x in block_inplanes]
self.in_planes = block_inplanes[0]
self.no_max_pool = no_max_pool
self.conv1 = conv_type(
n_input_channels,
self.in_planes,
kernel_size=conv1_kernel[spatial_dims],
stride=conv1_stride[spatial_dims],
            padding=conv1_padding[spatial_dims],
bias=False,
)
self.bn1 = norm_type(self.in_planes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = pool_type(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], spatial_dims, shortcut_type)
self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=2)
self.layer3 = self._make_layer(block, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=2)
self.layer4 = self._make_layer(block, block_inplanes[3], layers[3], spatial_dims, shortcut_type, stride=2)
self.avgpool = avgp_type(block_avgpool[spatial_dims])
if feed_forward:
self.fc = nn.Linear(block_inplanes[3] * block.expansion, n_classes)
for m in self.modules():
if isinstance(m, conv_type):
nn.init.kaiming_normal_(torch.as_tensor(m.weight), mode="fan_out", nonlinearity="relu")
elif isinstance(m, norm_type):
nn.init.constant_(torch.as_tensor(m.weight), 1)
nn.init.constant_(torch.as_tensor(m.bias), 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(torch.as_tensor(m.bias), 0)
def _downsample_basic_block(self, x: torch.Tensor, planes: int, stride: int, spatial_dims: int = 3) -> torch.Tensor:
assert spatial_dims == 3
out: torch.Tensor = F.avg_pool3d(x, kernel_size=1, stride=stride)
        zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2), out.size(3), out.size(4))
        # Keep the zero padding on the same device as `out`: the original
        # isinstance check against torch.FloatTensor moved the pads to CUDA
        # exactly when `out` lived on the CPU, so torch.cat below would fail.
        zero_pads = zero_pads.to(out.device)
        out = torch.cat([out.data, zero_pads], dim=1)
return out
def _make_layer(
self,
block: Type[Union[ResNetBlock, ResNetBottleneck]],
planes: int,
blocks: int,
spatial_dims: int,
shortcut_type: str,
stride: int = 1,
) -> nn.Sequential:
conv_type: Callable = Conv[Conv.CONV, spatial_dims]
norm_type: Callable = Norm[Norm.BATCH, spatial_dims]
downsample: Union[nn.Module, partial, None] = None
if stride != 1 or self.in_planes != planes * block.expansion:
if shortcut_type == "A":
                downsample = partial(
                    # _downsample_basic_block takes no kernel_size argument; the
                    # original code also bound kernel_size=1 here, which raised a
                    # TypeError whenever this type-A shortcut was invoked.
                    self._downsample_basic_block, planes=planes * block.expansion, stride=stride
                )
else:
downsample = nn.Sequential(
conv_type(self.in_planes, planes * block.expansion, kernel_size=1, stride=stride),
norm_type(planes * block.expansion),
)
layers = []
layers.append(
block(
in_planes=self.in_planes, planes=planes, spatial_dims=spatial_dims, stride=stride, downsample=downsample
)
)
self.in_planes = planes * block.expansion
for _i in range(1, blocks):
layers.append(block(self.in_planes, planes, spatial_dims=spatial_dims))
return nn.Sequential(*layers)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if not self.no_max_pool:
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def _resnet(
arch: str,
block: Type[Union[ResNetBlock, ResNetBottleneck]],
layers: List[int],
block_inplanes: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any,
) -> ResNet:
model = ResNet(block, layers, block_inplanes, **kwargs)
if pretrained:
        # The authors of the paper zipped the state_dict on Google Drive, so one
        # would need to download, unzip and read it (a 2.8 GB file for a ~150 MB
        # state dict). We would like to load the dict from a URL but need
        # somewhere to host the state dicts.
raise NotImplementedError("Currently not implemented, see comments in source code")
return model
def resnet10(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-10 with optional pretrained support when `spatial_dims` is 3.
Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet10", ResNetBlock, [1, 1, 1, 1], get_inplanes(), pretrained, progress, **kwargs)
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-18 with optional pretrained support when `spatial_dims` is 3.
Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet18", ResNetBlock, [2, 2, 2, 2], get_inplanes(), pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-34 with optional pretrained support when `spatial_dims` is 3.
Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet34", ResNetBlock, [3, 4, 6, 3], get_inplanes(), pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-50 with optional pretrained support when `spatial_dims` is 3.
Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet50", ResNetBottleneck, [3, 4, 6, 3], get_inplanes(), pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-101 with optional pretrained support when `spatial_dims` is 3.
Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet101", ResNetBottleneck, [3, 4, 23, 3], get_inplanes(), pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-152 with optional pretrained support when `spatial_dims` is 3.
Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet152", ResNetBottleneck, [3, 8, 36, 3], get_inplanes(), pretrained, progress, **kwargs)
def resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
"""ResNet-200 with optional pretrained support when `spatial_dims` is 3.
Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet200", ResNetBottleneck, [3, 24, 36, 3], get_inplanes(), pretrained, progress, **kwargs)
| [
"torch.nn.Linear",
"torch.nn.functional.avg_pool3d",
"torch.cat",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.as_tensor"
] | 1.5 | albarqounilab/MONAI | d4d173362b71a9af6c5414db591994f799e4fd2c |
1.5 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.data import decollate_batch
from monai.metrics import ROCAUCMetric, compute_roc_auc
from monai.transforms import Activations, AsDiscrete, Compose, ToTensor
TEST_CASE_1 = [
torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]),
torch.tensor([[0], [1], [0], [1]]),
True,
True,
"macro",
0.75,
]
TEST_CASE_2 = [
torch.tensor([[0.5], [0.5], [0.2], [8.3]]),
torch.tensor([[0], [1], [0], [1]]),
False,
False,
"macro",
0.875,
]
TEST_CASE_3 = [
torch.tensor([[0.5], [0.5], [0.2], [8.3]]),
torch.tensor([0, 1, 0, 1]),
False,
False,
"macro",
0.875,
]
TEST_CASE_4 = [
torch.tensor([0.5, 0.5, 0.2, 8.3]),
torch.tensor([0, 1, 0, 1]),
False,
False,
"macro",
0.875,
]
TEST_CASE_5 = [
torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5]]),
torch.tensor([[0], [1], [0], [1]]),
True,
True,
"none",
[0.75, 0.75],
]
TEST_CASE_6 = [
torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]),
torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]),
True,
False,
"weighted",
0.56667,
]
TEST_CASE_7 = [
torch.tensor([[0.1, 0.9], [0.3, 1.4], [0.2, 0.1], [0.1, 0.5], [0.1, 0.5]]),
torch.tensor([[1, 0], [0, 1], [0, 0], [1, 1], [0, 1]]),
True,
False,
"micro",
0.62,
]
class TestComputeROCAUC(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])
def test_value(self, y_pred, y, softmax, to_onehot, average, expected_value):
y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)])
y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot, n_classes=2)])
y_pred = torch.stack([y_pred_trans(i) for i in decollate_batch(y_pred)], dim=0)
y = torch.stack([y_trans(i) for i in decollate_batch(y)], dim=0)
result = compute_roc_auc(y_pred=y_pred, y=y, average=average)
np.testing.assert_allclose(expected_value, result, rtol=1e-5)
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4, TEST_CASE_5, TEST_CASE_6, TEST_CASE_7])
def test_class_value(self, y_pred, y, softmax, to_onehot, average, expected_value):
y_pred_trans = Compose([ToTensor(), Activations(softmax=softmax)])
y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=to_onehot, n_classes=2)])
y_pred = [y_pred_trans(i) for i in decollate_batch(y_pred)]
y = [y_trans(i) for i in decollate_batch(y)]
metric = ROCAUCMetric(average=average)
metric(y_pred=y_pred, y=y)
result = metric.aggregate()
metric.reset()
np.testing.assert_allclose(expected_value, result, rtol=1e-5)
if __name__ == "__main__":
unittest.main()
| [
"torch.tensor"
] | 1.5 | albarqounilab/MONAI | bb0b307d68021a243011a58fd82a1d275f00a51a |
1.8 | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss for PPO algorithm."""
import torch
import alf
from alf.algorithms.actor_critic_loss import ActorCriticLoss
from alf.utils.losses import element_wise_squared_loss
from alf.utils import value_ops
@alf.configurable
class PPOLoss(ActorCriticLoss):
"""PPO loss."""
def __init__(self,
gamma=0.99,
td_error_loss_fn=element_wise_squared_loss,
td_lambda=0.95,
normalize_advantages=True,
advantage_clip=None,
entropy_regularization=None,
td_loss_weight=1.0,
importance_ratio_clipping=0.2,
log_prob_clipping=0.0,
check_numerics=False,
debug_summaries=False,
name='PPOLoss'):
"""
Implement the simplified surrogate loss in equation (9) of `Proximal
Policy Optimization Algorithms <https://arxiv.org/abs/1707.06347>`_.
The total loss equals to
.. code-block:: python
(policy_gradient_loss # (L^{CLIP} in equation (9))
+ td_loss_weight * td_loss # (L^{VF} in equation (9))
- entropy_regularization * entropy)
This loss works with ``PPOAlgorithm``. The advantages and returns are
        pre-computed by ``PPOAlgorithm.preprocess()``. One known difference from
        `baselines.ppo2` is that the value estimate is not clipped here, while
        `baselines.ppo2` also clips the value estimate when it deviates too much
        from the returns.
Args:
gamma (float|list[float]): A discount factor for future rewards. For
multi-dim reward, this can also be a list of discounts, each
discount applies to a reward dim.
            td_error_loss_fn (Callable): A function for computing the TD errors
loss. This function takes as input the target and the estimated
Q values and returns the loss for each element of the batch.
td_lambda (float): Lambda parameter for TD-lambda computation.
            normalize_advantages (bool): If True, normalize advantages to zero
                mean and unit variance within the batch for calculating the
                policy gradient.
advantage_clip (float): If set, clip advantages to :math:`[-x, x]`
entropy_regularization (float): Coefficient for entropy
regularization loss term.
            td_loss_weight (float): the weight for the TD error loss.
importance_ratio_clipping (float): Epsilon in clipped, surrogate
PPO objective. See the cited paper for more detail.
log_prob_clipping (float): If >0, clipping log probs to the range
``(-log_prob_clipping, log_prob_clipping)`` to prevent ``inf/NaN``
values.
check_numerics (bool): If true, checking for ``NaN/Inf`` values. For
debugging only.
            name (str): the name of this loss.
"""
super(PPOLoss, self).__init__(
gamma=gamma,
td_error_loss_fn=td_error_loss_fn,
use_gae=True,
td_lambda=td_lambda,
use_td_lambda_return=True,
normalize_advantages=normalize_advantages,
advantage_clip=advantage_clip,
entropy_regularization=entropy_regularization,
td_loss_weight=td_loss_weight,
debug_summaries=debug_summaries,
name=name)
self._importance_ratio_clipping = importance_ratio_clipping
self._log_prob_clipping = log_prob_clipping
self._check_numerics = check_numerics
def _pg_loss(self, info, advantages):
scope = alf.summary.scope(self._name)
importance_ratio, importance_ratio_clipped = value_ops.action_importance_ratio(
action_distribution=info.action_distribution,
collect_action_distribution=info.rollout_action_distribution,
action=info.action,
clipping_mode='double_sided',
scope=scope,
importance_ratio_clipping=self._importance_ratio_clipping,
log_prob_clipping=self._log_prob_clipping,
check_numerics=self._check_numerics,
debug_summaries=self._debug_summaries)
# Pessimistically choose the maximum objective value for clipped and
# unclipped importance ratios.
pg_objective = -importance_ratio * advantages
pg_objective_clipped = -importance_ratio_clipped * advantages
policy_gradient_loss = torch.max(pg_objective, pg_objective_clipped)
if self._debug_summaries and alf.summary.should_record_summaries():
with scope:
alf.summary.histogram('pg_objective', pg_objective)
alf.summary.histogram('pg_objective_clipped',
pg_objective_clipped)
if self._check_numerics:
assert torch.all(torch.isfinite(policy_gradient_loss))
return policy_gradient_loss
def _calc_returns_and_advantages(self, info, value):
return info.returns, info.advantages
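    # Editor's sketch (not part of ALF): the clipped surrogate objective used in
    # _pg_loss above, written out for a toy batch. `ratio` stands for
    # exp(new_log_prob - old_log_prob) and eps for importance_ratio_clipping:
    #   ratio = torch.tensor([0.7, 1.0, 1.3])
    #   adv = torch.tensor([1.0, -0.5, 2.0])
    #   eps = 0.2
    #   pg_loss = torch.max(-ratio * adv, -ratio.clamp(1 - eps, 1 + eps) * adv)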
| [
"torch.isfinite",
"torch.max"
] | 1.8.1 | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 |
1.8 | # Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import torch
import numpy as np
import alf
from alf.data_structures import LossInfo
from alf.utils.losses import element_wise_squared_loss
from alf.utils.summary_utils import safe_mean_hist_summary
from alf.utils import tensor_utils, dist_utils, value_ops
from .algorithm import Loss
ActorCriticLossInfo = namedtuple("ActorCriticLossInfo",
["pg_loss", "td_loss", "neg_entropy"])
def _normalize_advantages(advantages, variance_epsilon=1e-8):
# advantages is of shape [T, B] or [T, B, N], where N is reward dim
# this function normalizes over all elements in the input advantages
shape = advantages.shape
# shape: [TB, 1] or [TB, N]
advantages = advantages.reshape(np.prod(advantages.shape[:2]), -1)
adv_mean = advantages.mean(0)
adv_var = torch.var(advantages, dim=0, unbiased=False)
normalized_advantages = (
(advantages - adv_mean) / (torch.sqrt(adv_var) + variance_epsilon))
return normalized_advantages.reshape(*shape)
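# Editor's sketch (not part of ALF): _normalize_advantages flattens the [T, B]
# (or [T, B, N]) input and standardizes it per reward dim, preserving the shape:
#   adv = torch.arange(6.).reshape(3, 2)   # T=3, B=2
#   out = _normalize_advantages(adv)
#   # out.mean() ~= 0 and out.var(unbiased=False) ~= 1, out.shape == (3, 2)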
@alf.configurable
class ActorCriticLoss(Loss):
def __init__(self,
gamma=0.99,
td_error_loss_fn=element_wise_squared_loss,
use_gae=False,
td_lambda=0.95,
use_td_lambda_return=True,
normalize_advantages=False,
advantage_clip=None,
entropy_regularization=None,
td_loss_weight=1.0,
debug_summaries=False,
name="ActorCriticLoss"):
"""An actor-critic loss equals to
.. code-block:: python
(policy_gradient_loss
+ td_loss_weight * td_loss
- entropy_regularization * entropy)
Args:
gamma (float|list[float]): A discount factor for future rewards. For
multi-dim reward, this can also be a list of discounts, each
discount applies to a reward dim.
            td_error_loss_fn (Callable): A function for computing the TD errors
loss. This function takes as input the target and the estimated
Q values and returns the loss for each element of the batch.
use_gae (bool): If True, uses generalized advantage estimation for
computing per-timestep advantage. Else, just subtracts value
predictions from empirical return.
use_td_lambda_return (bool): Only effective if use_gae is True.
If True, uses ``td_lambda_return`` for training value function.
``(td_lambda_return = gae_advantage + value_predictions)``.
td_lambda (float): Lambda parameter for TD-lambda computation.
            normalize_advantages (bool): If True, normalize advantages to zero
                mean and unit variance within the batch for calculating the
                policy gradient. This is commonly used for PPO.
advantage_clip (float): If set, clip advantages to :math:`[-x, x]`
entropy_regularization (float): Coefficient for entropy
regularization loss term.
            td_loss_weight (float): the weight for the TD error loss.
"""
super().__init__(name=name)
self._td_loss_weight = td_loss_weight
self._name = name
self._gamma = torch.tensor(gamma)
self._td_error_loss_fn = td_error_loss_fn
self._use_gae = use_gae
self._lambda = td_lambda
self._use_td_lambda_return = use_td_lambda_return
self._normalize_advantages = normalize_advantages
assert advantage_clip is None or advantage_clip > 0, (
"Clipping value should be positive!")
self._advantage_clip = advantage_clip
self._entropy_regularization = entropy_regularization
self._debug_summaries = debug_summaries
@property
def gamma(self):
return self._gamma.clone()
def forward(self, info):
"""Cacluate actor critic loss. The first dimension of all the tensors is
time dimension and the second dimesion is the batch dimension.
Args:
info (namedtuple): information for calculating loss. All tensors are
time-major. It should contain the following fields:
- reward:
- step_type:
- discount:
- action:
- action_distribution:
- value:
Returns:
LossInfo: with ``extra`` being ``ActorCriticLossInfo``.
"""
value = info.value
returns, advantages = self._calc_returns_and_advantages(info, value)
if self._debug_summaries and alf.summary.should_record_summaries():
with alf.summary.scope(self._name):
def _summarize(v, r, adv, suffix):
alf.summary.scalar("values" + suffix, v.mean())
alf.summary.scalar("returns" + suffix, r.mean())
safe_mean_hist_summary('advantages' + suffix, adv)
alf.summary.scalar(
"explained_variance_of_return_by_value" + suffix,
tensor_utils.explained_variance(v, r))
if value.ndim == 2:
_summarize(value, returns, advantages, '')
else:
for i in range(value.shape[2]):
suffix = '/' + str(i)
_summarize(value[..., i], returns[..., i],
advantages[..., i], suffix)
if self._normalize_advantages:
advantages = _normalize_advantages(advantages)
if self._advantage_clip:
advantages = torch.clamp(advantages, -self._advantage_clip,
self._advantage_clip)
if info.reward_weights != ():
advantages = (advantages * info.reward_weights).sum(-1)
pg_loss = self._pg_loss(info, advantages.detach())
td_loss = self._td_error_loss_fn(returns.detach(), value)
if td_loss.ndim == 3:
td_loss = td_loss.mean(dim=2)
loss = pg_loss + self._td_loss_weight * td_loss
entropy_loss = ()
if self._entropy_regularization is not None:
entropy, entropy_for_gradient = dist_utils.entropy_with_fallback(
info.action_distribution, return_sum=False)
entropy_loss = alf.nest.map_structure(lambda x: -x, entropy)
loss -= self._entropy_regularization * sum(
alf.nest.flatten(entropy_for_gradient))
return LossInfo(
loss=loss,
extra=ActorCriticLossInfo(
td_loss=td_loss, pg_loss=pg_loss, neg_entropy=entropy_loss))
def _pg_loss(self, info, advantages):
action_log_prob = dist_utils.compute_log_probability(
info.action_distribution, info.action)
return -advantages * action_log_prob
def _calc_returns_and_advantages(self, info, value):
if info.reward.ndim == 3:
# [T, B, D] or [T, B, 1]
discounts = info.discount.unsqueeze(-1) * self._gamma
else:
# [T, B]
discounts = info.discount * self._gamma
returns = value_ops.discounted_return(
rewards=info.reward,
values=value,
step_types=info.step_type,
discounts=discounts)
returns = tensor_utils.tensor_extend(returns, value[-1])
if not self._use_gae:
advantages = returns - value
else:
advantages = value_ops.generalized_advantage_estimation(
rewards=info.reward,
values=value,
step_types=info.step_type,
discounts=discounts,
td_lambda=self._lambda)
advantages = tensor_utils.tensor_extend_zero(advantages)
if self._use_td_lambda_return:
returns = advantages + value
return returns, advantages
def calc_loss(self, info):
return self(info)
| [
"torch.var",
"torch.sqrt",
"torch.clamp",
"torch.tensor"
] | 1.8.1 | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 |
1.8 | # Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import torch
from absl.testing import parameterized
import alf
from alf import data_structures as ds
from alf.utils.data_buffer import RingBuffer
from alf.utils.data_buffer_test import get_batch, DataItem, RingBufferTest
from alf.experience_replayers.replay_buffer import ReplayBuffer
from alf.algorithms.data_transformer import HindsightExperienceTransformer
class ReplayBufferTest(RingBufferTest):
def tearDown(self):
super().tearDown()
def test_replay_with_hindsight_relabel(self):
self.max_length = 8
torch.manual_seed(0)
replay_buffer = ReplayBuffer(
data_spec=self.data_spec,
num_environments=2,
max_length=self.max_length,
keep_episodic_info=True,
step_type_field="t",
with_replacement=True)
transform = HindsightExperienceTransformer(
self.data_spec,
her_proportion=0.8,
achieved_goal_field="o.a",
desired_goal_field="o.g")
steps = [
[
ds.StepType.FIRST, # will be overwritten
ds.StepType.MID, # idx == 1 in buffer
ds.StepType.LAST,
ds.StepType.FIRST,
ds.StepType.MID,
ds.StepType.MID,
ds.StepType.LAST,
ds.StepType.FIRST,
ds.StepType.MID # idx == 0
],
[
ds.StepType.FIRST, # will be overwritten in RingBuffer
ds.StepType.LAST, # idx == 1 in RingBuffer
ds.StepType.FIRST,
ds.StepType.MID,
ds.StepType.MID,
ds.StepType.LAST,
ds.StepType.FIRST,
ds.StepType.MID,
ds.StepType.MID # idx == 0
]
]
# insert data that will be overwritten later
for b, t in list(itertools.product(range(2), range(8))):
batch = get_batch([b], self.dim, t=steps[b][t], x=0.1 * t + b)
replay_buffer.add_batch(batch, batch.env_id)
# insert data
for b, t in list(itertools.product(range(2), range(9))):
batch = get_batch([b], self.dim, t=steps[b][t], x=0.1 * t + b)
replay_buffer.add_batch(batch, batch.env_id)
# Test padding
idx = torch.tensor([[7, 0, 0, 6, 3, 3, 3, 0], [6, 0, 5, 2, 2, 2, 0,
6]])
pos = replay_buffer._pad(idx, torch.tensor([[0] * 8, [1] * 8]))
self.assertTrue(
torch.equal(
pos,
torch.tensor([[15, 16, 16, 14, 11, 11, 11, 16],
[14, 16, 13, 10, 10, 10, 16, 14]],
dtype=torch.int64)))
# Verify _index is built correctly.
# Note, the _index_pos 8 represents headless timesteps, which are
# outdated and not the same as the result of padding: 16.
pos = torch.tensor([[15, 8, 8, 14, 11, 11, 11, 16],
[14, 8, 13, 10, 10, 10, 16, 14]])
self.assertTrue(torch.equal(replay_buffer._indexed_pos, pos))
self.assertTrue(
torch.equal(replay_buffer._headless_indexed_pos,
torch.tensor([10, 9])))
# Save original exp for later testing.
g_orig = replay_buffer._buffer.o["g"].clone()
r_orig = replay_buffer._buffer.reward.clone()
# HER selects indices [0, 2, 3, 4] to relabel, from all 5:
# env_ids: [[0, 0], [1, 1], [0, 0], [1, 1], [0, 0]]
# pos: [[6, 7], [1, 2], [1, 2], [3, 4], [5, 6]] + 8
# selected: x x x x
# future: [ 7 2 2 4 6 ] + 8
# g [[.7,.7],[0, 0], [.2,.2],[1.4,1.4],[.6,.6]] # 0.1 * t + b with default 0
# reward: [[-1,0], [-1,-1],[-1,0], [-1,0], [-1,0]] # recomputed with default -1
env_ids = torch.tensor([0, 0, 1, 0])
dist = replay_buffer.steps_to_episode_end(
replay_buffer._pad(torch.tensor([7, 2, 4, 6]), env_ids), env_ids)
self.assertEqual(list(dist), [1, 0, 1, 0])
# Test HER relabeled experiences
res, info = replay_buffer.get_batch(5, 2)
res = res._replace(batch_info=info)
res = transform.transform_experience(res)
self.assertEqual(list(res.o["g"].shape), [5, 2])
# Test relabeling doesn't change original experience
self.assertTrue(torch.allclose(r_orig, replay_buffer._buffer.reward))
self.assertTrue(torch.allclose(g_orig, replay_buffer._buffer.o["g"]))
# test relabeled goals
g = torch.tensor([0.7, 0., .2, 1.4, .6]).unsqueeze(1).expand(5, 2)
self.assertTrue(torch.allclose(res.o["g"], g))
# test relabeled rewards
r = torch.tensor([[-1., 0.], [-1., -1.], [-1., 0.], [-1., 0.],
[-1., 0.]])
self.assertTrue(torch.allclose(res.reward, r))
# Gold standard functions to test HER.
def episode_end_indices(self, b):
"""Compute episode ending indices in RingBuffer b.
Args:
b (ReplayBuffer): HER ReplayBuffer object.
Returns:
epi_ends (tensor): shape ``(2, E)``, ``epi_ends[0]`` are the
``env_ids``, ``epi_ends[1]`` are the ending positions of the
episode ending/LAST steps.
We assume every possible ``env_id`` is present.
"""
step_types = alf.nest.get_field(b._buffer, b._step_type_field)
epi_ends = torch.where(step_types == ds.StepType.LAST)
epi_ends = alf.nest.map_structure(lambda d: d.type(torch.int64),
epi_ends)
# if an env has no LAST step, populate with pos - 1
last_step_pos = b.circular(b._current_pos - 1)
all_envs = torch.arange(b._num_envs)
non_last_step_envs = torch.where(
step_types[(all_envs, last_step_pos)] != ds.StepType.LAST)[0]
epi_ends = (torch.cat([epi_ends[0], non_last_step_envs]),
torch.cat([epi_ends[1],
last_step_pos[non_last_step_envs]]))
return epi_ends
# Another gold standard function
def steps_to_episode_end(self, b, env_ids, idx):
"""Compute the distance to the closest episode end in future.
Args:
b (ReplayBuffer): HER ReplayBuffer object.
env_ids (tensor): shape ``L``.
idx (tensor): shape ``L``, indexes of the current timesteps in
the replay buffer.
Returns:
tensor of shape ``L``.
"""
epi_ends = self.episode_end_indices(b)
MAX_INT = 1000000000
pos = b._pad(idx, env_ids)
padded_ends = b._pad(epi_ends[1], epi_ends[0])
min_dist = torch.ones_like(pos)
# Using a loop over envs reduces memory by num_envs^3.
# Due to the small memory footprint, speed is also much faster.
for env_id in range(b._num_envs):
(pos_env_index, ) = torch.where(env_ids == env_id)
(end_env_index, ) = torch.where(epi_ends[0] == env_id)
_pos = torch.gather(pos, dim=0, index=pos_env_index)
_ends = torch.gather(padded_ends, dim=0, index=end_env_index)
L = _pos.shape[0]
E = _ends.shape[0]
dist = _ends.unsqueeze(0).expand(L, E) - _pos.unsqueeze(1).expand(
L, E)
positive_dist = torch.where(
dist < 0, torch.tensor(MAX_INT, dtype=torch.int64), dist)
_min_dist, _ = torch.min(positive_dist, dim=1)
min_dist.scatter_(dim=0, index=pos_env_index, src=_min_dist)
return min_dist
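    # Editor's sketch: for a single env with episode ends at padded positions
    # [3, 7], queries at padded positions [1, 3, 5] yield distances [2, 0, 2]
    # -- the smallest non-negative (end - pos) over all future episode ends.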
def generate_step_types(self, num_envs, max_steps, end_prob):
steps = torch.tensor([ds.StepType.MID] * max_steps * num_envs)
# start with FIRST
env_firsts = torch.arange(num_envs)
steps[env_firsts * max_steps] = torch.tensor([ds.StepType.FIRST])
# randomly insert episode ends (no overlapping positions)
segs = int(max_steps * num_envs * end_prob)
ends = (torch.arange(segs) * (1. / end_prob)).type(torch.int64)
ends += (torch.rand(segs) * (1. / end_prob - 1) + 1).type(torch.int64)
steps[ends] = torch.tensor([ds.StepType.LAST]).expand(segs)
valid_starts, = torch.where(
ends +
1 != torch.arange(max_steps, num_envs * max_steps, max_steps))
steps[(ends + 1)[valid_starts]] = torch.tensor(
[ds.StepType.FIRST]).expand(valid_starts.shape[0])
return steps
@parameterized.parameters([
(False, False),
(False, True),
(True, False),
])
def test_replay_buffer(self, allow_multiprocess, with_replacement):
replay_buffer = ReplayBuffer(
data_spec=self.data_spec,
num_environments=self.num_envs,
max_length=self.max_length,
allow_multiprocess=allow_multiprocess)
batch1 = get_batch([0, 4, 7], self.dim, t=0, x=0.1)
replay_buffer.add_batch(batch1, batch1.env_id)
self.assertEqual(replay_buffer._current_size,
torch.tensor([1, 0, 0, 0, 1, 0, 0, 1]))
self.assertEqual(replay_buffer._current_pos,
torch.tensor([1, 0, 0, 0, 1, 0, 0, 1]))
self.assertRaises(AssertionError, replay_buffer.get_batch, 8, 1)
batch2 = get_batch([1, 2, 3, 5, 6], self.dim, t=0, x=0.2)
replay_buffer.add_batch(batch2, batch2.env_id)
self.assertEqual(replay_buffer._current_size,
torch.tensor([1, 1, 1, 1, 1, 1, 1, 1]))
self.assertEqual(replay_buffer._current_pos,
torch.tensor([1, 1, 1, 1, 1, 1, 1, 1]))
batch = replay_buffer.gather_all()
self.assertEqual(list(batch.t.shape), [8, 1])
# test that RingBuffer detaches gradients of inputs
self.assertFalse(batch.x.requires_grad)
self.assertRaises(AssertionError, replay_buffer.get_batch, 8, 2)
replay_buffer.get_batch(13, 1)[0]
batch = replay_buffer.get_batch(8, 1)[0]
# squeeze the time dimension
batch = alf.nest.map_structure(lambda bat: bat.squeeze(1), batch)
bat1 = alf.nest.map_structure(lambda bat: bat[batch1.env_id], batch)
bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id], batch)
self.assertEqual(bat1.env_id, batch1.env_id)
self.assertEqual(bat1.x, batch1.x)
self.assertEqual(bat1.t, batch1.t)
self.assertEqual(bat2.env_id, batch2.env_id)
self.assertEqual(bat2.x, batch2.x)
self.assertEqual(bat2.t, batch2.t)
for t in range(1, 10):
batch3 = get_batch([0, 4, 7], self.dim, t=t, x=0.3)
j = t + 1
s = min(t + 1, self.max_length)
replay_buffer.add_batch(batch3, batch3.env_id)
self.assertEqual(replay_buffer._current_size,
torch.tensor([s, 1, 1, 1, s, 1, 1, s]))
self.assertEqual(replay_buffer._current_pos,
torch.tensor([j, 1, 1, 1, j, 1, 1, j]))
batch2 = get_batch([1, 2, 3, 5, 6], self.dim, t=1, x=0.2)
replay_buffer.add_batch(batch2, batch2.env_id)
batch = replay_buffer.get_batch(8, 1)[0]
# squeeze the time dimension
batch = alf.nest.map_structure(lambda bat: bat.squeeze(1), batch)
bat3 = alf.nest.map_structure(lambda bat: bat[batch3.env_id], batch)
bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id], batch)
self.assertEqual(bat3.env_id, batch3.env_id)
self.assertEqual(bat3.x, batch3.x)
self.assertEqual(bat2.env_id, batch2.env_id)
self.assertEqual(bat2.x, batch2.x)
batch = replay_buffer.get_batch(8, 2)[0]
t2 = []
t3 = []
for t in range(2):
batch_t = alf.nest.map_structure(lambda b: b[:, t], batch)
bat3 = alf.nest.map_structure(lambda bat: bat[batch3.env_id],
batch_t)
bat2 = alf.nest.map_structure(lambda bat: bat[batch2.env_id],
batch_t)
t2.append(bat2.t)
self.assertEqual(bat3.env_id, batch3.env_id)
self.assertEqual(bat3.x, batch3.x)
self.assertEqual(bat2.env_id, batch2.env_id)
self.assertEqual(bat2.x, batch2.x)
t3.append(bat3.t)
# Test time consistency
self.assertEqual(t2[0] + 1, t2[1])
self.assertEqual(t3[0] + 1, t3[1])
batch = replay_buffer.get_batch(128, 2)[0]
self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])
self.assertEqual(list(batch.t.shape), [128, 2])
batch = replay_buffer.get_batch(10, 2)[0]
self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])
self.assertEqual(list(batch.t.shape), [10, 2])
batch = replay_buffer.get_batch(4, 2)[0]
self.assertEqual(batch.t[:, 0] + 1, batch.t[:, 1])
self.assertEqual(list(batch.t.shape), [4, 2])
# Test gather_all()
# Exception because the size of all the environments are not same
self.assertRaises(AssertionError, replay_buffer.gather_all)
for t in range(2, 10):
batch4 = get_batch([1, 2, 3, 5, 6], self.dim, t=t, x=0.4)
replay_buffer.add_batch(batch4, batch4.env_id)
batch = replay_buffer.gather_all()
self.assertEqual(list(batch.t.shape), [8, 4])
# Test clear()
replay_buffer.clear()
self.assertEqual(replay_buffer.total_size, 0)
def test_recent_data_and_without_replacement(self):
num_envs = 4
max_length = 100
replay_buffer = ReplayBuffer(
data_spec=self.data_spec,
num_environments=num_envs,
max_length=max_length,
with_replacement=False,
recent_data_ratio=0.5,
recent_data_steps=4)
replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=0, x=0.))
batch, info = replay_buffer.get_batch(4, 1)
self.assertEqual(info.env_ids, torch.tensor([0, 1, 2, 3]))
replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=1, x=1.0))
batch, info = replay_buffer.get_batch(8, 1)
self.assertEqual(info.env_ids, torch.tensor([0, 1, 2, 3] * 2))
for t in range(2, 32):
replay_buffer.add_batch(
get_batch([0, 1, 2, 3], self.dim, t=t, x=t))
batch, info = replay_buffer.get_batch(32, 1)
self.assertEqual(info.env_ids[16:], torch.tensor([0, 1, 2, 3] * 4))
# The first half is from recent data
self.assertEqual(info.env_ids[:16], torch.tensor([0, 1, 2, 3] * 4))
self.assertEqual(
info.positions[:16],
torch.tensor([28] * 4 + [29] * 4 + [30] * 4 + [31] * 4))
def test_num_earliest_frames_ignored_uniform(self):
num_envs = 4
max_length = 100
replay_buffer = ReplayBuffer(
data_spec=self.data_spec,
num_environments=num_envs,
max_length=max_length,
keep_episodic_info=False,
num_earliest_frames_ignored=2)
replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=0, x=0.))
# not enough data
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)
replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=1, x=0.))
# not enough data
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)
replay_buffer.add_batch(get_batch([0, 1, 2, 3], self.dim, t=2, x=0.))
for _ in range(10):
batch, batch_info = replay_buffer.get_batch(1, 1)
self.assertEqual(batch.t, torch.tensor([[2]]))
def test_num_earliest_frames_ignored_priortized(self):
replay_buffer = ReplayBuffer(
data_spec=self.data_spec,
num_environments=self.num_envs,
max_length=self.max_length,
num_earliest_frames_ignored=2,
keep_episodic_info=False,
prioritized_sampling=True)
batch1 = get_batch([1], self.dim, x=0.25, t=0)
replay_buffer.add_batch(batch1, batch1.env_id)
# not enough data
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)
batch2 = get_batch([1], self.dim, x=0.25, t=1)
replay_buffer.add_batch(batch2, batch1.env_id)
# not enough data
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)
batch3 = get_batch([1], self.dim, x=0.25, t=2)
replay_buffer.add_batch(batch3, batch1.env_id)
for _ in range(10):
batch, batch_info = replay_buffer.get_batch(1, 1)
self.assertEqual(batch_info.env_ids,
torch.tensor([1], dtype=torch.int64))
self.assertEqual(batch_info.importance_weights, 1.)
self.assertEqual(batch_info.importance_weights, torch.tensor([1.]))
self.assertEqual(batch.t, torch.tensor([[2]]))
def test_prioritized_replay(self):
replay_buffer = ReplayBuffer(
data_spec=self.data_spec,
num_environments=self.num_envs,
max_length=self.max_length,
prioritized_sampling=True)
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 1)
batch1 = get_batch([1], self.dim, x=0.25, t=0)
replay_buffer.add_batch(batch1, batch1.env_id)
batch, batch_info = replay_buffer.get_batch(1, 1)
self.assertEqual(batch_info.env_ids,
torch.tensor([1], dtype=torch.int64))
self.assertEqual(batch_info.importance_weights, 1.)
self.assertEqual(batch_info.importance_weights, torch.tensor([1.]))
self.assertRaises(AssertionError, replay_buffer.get_batch, 1, 2)
batch2 = get_batch([1], self.dim, x=0.5, t=1)
replay_buffer.add_batch(batch1, batch1.env_id)
batch, batch_info = replay_buffer.get_batch(4, 2)
self.assertEqual(batch_info.env_ids,
torch.tensor([1], dtype=torch.int64))
self.assertEqual(batch_info.importance_weights, torch.tensor([1.]))
self.assertEqual(batch_info.importance_weights, torch.tensor([1.] * 4))
batch, batch_info = replay_buffer.get_batch(1000, 1)
n0 = (replay_buffer.circular(batch_info.positions) == 0).sum()
n1 = (replay_buffer.circular(batch_info.positions) == 1).sum()
self.assertEqual(n0, 500)
self.assertEqual(n1, 500)
replay_buffer.update_priority(
env_ids=torch.tensor([1, 1], dtype=torch.int64),
positions=torch.tensor([0, 1], dtype=torch.int64),
priorities=torch.tensor([0.5, 1.5]))
batch, batch_info = replay_buffer.get_batch(1000, 1)
n0 = (replay_buffer.circular(batch_info.positions) == 0).sum()
n1 = (replay_buffer.circular(batch_info.positions) == 1).sum()
self.assertEqual(n0, 250)
self.assertEqual(n1, 750)
batch2 = get_batch([0, 2], self.dim, x=0.5, t=1)
replay_buffer.add_batch(batch2, batch2.env_id)
batch, batch_info = replay_buffer.get_batch(1000, 1)
def _get(env_id, pos):
flag = ((batch_info.env_ids == env_id) *
(batch_info.positions == replay_buffer._pad(pos, env_id)))
w = batch_info.importance_weights[torch.nonzero(
flag, as_tuple=True)[0]]
return flag.sum(), w
n0, w0 = _get(0, 0)
n1, w1 = _get(1, 0)
n2, w2 = _get(1, 1)
n3, w3 = _get(2, 0)
self.assertEqual(n0, 300)
self.assertEqual(n1, 100)
self.assertEqual(n2, 300)
self.assertEqual(n3, 300)
self.assertTrue(torch.all(w0 == 1.2))
self.assertTrue(torch.all(w1 == 0.4))
self.assertTrue(torch.all(w2 == 1.2))
self.assertTrue(torch.all(w3 == 1.2))
replay_buffer.update_priority(
env_ids=torch.tensor([1, 2], dtype=torch.int64),
positions=torch.tensor([1, 0], dtype=torch.int64),
priorities=torch.tensor([1.0, 1.0]))
batch, batch_info = replay_buffer.get_batch(1000, 1)
n0, w0 = _get(0, 0)
n1, w1 = _get(1, 0)
n2, w2 = _get(1, 1)
n3, w3 = _get(2, 0)
self.assertEqual(n0, 375)
self.assertEqual(n1, 125)
self.assertEqual(n2, 250)
self.assertEqual(n3, 250)
self.assertTrue(torch.all(w0 == 1.5))
self.assertTrue(torch.all(w1 == 0.5))
self.assertTrue(torch.all(w2 == 1.0))
self.assertTrue(torch.all(w3 == 1.0))
if __name__ == '__main__':
alf.test.main()
| [
"torch.rand",
"torch.cat",
"torch.nonzero",
"torch.min",
"torch.arange",
"torch.gather",
"torch.manual_seed",
"torch.all",
"torch.tensor",
"torch.ones_like",
"torch.allclose",
"torch.equal",
"torch.where"
] | 1.8.1 | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 |
1.8 | # Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import numpy as np
import torch
from typing import Callable
import alf
from alf.utils import common
from alf.utils import tensor_utils
from . import adam_tf, adamw
def _rbf_func(x):
r"""
Compute the rbf kernel and its gradient w.r.t. first entry
:math:`K(x, x), \nabla_x K(x, x)`, for computing ``svgd``_grad.
Args:
x (Tensor): set of N particles, shape (N x D), where D is the
            dimension of each particle
Returns:
:math:`K(x, x)` (Tensor): the RBF kernel of shape (N x N)
:math:`\nabla_x K(x, x)` (Tensor): the derivative of RBF kernel of shape (N x N x D)
"""
N, D = x.shape
diff = x.unsqueeze(1) - x.unsqueeze(0) # [N, N, D]
dist_sq = torch.sum(diff**2, -1) # [N, N]
h, _ = torch.median(dist_sq.view(-1), dim=0)
if h == 0.:
h = torch.ones_like(h)
else:
h = h / max(np.log(N), 1.)
kappa = torch.exp(-dist_sq / h) # [N, N]
kappa_grad = -2 * kappa.unsqueeze(-1) * diff / h # [N, N, D]
return kappa, kappa_grad
def _score_func(x, alpha=1e-5):
r"""
Compute the stein estimator of the score function
:math:`\nabla\log q = -(K + \alpha I)^{-1}\nabla K`,
for computing ``gfsf``_grad.
Args:
x (Tensor): set of N particles, shape (N x D), where D is the
            dimension of each particle
        alpha (float): weight of regularization for the inverse kernel;
            this parameter turns out to be crucial for convergence.
Returns:
:math:`\nabla\log q` (Tensor): the score function of shape (N x D)
"""
N, D = x.shape
diff = x.unsqueeze(1) - x.unsqueeze(0) # [N, N, D]
dist_sq = torch.sum(diff**2, -1) # [N, N]
h, _ = torch.median(dist_sq.view(-1), dim=0)
if h == 0.:
h = torch.ones_like(h)
else:
h = h / max(np.log(N), 1.)
kappa = torch.exp(-dist_sq / h) # [N, N]
kappa_inv = torch.inverse(kappa + alpha * torch.eye(N)) # [N, N]
kappa_grad = -2 * kappa.unsqueeze(-1) * diff / h # [N, N, D]
kappa_grad = kappa_grad.sum(0) # [N, D]
return -kappa_inv @ kappa_grad
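# Editor's sketch (not part of ALF): a quick sanity check of the two kernel
# helpers above on a toy particle set:
#   x = torch.randn(4, 2)      # N=4 particles, D=2
#   K, dK = _rbf_func(x)       # K: [4, 4], symmetric; dK: [4, 4, 2]
#   score = _score_func(x)     # Stein score estimate, shape [4, 2]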
def wrap_optimizer(cls):
"""A helper function to construct torch optimizers with
params as [{'params': []}]. After construction, new parameter
groups can be added by using the add_param_group() method.
This wrapper also clips gradients first before calling ``step()``.
"""
NewClsName = cls.__name__ + "_"
NewCls = type(NewClsName, (cls, ), {})
NewCls.counter = 0
@common.add_method(NewCls)
def __init__(self,
*,
gradient_clipping=None,
clip_by_global_norm=False,
parvi=None,
repulsive_weight=1.,
name=None,
**kwargs):
"""
Args:
            gradient_clipping (float): If not None, serves as a positive threshold
                for clipping gradient norms.
clip_by_global_norm (bool): If True, use `tensor_utils.clip_by_global_norm`
to clip gradient. If False, use `tensor_utils.clip_by_norms` for
each grad.
            parvi (string): if not ``None``, parameters with attribute
``ensemble_group`` will be updated by particle-based vi algorithm
specified by ``parvi``, options are [``svgd``, ``gfsf``],
* Stein Variational Gradient Descent (SVGD)
Liu, Qiang, and Dilin Wang. "Stein Variational Gradient Descent:
A General Purpose Bayesian Inference Algorithm." NIPS. 2016.
* Wasserstein Gradient Flow with Smoothed Functions (GFSF)
Liu, Chang, et al. "Understanding and accelerating particle-based
variational inference." ICML. 2019.
To work with the ``parvi`` option, the parameters added to the
optimizer (by ``add_param_group``) should have an (int) attribute
``ensemble_group``. See ``FCBatchEnsemble`` as an example.
repulsive_weight (float): the weight of the repulsive gradient term
for parameters with attribute ``ensemble_group``.
name (str): the name displayed when summarizing the gradient norm. If
None, then a global name in the format of "class_name_i" will be
created, where "i" is the global optimizer id.
kwargs: arguments passed to the constructor of the underline torch
optimizer. If ``lr`` is given and it is a ``Callable``, it is
                treated as a learning rate scheduler and will be called every
                time ``step()`` is called to get the latest learning rate.
Available schedulers are in ``alf.utils.schedulers``.
"""
self._lr_scheduler = None
if "lr" in kwargs:
lr = kwargs["lr"]
if isinstance(lr, Callable):
self._lr_scheduler = lr
kwargs["lr"] = float(lr())
super(NewCls, self).__init__([{'params': []}], **kwargs)
self._gradient_clipping = gradient_clipping
self._clip_by_global_norm = clip_by_global_norm
self._parvi = parvi
if parvi is not None:
assert parvi in ['svgd', 'gfsf'
], ("parvi method %s is not supported." % (parvi))
self._repulsive_weight = repulsive_weight
self.name = name
if name is None:
self.name = NewClsName + str(NewCls.counter)
NewCls.counter += 1
@common.add_method(NewCls)
def step(self, closure=None):
"""This function first clips the gradients if needed, then call the
parent's ``step()`` function.
"""
if self._lr_scheduler is not None:
lr = float(self._lr_scheduler())
for param_group in self.param_groups:
param_group['lr'] = lr
if self._gradient_clipping is not None:
params = []
for param_group in self.param_groups:
params.extend(param_group["params"])
grads = alf.nest.map_structure(lambda p: p.grad, params)
if self._clip_by_global_norm:
_, global_norm = tensor_utils.clip_by_global_norm(
grads, self._gradient_clipping, in_place=True)
if alf.summary.should_record_summaries():
alf.summary.scalar("global_grad_norm/%s" % self.name,
global_norm)
else:
tensor_utils.clip_by_norms(
grads, self._gradient_clipping, in_place=True)
if self._parvi is not None:
self._parvi_step()
super(NewCls, self).step(closure=closure)
@common.add_method(NewCls)
def _parvi_step(self):
for param_group in self.param_groups:
if "parvi_grad" in param_group:
params = param_group['params']
batch_size = params[0].shape[0]
params_tensor = torch.cat(
[p.view(batch_size, -1) for p in params],
dim=-1) # [N, D], D=dim(params)
if self._parvi == 'svgd':
# [N, N], [N, N, D]
kappa, kappa_grad = _rbf_func(params_tensor)
grads_tensor = torch.cat(
[p.grad.view(batch_size, -1) for p in params],
dim=-1).detach() # [N, D]
kernel_logp = torch.matmul(kappa,
grads_tensor) / batch_size
svgd_grad = torch.split(
kernel_logp -
self._repulsive_weight * kappa_grad.mean(0),
[p.nelement() // batch_size for p in params],
dim=-1)
for i in range(len(params)):
grad = params[i].grad.view(batch_size, -1)
grad.copy_(svgd_grad[i])
else:
logq_grad = _score_func(params_tensor) # [N, D]
gfsf_grad = torch.split(
logq_grad,
[p.nelement() // batch_size for p in params],
dim=-1)
for i in range(len(params)):
grad = params[i].grad.view(batch_size, -1)
grad.add_(self._repulsive_weight * gfsf_grad[i])
@common.add_method(NewCls)
def add_param_group(self, param_group):
"""This function first splits the input param_group into multiple
param_groups according to their ``ensemble_group`` attributes, then
calls the parent's ``add_param_group()`` function to add each of
them to the optimizer.
"""
assert isinstance(param_group, dict), "param_group must be a dict"
params = param_group["params"]
if isinstance(params, torch.Tensor):
param_group['params'] = [params]
elif isinstance(params, set):
raise TypeError('Please use a list instead.')
else:
param_group['params'] = list(params)
len_params = len(param_group['params'])
std_param_group = []
ensemble_param_groups = [[] for i in range(len_params)]
group_batch_sizes = [0] * len_params
for param in param_group['params']:
if not isinstance(param, torch.Tensor):
raise TypeError("optimizer can only optimize Tensors, "
"but one of the params is " +
torch.typename(param))
if hasattr(param, 'ensemble_group'):
assert isinstance(
param.ensemble_group,
int), ("ensemble_group attribute mis-specified.")
ensemble_group_id = param.ensemble_group
if group_batch_sizes[ensemble_group_id] == 0:
group_batch_sizes[ensemble_group_id] = param.shape[0]
else:
assert param.shape[0] == group_batch_sizes[
ensemble_group_id], (
"batch_size of params does not match that of the "
"ensemble param_group %d." % (ensemble_group_id))
ensemble_param_groups[ensemble_group_id].append(param)
else:
std_param_group.append(param)
if len(alf.nest.flatten(ensemble_param_groups)) > 0:
if len(std_param_group) > 0:
super(NewCls, self).add_param_group({
'params': std_param_group
})
for ensemble_param_group in ensemble_param_groups:
if len(ensemble_param_group) > 0:
super(NewCls, self).add_param_group({
'params': ensemble_param_group,
'parvi_grad': True
})
else:
super(NewCls, self).add_param_group(param_group)
return NewCls
Adam = alf.configurable('Adam')(wrap_optimizer(torch.optim.Adam))
# TODO: uncomment this after removing `adamw.py`
#AdamW = alf.configurable('AdamW')(wrap_optimizer(torch.optim.AdamW))
AdamW = alf.configurable('AdamW')(wrap_optimizer(adamw.AdamW))
SGD = alf.configurable('SGD')(wrap_optimizer(torch.optim.SGD))
AdamTF = alf.configurable('AdamTF')(wrap_optimizer(adam_tf.AdamTF))
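# Editor's sketch (illustrative, not part of ALF): the wrapped optimizers are
# constructed with an empty param group and parameters are registered later;
# `model` below is a hypothetical nn.Module:
#   opt = Adam(lr=1e-3, gradient_clipping=1.0, clip_by_global_norm=True)
#   opt.add_param_group({'params': model.parameters()})
#   opt.step()   # clips gradients first, then takes the usual Adam step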
| [
"torch.typename",
"torch.matmul",
"torch.eye",
"torch.ones_like",
"torch.exp",
"torch.sum"
] | 1.8.1 | www2171668/alf | 6e3731fc559d3b4e6b5b9ed6251fff728a560d64 |
1.4 | import numpy as np
import cv2
import torch
import os
import sys
import random
import torch.nn as nn
import torch.utils.data as tdata
import glob
from matplotlib import pyplot as plt
sys.path.append(".")
from visualizer_dataloader import thermaldataset
def visualize(data_sample):
data = data_sample['data']
label = data_sample['label']
feetin_frame = data_sample['feetin_frame']
feetin_ts = data_sample['feetin_ts']
vid_fname = data_sample['filename']
print(vid_fname[0])
fname = f'../vid_data/{vid_fname[0]}.avi'
_, d, h, w = data.shape
out = cv2.VideoWriter(fname, cv2.VideoWriter_fourcc(*'DIVX'), 15, (w,h), isColor=True)
print(data.numpy().shape, label.shape, feetin_frame, feetin_ts)
f_count = 0
for i in range(d):
vid_i = data[0][i].numpy()
ecg_i = label[0][i].numpy().flatten()
fig, ax = plt.subplots(figsize=(2.4, 1.6))
ax.plot(ecg_i)
fig.canvas.draw()
np_plot = np.array(fig.canvas.renderer.buffer_rgba())
vid_i = cv2.cvtColor(vid_i, cv2.COLOR_GRAY2BGR)
#np_plot = cv2.cvtColor(np_plot, cv2.CV_BGRA2HSV)
#print("shape of plot and img", np_plot.shape, vid_i.shape)
vid_i[0:160,:,:] = np_plot[:,:,0:3]
if(i == feetin_frame-4): f_count = 15
if(f_count>0):
cv2.putText(vid_i, 'FeetIn Water', (160,120), cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 0, 255) ,\
2, cv2.LINE_AA)
f_count = f_count-1
plt.close()
out.write(vid_i)
out.release()
return
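# Editor's note (sketch): visualize() rasterizes the matplotlib ECG plot with
# fig.canvas.draw() + buffer_rgba(), then pastes its first three channels into
# the top 160 rows of each video frame before writing the frame with OpenCV;
# this assumes the rendered plot width matches the video width.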
#file usage : python visualizer.py ../data/test_label ../data/mat_files ../data/sync_data
if __name__=='__main__':
label_name = sys.argv[1]
ir_vid_name = sys.argv[2]
sync_sig_name = sys.argv[3]
print(label_name, ir_vid_name)
label = "{}/".format(label_name)
ir_video = "{}/".format(ir_vid_name)
print(label, ir_video)
visualize_dataset = thermaldataset(
label = "{}/".format(label_name),
ir_video = "{}/".format(ir_vid_name),
sync_sig = "{}/".format(sync_sig_name),
phase='train'
)
trainloader = torch.utils.data.DataLoader(visualize_dataset,batch_size=1,shuffle=True,num_workers=1)
for data_sample in trainloader:
try:
if(data_sample == -1):
print("Value -1 returned")
continue
except:
pass
visualize(data_sample)
| [
"torch.utils.data.DataLoader"
] | 1.4.0 | shub659/StressNet-Detecting-stress-from-thermal-videos | 89a06014ba2c456482d1d427cbac0171e477492a |
1.3 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Sequence
import pytest
import torch
from tests.text.helpers import TextTester
from tests.text.inputs import _inputs_multiple_references, _inputs_single_sentence_single_reference
from torchmetrics.functional.text.rouge import rouge_score
from torchmetrics.text.rouge import ROUGEScore
from torchmetrics.utilities.imports import _NLTK_AVAILABLE, _ROUGE_SCORE_AVAILABLE
if _ROUGE_SCORE_AVAILABLE:
from rouge_score.rouge_scorer import RougeScorer
from rouge_score.scoring import BootstrapAggregator
else:
RougeScorer, BootstrapAggregator = object, object
ROUGE_KEYS = ("rouge1", "rouge2", "rougeL", "rougeLsum")
def _compute_rouge_score(
preds: Sequence[str],
targets: Sequence[Sequence[str]],
use_stemmer: bool,
rouge_level: str,
metric: str,
accumulate: str,
):
"""Evaluates rouge scores from rouge-score package for baseline evaluation."""
if isinstance(targets, list) and all(isinstance(target, str) for target in targets):
targets = [targets] if isinstance(preds, str) else [[target] for target in targets]
if isinstance(preds, str):
preds = [preds]
if isinstance(targets, str):
targets = [[targets]]
scorer = RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)
aggregator = BootstrapAggregator()
for target_raw, pred_raw in zip(targets, preds):
list_results = [scorer.score(target, pred_raw) for target in target_raw]
aggregator_avg = BootstrapAggregator()
if accumulate == "best":
key_curr = list(list_results[0].keys())[0]
all_fmeasure = torch.tensor([v[key_curr].fmeasure for v in list_results])
highest_idx = torch.argmax(all_fmeasure).item()
aggregator.add_scores(list_results[highest_idx])
elif accumulate == "avg":
for _score in list_results:
aggregator_avg.add_scores(_score)
_score = {rouge_key: scores.mid for rouge_key, scores in aggregator_avg.aggregate().items()}
aggregator.add_scores(_score)
else:
raise ValueError(f"Got unknown accumulate value {accumulate}. Expected to be one of ['best', 'avg']")
rs_scores = aggregator.aggregate()
rs_result = getattr(rs_scores[rouge_level].mid, metric)
return rs_result
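# Editor's sketch (not part of torchmetrics): calling the baseline above with a
# single prediction and two references, keeping the best-scoring reference:
#   _compute_rouge_score(["the cat sat"], [["the cat sat", "a dog ran"]],
#                        use_stemmer=False, rouge_level="rouge1",
#                        metric="fmeasure", accumulate="best")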
@pytest.mark.skipif(not _NLTK_AVAILABLE, reason="test requires nltk")
@pytest.mark.parametrize(
["pl_rouge_metric_key", "use_stemmer"],
[
("rouge1_precision", True),
("rouge1_recall", True),
("rouge1_fmeasure", False),
("rouge2_precision", False),
("rouge2_recall", True),
("rouge2_fmeasure", True),
("rougeL_precision", False),
("rougeL_recall", False),
("rougeL_fmeasure", True),
("rougeLsum_precision", True),
("rougeLsum_recall", False),
("rougeLsum_fmeasure", False),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_multiple_references.preds, _inputs_multiple_references.targets),
],
)
@pytest.mark.parametrize("accumulate", ["avg", "best"])
class TestROUGEScore(TextTester):
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize("dist_sync_on_step", [False, True])
def test_rouge_score_class(
self, ddp, dist_sync_on_step, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate
):
metric_args = {"use_stemmer": use_stemmer, "accumulate": accumulate}
rouge_level, metric = pl_rouge_metric_key.split("_")
rouge_metric = partial(
_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=ROUGEScore,
sk_metric=rouge_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
key=pl_rouge_metric_key,
)
def test_rouge_score_functional(self, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate):
metric_args = {"use_stemmer": use_stemmer, "accumulate": accumulate}
rouge_level, metric = pl_rouge_metric_key.split("_")
rouge_metric = partial(
_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=rouge_score,
sk_metric=rouge_metric,
metric_args=metric_args,
key=pl_rouge_metric_key,
)
def test_rouge_metric_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
if not _NLTK_AVAILABLE:
with pytest.raises(
ValueError,
match="ROUGE metric requires that nltk is installed."
"Either as `pip install torchmetrics[text]` or `pip install nltk`",
):
ROUGEScore()
def test_rouge_metric_wrong_key_value_error():
key = ("rouge1", "rouge")
with pytest.raises(ValueError):
ROUGEScore(rouge_keys=key)
with pytest.raises(ValueError):
rouge_score(
_inputs_single_sentence_single_reference.preds,
_inputs_single_sentence_single_reference.targets,
rouge_keys=key,
accumulate="best",
)
| [
"torch.tensor",
"torch.argmax"
] | 1.3.1 | stancld/metrics | d35c3b5cff21e68e6620ebfc9a84e60dc4559e92 |
1.1 | from torch.utils.data import Subset
from PIL import Image
from torchvision.datasets import MNIST
from base.torchvision_dataset import TorchvisionDataset
from .preprocessing import create_semisupervised_setting
import torch
import torchvision.transforms as transforms
import random
import numpy as np
class MNIST_Dataset(TorchvisionDataset):
def __init__(self, root: str, normal_class: int = 0, known_outlier_class: int = 1, n_known_outlier_classes: int = 0,
ratio_known_normal: float = 0.0, ratio_known_outlier: float = 0.0, ratio_pollution: float = 0.0):
super().__init__(root)
# Define normal and outlier classes
self.n_classes = 2 # 0: normal, 1: outlier
        # In this variant all ten digit classes are treated as normal; anomalies
        # are generated synthetically in MyMNIST below. The original Deep-SAD
        # setup (one normal class, the rest outliers) is kept for reference:
        '''
        self.normal_classes = tuple([normal_class])
        self.outlier_classes = list(range(0, 10))
        self.outlier_classes.remove(normal_class)
        self.outlier_classes = tuple(self.outlier_classes)
        '''
        self.normal_classes = tuple(range(10))
        self.outlier_classes = []
if n_known_outlier_classes == 0:
self.known_outlier_classes = ()
elif n_known_outlier_classes == 1:
self.known_outlier_classes = tuple([known_outlier_class])
else:
self.known_outlier_classes = tuple(random.sample(self.outlier_classes, n_known_outlier_classes))
# MNIST preprocessing: feature scaling to [0, 1]
transform = transforms.ToTensor()
#target_transform = transforms.Lambda(lambda x: int(x in self.outlier_classes))
target_transform = None
# Get train set
train_set = MyMNIST(root=self.root, train=True, transform=transform, target_transform=target_transform,
download=True)
# Create semi-supervised setting
idx, _, semi_targets = create_semisupervised_setting(train_set.targets.cpu().data.numpy(), self.normal_classes,
self.outlier_classes, self.known_outlier_classes,
ratio_known_normal, ratio_known_outlier, ratio_pollution)
print("semi_targets",len(semi_targets))
print("idx",len(idx))
#train_set.semi_targets[idx] = torch.tensor(semi_targets) # set respective semi-supervised labels
# Subset train_set to semi-supervised setup
self.train_set = Subset(train_set, idx)
# Get test set
self.test_set = MyMNIST(root=self.root, train=False, transform=transform, target_transform=target_transform,
download=True)
class MyMNIST(MNIST):
"""
Torchvision MNIST class with additional targets for the semi-supervised setting and patch of __getitem__ method
to also return the semi-supervised target as well as the index of a data sample.
"""
def __init__(self, *args, **kwargs):
super(MyMNIST, self).__init__(*args, **kwargs)
self.semi_targets = torch.zeros_like(self.targets)
        self.anomaly_rate = 0.05
        self.semi_label_rate = 0.01  # note: the blocks below hardcode 50 labeled samples per group
        def get_anomaly(anomaly_data):
            """Create synthetic anomalies by swapping the top and bottom halves
            of images between two groups, producing 'stitched' digits."""
            n_anomaly = len(anomaly_data)
            dim = 28
            # Split each group into top (a1, b1) and bottom (a2, b2) halves.
            a1, a2 = anomaly_data[:n_anomaly // 2, :dim // 2, :], anomaly_data[:n_anomaly // 2, dim // 2:, :]
            b1, b2 = anomaly_data[n_anomaly // 2:, :dim // 2, :], anomaly_data[n_anomaly // 2:, dim // 2:, :]
            # Recombine mismatched halves: top of group A with bottom of group B,
            # and vice versa.
            anomaly_data1 = np.concatenate((a1, b2), axis=1)
            anomaly_data2 = np.concatenate((b1, a2), axis=1)
            anomaly_data = np.concatenate((anomaly_data1, anomaly_data2), axis=0)
            return anomaly_data
        if not self.train:
            # Test split: keep the first 9000 images as normals and turn the
            # remaining 1000 into synthetic anomalies; targets become binary
            # (0: normal, 1: anomaly).
            test_data_normal = self.test_data[:9000, :, :]
            test_data_anomaly = get_anomaly(self.test_data[9000:, :, :])
            data = np.concatenate((test_data_normal, test_data_anomaly), axis=0)
            targets = np.array([0] * len(test_data_normal) + [1] * len(test_data_anomaly))
            self.data = torch.from_numpy(data)
            self.targets = torch.from_numpy(targets)
        else:
            # Training split: pollute a fraction (anomaly_rate) of the data with
            # synthetic anomalies built from the tail of the training images.
            # Note: self.targets keeps the original digit labels on this split.
            n_train = len(self.train_data)
            n_normal = n_train - int(self.anomaly_rate * n_train)
            n_anomaly = int(self.anomaly_rate * n_train)
            normal_train = self.train_data[:n_normal, :, :]
            tobe_anomaly_train = self.train_data[n_normal:, :, :]
            print("normal_train", len(normal_train))
            print("tobe_anomaly_train", len(tobe_anomaly_train))
            anomaly_train = get_anomaly(tobe_anomaly_train)
            print("anomaly_train", len(anomaly_train))
            data = np.concatenate((normal_train, anomaly_train), axis=0)
            # Semi-supervised labels: +1 for the last 50 normals (labeled normal),
            # -1 for the first 50 anomalies (labeled anomaly), 0 for unlabeled.
            # The original line ended with `[0] * n_anomaly`, which made the array
            # 50 entries longer than the data, and stored it as `self.semi_target`,
            # which `__getitem__` never reads; both are fixed here.
            semi_target = np.array(
                [0] * (n_normal - 50) + [1] * 50 + [-1] * 50 + [0] * (n_anomaly - 50)
            )
            self.semi_targets = torch.from_numpy(semi_target)
            self.data = torch.from_numpy(data)
def __getitem__(self, index):
"""Override the original method of the MNIST class.
Args:
index (int): Index
Returns:
tuple: (image, target, semi_target, index)
"""
        img, target, semi_target = self.data[index], int(self.targets[index]), int(self.semi_targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, semi_target, index
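# Hedged usage sketch (illustrative only; the root path and defaults are assumed):
#
#   dataset = MNIST_Dataset(root='./data', normal_class=0,
#                           known_outlier_class=1, n_known_outlier_classes=0)
#   img, target, semi_target, idx = dataset.train_set[0]
#   # img: 1x28x28 float tensor in [0, 1]; target: the original digit label on
#   # the training split; semi_target: +1 labeled normal, -1 labeled anomaly,
#   # 0 unlabeled.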
| [
"torch.utils.data.Subset",
"torch.zeros_like",
"torch.from_numpy"
] | 1.1.0 | kevinwss/Deep-SAD-Baseline | b704725cc44ab5e7aa9bb09503a4c5f244fa907b |
1.0 | # MIT License
#
# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import logging
from .configuration_mobilebert import MobileBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
_CONFIG_FOR_DOC = "MobileBertConfig"
_TOKENIZER_FOR_DOC = "MobileBertTokenizer"
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["google/mobilebert-uncased"]
def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.replace("ffn_layer", "ffn")
name = name.replace("FakeLayerNorm", "LayerNorm")
name = name.replace("extra_output_weights", "dense/kernel")
name = name.replace("bert", "mobilebert")
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
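# MobileBERT replaces LayerNorm with NoNorm in most sub-layers: a per-feature
# affine transform (element-wise scale and shift) that computes no mean/variance
# statistics, which is cheaper on mobile hardware. `eps` is accepted only for
# interface compatibility with nn.LayerNorm and is ignored.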
class NoNorm(nn.Module):
def __init__(self, feat_size, eps=None):
super().__init__()
self.bias = nn.Parameter(torch.zeros(feat_size))
self.weight = nn.Parameter(torch.ones(feat_size))
def forward(self, input_tensor):
return input_tensor * self.weight + self.bias
NORM2FN = {"layer_norm": nn.LayerNorm, "no_norm": NoNorm}
class MobileBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.trigram_input = config.trigram_input
self.embedding_size = config.embedding_size
self.hidden_size = config.hidden_size
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
embed_dim_multiplier = 3 if self.trigram_input else 1
embedded_input_size = self.embedding_size * embed_dim_multiplier
self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.trigram_input:
# From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
# Devices (https://arxiv.org/abs/2004.02984)
#
# The embedding table in BERT models accounts for a substantial proportion of model size. To compress
# the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
# Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
# dimensional output.
inputs_embeds = torch.cat(
[
nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0),
inputs_embeds,
nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0),
],
dim=2,
)
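            # Illustration: for token embeddings [e_0, e_1, e_2, ...], position i
            # now holds the concatenation [e_{i+1}; e_i; e_{i-1}] (zero-padded at
            # the sequence edges), i.e. the kernel-size-3 convolution described in
            # the paper realized as a shift-and-concatenate.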
if self.trigram_input or self.embedding_size != self.hidden_size:
inputs_embeds = self.embedding_transformation(inputs_embeds)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class MobileBertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
self.value = nn.Linear(
config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
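    # transpose_for_scores reshapes (batch, seq_len, all_head_size) into
    # (batch, num_heads, seq_len, head_size) so the attention products below are
    # computed independently per head.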
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
query_tensor,
key_tensor,
value_tensor,
attention_mask=None,
head_mask=None,
output_attentions=None,
):
mixed_query_layer = self.query(query_tensor)
mixed_key_layer = self.key(key_tensor)
mixed_value_layer = self.value(value_tensor)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class MobileBertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
if not self.use_bottleneck:
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
if not self.use_bottleneck:
layer_outputs = self.dropout(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
class MobileBertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = MobileBertSelfAttention(config)
self.output = MobileBertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_mask=None,
head_mask=None,
output_attentions=None,
):
self_outputs = self.self(
query_tensor,
key_tensor,
value_tensor,
attention_mask,
head_mask,
output_attentions,
)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
attention_output = self.output(self_outputs[0], layer_input)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class MobileBertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class OutputBottleneck(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
layer_outputs = self.dropout(layer_outputs)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
class MobileBertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
if not self.use_bottleneck:
self.dropout = nn.Dropout(config.hidden_dropout_prob)
else:
self.bottleneck = OutputBottleneck(config)
def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2):
layer_output = self.dense(intermediate_states)
if not self.use_bottleneck:
layer_output = self.dropout(layer_output)
layer_output = self.LayerNorm(layer_output + residual_tensor_1)
else:
layer_output = self.LayerNorm(layer_output + residual_tensor_1)
layer_output = self.bottleneck(layer_output, residual_tensor_2)
return layer_output
class BottleneckLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
layer_input = self.dense(hidden_states)
layer_input = self.LayerNorm(layer_input)
return layer_input
class Bottleneck(nn.Module):
def __init__(self, config):
super().__init__()
self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
self.use_bottleneck_attention = config.use_bottleneck_attention
self.input = BottleneckLayer(config)
if self.key_query_shared_bottleneck:
self.attention = BottleneckLayer(config)
def forward(self, hidden_states):
# This method can return three different tuples of values. These different values make use of bottlenecks,
# which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
        # usage. These linear layers have weights that are learned during training.
        #
        # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
        # key, query, value, and "layer input" to be used by the attention layer.
        # This bottleneck is used to project the hidden states. This last layer input will be used as a residual tensor
# in the attention self output, after the attention scores have been computed.
#
# If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
# four values, three of which have been passed through a bottleneck: the query and key, passed through the same
# bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
#
# Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
# and the residual layer will be this value passed through a bottleneck.
bottlenecked_hidden_states = self.input(hidden_states)
if self.use_bottleneck_attention:
return (bottlenecked_hidden_states,) * 4
elif self.key_query_shared_bottleneck:
shared_attention_input = self.attention(hidden_states)
return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
else:
return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
class FFNOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states, residual_tensor):
layer_outputs = self.dense(hidden_states)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
class FFNLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.intermediate = MobileBertIntermediate(config)
self.output = FFNOutput(config)
def forward(self, hidden_states):
intermediate_output = self.intermediate(hidden_states)
layer_outputs = self.output(intermediate_output, hidden_states)
return layer_outputs
class MobileBertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.use_bottleneck = config.use_bottleneck
self.num_feedforward_networks = config.num_feedforward_networks
self.attention = MobileBertAttention(config)
self.intermediate = MobileBertIntermediate(config)
self.output = MobileBertOutput(config)
if self.use_bottleneck:
self.bottleneck = Bottleneck(config)
if config.num_feedforward_networks > 1:
self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=None,
):
if self.use_bottleneck:
query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
else:
query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
self_attention_outputs = self.attention(
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_mask,
head_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
s = (attention_output,)
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.num_feedforward_networks != 1:
for i, ffn_module in enumerate(self.ffn):
attention_output = ffn_module(attention_output)
s += (attention_output,)
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output, hidden_states)
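        # Note: besides the layer output and optional attentions, the tuple below
        # deliberately appends intermediate tensors (and a constant
        # torch.tensor(1000) marker), apparently for inspection/debugging;
        # MobileBertEncoder only consumes outputs[0] and, when requested,
        # outputs[1].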
outputs = (
(layer_output,)
+ outputs
+ (
torch.tensor(1000),
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_output,
intermediate_output,
)
+ s
)
return outputs
class MobileBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask,
head_mask[i],
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class MobileBertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.do_activate = config.classifier_activation
if self.do_activate:
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
if not self.do_activate:
return first_token_tensor
else:
pooled_output = self.dense(first_token_tensor)
pooled_output = torch.tanh(pooled_output)
return pooled_output
class MobileBertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class MobileBertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = MobileBertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
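    # The decoder is tied to the small (embedding_size-dim) token embeddings, so
    # its transposed weight covers only the first embedding_size rows of the
    # hidden_size-dim projection; the extra `dense` weight, of shape
    # (hidden_size - embedding_size, vocab_size), supplies the remaining rows.
    # Concatenated along dim 0 in forward() they form one full
    # (hidden_size, vocab_size) output projection.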
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
hidden_states += self.decoder.bias
return hidden_states
class MobileBertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = MobileBertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class MobileBertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = MobileBertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class MobileBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = MobileBertConfig
pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST
load_tf_weights = load_tf_weights_in_mobilebert
base_model_prefix = "mobilebert"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, (nn.LayerNorm, NoNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class MobileBertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.MobileBertForPreTraining`.
Args:
loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
MOBILEBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
MOBILEBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BertTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
MOBILEBERT_START_DOCSTRING,
)
class MobileBertModel(MobileBertPreTrainedModel):
"""
https://arxiv.org/pdf/2004.02984.pdf
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = MobileBertEmbeddings(config)
self.encoder = MobileBertEncoder(config)
self.pooler = MobileBertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, self.device
)
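        # get_extended_attention_mask broadcasts the (batch, seq_len) mask to
        # (batch, 1, 1, seq_len) and maps 1 -> 0.0 and 0 -> a large negative
        # value, so masked positions are added to the raw attention scores and
        # vanish after the softmax.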
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@add_start_docstrings(
"""
MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`next sentence prediction (classification)` head.
""",
MOBILEBERT_START_DOCSTRING,
)
class MobileBertForPreTraining(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config)
self.cls = MobileBertPreTrainingHeads(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        # resize the dense output embeddings first
self.cls.predictions.dense = self._get_resized_lm_head(
self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
)
return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Examples::
>>> from transformers import MobileBertTokenizer, MobileBertForPreTraining
>>> import torch
>>> tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
>>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return MobileBertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings("""MobileBert Model with a `language modeling` head on top. """, MOBILEBERT_START_DOCSTRING)
class MobileBertForMaskedLM(MobileBertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
self.cls = MobileBertOnlyMLMHead(config)
self.config = config
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        # resize the dense output embeddings first
self.cls.predictions.dense = self._get_resized_lm_head(
self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
)
return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class MobileBertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
@add_start_docstrings(
"""MobileBert Model with a `next sentence prediction (classification)` head on top. """,
MOBILEBERT_START_DOCSTRING,
)
class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config)
self.cls = MobileBertOnlyNSPHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see ``input_ids`` docstring) Indices should be in ``[0, 1]``.
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Examples::
>>> from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction
>>> import torch
>>> tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased')
>>> model = MobileBertForNextSentencePrediction.from_pretrained('google/mobilebert-uncased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt')
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> loss = outputs.loss
>>> logits = outputs.logits
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_score,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.mobilebert = MobileBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
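            # Infer the problem type once from the label shape/dtype when the
            # config does not pin it: a single label -> regression (MSE loss),
            # integer labels -> single-label classification (cross-entropy),
            # float multi-hot labels -> multi-label classification (BCE with
            # logits).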
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing
class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing
class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mobilebert = MobileBertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(
MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
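# --- Editorial sketch, not part of the original file: the multiple-choice
# head flattens (batch, num_choices, seq_len) into (batch * num_choices,
# seq_len) for the encoder, scores each flattened choice with a width-1
# linear layer, then views the scores back as (batch, num_choices) so
# CrossEntropyLoss selects one choice per example. Shapes are made up.
def _demo_multiple_choice_reshape():
    import torch
    batch, num_choices, seq_len, hidden = 2, 4, 8, 16
    input_ids = torch.randint(0, 100, (batch, num_choices, seq_len))
    flat_ids = input_ids.view(-1, input_ids.size(-1))  # (8, 8)
    pooled = torch.randn(flat_ids.size(0), hidden)  # stand-in for the encoder
    logits = torch.nn.Linear(hidden, 1)(pooled)  # (8, 1)
    reshaped_logits = logits.view(-1, num_choices)  # (2, 4)
    labels = torch.tensor([1, 3])
    return torch.nn.CrossEntropyLoss()(reshaped_logits, labels)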
@add_start_docstrings(
"""
MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing
class MobileBertForTokenClassification(MobileBertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.mobilebert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
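# --- Editorial sketch, not part of the original file: the "active parts"
# masking above keeps only unpadded tokens in the loss by rewriting padded
# labels to the loss function's ignore_index. A small worked example:
def _demo_active_loss_masking():
    import torch
    num_labels, seq_len = 3, 5
    logits = torch.randn(1, seq_len, num_labels)
    labels = torch.tensor([[0, 2, 1, 1, 0]])
    attention_mask = torch.tensor([[1, 1, 1, 0, 0]])  # last two are padding
    loss_fct = torch.nn.CrossEntropyLoss()
    active_loss = attention_mask.view(-1) == 1
    active_labels = torch.where(
        active_loss, labels.view(-1),
        torch.tensor(loss_fct.ignore_index).type_as(labels),
    )
    return loss_fct(logits.view(-1, num_labels), active_labels)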
| [
"torch.nn.Linear",
"torch.zeros",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.CrossEntropyLoss",
"torch.from_numpy",
"torch.ones",
"torch.tensor",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.softmax",
"torch.tanh",
"torch.nn.functional.pad",
"torch.matmul",
"torch.nn.Embedding"
] | 1.0 | Knarik1/transformers | c2a7d7280250addae38a49c31a57ddd897be2065 |
1.1 | import numpy as np
import pytest
import torch
import torch.testing
from rllib.dataset.datatypes import Observation
from rllib.dataset.utilities import stack_list_of_tuples
from rllib.util.value_estimation import discount_cumsum, discount_sum, mc_return
class TestDiscountedCumSum(object):
@pytest.fixture(params=[1, 0.99, 0.9, 0], scope="class")
def gamma(self, request):
return request.param
@pytest.fixture(
params=[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 2, 1, 0.2, 0.4]], scope="class"
)
def rewards(self, request):
return request.param
@pytest.fixture(params=[True, False], scope="class")
def batch(self, request):
return request.param
def test_correctness(self, gamma, batch):
rewards = np.array([[1, 2], [0.5, 0.3], [2, -1.2], [-0.2, 0.5]])
cum_rewards = np.array(
[
[
1 + 0.5 * gamma + 2 * gamma ** 2 - 0.2 * gamma ** 3,
2 + 0.3 * gamma - 1.2 * gamma ** 2 + 0.5 * gamma ** 3,
],
[
0.5 + 2 * gamma - 0.2 * gamma ** 2,
0.3 - 1.2 * gamma + 0.5 * gamma ** 2,
],
[2 - 0.2 * gamma, -1.2 + 0.5 * gamma],
[-0.2, 0.5],
]
)
if batch:
rewards = np.tile(np.array(rewards), (5, 1, 1))
cum_rewards = np.tile(np.array(cum_rewards), (5, 1, 1))
        assert np.allclose(cum_rewards, discount_cumsum(np.array(rewards), gamma))
torch.testing.assert_allclose(
torch.tensor(cum_rewards), discount_cumsum(torch.tensor(rewards), gamma)
)
torch.testing.assert_allclose(
torch.tensor(cum_rewards[..., 0, :], dtype=torch.get_default_dtype()),
discount_sum(torch.tensor(rewards, dtype=torch.get_default_dtype()), gamma),
)
for i in range(rewards.shape[-1]):
torch.testing.assert_allclose(
torch.tensor(cum_rewards)[..., [i]],
discount_cumsum(torch.tensor(rewards)[..., [i]], gamma),
)
torch.testing.assert_allclose(
torch.tensor(cum_rewards[..., 0, [i]], dtype=torch.get_default_dtype()),
discount_sum(
torch.tensor(rewards[..., [i]], dtype=torch.get_default_dtype()),
gamma,
),
)
def test_shape_and_type(self, rewards, gamma):
np_returns = discount_cumsum(np.atleast_2d(np.array(rewards)).T, gamma)
assert np_returns.shape == (len(rewards), 1)
assert type(np_returns) is np.ndarray
t_returns = discount_cumsum(
torch.tensor(rewards, dtype=torch.get_default_dtype()).unsqueeze(-1), gamma
)
assert t_returns.shape == torch.Size((len(rewards), 1))
assert type(t_returns) is torch.Tensor
torch.testing.assert_allclose(t_returns, np_returns)
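# --- Editorial sketch, not part of the original tests: a plain-numpy
# reference for the quantity these tests check. The backward recursion is
# c[t] = r[t] + gamma * c[t + 1] over the time axis (dim -2 in the
# (time, features) layout used above).
def _reference_discount_cumsum(rewards, gamma):
    import numpy as np
    out = np.zeros_like(rewards, dtype=float)
    running = np.zeros_like(rewards[..., -1, :], dtype=float)
    for t in range(rewards.shape[-2] - 1, -1, -1):
        running = rewards[..., t, :] + gamma * running
        out[..., t, :] = running
    return out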
class TestMCReturn(object):
@pytest.fixture(params=[1, 0.99, 0.9, 0.5, 0], scope="class")
def gamma(self, request):
return request.param
@pytest.fixture(params=[True, False], scope="class")
def value_function(self, request):
        if request.param:  # the fixture request object itself is always truthy
return lambda x: torch.tensor([0.01])
else:
return None
@pytest.fixture(params=[1, 0.1, 0], scope="class")
def entropy_reg(self, request):
return request.param
def test_correctness(self, gamma, value_function, entropy_reg):
trajectory = [
Observation(0, 0, reward=np.array([1]), done=False, entropy=0.2).to_torch(),
Observation(
0, 0, reward=np.array([0.5]), done=False, entropy=0.3
).to_torch(),
Observation(0, 0, reward=np.array([2]), done=False, entropy=0.5).to_torch(),
Observation(
0, 0, reward=np.array([-0.2]), done=False, entropy=-0.2
).to_torch(),
]
r0 = 1 + entropy_reg * 0.2
r1 = 0.5 + entropy_reg * 0.3
r2 = 2 + entropy_reg * 0.5
r3 = -0.2 - entropy_reg * 0.2
v = 0.01 if value_function is not None else 0
reward = mc_return(
stack_list_of_tuples(trajectory, 0),
gamma,
value_function=value_function,
entropy_regularization=entropy_reg,
reduction="min",
)
torch.testing.assert_allclose(
reward,
torch.tensor(
[r0 + r1 * gamma + r2 * gamma ** 2 + r3 * gamma ** 3 + v * gamma ** 4]
),
)
torch.testing.assert_allclose(
mc_return(
observation=Observation(state=0, reward=np.array([0])).to_torch(),
gamma=gamma,
value_function=value_function,
entropy_regularization=entropy_reg,
),
torch.tensor([0]),
)
| [
"torch.get_default_dtype",
"torch.tensor",
"torch.testing.assert_allclose"
] | 1.1.1 | 4kubo/rllib | 4f9f5f49916c7681675305b6c9a276b9e88c5e22 |
1.9 | """Multi-microphone components.
This library contains functions for multi-microphone signal processing.
Example
-------
>>> import torch
>>>
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT, ISTFT
>>> from speechbrain.processing.multi_mic import Covariance
>>> from speechbrain.processing.multi_mic import GccPhat, SrpPhat, Music
>>> from speechbrain.processing.multi_mic import DelaySum, Mvdr, Gev
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
>>> xs_noise_diff = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> xs_noise_diff = xs_noise_diff.unsqueeze(0)
>>> xs_noise_loc = read_audio('tests/samples/multi-mic/noise_0.70225_-0.70225_0.11704.flac')
>>> xs_noise_loc = xs_noise_loc.unsqueeze(0)
>>> fs = 16000 # sampling rate
>>> ss = xs_speech
>>> nn_diff = 0.05 * xs_noise_diff
>>> nn_loc = 0.05 * xs_noise_loc
>>> xs_diffused_noise = ss + nn_diff
>>> xs_localized_noise = ss + nn_loc
>>> # Delay-and-Sum Beamforming with GCC-PHAT localization
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> gccphat = GccPhat()
>>> delaysum = DelaySum()
>>> istft = ISTFT(sample_rate=fs)
>>> Xs = stft(xs_diffused_noise)
>>> Ns = stft(nn_diff)
>>> XXs = cov(Xs)
>>> NNs = cov(Ns)
>>> tdoas = gccphat(XXs)
>>> Ys_ds = delaysum(Xs, tdoas)
>>> ys_ds = istft(Ys_ds)
>>> # Mvdr Beamforming with SRP-PHAT localization
>>> mvdr = Mvdr()
>>> mics = torch.zeros((4,3), dtype=torch.float)
>>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])
>>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])
>>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
>>> mics[3,:] = torch.FloatTensor([+0.05, -0.05, +0.00])
>>> srpphat = SrpPhat(mics=mics)
>>> doas = srpphat(XXs)
>>> Ys_mvdr = mvdr(Xs, NNs, doas, doa_mode=True, mics=mics, fs=fs)
>>> ys_mvdr = istft(Ys_mvdr)
>>> # Mvdr Beamforming with MUSIC localization
>>> music = Music(mics=mics)
>>> doas = music(XXs)
>>> Ys_mvdr2 = mvdr(Xs, NNs, doas, doa_mode=True, mics=mics, fs=fs)
>>> ys_mvdr2 = istft(Ys_mvdr2)
>>> # GeV Beamforming
>>> gev = Gev()
>>> Xs = stft(xs_localized_noise)
>>> Ss = stft(ss)
>>> Ns = stft(nn_loc)
>>> SSs = cov(Ss)
>>> NNs = cov(Ns)
>>> Ys_gev = gev(Xs, SSs, NNs)
>>> ys_gev = istft(Ys_gev)
Authors:
* William Aris
* Francois Grondin
"""
import torch
from packaging import version
import speechbrain.processing.decomposition as eig
class Covariance(torch.nn.Module):
"""Computes the covariance matrices of the signals.
Arguments:
----------
average : bool
Informs the module if it should return an average
(computed on the time dimension) of the covariance
        matrices. The default value is True.
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT
>>> from speechbrain.processing.multi_mic import Covariance
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> xs_noise = xs_noise.unsqueeze(0)
>>> xs = xs_speech + 0.05 * xs_noise
>>> fs = 16000
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>>
>>> Xs = stft(xs)
>>> XXs = cov(Xs)
>>> XXs.shape
torch.Size([1, 1001, 201, 2, 10])
"""
def __init__(self, average=True):
super().__init__()
self.average = average
def forward(self, Xs):
""" This method uses the utility function _cov to compute covariance
matrices. Therefore, the result has the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics + n_pairs).
The order on the last dimension corresponds to the triu_indices for a
square matrix. For instance, if we have 4 channels, we get the following
order: (0, 0), (0, 1), (0, 2), (0, 3), (1, 1), (1, 2), (1, 3), (2, 2), (2, 3)
and (3, 3). Therefore, XXs[..., 0] corresponds to channels (0, 0) and XXs[..., 1]
corresponds to channels (0, 1).
Arguments:
----------
Xs : tensor
A batch of audio signals in the frequency domain.
The tensor must have the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics)
"""
XXs = Covariance._cov(Xs=Xs, average=self.average)
return XXs
@staticmethod
def _cov(Xs, average=True):
""" Computes the covariance matrices (XXs) of the signals. The result will
have the following format: (batch, time_step, n_fft/2 + 1, 2, n_mics + n_pairs).
Arguments:
----------
Xs : tensor
A batch of audio signals in the frequency domain.
The tensor must have the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics)
average : boolean
Informs the function if it should return an average
(computed on the time dimension) of the covariance
matrices. Default value is True.
"""
# Get useful dimensions
n_mics = Xs.shape[4]
# Formatting the real and imaginary parts
Xs_re = Xs[..., 0, :].unsqueeze(4)
Xs_im = Xs[..., 1, :].unsqueeze(4)
# Computing the covariance
Rxx_re = torch.matmul(Xs_re, Xs_re.transpose(3, 4)) + torch.matmul(
Xs_im, Xs_im.transpose(3, 4)
)
Rxx_im = torch.matmul(Xs_re, Xs_im.transpose(3, 4)) - torch.matmul(
Xs_im, Xs_re.transpose(3, 4)
)
# Selecting the upper triangular part of the covariance matrices
idx = torch.triu_indices(n_mics, n_mics)
XXs_re = Rxx_re[..., idx[0], idx[1]]
XXs_im = Rxx_im[..., idx[0], idx[1]]
XXs = torch.stack((XXs_re, XXs_im), 3)
# Computing the average if desired
if average is True:
n_time_frames = XXs.shape[1]
XXs = torch.mean(XXs, 1, keepdim=True)
XXs = XXs.repeat(1, n_time_frames, 1, 1, 1)
return XXs
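# --- Editorial sketch, not part of the original file: how the last dimension
# of the covariance output maps to microphone pairs. torch.triu_indices
# enumerates the upper triangle row by row, which is exactly the
# (0, 0), (0, 1), ... ordering documented in Covariance.forward.
def _demo_triu_pair_order(n_mics=4):
    import torch
    idx = torch.triu_indices(n_mics, n_mics)
    return list(zip(idx[0].tolist(), idx[1].tolist()))
    # -> [(0, 0), (0, 1), (0, 2), (0, 3), (1, 1), (1, 2), (1, 3),
    #     (2, 2), (2, 3), (3, 3)]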
class DelaySum(torch.nn.Module):
"""Performs delay and sum beamforming by using the TDOAs and
the first channel as a reference.
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT, ISTFT
>>> from speechbrain.processing.multi_mic import Covariance
>>> from speechbrain.processing.multi_mic import GccPhat, DelaySum
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
    >>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channel]
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]
>>> fs = 16000
>>> xs = xs_speech + 0.05 * xs_noise
>>>
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> gccphat = GccPhat()
>>> delaysum = DelaySum()
>>> istft = ISTFT(sample_rate=fs)
>>>
>>> Xs = stft(xs)
>>> XXs = cov(Xs)
>>> tdoas = gccphat(XXs)
>>> Ys = delaysum(Xs, tdoas)
>>> ys = istft(Ys)
"""
def __init__(self):
super().__init__()
def forward(
self,
Xs,
localization_tensor,
doa_mode=False,
mics=None,
fs=None,
c=343.0,
):
"""This method computes a steering vector by using the TDOAs/DOAs and
then calls the utility function _delaysum to perform beamforming.
The result has the following format: (batch, time_step, n_fft, 2, 1).
Arguments
---------
Xs : tensor
A batch of audio signals in the frequency domain.
The tensor must have the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics)
localization_tensor : tensor
A tensor containing either time differences of arrival (TDOAs)
(in samples) for each timestamp or directions of arrival (DOAs)
(xyz coordinates in meters). If localization_tensor represents
TDOAs, then its format is (batch, time_steps, n_mics + n_pairs).
If localization_tensor represents DOAs, then its format is
(batch, time_steps, 3)
doa_mode : bool
The user needs to set this parameter to True if localization_tensor
represents DOAs instead of TDOAs. Its default value is set to False.
mics : tensor
The cartesian position (xyz coordinates in meters) of each microphone.
The tensor must have the following format (n_mics, 3). This
parameter is only mandatory when localization_tensor represents
DOAs.
fs : int
The sample rate in Hertz of the signals. This parameter is only
mandatory when localization_tensor represents DOAs.
c : float
The speed of sound in the medium. The speed is expressed in meters
per second and the default value of this parameter is 343 m/s. This
parameter is only used when localization_tensor represents DOAs.
"""
# Get useful dimensions
n_fft = Xs.shape[2]
localization_tensor = localization_tensor.to(Xs.device)
# Convert the tdoas to taus
if doa_mode:
taus = doas2taus(doas=localization_tensor, mics=mics, fs=fs, c=c)
else:
taus = tdoas2taus(tdoas=localization_tensor)
# Generate the steering vector
As = steering(taus=taus, n_fft=n_fft)
# Apply delay and sum
Ys = DelaySum._delaysum(Xs=Xs, As=As)
return Ys
@staticmethod
def _delaysum(Xs, As):
"""Perform delay and sum beamforming. The result has
the following format: (batch, time_step, n_fft, 2, 1).
Arguments
---------
Xs : tensor
A batch of audio signals in the frequency domain.
The tensor must have the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics)
As : tensor
The steering vector to point in the direction of
the target source. The tensor must have the format
(batch, time_step, n_fft/2 + 1, 2, n_mics)
"""
# Get useful dimensions
n_mics = Xs.shape[4]
# Generate unmixing coefficients
Ws_re = As[..., 0, :] / n_mics
Ws_im = -1 * As[..., 1, :] / n_mics
# Get input signal
Xs_re = Xs[..., 0, :]
Xs_im = Xs[..., 1, :]
# Applying delay and sum
Ys_re = torch.sum((Ws_re * Xs_re - Ws_im * Xs_im), dim=3, keepdim=True)
Ys_im = torch.sum((Ws_re * Xs_im + Ws_im * Xs_re), dim=3, keepdim=True)
# Assembling the result
Ys = torch.stack((Ys_re, Ys_im), 3)
return Ys
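# --- Editorial sketch, not part of the original file: _delaysum spells out
# the complex arithmetic with separate real/imaginary planes. With native
# complex tensors (torch >= 1.8), the same operation is a conjugate-weighted
# mean across microphones; inputs here are stand-ins of shape (..., n_mics).
def _demo_delaysum_complex(Xs_cplx, As_cplx):
    import torch
    Ws = As_cplx.conj() / As_cplx.shape[-1]  # Ws_re = As_re/n, Ws_im = -As_im/n
    return torch.sum(Ws * Xs_cplx, dim=-1, keepdim=True)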
class Mvdr(torch.nn.Module):
"""Perform minimum variance distortionless response (MVDR) beamforming
by using an input signal in the frequency domain, its covariance matrices
and tdoas (to compute a steering vector).
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT, ISTFT
>>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import GccPhat, Mvdr
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channel]
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]
>>> fs = 16000
>>> xs = xs_speech + 0.05 * xs_noise
>>>
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> gccphat = GccPhat()
>>> mvdr = Mvdr()
>>> istft = ISTFT(sample_rate=fs)
>>>
>>> Xs = stft(xs)
>>> Ns = stft(xs_noise)
>>> XXs = cov(Xs)
>>> NNs = cov(Ns)
>>> tdoas = gccphat(XXs)
>>> Ys = mvdr(Xs, NNs, tdoas)
>>> ys = istft(Ys)
"""
def __init__(self, eps=1e-20):
super().__init__()
self.eps = eps
def forward(
self,
Xs,
NNs,
localization_tensor,
doa_mode=False,
mics=None,
fs=None,
c=343.0,
):
"""This method computes a steering vector before using the
utility function _mvdr to perform beamforming. The result has
the following format: (batch, time_step, n_fft, 2, 1).
Arguments
---------
Xs : tensor
A batch of audio signals in the frequency domain.
The tensor must have the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics)
NNs : tensor
The covariance matrices of the noise signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs)
localization_tensor : tensor
A tensor containing either time differences of arrival (TDOAs)
(in samples) for each timestamp or directions of arrival (DOAs)
(xyz coordinates in meters). If localization_tensor represents
TDOAs, then its format is (batch, time_steps, n_mics + n_pairs).
If localization_tensor represents DOAs, then its format is
(batch, time_steps, 3)
doa_mode : bool
The user needs to set this parameter to True if localization_tensor
represents DOAs instead of TDOAs. Its default value is set to False.
mics : tensor
The cartesian position (xyz coordinates in meters) of each microphone.
The tensor must have the following format (n_mics, 3). This
parameter is only mandatory when localization_tensor represents
DOAs.
fs : int
The sample rate in Hertz of the signals. This parameter is only
mandatory when localization_tensor represents DOAs.
c : float
The speed of sound in the medium. The speed is expressed in meters
per second and the default value of this parameter is 343 m/s. This
parameter is only used when localization_tensor represents DOAs.
"""
# Get useful dimensions
n_fft = Xs.shape[2]
localization_tensor = localization_tensor.to(Xs.device)
NNs = NNs.to(Xs.device)
if mics is not None:
mics = mics.to(Xs.device)
# Convert the tdoas to taus
if doa_mode:
taus = doas2taus(doas=localization_tensor, mics=mics, fs=fs, c=c)
else:
taus = tdoas2taus(tdoas=localization_tensor)
# Generate the steering vector
As = steering(taus=taus, n_fft=n_fft)
# Perform mvdr
Ys = Mvdr._mvdr(Xs=Xs, NNs=NNs, As=As)
return Ys
@staticmethod
def _mvdr(Xs, NNs, As, eps=1e-20):
"""Perform minimum variance distortionless response beamforming.
Arguments
---------
Xs : tensor
A batch of audio signals in the frequency domain.
The tensor must have the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics).
NNs : tensor
The covariance matrices of the noise signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
As : tensor
The steering vector to point in the direction of
the target source. The tensor must have the format
(batch, time_step, n_fft/2 + 1, 2, n_mics).
"""
# Get unique covariance values to reduce the number of computations
NNs_val, NNs_idx = torch.unique(NNs, return_inverse=True, dim=1)
# Inverse covariance matrices
NNs_inv = eig.inv(NNs_val)
# Capture real and imaginary parts, and restore time steps
NNs_inv_re = NNs_inv[..., 0][:, NNs_idx]
NNs_inv_im = NNs_inv[..., 1][:, NNs_idx]
# Decompose steering vector
AsC_re = As[..., 0, :].unsqueeze(4)
AsC_im = 1.0 * As[..., 1, :].unsqueeze(4)
AsT_re = AsC_re.transpose(3, 4)
AsT_im = -1.0 * AsC_im.transpose(3, 4)
# Project
NNs_inv_AsC_re = torch.matmul(NNs_inv_re, AsC_re) - torch.matmul(
NNs_inv_im, AsC_im
)
NNs_inv_AsC_im = torch.matmul(NNs_inv_re, AsC_im) + torch.matmul(
NNs_inv_im, AsC_re
)
# Compute the gain
alpha = 1.0 / (
torch.matmul(AsT_re, NNs_inv_AsC_re)
- torch.matmul(AsT_im, NNs_inv_AsC_im)
)
# Get the unmixing coefficients
Ws_re = torch.matmul(NNs_inv_AsC_re, alpha).squeeze(4)
Ws_im = -torch.matmul(NNs_inv_AsC_im, alpha).squeeze(4)
# Applying MVDR
Xs_re = Xs[..., 0, :]
Xs_im = Xs[..., 1, :]
Ys_re = torch.sum((Ws_re * Xs_re - Ws_im * Xs_im), dim=3, keepdim=True)
Ys_im = torch.sum((Ws_re * Xs_im + Ws_im * Xs_re), dim=3, keepdim=True)
Ys = torch.stack((Ys_re, Ys_im), -2)
return Ys
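# --- Editorial sketch, not part of the original file: _mvdr implements the
# classical closed form w = (R^-1 a) / (a^H R^-1 a) per frequency bin, with
# R the noise covariance and a the steering vector; the beamformed output is
# then w^H x. In plain complex arithmetic (torch >= 1.9, no batching):
def _demo_mvdr_weights(R, a):
    # R: (n_mics, n_mics) complex noise covariance; a: (n_mics,) steering.
    import torch
    R_inv_a = torch.linalg.solve(R, a)
    alpha = 1.0 / (a.conj() @ R_inv_a)
    return R_inv_a * alpha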
class Gev(torch.nn.Module):
"""Generalized EigenValue decomposition (GEV) Beamforming.
Example
-------
>>> from speechbrain.dataio.dataio import read_audio
>>> import torch
>>>
>>> from speechbrain.processing.features import STFT, ISTFT
>>> from speechbrain.processing.multi_mic import Covariance
>>> from speechbrain.processing.multi_mic import Gev
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_0.70225_-0.70225_0.11704.flac')
>>> xs_noise = xs_noise.unsqueeze(0)
>>> fs = 16000
>>> ss = xs_speech
>>> nn = 0.05 * xs_noise
>>> xs = ss + nn
>>>
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> gev = Gev()
>>> istft = ISTFT(sample_rate=fs)
>>>
>>> Ss = stft(ss)
>>> Nn = stft(nn)
>>> Xs = stft(xs)
>>>
>>> SSs = cov(Ss)
>>> NNs = cov(Nn)
>>>
>>> Ys = gev(Xs, SSs, NNs)
>>> ys = istft(Ys)
"""
def __init__(self):
super().__init__()
def forward(self, Xs, SSs, NNs):
""" This method uses the utility function _gev to perform generalized
eigenvalue decomposition beamforming. Therefore, the result has
the following format: (batch, time_step, n_fft, 2, 1).
Arguments
---------
Xs : tensor
A batch of audio signals in the frequency domain.
The tensor must have the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics).
SSs : tensor
The covariance matrices of the target signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
NNs : tensor
The covariance matrices of the noise signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
"""
Ys = Gev._gev(Xs=Xs, SSs=SSs, NNs=NNs)
return Ys
@staticmethod
def _gev(Xs, SSs, NNs):
""" Perform generalized eigenvalue decomposition beamforming. The result
has the following format: (batch, time_step, n_fft, 2, 1).
Arguments
---------
Xs : tensor
A batch of audio signals in the frequency domain.
The tensor must have the following format:
(batch, time_step, n_fft/2 + 1, 2, n_mics).
SSs : tensor
The covariance matrices of the target signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
NNs : tensor
The covariance matrices of the noise signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
"""
# Putting on the right device
SSs = SSs.to(Xs.device)
NNs = NNs.to(Xs.device)
# Get useful dimensions
n_mics = Xs.shape[4]
n_mics_pairs = SSs.shape[4]
# Computing the eigenvectors
SSs_NNs = torch.cat((SSs, NNs), dim=4)
SSs_NNs_val, SSs_NNs_idx = torch.unique(
SSs_NNs, return_inverse=True, dim=1
)
SSs = SSs_NNs_val[..., range(0, n_mics_pairs)]
NNs = SSs_NNs_val[..., range(n_mics_pairs, 2 * n_mics_pairs)]
NNs = eig.pos_def(NNs)
Vs, Ds = eig.gevd(SSs, NNs)
# Beamforming
F_re = Vs[..., (n_mics - 1), 0]
F_im = Vs[..., (n_mics - 1), 1]
# Normalize
F_norm = 1.0 / (
torch.sum(F_re ** 2 + F_im ** 2, dim=3, keepdim=True) ** 0.5
).repeat(1, 1, 1, n_mics)
F_re *= F_norm
F_im *= F_norm
Ws_re = F_re[:, SSs_NNs_idx]
Ws_im = F_im[:, SSs_NNs_idx]
Xs_re = Xs[..., 0, :]
Xs_im = Xs[..., 1, :]
Ys_re = torch.sum((Ws_re * Xs_re - Ws_im * Xs_im), dim=3, keepdim=True)
Ys_im = torch.sum((Ws_re * Xs_im + Ws_im * Xs_re), dim=3, keepdim=True)
# Assembling the output
Ys = torch.stack((Ys_re, Ys_im), 3)
return Ys
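# --- Editorial sketch, not part of the original file: GEV beamforming takes,
# per frequency bin, the principal generalized eigenvector of the (target,
# noise) covariance pair, i.e. the w maximizing (w^H SS w) / (w^H NN w).
# scipy is used here purely as a stand-in solver for illustration.
def _demo_gev_weights(SS, NN):
    # SS, NN: (n_mics, n_mics) complex Hermitian covariance matrices.
    import scipy.linalg
    eigvals, eigvecs = scipy.linalg.eigh(SS, NN)  # ascending eigenvalues
    return eigvecs[:, -1]  # principal generalized eigenvector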
class GccPhat(torch.nn.Module):
"""Generalized Cross-Correlation with Phase Transform localization.
Arguments
---------
tdoa_max : int
Specifies a range to search for delays. For example, if
tdoa_max = 10, the method will restrict its search for delays
between -10 and 10 samples. This parameter is optional and its
default value is None. When tdoa_max is None, the method will
search for delays between -n_fft/2 and n_fft/2 (full range).
eps : float
A small value to avoid divisions by 0 with the phase transformation.
The default value is 1e-20.
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT, ISTFT
>>> from speechbrain.processing.multi_mic import Covariance
>>> from speechbrain.processing.multi_mic import GccPhat, DelaySum
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channel]
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> xs_noise = xs_noise.unsqueeze(0) #[batch, time, channels]
>>> fs = 16000
>>> xs = xs_speech + 0.05 * xs_noise
>>>
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> gccphat = GccPhat()
>>> Xs = stft(xs)
>>> XXs = cov(Xs)
>>> tdoas = gccphat(XXs)
"""
def __init__(self, tdoa_max=None, eps=1e-20):
super().__init__()
self.tdoa_max = tdoa_max
self.eps = eps
def forward(self, XXs):
""" Perform generalized cross-correlation with phase transform localization
by using the utility function _gcc_phat and by extracting the delays (in samples)
before performing a quadratic interpolation to improve the accuracy.
The result has the format: (batch, time_steps, n_mics + n_pairs).
The order on the last dimension corresponds to the triu_indices for a
square matrix. For instance, if we have 4 channels, we get the following
order: (0, 0), (0, 1), (0, 2), (0, 3), (1, 1), (1, 2), (1, 3), (2, 2), (2, 3)
and (3, 3). Therefore, delays[..., 0] corresponds to channels (0, 0) and delays[..., 1]
corresponds to channels (0, 1).
Arguments:
----------
XXs : tensor
The covariance matrices of the input signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
"""
xxs = GccPhat._gcc_phat(XXs=XXs, eps=self.eps)
delays = GccPhat._extract_delays(xxs=xxs, tdoa_max=self.tdoa_max)
tdoas = GccPhat._interpolate(xxs=xxs, delays=delays)
return tdoas
@staticmethod
def _gcc_phat(XXs, eps=1e-20):
""" Evaluate GCC-PHAT for each timestamp. It returns the result in the time
domain. The result has the format: (batch, time_steps, n_fft, n_mics + n_pairs).
Arguments
---------
XXs : tensor
The covariance matrices of the input signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
eps : float
A small value to avoid divisions by 0 with the phase transform. The
default value is 1e-20.
"""
# Get useful dimensions
n_samples = (XXs.shape[2] - 1) * 2
# Extracting the tensors needed
XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=4)
XXs_re = XXs_val[..., 0, :]
XXs_im = XXs_val[..., 1, :]
# Applying the phase transform
XXs_abs = torch.sqrt(XXs_re ** 2 + XXs_im ** 2) + eps
XXs_re_phat = XXs_re / XXs_abs
XXs_im_phat = XXs_im / XXs_abs
XXs_phat = torch.stack((XXs_re_phat, XXs_im_phat), 4)
# Returning in the temporal domain
XXs_phat = XXs_phat.transpose(2, 3)
if version.parse(torch.__version__) >= version.parse("1.8.0"):
XXs_phat = torch.complex(XXs_phat[..., 0], XXs_phat[..., 1])
xxs = torch.fft.irfft(XXs_phat, n=n_samples)
else:
xxs = torch.irfft(XXs_phat, signal_ndim=1, signal_sizes=[n_samples])
xxs = xxs[..., XXs_idx, :]
# Formatting the output
xxs = xxs.transpose(2, 3)
return xxs
@staticmethod
def _extract_delays(xxs, tdoa_max=None):
""" Extract the rounded delays from the cross-correlation for each timestamp.
The result has the format: (batch, time_steps, n_mics + n_pairs).
Arguments
---------
xxs : tensor
The correlation signals obtained after a gcc-phat operation. The tensor
must have the format (batch, time_steps, n_fft, n_mics + n_pairs).
tdoa_max : int
Specifies a range to search for delays. For example, if
tdoa_max = 10, the method will restrict its search for delays
between -10 and 10 samples. This parameter is optional and its
default value is None. When tdoa_max is None, the method will
search for delays between -n_fft/2 and +n_fft/2 (full range).
"""
# Get useful dimensions
n_fft = xxs.shape[2]
# If no tdoa specified, cover the whole frame
if tdoa_max is None:
tdoa_max = torch.div(n_fft, 2, rounding_mode="floor")
# Splitting the GCC-PHAT values to search in the range
slice_1 = xxs[..., 0:tdoa_max, :]
slice_2 = xxs[..., -tdoa_max:, :]
xxs_sliced = torch.cat((slice_1, slice_2), 2)
# Extracting the delays in the range
_, delays = torch.max(xxs_sliced, 2)
# Adjusting the delays that were affected by the slicing
offset = n_fft - xxs_sliced.shape[2]
idx = delays >= slice_1.shape[2]
delays[idx] += offset
# Centering the delays around 0
delays[idx] -= n_fft
return delays
@staticmethod
def _interpolate(xxs, delays):
"""Perform quadratic interpolation on the cross-correlation to
improve the tdoa accuracy. The result has the format:
(batch, time_steps, n_mics + n_pairs)
Arguments
---------
xxs : tensor
The correlation signals obtained after a gcc-phat operation. The tensor
must have the format (batch, time_steps, n_fft, n_mics + n_pairs).
delays : tensor
The rounded tdoas obtained by selecting the sample with the highest
amplitude. The tensor must have the format
(batch, time_steps, n_mics + n_pairs).
"""
# Get useful dimensions
n_fft = xxs.shape[2]
# Get the max amplitude and its neighbours
tp = torch.fmod((delays - 1) + n_fft, n_fft).unsqueeze(2)
y1 = torch.gather(xxs, 2, tp).squeeze(2)
tp = torch.fmod(delays + n_fft, n_fft).unsqueeze(2)
y2 = torch.gather(xxs, 2, tp).squeeze(2)
tp = torch.fmod((delays + 1) + n_fft, n_fft).unsqueeze(2)
y3 = torch.gather(xxs, 2, tp).squeeze(2)
# Add a fractional part to the initially rounded delay
delays_frac = delays + (y1 - y3) / (2 * y1 - 4 * y2 + 2 * y3)
return delays_frac
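# --- Editorial sketch, not part of the original file: _interpolate is the
# standard parabolic-peak refinement. Fitting a parabola through the peak
# sample y2 and its neighbours y1 and y3 places the true maximum at
# delay + (y1 - y3) / (2*y1 - 4*y2 + 2*y3).
def _demo_parabolic_peak(y1, y2, y3, delay):
    return delay + (y1 - y3) / (2 * y1 - 4 * y2 + 2 * y3)
    # e.g. y1=0.5, y2=1.0, y3=0.9 shifts the integer delay by +1/3 of a sample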
class SrpPhat(torch.nn.Module):
"""Steered-Response Power with Phase Transform Localization.
Arguments
---------
mics : tensor
The cartesian coordinates (xyz) in meters of each microphone.
The tensor must have the following format (n_mics, 3).
space : string
If this parameter is set to 'sphere', the localization will
be done in 3D by searching in a sphere of possible doas. If
        it is set to 'circle', the search will be done in 2D by searching
in a circle. By default, this parameter is set to 'sphere'.
Note: The 'circle' option isn't implemented yet.
sample_rate : int
The sample rate in Hertz of the signals to perform SRP-PHAT on.
By default, this parameter is set to 16000 Hz.
speed_sound : float
The speed of sound in the medium. The speed is expressed in meters
per second and the default value of this parameter is 343 m/s.
eps : float
A small value to avoid errors like division by 0. The default value
of this parameter is 1e-20.
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT
>>> from speechbrain.processing.multi_mic import Covariance
>>> from speechbrain.processing.multi_mic import SrpPhat
>>> xs_speech = read_audio('tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac')
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> fs = 16000
>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
>>> xs_noise = xs_noise.unsqueeze(0)
>>> ss1 = xs_speech
>>> ns1 = 0.05 * xs_noise
>>> xs1 = ss1 + ns1
>>> ss2 = xs_speech
>>> ns2 = 0.20 * xs_noise
>>> xs2 = ss2 + ns2
>>> ss = torch.cat((ss1,ss2), dim=0)
>>> ns = torch.cat((ns1,ns2), dim=0)
>>> xs = torch.cat((xs1,xs2), dim=0)
>>> mics = torch.zeros((4,3), dtype=torch.float)
>>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])
>>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])
>>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> mics[3,:] = torch.FloatTensor([+0.05, -0.05, +0.00])
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> srpphat = SrpPhat(mics=mics)
>>> Xs = stft(xs)
>>> XXs = cov(Xs)
>>> doas = srpphat(XXs)
"""
def __init__(
self,
mics,
space="sphere",
sample_rate=16000,
speed_sound=343.0,
eps=1e-20,
):
super().__init__()
# Generate the doas
if space == "sphere":
self.doas = sphere()
if space == "circle":
pass
# Generate associated taus with the doas
self.taus = doas2taus(
self.doas, mics=mics, fs=sample_rate, c=speed_sound
)
# Save epsilon
self.eps = eps
def forward(self, XXs):
""" Perform SRP-PHAT localization on a signal by computing a steering
vector and then by using the utility function _srp_phat to extract the doas.
The result is a tensor containing the directions of arrival (xyz coordinates
(in meters) in the direction of the sound source). The output tensor
has the format (batch, time_steps, 3).
This localization method uses Global Coherence Field (GCF):
https://www.researchgate.net/publication/221491705_Speaker_localization_based_on_oriented_global_coherence_field
Arguments
---------
XXs : tensor
The covariance matrices of the input signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
"""
# Get useful dimensions
n_fft = XXs.shape[2]
# Generate the steering vector
As = steering(self.taus.to(XXs.device), n_fft)
# Perform srp-phat
doas = SrpPhat._srp_phat(XXs=XXs, As=As, doas=self.doas, eps=self.eps)
return doas
@staticmethod
def _srp_phat(XXs, As, doas, eps=1e-20):
"""Perform srp-phat to find the direction of arrival
of the sound source. The result is a tensor containing the directions
of arrival (xyz coordinates (in meters) in the direction of the sound source).
The output tensor has the format: (batch, time_steps, 3).
Arguments
---------
XXs : tensor
The covariance matrices of the input signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
As : tensor
            The steering vector that covers all the potential directions
of arrival. The tensor must have the format
(n_doas, n_fft/2 + 1, 2, n_mics).
doas : tensor
All the possible directions of arrival that will be scanned. The
tensor must have the format (n_doas, 3).
"""
# Putting on the right device
As = As.to(XXs.device)
doas = doas.to(XXs.device)
# Get useful dimensions
n_mics = As.shape[3]
# Get the indices for the pairs of microphones
idx = torch.triu_indices(n_mics, n_mics)
# Generate the demixing vector from the steering vector
As_1_re = As[:, :, 0, idx[0, :]]
As_1_im = As[:, :, 1, idx[0, :]]
As_2_re = As[:, :, 0, idx[1, :]]
As_2_im = As[:, :, 1, idx[1, :]]
Ws_re = As_1_re * As_2_re + As_1_im * As_2_im
Ws_im = As_1_re * As_2_im - As_1_im * As_2_re
Ws_re = Ws_re.reshape(Ws_re.shape[0], -1)
Ws_im = Ws_im.reshape(Ws_im.shape[0], -1)
# Get unique covariance values to reduce the number of computations
XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=1)
# Perform the phase transform
XXs_re = XXs_val[:, :, :, 0, :]
XXs_im = XXs_val[:, :, :, 1, :]
XXs_re = XXs_re.reshape((XXs_re.shape[0], XXs_re.shape[1], -1))
XXs_im = XXs_im.reshape((XXs_im.shape[0], XXs_im.shape[1], -1))
XXs_abs = torch.sqrt(XXs_re ** 2 + XXs_im ** 2) + eps
XXs_re_norm = XXs_re / XXs_abs
XXs_im_norm = XXs_im / XXs_abs
# Project on the demixing vectors, and keep only real part
Ys_A = torch.matmul(XXs_re_norm, Ws_re.transpose(0, 1))
Ys_B = torch.matmul(XXs_im_norm, Ws_im.transpose(0, 1))
Ys = Ys_A - Ys_B
# Get maximum points
_, doas_idx = torch.max(Ys, dim=2)
# Repeat for each frame
doas = (doas[doas_idx, :])[:, XXs_idx, :]
return doas
class Music(torch.nn.Module):
"""Multiple Signal Classification (MUSIC) localization.
Arguments
---------
mics : tensor
The cartesian coordinates (xyz) in meters of each microphone.
The tensor must have the following format (n_mics, 3).
space : string
If this parameter is set to 'sphere', the localization will
be done in 3D by searching in a sphere of possible doas. If
        it is set to 'circle', the search will be done in 2D by searching
in a circle. By default, this parameter is set to 'sphere'.
Note: The 'circle' option isn't implemented yet.
sample_rate : int
        The sample rate in Hertz of the signals to perform MUSIC on.
By default, this parameter is set to 16000 Hz.
speed_sound : float
The speed of sound in the medium. The speed is expressed in meters
per second and the default value of this parameter is 343 m/s.
eps : float
A small value to avoid errors like division by 0. The default value
of this parameter is 1e-20.
n_sig : int
An estimation of the number of sound sources. The default value is set
to one source.
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT
>>> from speechbrain.processing.multi_mic import Covariance
    >>> from speechbrain.processing.multi_mic import Music
>>> xs_speech = read_audio('tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac')
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> fs = 16000
>>> xs_speech = xs_speech.unsqueeze(0) # [batch, time, channels]
>>> xs_noise = xs_noise.unsqueeze(0)
>>> ss1 = xs_speech
>>> ns1 = 0.05 * xs_noise
>>> xs1 = ss1 + ns1
>>> ss2 = xs_speech
>>> ns2 = 0.20 * xs_noise
>>> xs2 = ss2 + ns2
>>> ss = torch.cat((ss1,ss2), dim=0)
>>> ns = torch.cat((ns1,ns2), dim=0)
>>> xs = torch.cat((xs1,xs2), dim=0)
>>> mics = torch.zeros((4,3), dtype=torch.float)
>>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])
>>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])
>>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> mics[3,:] = torch.FloatTensor([+0.05, -0.05, +0.00])
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> music = Music(mics=mics)
>>> Xs = stft(xs)
>>> XXs = cov(Xs)
>>> doas = music(XXs)
"""
def __init__(
self,
mics,
space="sphere",
sample_rate=16000,
speed_sound=343.0,
eps=1e-20,
n_sig=1,
):
super().__init__()
# Generate the doas
if space == "sphere":
self.doas = sphere()
if space == "circle":
pass
# Generate associated taus with the doas
self.taus = doas2taus(
self.doas, mics=mics, fs=sample_rate, c=speed_sound
)
# Save epsilon
self.eps = eps
# Save number of signals
self.n_sig = n_sig
def forward(self, XXs):
"""Perform MUSIC localization on a signal by computing a steering
vector and then by using the utility function _music to extract the doas.
The result is a tensor containing the directions of arrival (xyz coordinates
(in meters) in the direction of the sound source). The output tensor
has the format (batch, time_steps, 3).
Arguments
---------
XXs : tensor
The covariance matrices of the input signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
"""
# Get useful dimensions
n_fft = XXs.shape[2]
# Generate the steering vector
As = steering(self.taus.to(XXs.device), n_fft)
# Perform music
doas = Music._music(
XXs=XXs, As=As, doas=self.doas, n_sig=self.n_sig, eps=self.eps
)
return doas
@staticmethod
def _music(XXs, As, doas, n_sig, eps=1e-20):
"""Perform multiple signal classification to find the
direction of arrival of the sound source. The result
has the format: (batch, time_steps, 3).
Arguments
---------
XXs : tensor
The covariance matrices of the input signal. The tensor must
have the format (batch, time_steps, n_fft/2 + 1, 2, n_mics + n_pairs).
As : tensor
            The steering vector that covers all the potential directions
            of arrival. The tensor must have the format
(n_doas, n_fft/2 + 1, 2, n_mics).
doas : tensor
All the possible directions of arrival that will be scanned. The
tensor must have the format (n_doas, 3).
n_sig : int
The number of signals in the signal + noise subspace (default is 1).
"""
# Putting on the right device
As = As.to(XXs.device)
doas = doas.to(XXs.device)
# Collecting data
n_mics = As.shape[3]
n_doas = As.shape[0]
n_bins = As.shape[2]
svd_range = n_mics - n_sig
# Get unique values to reduce computations
XXs_val, XXs_idx = torch.unique(XXs, return_inverse=True, dim=1)
# Singular value decomposition
Us, _ = eig.svdl(XXs_val)
# Format for the projection
Us = Us.unsqueeze(2).repeat(1, 1, n_doas, 1, 1, 1, 1)
Us_re = Us[..., range(0, svd_range), 0]
Us_im = Us[..., range(0, svd_range), 1]
# Fixing the format of the steering vector
As = (
As.unsqueeze(0)
.unsqueeze(0)
.unsqueeze(6)
.permute(0, 1, 2, 3, 6, 5, 4)
)
As = As.repeat(Us.shape[0], Us.shape[1], 1, 1, 1, 1, 1)
As_re = As[..., 0]
As_im = As[..., 1]
# Applying MUSIC's formula
As_mm_Us_re = torch.matmul(As_re, Us_re) + torch.matmul(As_im, Us_im)
As_mm_Us_im = torch.matmul(As_re, Us_im) - torch.matmul(As_im, Us_re)
As_mm_Us_abs = torch.sqrt(As_mm_Us_re ** 2 + As_mm_Us_im ** 2)
As_mm_Us_sum = torch.sum(As_mm_Us_abs, dim=5)
As_As_abs = torch.sum(As_re ** 2, dim=5) + torch.sum(As_im ** 2, dim=5)
Ps = (As_As_abs / (As_mm_Us_sum + eps)).squeeze(4)
Ys = torch.sum(Ps, dim=3) / n_bins
# Get maximum points
_, doas_idx = torch.max(Ys, dim=2)
doas = (doas[doas_idx, :])[:, XXs_idx, :]
return doas
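# --- Editorial sketch, not part of the original file: MUSIC scores each
# candidate direction by how little its steering vector leaks into the noise
# subspace. With U_n the noise eigenvectors, the pseudo-spectrum is
# P(a) = (a^H a) / (a^H U_n U_n^H a); peaks indicate source directions.
def _demo_music_pseudospectrum(a, U_noise):
    # a: (n_mics,) complex steering vector; U_noise: (n_mics, k) noise basis.
    import torch
    proj = U_noise.conj().transpose(0, 1) @ a  # (k,)
    return (a.conj() @ a).real / (proj.conj() @ proj).real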
def doas2taus(doas, mics, fs, c=343.0):
"""This function converts directions of arrival (xyz coordinates
expressed in meters) in time differences of arrival (expressed in
samples). The result has the following format: (batch, time_steps, n_mics).
Arguments
---------
doas : tensor
The directions of arrival expressed with cartesian coordinates (xyz)
in meters. The tensor must have the following format: (batch, time_steps, 3).
mics : tensor
The cartesian position (xyz) in meters of each microphone.
The tensor must have the following format (n_mics, 3).
fs : int
The sample rate in Hertz of the signals.
c : float
The speed of sound in the medium. The speed is expressed in meters
per second and the default value of this parameter is 343 m/s.
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.multi_mic import sphere, doas2taus
>>> xs = read_audio('tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac')
>>> xs = xs.unsqueeze(0) # [batch, time, channels]
>>> fs = 16000
>>> mics = torch.zeros((4,3), dtype=torch.float)
>>> mics[0,:] = torch.FloatTensor([-0.05, -0.05, +0.00])
>>> mics[1,:] = torch.FloatTensor([-0.05, +0.05, +0.00])
>>> mics[2,:] = torch.FloatTensor([+0.05, +0.05, +0.00])
    >>> mics[3,:] = torch.FloatTensor([+0.05, -0.05, +0.00])
>>> doas = sphere()
>>> taus = doas2taus(doas, mics, fs)
"""
taus = (fs / c) * torch.matmul(doas.to(mics.device), mics.transpose(0, 1))
return taus
def tdoas2taus(tdoas):
""" This function selects the tdoas of each channel and put them
in a tensor. The result has the following format:
(batch, time_steps, n_mics).
Arguments:
----------
tdoas : tensor
The time difference of arrival (TDOA) (in samples) for
each timestamp. The tensor has the format
(batch, time_steps, n_mics + n_pairs).
Example
-------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT
>>> from speechbrain.processing.multi_mic import Covariance
>>> from speechbrain.processing.multi_mic import GccPhat, tdoas2taus
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> xs = xs_speech + 0.05 * xs_noise
>>> xs = xs.unsqueeze(0)
>>> fs = 16000
>>>
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> gccphat = GccPhat()
>>>
>>> Xs = stft(xs)
>>> XXs = cov(Xs)
>>> tdoas = gccphat(XXs)
>>> taus = tdoas2taus(tdoas)
"""
n_pairs = tdoas.shape[len(tdoas.shape) - 1]
n_channels = int(((1 + 8 * n_pairs) ** 0.5 - 1) / 2)
taus = tdoas[..., range(0, n_channels)]
return taus
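# --- Editorial sketch, not part of the original file: the channel-count
# formula in tdoas2taus inverts n_pairs = n*(n + 1)/2, the size of the upper
# triangle including the diagonal. Solving the quadratic for n gives
# n = (sqrt(1 + 8*n_pairs) - 1) / 2, e.g. 10 pairs -> 4 channels.
def _demo_channels_from_pairs(n_pairs=10):
    n_channels = int(((1 + 8 * n_pairs) ** 0.5 - 1) / 2)
    assert n_channels * (n_channels + 1) // 2 == n_pairs
    return n_channels  # 4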
def steering(taus, n_fft):
""" This function computes a steering vector by using the time differences
of arrival for each channel (in samples) and the number of bins (n_fft).
The result has the following format: (batch, time_step, n_fft/2 + 1, 2, n_mics).
Arguments:
----------
taus : tensor
The time differences of arrival for each channel. The tensor must have
the following format: (batch, time_steps, n_mics).
n_fft : int
        The number of bins resulting from the STFT. It is assumed that the
argument "onesided" was set to True for the STFT.
Example:
    --------
>>> import torch
>>> from speechbrain.dataio.dataio import read_audio
>>> from speechbrain.processing.features import STFT
>>> from speechbrain.processing.multi_mic import Covariance
>>> from speechbrain.processing.multi_mic import GccPhat, tdoas2taus, steering
>>>
>>> xs_speech = read_audio(
... 'tests/samples/multi-mic/speech_-0.82918_0.55279_-0.082918.flac'
... )
>>> xs_noise = read_audio('tests/samples/multi-mic/noise_diffuse.flac')
>>> xs = xs_speech + 0.05 * xs_noise
>>> xs = xs.unsqueeze(0) # [batch, time, channels]
>>> fs = 16000
>>> stft = STFT(sample_rate=fs)
>>> cov = Covariance()
>>> gccphat = GccPhat()
>>>
>>> Xs = stft(xs)
>>> n_fft = Xs.shape[2]
>>> XXs = cov(Xs)
>>> tdoas = gccphat(XXs)
>>> taus = tdoas2taus(tdoas)
>>> As = steering(taus, n_fft)
"""
# Collecting useful numbers
pi = 3.141592653589793
frame_size = int((n_fft - 1) * 2)
# Computing the different parts of the steering vector
omegas = 2 * pi * torch.arange(0, n_fft, device=taus.device) / frame_size
omegas = omegas.repeat(taus.shape + (1,))
taus = taus.unsqueeze(len(taus.shape)).repeat(
(1,) * len(taus.shape) + (n_fft,)
)
# Assembling the steering vector
a_re = torch.cos(-omegas * taus)
a_im = torch.sin(-omegas * taus)
a = torch.stack((a_re, a_im), len(a_re.shape))
a = a.transpose(len(a.shape) - 3, len(a.shape) - 1).transpose(
len(a.shape) - 3, len(a.shape) - 2
)
return a
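# --- Editorial sketch, not part of the original file: each steering entry is
# exp(-1j * omega_k * tau_m) with omega_k = 2*pi*k / frame_size, stored above
# as separate cos/sin planes. A compact complex-valued equivalent
# (requires complex tensor support, torch >= 1.8):
def _demo_steering_complex(taus, n_fft):
    # taus: (..., n_mics) fractional delays in samples.
    import math
    import torch
    frame_size = (n_fft - 1) * 2
    omegas = 2 * math.pi * torch.arange(n_fft) / frame_size
    phase = -omegas.view(-1, 1) * taus.unsqueeze(-2)  # (..., n_fft, n_mics)
    return torch.exp(1j * phase)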
def sphere(levels_count=4):
""" This function generates cartesian coordinates (xyz) for a set
of points forming a 3D sphere. The coordinates are expressed in
meters and can be used as doas. The result has the format:
(n_points, 3).
Arguments
---------
levels_count : int
A number proportional to the number of points that the user
wants to generate.
- If levels_count = 1, then the sphere will have 42 points
- If levels_count = 2, then the sphere will have 162 points
- If levels_count = 3, then the sphere will have 642 points
- If levels_count = 4, then the sphere will have 2562 points
- If levels_count = 5, then the sphere will have 10242 points
- ...
By default, levels_count is set to 4.
Example
-------
>>> import torch
>>> from speechbrain.processing.multi_mic import sphere
>>> doas = sphere()
"""
# Generate points at level 0
h = (5.0 ** 0.5) / 5.0
r = (2.0 / 5.0) * (5.0 ** 0.5)
pi = 3.141592654
pts = torch.zeros((12, 3), dtype=torch.float)
pts[0, :] = torch.FloatTensor([0, 0, 1])
pts[11, :] = torch.FloatTensor([0, 0, -1])
pts[range(1, 6), 0] = r * torch.sin(2.0 * pi * torch.arange(0, 5) / 5.0)
pts[range(1, 6), 1] = r * torch.cos(2.0 * pi * torch.arange(0, 5) / 5.0)
pts[range(1, 6), 2] = h
pts[range(6, 11), 0] = (
-1.0 * r * torch.sin(2.0 * pi * torch.arange(0, 5) / 5.0)
)
pts[range(6, 11), 1] = (
-1.0 * r * torch.cos(2.0 * pi * torch.arange(0, 5) / 5.0)
)
pts[range(6, 11), 2] = -1.0 * h
# Generate triangles at level 0
trs = torch.zeros((20, 3), dtype=torch.long)
trs[0, :] = torch.LongTensor([0, 2, 1])
trs[1, :] = torch.LongTensor([0, 3, 2])
trs[2, :] = torch.LongTensor([0, 4, 3])
trs[3, :] = torch.LongTensor([0, 5, 4])
trs[4, :] = torch.LongTensor([0, 1, 5])
trs[5, :] = torch.LongTensor([9, 1, 2])
trs[6, :] = torch.LongTensor([10, 2, 3])
trs[7, :] = torch.LongTensor([6, 3, 4])
trs[8, :] = torch.LongTensor([7, 4, 5])
trs[9, :] = torch.LongTensor([8, 5, 1])
trs[10, :] = torch.LongTensor([4, 7, 6])
trs[11, :] = torch.LongTensor([5, 8, 7])
trs[12, :] = torch.LongTensor([1, 9, 8])
trs[13, :] = torch.LongTensor([2, 10, 9])
trs[14, :] = torch.LongTensor([3, 6, 10])
trs[15, :] = torch.LongTensor([11, 6, 7])
trs[16, :] = torch.LongTensor([11, 7, 8])
trs[17, :] = torch.LongTensor([11, 8, 9])
trs[18, :] = torch.LongTensor([11, 9, 10])
trs[19, :] = torch.LongTensor([11, 10, 6])
# Generate next levels
for levels_index in range(0, levels_count):
# 0
# / \
# A---B
# / \ / \
# 1---C---2
trs_count = trs.shape[0]
subtrs_count = trs_count * 4
subtrs = torch.zeros((subtrs_count, 6), dtype=torch.long)
subtrs[0 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]
subtrs[0 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 0]
subtrs[0 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 0]
subtrs[0 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 1]
subtrs[0 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 2]
subtrs[0 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 0]
subtrs[1 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]
subtrs[1 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 1]
subtrs[1 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]
subtrs[1 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 1]
subtrs[1 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 1]
subtrs[1 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 2]
subtrs[2 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 2]
subtrs[2 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 0]
subtrs[2 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]
subtrs[2 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 2]
subtrs[2 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 2]
subtrs[2 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 2]
subtrs[3 * trs_count + torch.arange(0, trs_count), 0] = trs[:, 0]
subtrs[3 * trs_count + torch.arange(0, trs_count), 1] = trs[:, 1]
subtrs[3 * trs_count + torch.arange(0, trs_count), 2] = trs[:, 1]
subtrs[3 * trs_count + torch.arange(0, trs_count), 3] = trs[:, 2]
subtrs[3 * trs_count + torch.arange(0, trs_count), 4] = trs[:, 2]
subtrs[3 * trs_count + torch.arange(0, trs_count), 5] = trs[:, 0]
subtrs_flatten = torch.cat(
(subtrs[:, [0, 1]], subtrs[:, [2, 3]], subtrs[:, [4, 5]]), axis=0
)
subtrs_sorted, _ = torch.sort(subtrs_flatten, axis=1)
index_max = torch.max(subtrs_sorted)
subtrs_scalar = (
subtrs_sorted[:, 0] * (index_max + 1) + subtrs_sorted[:, 1]
)
unique_scalar, unique_indices = torch.unique(
subtrs_scalar, return_inverse=True
)
unique_values = torch.zeros(
(unique_scalar.shape[0], 2), dtype=unique_scalar.dtype
)
unique_values[:, 0] = torch.div(
unique_scalar, index_max + 1, rounding_mode="floor"
)
unique_values[:, 1] = unique_scalar - unique_values[:, 0] * (
index_max + 1
)
trs = torch.transpose(torch.reshape(unique_indices, (3, -1)), 0, 1)
pts = pts[unique_values[:, 0], :] + pts[unique_values[:, 1], :]
pts /= torch.repeat_interleave(
torch.unsqueeze(torch.sum(pts ** 2, axis=1) ** 0.5, 1), 3, 1
)
return pts
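# --- Editorial sketch, not part of the original file: each subdivision level
# splits every triangle into four and adds one vertex per edge, so a sphere
# built with levels_count = L has 10 * 4**L + 2 points (12, 42, 162, 642,
# 2562, ...), matching the counts listed in the docstring.
def _demo_sphere_point_count():
    for level in range(5):
        assert sphere(levels_count=level).shape[0] == 10 * 4 ** level + 2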
| [
"torch.cat",
"torch.stack",
"torch.triu_indices",
"torch.fft.irfft",
"torch.LongTensor",
"torch.sum",
"torch.reshape",
"torch.sqrt",
"torch.gather",
"torch.FloatTensor",
"torch.irfft",
"torch.div",
"torch.zeros",
"torch.cos",
"torch.max",
"torch.fmod",
"torch.complex",
"torch.matmul",
"torch.sort",
"torch.unique",
"torch.sin",
"torch.arange",
"torch.mean"
] | 1.9.0 | Chaanks/speechbrain | 6447bde54f6e3fb07fdb934ab535f17cadfbad53 |
0.4 | import random
import torch
class ImagePool():
"""This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class
Parameters:
pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
"""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_imgs = 0
self.images = []
def query(self, images):
"""Return an image from the pool.
Parameters:
images: the latest generated images from the generator
Returns images from the buffer.
        With probability 0.5, the buffer returns the input images.
        With probability 0.5, the buffer returns images previously stored in it,
        and inserts the current images into the buffer.
"""
if self.pool_size == 0: # if the buffer size is 0, do nothing
return images
return_images = []
for image in images:
image = torch.unsqueeze(image.data, 0)
if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else: # by another 50% chance, the buffer will return the current image
return_images.append(image)
return_images = torch.cat(return_images, 0) # collect all the images and return
return return_images
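# --- Editorial sketch, not part of the original file: typical use inside a
# GAN training loop, where the discriminator sees a mix of fresh and replayed
# fakes. The names netG, netD, real and criterion below are hypothetical
# placeholders, not defined in this file:
#
#   pool = ImagePool(pool_size=50)
#   fake = netG(real)
#   fake_for_D = pool.query(fake.detach())  # fresh or historical fakes
#   loss_D_fake = criterion(netD(fake_for_D), False)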
| [
"torch.cat",
"torch.unsqueeze"
] | 0.4.1 | szWingLee/pytorch-CycleGAN-and-pix2pix | 6127c7ad361ff5545beb14a8b814cb81cf369f35 |
1.1 | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
UNITER for ITM model
"""
from collections import defaultdict
import torch
from torch import nn
from torch.nn import functional as F
#from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from torch.nn import LayerNorm
from .layer import GELU
from .model import UniterPreTrainedModel, UniterModel
import numpy as np
class UniterForCaptioningMetric(UniterPreTrainedModel):
""" Finetune UNITER for Caption Evaluation
"""
def __init__(self, config, img_dim, margin=0.2):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.itm_output = nn.Linear(config.hidden_size, 2)
self.rank_output = nn.Linear(config.hidden_size, 1)
self.margin = margin
self.apply(self.init_weights)
def init_output(self):
""" need to be called after from pretrained only for the training step"""
self.rank_output.weight.data = self.itm_output.weight.data[1:, :]
self.rank_output.bias.data = self.itm_output.bias.data[1:]
def forward(self, batch, compute_loss=True, compute_step_loss=False):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.rank_output(pooled_output)
if compute_loss:
# triplet loss
rank_scores_sigmoid = torch.sigmoid(rank_scores)
sample_size = batch['sample_size']
scores = rank_scores_sigmoid.contiguous().view(-1, sample_size)
pos = scores[:, :1]
neg = scores[:, 1:]
rank_loss = torch.clamp(self.margin + neg - pos, 0)
#print("## Rank Score Sigmoid Size: ", rank_scores_sigmoid.size())
#print("## Scores size: ", scores.size())
return rank_loss, rank_scores
else:
return rank_scores
class UniterForCaptionEvaluationLinearBCE(UniterPreTrainedModel):
""" Finetune UNITER for Caption Evaluation
"""
def __init__(self, config, img_dim, margin=0.2):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.itm_output = nn.Linear(config.hidden_size, 2)
self.apply(self.init_weights)
def forward(self, batch, compute_loss=True):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
ce_scores = self.itm_output(pooled_output)
if compute_loss:
targets = batch['targets']
ce_loss = F.binary_cross_entropy_with_logits(
ce_scores, targets, reduction='none')
return ce_loss
else:
return ce_scores
class UniterForCaptionEvaluationLinearRank(UniterPreTrainedModel):
""" Finetune UNITER for Caption Evaluation
"""
def __init__(self, config, img_dim, margin=0.2):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
self.itm_output = nn.Linear(config.hidden_size, 2)
self.rank_output = nn.Linear(config.hidden_size, 1)
self.margin = margin
self.apply(self.init_weights)
def forward(self, batch, compute_loss=True, is_val=False):
batch = defaultdict(lambda: None, batch)
input_ids = batch['input_ids']
position_ids = batch['position_ids']
img_feat = batch['img_feat']
img_pos_feat = batch['img_pos_feat']
attention_mask = batch['attn_masks']
gather_index = batch['gather_index']
sequence_output = self.uniter(input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index,
output_all_encoded_layers=False)
pooled_output = self.uniter.pooler(sequence_output)
rank_scores = self.rank_output(pooled_output)
        if compute_loss:
            rank_scores_sigmoid = torch.sigmoid(rank_scores)
            if is_val:
                # validation batches carry no (pos, neg) grouping, so no triplet
                # loss can be formed; fall back to returning the sigmoid scores
                rank_loss = rank_scores_sigmoid
            else:
                # triplet loss
                sample_size = batch['sample_size']
                scores = rank_scores_sigmoid.contiguous().view(-1, sample_size)
                pos = scores[:, :1]
                neg = scores[:, 1:]
                rank_loss = torch.clamp(self.margin + neg - pos, 0)
            return rank_loss, rank_scores
else:
return rank_scores
| [
"torch.nn.Linear",
"torch.sigmoid",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.clamp"
] | 1.1.0 | david-yoon/UMIC | 0dfab17d826e65ae3cb112e3da300772b168776f |
1.2 | #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import re
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
from ogb.linkproppred import LinkPropPredDataset, Evaluator
from collections import defaultdict
from tqdm import tqdm
import time
from tensorboardX import SummaryWriter
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Training and Testing Knowledge Graph Embedding Models',
usage='train.py [<args>] [-h | --help]'
)
parser.add_argument('--cuda', action='store_true', help='use GPU')
parser.add_argument('--meta_dict', type=str, default='', help='name of dictionary')
parser.add_argument('--do_train', action='store_true')
parser.add_argument('--do_valid', action='store_true')
parser.add_argument('--do_test', action='store_true')
parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
parser.add_argument('--dataset', type=str, default='ogbl-wikikg2', help='dataset name, default to wikikg2')
parser.add_argument('--split', default='', type=str)
parser.add_argument('--add_random_fraction', default=0.0, type=float, help='add N*arg random edges to train, all of a new edge type')
parser.add_argument('--seed', default=1234, type=int)
parser.add_argument('--model', default='TransE', type=str)
parser.add_argument('-de', '--double_entity_embedding', action='store_true')
parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
parser.add_argument('-d', '--hidden_dim', default=500, type=int)
parser.add_argument('-g', '--gamma', default=12.0, type=float)
parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
parser.add_argument('-b', '--batch_size', default=1024, type=int)
parser.add_argument('-r', '--regularization', default=0.0, type=float)
parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
parser.add_argument('--uni_weight', action='store_true',
help='Otherwise use subsampling weighting like in word2vec')
parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
parser.add_argument('-save', '--save_path', default=None, type=str)
parser.add_argument('--max_steps', default=100000, type=int)
parser.add_argument('--warm_up_steps', default=None, type=int)
parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
parser.add_argument('--valid_steps', default=10000, type=int)
parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')
parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--print_on_screen', action='store_true', help='log on screen or not')
parser.add_argument('--ntriples_eval_train', type=int, default=200000, help='number of training triples to evaluate eventually')
parser.add_argument('--neg_size_eval_train', type=int, default=500, help='number of negative samples when evaluating training triples')
parser.add_argument('--test_random_sample', action='store_true' )
parser.add_argument('--dump_train', action='store_true' )
parser.add_argument('--extra_test_statistics', action='store_true' )
return parser.parse_args(args)
def override_config(args):
'''
Override model and data configuration
'''
with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:
argparse_dict = json.load(fjson)
# args.dataset = argparse_dict['dataset']
args.model = argparse_dict['model']
args.double_entity_embedding = argparse_dict['double_entity_embedding']
args.double_relation_embedding = argparse_dict['double_relation_embedding']
args.hidden_dim = argparse_dict['hidden_dim']
args.test_batch_size = argparse_dict['test_batch_size']
args.gamma = argparse_dict['gamma']
def save_model(model, optimizer, save_variable_list, args):
'''
Save the parameters of the model and the optimizer,
as well as some other variables such as step and learning_rate
'''
argparse_dict = vars(args)
with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
json.dump(argparse_dict, fjson)
torch.save({
**save_variable_list,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()},
os.path.join(args.save_path, 'checkpoint')
)
entity_embedding = model.entity_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'entity_embedding'),
entity_embedding
)
relation_embedding = model.relation_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'relation_embedding'),
relation_embedding
)
def set_logger(args):
'''
Write logs to checkpoint and console
'''
if args.do_train:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')
else:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'test.log')
print( 'Starting logging to ', log_file )
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='w'
)
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics, writer):
'''
Print the evaluation logs
'''
for metric in metrics:
logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))
writer.add_scalar("_".join([mode, metric]), metrics[metric], step)
def append_rng( args, nentity, set_relation, train_triples ):
import networkx as nx
edge_probability = args.add_random_fraction*len(train_triples['head'])/nentity/nentity
logging.info('add_random_fraction (* # train edges): %f' % args.add_random_fraction)
logging.info('edge_probability: %.8f' % edge_probability)
logging.info('seed: %d' % args.seed)
g = nx.fast_gnp_random_graph(nentity, edge_probability, seed=args.seed, directed=True)
edges = np.array(g.edges)
return { 'head': np.concatenate([train_triples['head'],edges[:,0]]),
'tail': np.concatenate([train_triples['tail'],edges[:,1]]),
'relation': np.concatenate([train_triples['relation'],np.full(edges.shape[0],set_relation)]) }
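
# --- Hedged worked example (illustrative, not from the repo): the edge probability
# computed in append_rng. A directed G(N, p) random graph has about p * N^2 expected
# edges, so p = fraction * |E_train| / N^2 yields roughly fraction * |E_train| new edges.
def _expected_random_edges(fraction, n_train_edges, n_entities):
    p = fraction * n_train_edges / n_entities / n_entities
    return p * n_entities * n_entities  # e.g. fraction=0.1 adds ~0.1 * |E_train| edges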
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
raise ValueError('one of train/val/test mode must be chosen')
if args.init_checkpoint:
override_config(args)
run_label = ''
if args.save_path and args.save_path[-1]=='-':
( run_label, args.save_path ) = ( args.save_path, None )
    if args.save_path is None:
args.save_path = 'log/%s/%s/%s%s-%s/%s'%(args.dataset, args.model, run_label, args.hidden_dim, args.gamma, time.time())
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
if args.meta_dict=='':
meta = 'dataset_' + re.sub('-','_',args.dataset) + '/meta_dict.pt'
if os.path.exists(meta):
args.meta_dict = meta
if args.meta_dict!='':
meta_dict = torch.load(args.meta_dict)
print( meta_dict )
dataset = LinkPropPredDataset(name = args.dataset, meta_dict=meta_dict)
else:
meta_dict = None
dataset = LinkPropPredDataset(name = args.dataset)
if args.split!='':
split_dict = dataset.get_edge_split(split_type=args.split)
else:
split_dict = dataset.get_edge_split()
nentity = int(dataset.graph['num_nodes'])
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset, meta_info=meta_dict)
args.nentity = nentity
args.nrelation = nrelation
if args.add_random_fraction>0:
nrelation += 1
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
if args.split!='':
logging.info('Split: %s' % args.split)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
if args.add_random_fraction>0:
train_triples = append_rng( args, nentity, nrelation-1, train_triples )
if args.dump_train:
for i in range(train_triples['head'].shape[0]):
print( train_triples['head'][i],train_triples['relation'][i],train_triples['tail'][i], sep=',' )
exit(0)
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
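    # Frequency counts below feed word2vec-style subsampling weights: counts start
    # at 4 as a smoothing constant, and (tail, -relation-1) keys the inverse
    # direction of a relation so forward and inverse counts never collide.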
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
kge_model = KGEModel(
model_name=args.model,
nentity=nentity,
nrelation=nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding,
evaluator=evaluator
)
logging.info('Model Parameter Configuration:')
for name, param in kge_model.named_parameters():
logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
if args.cuda:
kge_model = kge_model.cuda()
if args.do_train:
# Set training dataloader iterator
train_dataloader_head = DataLoader(
TrainDataset(train_triples, nentity, nrelation,
args.negative_sample_size, 'head-batch',
train_count, train_true_head, train_true_tail),
batch_size=args.batch_size,
shuffle=True,
num_workers=max(1, args.cpu_num//2),
collate_fn=TrainDataset.collate_fn
)
train_dataloader_tail = DataLoader(
TrainDataset(train_triples, nentity, nrelation,
args.negative_sample_size, 'tail-batch',
train_count, train_true_head, train_true_tail),
batch_size=args.batch_size,
shuffle=True,
num_workers=max(1, args.cpu_num//2),
collate_fn=TrainDataset.collate_fn
)
train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
# Set training configuration
current_learning_rate = args.learning_rate
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, kge_model.parameters()),
lr=current_learning_rate
)
if args.warm_up_steps:
warm_up_steps = args.warm_up_steps
else:
warm_up_steps = args.max_steps // 2
if args.init_checkpoint:
# Restore model from checkpoint directory
logging.info('Loading checkpoint %s...' % args.init_checkpoint)
checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
init_step = checkpoint['step']
kge_model.load_state_dict(checkpoint['model_state_dict'])
if args.do_train:
current_learning_rate = checkpoint['current_learning_rate']
warm_up_steps = checkpoint['warm_up_steps']
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
else:
        logging.info('Randomly Initializing %s Model...' % args.model)
init_step = 0
step = init_step
logging.info('Start Training...')
logging.info('init_step = %d' % init_step)
logging.info('batch_size = %d' % args.batch_size)
logging.info('hidden_dim = %d' % args.hidden_dim)
logging.info('gamma = %f' % args.gamma)
logging.info('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling))
if args.negative_adversarial_sampling:
logging.info('adversarial_temperature = %f' % args.adversarial_temperature)
# Set valid dataloader as it would be evaluated during training
if args.do_train:
logging.info('learning_rate = %f' % current_learning_rate)
training_logs = []
#Training Loop
for step in range(init_step, args.max_steps):
log = kge_model.train_step(kge_model, optimizer, train_iterator, args)
training_logs.append(log)
if step >= warm_up_steps:
current_learning_rate = current_learning_rate / 10
logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, kge_model.parameters()),
lr=current_learning_rate
)
warm_up_steps = warm_up_steps * 3
if step % args.save_checkpoint_steps == 0 and step > 0: # ~ 41 seconds/saving
save_variable_list = {
'step': step,
'current_learning_rate': current_learning_rate,
'warm_up_steps': warm_up_steps
}
save_model(kge_model, optimizer, save_variable_list, args)
if step % args.log_steps == 0:
metrics = {}
for metric in training_logs[0].keys():
metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)
log_metrics('Train', step, metrics, writer)
training_logs = []
if args.do_valid and step % args.valid_steps == 0 and step > 0:
logging.info('Evaluating on Valid Dataset...')
metrics = kge_model.test_step(kge_model, valid_triples, args, random_sampling=args.test_random_sample)
log_metrics('Valid', step, metrics, writer)
save_variable_list = {
'step': step,
'current_learning_rate': current_learning_rate,
'warm_up_steps': warm_up_steps
}
save_model(kge_model, optimizer, save_variable_list, args)
if args.do_valid:
logging.info('Evaluating on Valid Dataset...')
metrics = kge_model.test_step(kge_model, valid_triples, args, random_sampling=args.test_random_sample)
log_metrics('Valid', step, metrics, writer)
if args.do_test:
logging.info('Evaluating on Test Dataset...')
metrics = kge_model.test_step(kge_model, test_triples, args, random_sampling=args.test_random_sample)
log_metrics('Test', step, metrics, writer)
if args.evaluate_train:
logging.info('Evaluating on Training Dataset...')
small_train_triples = {}
indices = np.random.choice(len(train_triples['head']), args.ntriples_eval_train, replace=False)
for i in train_triples:
small_train_triples[i] = train_triples[i][indices]
metrics = kge_model.test_step(kge_model, small_train_triples, args, random_sampling=True)
log_metrics('Train', step, metrics, writer)
if __name__ == '__main__':
main(parse_args())
| [
"torch.load"
] | 1.2.0 | BU-Lisp/ogb | 882786c0b71f5c836275c03b8554ad919bfe34e4 |
1.2 | import torch
from torch_geometric.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
from torchvision import transforms
from gnn import GNN
from tqdm import tqdm
import argparse
import time
import numpy as np
import pandas as pd
import os
### importing OGB
from ogb.graphproppred import PygGraphPropPredDataset, Evaluator
### importing utils
from utils import ASTNodeEncoder, get_vocab_mapping
### for data transform
from utils import augment_edge, encode_y_to_arr, decode_arr_to_seq
multicls_criterion = torch.nn.CrossEntropyLoss()
def train(model, device, loader, optimizer):
model.train()
loss_accum = 0
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
if batch.x.shape[0] == 1 or batch.batch[-1] == 0:
pass
else:
pred_list = model(batch)
optimizer.zero_grad()
loss = 0
for i in range(len(pred_list)):
loss += multicls_criterion(pred_list[i].to(torch.float32), batch.y_arr[:,i])
loss = loss / len(pred_list)
loss.backward()
optimizer.step()
loss_accum += loss.item()
print('Average training loss: {}'.format(loss_accum / (step + 1)))
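
# --- Hedged sketch (illustrative, not from the repo): the per-position sequence
# loss averaged in train() above. pred_list holds one (batch, vocab) logit tensor
# per output position, and the per-position cross-entropies are averaged.
def _toy_seq_loss(pred_list, y_arr):
    loss = sum(multicls_criterion(p.to(torch.float32), y_arr[:, i])
               for i, p in enumerate(pred_list))
    return loss / len(pred_list)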
def eval(model, device, loader, evaluator, arr_to_seq):
model.eval()
seq_ref_list = []
seq_pred_list = []
for step, batch in enumerate(tqdm(loader, desc="Iteration")):
batch = batch.to(device)
if batch.x.shape[0] == 1:
pass
else:
with torch.no_grad():
pred_list = model(batch)
mat = []
for i in range(len(pred_list)):
mat.append(torch.argmax(pred_list[i], dim = 1).view(-1,1))
mat = torch.cat(mat, dim = 1)
seq_pred = [arr_to_seq(arr) for arr in mat]
# PyG = 1.4.3
# seq_ref = [batch.y[i][0] for i in range(len(batch.y))]
# PyG >= 1.5.0
seq_ref = [batch.y[i] for i in range(len(batch.y))]
seq_ref_list.extend(seq_ref)
seq_pred_list.extend(seq_pred)
input_dict = {"seq_ref": seq_ref_list, "seq_pred": seq_pred_list}
return evaluator.eval(input_dict)
def main():
# Training settings
parser = argparse.ArgumentParser(description='GNN baselines on ogbg-code data with Pytorch Geometrics')
parser.add_argument('--device', type=int, default=0,
help='which gpu to use if any (default: 0)')
parser.add_argument('--gnn', type=str, default='gcn-virtual',
help='GNN gin, gin-virtual, or gcn, or gcn-virtual (default: gcn-virtual)')
parser.add_argument('--drop_ratio', type=float, default=0,
help='dropout ratio (default: 0)')
parser.add_argument('--max_seq_len', type=int, default=5,
help='maximum sequence length to predict (default: 5)')
parser.add_argument('--num_vocab', type=int, default=5000,
help='the number of vocabulary used for sequence prediction (default: 5000)')
parser.add_argument('--num_layer', type=int, default=5,
help='number of GNN message passing layers (default: 5)')
parser.add_argument('--emb_dim', type=int, default=300,
help='dimensionality of hidden units in GNNs (default: 300)')
parser.add_argument('--batch_size', type=int, default=128,
help='input batch size for training (default: 128)')
parser.add_argument('--epochs', type=int, default=30,
help='number of epochs to train (default: 30)')
parser.add_argument('--num_workers', type=int, default=0,
help='number of workers (default: 0)')
parser.add_argument('--dataset', type=str, default="ogbg-code",
help='dataset name (default: ogbg-code)')
parser.add_argument('--filename', type=str, default="",
help='filename to output result (default: )')
args = parser.parse_args()
device = torch.device("cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
### automatic dataloading and splitting
dataset = PygGraphPropPredDataset(name = args.dataset)
seq_len_list = np.array([len(seq) for seq in dataset.data.y])
    print('Target sequence length <= {}: {:.1f}% of data.'.format(args.max_seq_len, 100.0 * np.sum(seq_len_list <= args.max_seq_len) / len(seq_len_list)))
split_idx = dataset.get_idx_split()
# print(split_idx['train'])
# print(split_idx['valid'])
# print(split_idx['test'])
# train_method_name = [' '.join(dataset.data.y[i]) for i in split_idx['train']]
# valid_method_name = [' '.join(dataset.data.y[i]) for i in split_idx['valid']]
# test_method_name = [' '.join(dataset.data.y[i]) for i in split_idx['test']]
# print('#train')
# print(len(train_method_name))
# print('#valid')
# print(len(valid_method_name))
# print('#test')
# print(len(test_method_name))
# train_method_name_set = set(train_method_name)
# valid_method_name_set = set(valid_method_name)
# test_method_name_set = set(test_method_name)
# # unique method name
# print('#unique train')
# print(len(train_method_name_set))
# print('#unique valid')
# print(len(valid_method_name_set))
# print('#unique test')
# print(len(test_method_name_set))
# # unique valid/test method name
# print('#valid unseen during training')
# print(len(valid_method_name_set - train_method_name_set))
# print('#test unseen during training')
# print(len(test_method_name_set - train_method_name_set))
    ### building vocabulary for sequence prediction. Only use training data.
vocab2idx, idx2vocab = get_vocab_mapping([dataset.data.y[i] for i in split_idx['train']], args.num_vocab)
# test encoder and decoder
# for data in dataset:
# # PyG >= 1.5.0
# print(data.y)
#
# # PyG 1.4.3
# # print(data.y[0])
# data = encode_y_to_arr(data, vocab2idx, args.max_seq_len)
# print(data.y_arr[0])
# decoded_seq = decode_arr_to_seq(data.y_arr[0], idx2vocab)
# print(decoded_seq)
# print('')
## test augment_edge
# data = dataset[2]
# print(data)
# data_augmented = augment_edge(data)
# print(data_augmented)
### set the transform function
# augment_edge: add next-token edge as well as inverse edges. add edge attributes.
# encode_y_to_arr: add y_arr to PyG data object, indicating the array representation of a sequence.
dataset.transform = transforms.Compose([augment_edge, lambda data: encode_y_to_arr(data, vocab2idx, args.max_seq_len)])
### automatic evaluator. takes dataset name as input
evaluator = Evaluator(args.dataset)
train_loader = DataLoader(dataset[split_idx["train"]], batch_size=args.batch_size, shuffle=True, num_workers = args.num_workers)
valid_loader = DataLoader(dataset[split_idx["valid"]], batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
test_loader = DataLoader(dataset[split_idx["test"]], batch_size=args.batch_size, shuffle=False, num_workers = args.num_workers)
nodetypes_mapping = pd.read_csv(os.path.join(dataset.root, 'mapping', 'typeidx2type.csv.gz'))
nodeattributes_mapping = pd.read_csv(os.path.join(dataset.root, 'mapping', 'attridx2attr.csv.gz'))
### Encoding node features into emb_dim vectors.
### The following three node features are used.
# 1. node type
# 2. node attribute
# 3. node depth
node_encoder = ASTNodeEncoder(args.emb_dim, num_nodetypes = len(nodetypes_mapping['type']), num_nodeattributes = len(nodeattributes_mapping['attr']), max_depth = 20)
if args.gnn == 'gin':
model = GNN(num_vocab = len(vocab2idx), max_seq_len = args.max_seq_len, node_encoder = node_encoder, num_layer = args.num_layer, gnn_type = 'gin', emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
elif args.gnn == 'gin-virtual':
model = GNN(num_vocab = len(vocab2idx), max_seq_len = args.max_seq_len, node_encoder = node_encoder, num_layer = args.num_layer, gnn_type = 'gin', emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
elif args.gnn == 'gcn':
model = GNN(num_vocab = len(vocab2idx), max_seq_len = args.max_seq_len, node_encoder = node_encoder, num_layer = args.num_layer, gnn_type = 'gcn', emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = False).to(device)
elif args.gnn == 'gcn-virtual':
model = GNN(num_vocab = len(vocab2idx), max_seq_len = args.max_seq_len, node_encoder = node_encoder, num_layer = args.num_layer, gnn_type = 'gcn', emb_dim = args.emb_dim, drop_ratio = args.drop_ratio, virtual_node = True).to(device)
else:
raise ValueError('Invalid GNN type')
optimizer = optim.Adam(model.parameters(), lr=0.001)
valid_curve = []
test_curve = []
train_curve = []
for epoch in range(1, args.epochs + 1):
print("=====Epoch {}".format(epoch))
print('Training...')
train(model, device, train_loader, optimizer)
print('Evaluating...')
train_perf = eval(model, device, train_loader, evaluator, arr_to_seq = lambda arr: decode_arr_to_seq(arr, idx2vocab))
valid_perf = eval(model, device, valid_loader, evaluator, arr_to_seq = lambda arr: decode_arr_to_seq(arr, idx2vocab))
test_perf = eval(model, device, test_loader, evaluator, arr_to_seq = lambda arr: decode_arr_to_seq(arr, idx2vocab))
print({'Train': train_perf, 'Validation': valid_perf, 'Test': test_perf})
train_curve.append(train_perf[dataset.eval_metric])
valid_curve.append(valid_perf[dataset.eval_metric])
test_curve.append(test_perf[dataset.eval_metric])
print('F1')
best_val_epoch = np.argmax(np.array(valid_curve))
best_train = max(train_curve)
print('Finished training!')
print('Best validation score: {}'.format(valid_curve[best_val_epoch]))
print('Test score: {}'.format(test_curve[best_val_epoch]))
if not args.filename == '':
result_dict = {'Val': valid_curve[best_val_epoch], 'Test': test_curve[best_val_epoch], 'Train': train_curve[best_val_epoch], 'BestTrain': best_train}
torch.save(result_dict, args.filename)
if __name__ == "__main__":
main() | [
"torch.device",
"torch.cat",
"torch.argmax",
"torch.save",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.CrossEntropyLoss"
] | 1.2.0 | BU-Lisp/ogb | 1d6dde8080261931bc6ce2491e9149298af1ea98 |
1.7 | # -*- coding:utf-8 -*-
"""
Utilities to handel graph data
"""
import os
import dgl
import pickle
import numpy as np
import torch as th
from ogb.nodeproppred import DglNodePropPredDataset
def load_dgl_graph(base_path):
"""
    Read the preprocessed graph, feature and label files, and build the corresponding data structures for the training code.
:param base_path:
:return:
"""
graphs, _ = dgl.load_graphs(os.path.join(base_path, 'graph.bin'))
graph = graphs[0]
print('################ Graph info: ###############', flush=True)
print(graph)
with open(os.path.join(base_path, 'labels.pkl'), 'rb') as f:
label_data = pickle.load(f)
labels = th.from_numpy(label_data['label'])
tr_label_idx = label_data['tr_label_idx']
val_label_idx = label_data['val_label_idx']
test_label_idx = label_data['test_label_idx']
print('################ Label info: ################', flush=True)
print('Total labels (including not labeled): {}'.format(labels.shape[0]), flush=True)
print(' Training label number: {}'.format(tr_label_idx.shape[0]), flush=True)
print(' Validation label number: {}'.format(val_label_idx.shape[0]), flush=True)
print(' Test label number: {}'.format(test_label_idx.shape[0]), flush=True)
# get node features
features = np.load(os.path.join(base_path, 'features.npy'))
node_feat = th.from_numpy(features).float()
print('################ Feature info: ###############', flush=True)
print('Node\'s feature shape:{}'.format(node_feat.shape), flush=True)
return graph, labels, tr_label_idx, val_label_idx, test_label_idx, node_feat
def load_dgl_ogb_graph(base_path):
"""
    Load the ogbn-papers100M dataset via OGB and return the graph, labels, split indices and node features.
:param base_path:
:return:
"""
    dgldataset = DglNodePropPredDataset('ogbn-papers100M', root='../dataset/ogbn_papers100M')  # root path may need adjusting
graph, labels = dgldataset[0]
# srcs, dsts = graph.all_edges()
# graph.add_edges(dsts, srcs)
labels = labels.view(-1).type(th.long)
splitted_idx = dgldataset.get_idx_split()
train_idx, val_idx, test_idx = splitted_idx["train"], splitted_idx["valid"], splitted_idx["test"]
node_feat = graph.ndata['feat']
return graph, labels, train_idx, val_idx, test_idx, node_feat
def time_diff(t_end, t_start):
"""
    Compute the time difference. t_end and t_start are datetime objects, so their difference is a timedelta.
Parameters
----------
t_end
t_start
Returns
-------
"""
diff_sec = (t_end - t_start).seconds
diff_min, rest_sec = divmod(diff_sec, 60)
diff_hrs, rest_min = divmod(diff_min, 60)
return (diff_hrs, rest_min, rest_sec)
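
# --- Hedged usage sketch (not part of the original file):
if __name__ == '__main__':
    from datetime import datetime
    # two timestamps 3 hours, 42 minutes and 5 seconds apart
    assert time_diff(datetime(2021, 1, 1, 13, 42, 5),
                     datetime(2021, 1, 1, 10, 0, 0)) == (3, 42, 5)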
| [
"torch.from_numpy"
] | 1.7.1 | ytchx1999/MAXP_DGL_Graph | 01ea0dc3e6f957b8c7a9b6958df02559f1866b32 |
1.1 | from typing import List, Callable, Tuple, Dict
import warnings
import torch
from allennlp.common.checks import ConfigurationError
StateType = Dict[str, torch.Tensor] # pylint: disable=invalid-name
StepFunctionType = Callable[[torch.Tensor, StateType], Tuple[torch.Tensor, StateType]] # pylint: disable=invalid-name
class CoverageBeamSearch:
"""
Implements the beam search algorithm for decoding the most likely sequences.
Parameters
----------
end_index : ``int``
The index of the "stop" or "end" token in the target vocabulary.
max_steps : ``int``, optional (default = 50)
The maximum number of decoding steps to take, i.e. the maximum length
of the predicted sequences.
beam_size : ``int``, optional (default = 10)
The width of the beam used.
per_node_beam_size : ``int``, optional (default = beam_size)
The maximum number of candidates to consider per node, at each step in the search.
If not given, this just defaults to ``beam_size``. Setting this parameter
to a number smaller than ``beam_size`` may give better results, as it can introduce
more diversity into the search. See `Beam Search Strategies for Neural Machine Translation.
Freitag and Al-Onaizan, 2017 <http://arxiv.org/abs/1702.01806>`_.
"""
def __init__(self,
end_index: int,
max_steps: int = 50,
beam_size: int = 10,
per_node_beam_size: int = None) -> None:
self._end_index = end_index
self.max_steps = max_steps
self.beam_size = beam_size
self.per_node_beam_size = per_node_beam_size or beam_size
# self.per_node_beam_size = 1
    def search(self,
               start_predictions: torch.Tensor,
               start_state: StateType,
               step: StepFunctionType) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Given a starting state and a step function, apply beam search to find the
most likely target sequences.
Notes
-----
If your step function returns ``-inf`` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
sequences returned may also have ``-inf`` log probability. Specifically
this happens when the beam size is smaller than the number of actions
with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from ``search``
and potentially discard sequences with non-finite log probability.
Parameters
----------
start_predictions : ``torch.Tensor``
A tensor containing the initial predictions with shape ``(batch_size,)``.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : ``StateType``
The initial state passed to the ``step`` function. Each value of the state dict
should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
number of dimensions.
step : ``StepFunctionType``
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two arguments. The first being a tensor
of shape ``(group_size,)``, representing the index of the predicted
tokens from the last time step, and the second being the current state.
The ``group_size`` will be ``batch_size * beam_size``, except in the initial
step, for which it will just be ``batch_size``.
The function is expected to return a tuple, where the first element
is a tensor of shape ``(group_size, target_vocab_size)`` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
``(group_size, *)``, where ``*`` means any other number of dimensions.
        Returns
        -------
        Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
            Tuple of ``(predictions, log_probabilities, word_log_probabilities)``,
            where ``predictions`` has shape ``(batch_size, beam_size, max_steps)``,
            ``log_probabilities`` has shape ``(batch_size, beam_size)``, and
            ``word_log_probabilities`` has shape ``(batch_size, beam_size, max_steps)``.
        """
batch_size = start_predictions.size()[0]
# List of (batch_size, beam_size) tensors. One for each time step. Does not
# include the start symbols, which are implicit.
predictions: List[torch.Tensor] = []
# Same as predictions, but contains the log probabilities
word_log_probabilities: List[torch.Tensor] = []
# List of (batch_size, beam_size) tensors. One for each time step. None for
# the first. Stores the index n for the parent prediction, i.e.
# predictions[t-1][i][n], that it came from.
backpointers: List[torch.Tensor] = []
# Calculate the first timestep. This is done outside the main loop
# because we are going from a single decoder input (the output from the
# encoder) to the top `beam_size` decoder outputs. On the other hand,
# within the main loop we are going from the `beam_size` elements of the
# beam to `beam_size`^2 candidates from which we will select the top
# `beam_size` elements for the next iteration.
# shape: (batch_size, num_classes)
start_class_log_probabilities, state = step(start_predictions, start_state)
num_classes = start_class_log_probabilities.size()[1]
# Make sure `per_node_beam_size` is not larger than `num_classes`.
if self.per_node_beam_size > num_classes:
raise ConfigurationError(f"Target vocab size ({num_classes:d}) too small "
f"relative to per_node_beam_size ({self.per_node_beam_size:d}).\n"
f"Please decrease beam_size or per_node_beam_size.")
# shape: (batch_size, beam_size), (batch_size, beam_size)
start_top_log_probabilities, start_predicted_classes = \
start_class_log_probabilities.topk(self.beam_size)
if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():
warnings.warn("Empty sequences predicted. You may want to increase the beam size or ensure "
"your step function is working properly.",
RuntimeWarning)
return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities
# The log probabilities for the last time step.
# shape: (batch_size, beam_size)
last_log_probabilities = start_top_log_probabilities
# shape: [(batch_size, beam_size)]
predictions.append(start_predicted_classes)
word_log_probabilities.append(start_top_log_probabilities)
# Log probability tensor that mandates that the end token is selected.
# shape: (batch_size * beam_size, num_classes)
log_probs_after_end = start_class_log_probabilities.new_full(
(batch_size * self.beam_size, num_classes),
float("-inf")
)
log_probs_after_end[:, self._end_index] = 0.
# Set the same state for each element in the beam.
for key, state_tensor in state.items():
_, *last_dims = state_tensor.size()
# shape: (batch_size * beam_size, *)
state[key] = state_tensor.\
unsqueeze(1).\
expand(batch_size, self.beam_size, *last_dims).\
reshape(batch_size * self.beam_size, *last_dims)
for timestep in range(self.max_steps - 1):
# shape: (batch_size * beam_size,)
last_predictions = predictions[-1].reshape(batch_size * self.beam_size)
# If every predicted token from the last step is `self._end_index`,
# then we can stop early.
if (last_predictions == self._end_index).all():
break
# Take a step. This get the predicted log probs of the next classes
# and updates the state.
# shape: (batch_size * beam_size, num_classes)
class_log_probabilities, state = step(last_predictions, state)
# shape: (batch_size * beam_size, num_classes)
last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
batch_size * self.beam_size,
num_classes
)
# Here we are finding any beams where we predicted the end token in
# the previous timestep and replacing the distribution with a
# one-hot distribution, forcing the beam to predict the end token
# this timestep as well.
# shape: (batch_size * beam_size, num_classes)
cleaned_log_probabilities = torch.where(
last_predictions_expanded == self._end_index,
log_probs_after_end,
class_log_probabilities
)
# shape (both): (batch_size * beam_size, per_node_beam_size)
top_log_probabilities, predicted_classes = \
cleaned_log_probabilities.topk(self.per_node_beam_size)
# Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)
# so that we can add them to the current log probs for this timestep.
# This lets us maintain the log probability of each element on the beam.
# shape: (batch_size * beam_size, per_node_beam_size)
expanded_last_log_probabilities = last_log_probabilities.\
unsqueeze(2).\
expand(batch_size, self.beam_size, self.per_node_beam_size).\
reshape(batch_size * self.beam_size, self.per_node_beam_size)
# shape: (batch_size * beam_size, per_node_beam_size)
summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_summed = summed_top_log_probabilities.\
reshape(batch_size, self.beam_size * self.per_node_beam_size)
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_predicted_classes = predicted_classes.\
reshape(batch_size, self.beam_size * self.per_node_beam_size)
# Keep only the top `beam_size` beam indices.
# shape: (batch_size, beam_size), (batch_size, beam_size)
restricted_beam_log_probs, restricted_beam_indices = reshaped_summed.topk(self.beam_size)
# Use the beam indices to extract the corresponding classes.
# shape: (batch_size, beam_size)
restricted_predicted_classes = reshaped_predicted_classes.gather(1, restricted_beam_indices)
# shape: (batch_size, beam_size * per_node_beam_size)
reshaped_top_log_probabilities = top_log_probabilities.\
reshape(batch_size, self.beam_size * self.per_node_beam_size)
# shape: (batch_size, beam_size)
restricted_top_log_probs = reshaped_top_log_probabilities.gather(1, restricted_beam_indices)
predictions.append(restricted_predicted_classes)
word_log_probabilities.append(restricted_top_log_probs)
# shape: (batch_size, beam_size)
last_log_probabilities = restricted_beam_log_probs
# The beam indices come from a `beam_size * per_node_beam_size` dimension where the
# indices with a common ancestor are grouped together. Hence
# dividing by per_node_beam_size gives the ancestor. (Note that this is integer
# division as the tensor is a LongTensor.)
# shape: (batch_size, beam_size)
backpointer = restricted_beam_indices / self.per_node_beam_size
backpointers.append(backpointer)
# Keep only the pieces of the state tensors corresponding to the
# ancestors created this iteration.
for key, state_tensor in state.items():
_, *last_dims = state_tensor.size()
# shape: (batch_size, beam_size, *)
expanded_backpointer = backpointer.\
view(batch_size, self.beam_size, *([1] * len(last_dims))).\
expand(batch_size, self.beam_size, *last_dims)
# shape: (batch_size * beam_size, *)
state[key] = state_tensor.\
reshape(batch_size, self.beam_size, *last_dims).\
gather(1, expanded_backpointer).\
reshape(batch_size * self.beam_size, *last_dims)
if not torch.isfinite(last_log_probabilities).all():
warnings.warn("Infinite log probabilities encountered. Some final sequences may not make sense. "
"This can happen when the beam size is larger than the number of valid (non-zero "
"probability) transitions that the step function produces.",
RuntimeWarning)
# Reconstruct the sequences.
# shape: [(batch_size, beam_size, 1)]
reconstructed_predictions = [predictions[-1].unsqueeze(2)]
reconstructed_word_log_probabilities = [word_log_probabilities[-1].unsqueeze(2)]
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[-1]
for timestep in range(len(predictions) - 2, 0, -1):
# shape: (batch_size, beam_size, 1)
cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)
cur_word_logs = word_log_probabilities[timestep].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(cur_preds)
reconstructed_word_log_probabilities.append(cur_word_logs)
# shape: (batch_size, beam_size)
cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)
# shape: (batch_size, beam_size, 1)
final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)
final_word_log_probabilities = word_log_probabilities[0].gather(1, cur_backpointers).unsqueeze(2)
reconstructed_predictions.append(final_preds)
reconstructed_word_log_probabilities.append(final_word_log_probabilities)
# shape: (batch_size, beam_size, max_steps)
all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)
all_word_log_probabilities = torch.cat(list(reversed(reconstructed_word_log_probabilities)), 2)
return all_predictions, last_log_probabilities, all_word_log_probabilities
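

# --- Hedged sketch (illustrative, not from the repo): a minimal step function
# satisfying the contract documented in ``search``. The 5-token vocabulary and
# the name ``_toy_step`` are assumptions for illustration only.
def _toy_step(last_predictions: torch.Tensor, state: StateType) -> Tuple[torch.Tensor, StateType]:
    import math
    group_size = last_predictions.size(0)
    # uniform log-probabilities over the toy vocabulary; state passes through unchanged
    log_probs = torch.full((group_size, 5), -math.log(5.0))
    return log_probs, state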
| [
"torch.isfinite",
"torch.where"
] | 1.1.0 | wangzhen263/allennlp | 309b2b572aeb0677511b4f972281ac265d7477a9 |
1.4 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Transform, constraints
from pyro.distributions.conditional import ConditionalTransformModule
from pyro.distributions.torch_transform import TransformModule
from pyro.distributions.util import copy_docs_from
from pyro.nn import DenseNN
@copy_docs_from(Transform)
class ConditionedPlanar(Transform):
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, bias=None, u=None, w=None):
super().__init__(cache_size=1)
self.bias = bias
self.u = u
self.w = w
self._cached_logDetJ = None
    # This method ensures that w^T u_hat > -1, required for invertibility
def u_hat(self, u, w):
alpha = torch.matmul(u.unsqueeze(-2), w.unsqueeze(-1)).squeeze(-1)
a_prime = -1 + F.softplus(alpha)
return u + (a_prime - alpha) * w.div(w.pow(2).sum(dim=-1, keepdim=True))
def _call(self, x):
"""
:param x: the input into the bijection
:type x: torch.Tensor
Invokes the bijection x => y; in the prototypical context of a
:class:`~pyro.distributions.TransformedDistribution` `x` is a sample from
the base distribution (or the output of a previous transform)
"""
# x ~ (batch_size, dim_size, 1)
# w ~ (batch_size, 1, dim_size)
# bias ~ (batch_size, 1)
act = torch.tanh(torch.matmul(self.w.unsqueeze(-2), x.unsqueeze(-1)).squeeze(-1) + self.bias)
u_hat = self.u_hat(self.u, self.w)
y = x + u_hat * act
psi_z = (1. - act.pow(2)) * self.w
self._cached_logDetJ = torch.log(
torch.abs(1 + torch.matmul(psi_z.unsqueeze(-2), u_hat.unsqueeze(-1)).squeeze(-1).squeeze(-1)))
return y
def _inverse(self, y):
"""
:param y: the output of the bijection
:type y: torch.Tensor
Inverts y => x. As noted above, this implementation is incapable of
inverting arbitrary values `y`; rather it assumes `y` is the result of a
previously computed application of the bijector to some `x` (which was
cached on the forward call)
"""
raise KeyError("ConditionedPlanar object expected to find key in intermediates cache but didn't")
def log_abs_det_jacobian(self, x, y):
"""
Calculates the elementwise determinant of the log Jacobian
"""
x_old, y_old = self._cached_x_y
if x is not x_old or y is not y_old:
# This call to the parent class Transform will update the cache
# as well as calling self._call and recalculating y and log_detJ
self(x)
return self._cached_logDetJ
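

# --- Hedged sketch (illustrative, not from the library): numerically checking the
# invertibility condition enforced by u_hat, i.e. w^T u_hat = -1 + softplus(w^T u) > -1.
def _check_u_hat_condition(dim=4):
    u, w = torch.randn(dim), torch.randn(dim)
    u_h = ConditionedPlanar().u_hat(u, w)
    return (u_h * w).sum().item() > -1.0  # always True, since softplus(.) > 0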
@copy_docs_from(ConditionedPlanar)
class Planar(ConditionedPlanar, TransformModule):
"""
A 'planar' bijective transform with equation,
:math:`\\mathbf{y} = \\mathbf{x} + \\mathbf{u}\\tanh(\\mathbf{w}^T\\mathbf{z}+b)`
where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs,
and the learnable parameters are :math:`b\\in\\mathbb{R}`,
:math:`\\mathbf{u}\\in\\mathbb{R}^D`, :math:`\\mathbf{w}\\in\\mathbb{R}^D` for
input dimension :math:`D`. For this to be an invertible transformation, the
condition :math:`\\mathbf{w}^T\\mathbf{u}>-1` is enforced.
Together with :class:`~pyro.distributions.TransformedDistribution` this provides
a way to create richer variational approximations.
Example usage:
>>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
>>> transform = Planar(10)
>>> pyro.module("my_transform", transform) # doctest: +SKIP
>>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
>>> flow_dist.sample() # doctest: +SKIP
The inverse of this transform does not possess an analytical solution and is
left unimplemented. However, the inverse is cached when the forward operation is
called during sampling, and so samples drawn using the planar transform can be
scored.
:param input_dim: the dimension of the input (and output) variable.
:type input_dim: int
References:
[1] Danilo Jimenez Rezende, Shakir Mohamed. Variational Inference with
Normalizing Flows. [arXiv:1505.05770]
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, input_dim):
super().__init__()
self.bias = nn.Parameter(torch.Tensor(1,))
self.u = nn.Parameter(torch.Tensor(input_dim,))
self.w = nn.Parameter(torch.Tensor(input_dim,))
self.input_dim = input_dim
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.u.size(0))
self.w.data.uniform_(-stdv, stdv)
self.u.data.uniform_(-stdv, stdv)
self.bias.data.zero_()
@copy_docs_from(ConditionalTransformModule)
class ConditionalPlanar(ConditionalTransformModule):
"""
A conditional 'planar' bijective transform using the equation,
:math:`\\mathbf{y} = \\mathbf{x} + \\mathbf{u}\\tanh(\\mathbf{w}^T\\mathbf{z}+b)`
where :math:`\\mathbf{x}` are the inputs with dimension :math:`D`,
:math:`\\mathbf{y}` are the outputs, and the pseudo-parameters
:math:`b\\in\\mathbb{R}`, :math:`\\mathbf{u}\\in\\mathbb{R}^D`, and
:math:`\\mathbf{w}\\in\\mathbb{R}^D` are the output of a function, e.g. a NN,
with input :math:`z\\in\\mathbb{R}^{M}` representing the context variable to
condition on. For this to be an invertible transformation, the condition
:math:`\\mathbf{w}^T\\mathbf{u}>-1` is enforced.
Together with :class:`~pyro.distributions.ConditionalTransformedDistribution`
this provides a way to create richer variational approximations.
Example usage:
>>> from pyro.nn.dense_nn import DenseNN
>>> input_dim = 10
>>> context_dim = 5
>>> batch_size = 3
>>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
>>> param_dims = [1, input_dim, input_dim]
>>> hypernet = DenseNN(context_dim, [50, 50], param_dims)
>>> transform = ConditionalPlanar(hypernet)
>>> z = torch.rand(batch_size, context_dim)
>>> flow_dist = dist.ConditionalTransformedDistribution(base_dist,
... [transform]).condition(z)
>>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP
The inverse of this transform does not possess an analytical solution and is
left unimplemented. However, the inverse is cached when the forward operation is
called during sampling, and so samples drawn using the planar transform can be
scored.
:param nn: a function inputting the context variable and outputting a triplet of
real-valued parameters of dimensions :math:`(1, D, D)`.
:type nn: callable
References:
[1] Variational Inference with Normalizing Flows [arXiv:1505.05770]
Danilo Jimenez Rezende, Shakir Mohamed
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, nn):
super().__init__()
self.nn = nn
def condition(self, context):
bias, u, w = self.nn(context)
return ConditionedPlanar(bias, u, w)
def planar(input_dim):
"""
A helper function to create a :class:`~pyro.distributions.transforms.Planar`
object for consistency with other helpers.
:param input_dim: Dimension of input variable
:type input_dim: int
"""
return Planar(input_dim)
def conditional_planar(input_dim, context_dim, hidden_dims=None):
"""
A helper function to create a
:class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care
of constructing a dense network with the correct input/output dimensions.
:param input_dim: Dimension of input variable
:type input_dim: int
:param context_dim: Dimension of context variable
:type context_dim: int
:param hidden_dims: The desired hidden dimensions of the dense network. Defaults
to using [input_dim * 10, input_dim * 10]
:type hidden_dims: list[int]
"""
if hidden_dims is None:
hidden_dims = [input_dim * 10, input_dim * 10]
nn = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])
return ConditionalPlanar(nn)
| [
"torch.Tensor",
"torch.nn.functional.softplus"
] | 1.4.0 | akern40/pyro | 8633b7136946ab2ae2e16062503fe51c2aac8c38 |
1.0 |
import argparse
import time
import logging
from datetime import datetime
try:
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
from torch.nn.parallel import DistributedDataParallel as DDP
has_apex = False
from timm.data import Dataset, create_loader, resolve_data_config, FastCollateMixup, mixup_target
from timm.models import create_model, resume_checkpoint
from timm.utils import *
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.optim import create_optimizer
from timm.scheduler import create_scheduler
import torch
import torch.nn as nn
import torchvision.utils
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Training')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--model', default='resnet101', type=str, metavar='MODEL',
                    help='Name of model to train (default: "resnet101")')
parser.add_argument('--pretrained', action='store_true', default=False,
help='Start with pretrained version of specified network (if avail)')
parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH',
help='Initialize model from this checkpoint (default: none)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='Resume full model and optimizer state from checkpoint (default: none)')
parser.add_argument('--num-classes', type=int, default=1000, metavar='N',
help='number of label classes (default: 1000)')
parser.add_argument('--gp', default='avg', type=str, metavar='POOL',
help='Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")')
parser.add_argument('--img-size', type=int, default=None, metavar='N',
help='Image patch size (default: None => model default)')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--drop', type=float, default=0.0, metavar='DROP',
help='Dropout rate (default: 0.)')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
help='weight decay (default: 0.0001)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "step")')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR',
help='warmup learning rate (default: 0.0001)')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
                    help='number of epochs to train (default: 200)')
parser.add_argument('--start-epoch', default=None, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--decay-epochs', type=int, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--reprob', type=float, default=0., metavar='PCT',
help='Random erase prob (default: 0.)')
parser.add_argument('--remode', type=str, default='const',
help='Random erase mode (default: "const")')
parser.add_argument('--mixup', type=float, default=0.0,
help='mixup alpha, mixup enabled if > 0. (default: 0.)')
parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N',
help='turn off mixup after this epoch, disabled if 0 (default: 0)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='label smoothing (default: 0.1)')
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument('--bn-tf', action='store_true', default=False,
help='Use Tensorflow BatchNorm defaults for models that support it (default: False)')
parser.add_argument('--bn-momentum', type=float, default=None,
help='BatchNorm momentum override (if not None)')
parser.add_argument('--bn-eps', type=float, default=None,
help='BatchNorm epsilon override (if not None)')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# Misc
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=50, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--recovery-interval', type=int, default=0, metavar='N',
help='how many batches to wait before writing recovery checkpoint')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                    help='how many data loading workers to use (default: 4)')
parser.add_argument('--num-gpu', type=int, default=1,
help='Number of GPUS to use')
parser.add_argument('--save-images', action='store_true', default=False,
                    help='save images of input batches every log interval for debugging')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA amp for mixed precision training')
parser.add_argument('--sync-bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--no-prefetcher', action='store_true', default=False,
help='disable fast prefetcher')
parser.add_argument('--output', default='', type=str, metavar='PATH',
help='path to output folder (default: none, current dir)')
parser.add_argument('--eval-metric', default='prec1', type=str, metavar='EVAL_METRIC',
                    help='Best metric (default: "prec1")')
parser.add_argument('--tta', type=int, default=0, metavar='N',
help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)')
parser.add_argument("--local_rank", default=0, type=int)
def main():
setup_default_logging()
args = parser.parse_args()
args.prefetcher = not args.no_prefetcher
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed and args.num_gpu > 1:
logging.warning('Using more than one GPU per process in distributed mode is not allowed. Setting num_gpu to 1.')
args.num_gpu = 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.num_gpu = 1
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
assert args.rank >= 0
if args.distributed:
logging.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (args.rank, args.world_size))
else:
logging.info('Training with a single process on %d GPUs.' % args.num_gpu)
torch.manual_seed(args.seed + args.rank)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
checkpoint_path=args.initial_checkpoint)
if args.local_rank == 0:
logging.info('Model %s created, param count: %d' %
(args.model, sum([m.numel() for m in model.parameters()])))
data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0)
# optionally resume from a checkpoint
optimizer_state = None
resume_epoch = None
if args.resume:
optimizer_state, resume_epoch = resume_checkpoint(model, args.resume)
if args.num_gpu > 1:
if args.amp:
logging.warning(
'AMP does not work well with nn.DataParallel, disabling. Use distributed mode for multi-GPU AMP.')
args.amp = False
model = nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
else:
model.cuda()
optimizer = create_optimizer(args, model)
if optimizer_state is not None:
optimizer.load_state_dict(optimizer_state)
use_amp = False
if has_apex and args.amp:
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
use_amp = True
if args.local_rank == 0:
logging.info('NVIDIA APEX {}. AMP {}.'.format(
'installed' if has_apex else 'not installed', 'on' if use_amp else 'off'))
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume=args.resume)
if args.distributed:
if args.sync_bn:
try:
if has_apex:
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
logging.info('Converted model to use Synchronized BatchNorm.')
except Exception as e:
logging.error('Failed to enable Synchronized BatchNorm. Install Apex or Torch >= 1.1')
if has_apex:
model = DDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
logging.info("Using torch DistributedDataParallel. Install NVIDIA Apex for Apex DDP.")
model = DDP(model, device_ids=[args.local_rank]) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
logging.info('Scheduled epochs: {}'.format(num_epochs))
train_dir = os.path.join(args.data, 'train')
if not os.path.exists(train_dir):
logging.error('Training folder does not exist at: {}'.format(train_dir))
exit(1)
dataset_train = Dataset(train_dir)
collate_fn = None
if args.prefetcher and args.mixup > 0:
collate_fn = FastCollateMixup(args.mixup, args.smoothing, args.num_classes)
loader_train = create_loader(
dataset_train,
input_size=data_config['input_size'],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
rand_erase_prob=args.reprob,
rand_erase_mode=args.remode,
color_jitter=args.color_jitter,
interpolation='random', # FIXME cleanly resolve this? data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
)
eval_dir = os.path.join(args.data, 'validation')
if not os.path.isdir(eval_dir):
logging.error('Validation folder does not exist at: {}'.format(eval_dir))
exit(1)
dataset_eval = Dataset(eval_dir)
loader_eval = create_loader(
dataset_eval,
input_size=data_config['input_size'],
batch_size=4 * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
)
if args.mixup > 0.:
# smoothing is handled with mixup label transform
train_loss_fn = SoftTargetCrossEntropy().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
elif args.smoothing:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = train_loss_fn
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
output_dir = ''
if args.local_rank == 0:
output_base = args.output if args.output else './output'
exp_name = '-'.join([
datetime.now().strftime("%Y%m%d-%H%M%S"),
args.model,
str(data_config['input_size'][-1])
])
output_dir = get_outdir(output_base, 'train', exp_name)
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(checkpoint_dir=output_dir, decreasing=decreasing)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed:
loader_train.sampler.set_epoch(epoch)
train_metrics = train_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, args,
lr_scheduler=lr_scheduler, saver=saver, output_dir=output_dir,
use_amp=use_amp, model_ema=model_ema)
eval_metrics = validate(model, loader_eval, validate_loss_fn, args)
if model_ema is not None and not args.model_ema_force_cpu:
ema_eval_metrics = validate(
model_ema.ema, loader_eval, validate_loss_fn, args, log_suffix=' (EMA)')
eval_metrics = ema_eval_metrics
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
update_summary(
epoch, train_metrics, eval_metrics, os.path.join(output_dir, 'summary.csv'),
write_header=best_metric is None)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(
model, optimizer, args,
epoch=epoch, model_ema=model_ema, metric=save_metric)
except KeyboardInterrupt:
pass
if best_metric is not None:
logging.info('*** Best metric: {0} (epoch {1})'.format(best_metric, best_epoch))
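# `reduce_tensor` is used in train_epoch/validate below to average loss and
# metric tensors across processes. A minimal sketch of such a helper, assuming
# torch.distributed has been initialized (the project may ship its own
# implementation; this one simply mirrors the usage below):
import torch.distributed as dist

def reduce_tensor(tensor, n):
    # Sum the tensor over all ranks, then divide by the world size n so
    # every process ends up with the cross-rank average.
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= n
    return rt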
def train_epoch(
epoch, model, loader, optimizer, loss_fn, args,
lr_scheduler=None, saver=None, output_dir='', use_amp=False, model_ema=None):
if args.prefetcher and args.mixup > 0 and loader.mixup_enabled:
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
loader.mixup_enabled = False
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
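            # mixup (Zhang et al., 2018): sample lam ~ Beta(alpha, alpha) and
            # blend each image with its counterpart in the reversed batch,
            # x <- lam * x + (1 - lam) * x.flip(0); mixup_target is assumed to
            # build the matching soft labels (with label smoothing folded in).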
if args.mixup > 0.:
lam = 1.
if not args.mixup_off_epoch or epoch < args.mixup_off_epoch:
lam = np.random.beta(args.mixup, args.mixup)
input.mul_(lam).add_(1 - lam, input.flip(0))
target = mixup_target(target, args.num_classes, lam, args.smoothing)
output = model(input)
loss = loss_fn(output, target)
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group['lr'] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
if args.local_rank == 0:
logging.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx),
padding=0,
normalize=True)
if saver is not None and args.recovery_interval and (
last_batch or (batch_idx + 1) % args.recovery_interval == 0):
saver.save_recovery(
model, optimizer, args, epoch, model_ema=model_ema, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
return OrderedDict([('loss', losses_m.avg)])
def validate(model, loader, loss_fn, args, log_suffix=''):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
prec1_m = AverageMeter()
prec5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0:target.size(0):reduce_factor]
loss = loss_fn(output, target)
prec1, prec5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
prec1 = reduce_tensor(prec1, args.world_size)
prec5 = reduce_tensor(prec5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
prec1_m.update(prec1.item(), output.size(0))
prec5_m.update(prec5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0):
log_name = 'Test' + log_suffix
logging.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Prec@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Prec@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx,
batch_time=batch_time_m, loss=losses_m,
top1=prec1_m, top5=prec5_m))
metrics = OrderedDict([('loss', losses_m.avg), ('prec1', prec1_m.avg), ('prec5', prec5_m.avg)])
return metrics
if __name__ == '__main__':
main()
| [
"torch.distributed.get_world_size",
"torch.cuda.synchronize",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.distributed.init_process_group",
"torch.no_grad",
"torch.nn.parallel.DistributedDataParallel",
"torch.manual_seed",
"torch.cuda.set_device",
"torch.distributed.get_rank",
"torch.nn.CrossEntropyLoss"
] | 1.0 | woffett/pytorch-image-models | d6ac5bbc481271efecee8bc8756caa864a253fdd |
1.1 | import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import copy
import math
from pcdet.ops.iou3d_nms.iou3d_nms_utils import boxes_iou3d_gpu
from pcdet.models.dense_heads.utils import _sigmoid
from ...utils import box_coder_utils, common_utils
from pcdet.utils import matcher
from pcdet.utils.set_crit import SetCriterion
from pcdet.models.dense_heads.e2e_fusion_modules import \
OneNetSeqFusionHead, OneNetSeqFusionHeadCST, OneNetSeqFusionHeadTCS, \
OneNetSeqFusionHeadDense, OneNetSeqFusionHeadTSC, OneNetSeqFusionHeadCLSCTS, \
OneNetSeqFusionHeadCLSTSC, OneNetSeqFusionHeadCLSTCS, \
OneNetSeqFusionHeadCLSSTC
from pcdet.models.dense_heads.target_assigner.merged_assigner import MergedAssigner
from pcdet.utils import loss_utils
from ...ops.iou3d_nms import iou3d_nms_cuda
SingleHeadDict = {
'OneNetSeqFusionHeadCTS': OneNetSeqFusionHead,
'OneNetSeqFusionHead': OneNetSeqFusionHead,
'OneNetSeqFusionHeadCST': OneNetSeqFusionHeadCST,
'OneNetSeqFusionHeadTCS': OneNetSeqFusionHeadTCS,
'OneNetSeqFusionHeadDense': OneNetSeqFusionHeadDense,
'OneNetSeqFusionHeadTSC': OneNetSeqFusionHeadTSC,
'OneNetSeqFusionHeadCLSCTS': OneNetSeqFusionHeadCLSCTS,
'OneNetSeqFusionHeadCLSTSC': OneNetSeqFusionHeadCLSTSC,
'OneNetSeqFusionHeadCLSTCS': OneNetSeqFusionHeadCLSTCS,
'OneNetSeqFusionHeadCLSSTC': OneNetSeqFusionHeadCLSSTC,
}
class E2ESeqFusionHead(nn.Module):
def __init__(self, model_cfg, input_channels, num_class, class_names, grid_size, voxel_size,
point_cloud_range, predict_boxes_when_training, **kwargs):
super().__init__()
self.xoffset = None
self.yoffset = None
self.no_log = False
self.forward_ret_dict = {}
self.model_cfg = model_cfg
        self.voxel_size = [model_cfg.TARGET_ASSIGNER_CONFIG['out_size_factor'] * v for v in voxel_size]
self.period = 2 * np.pi
# if self.use_dir_classifier:
# self.period = self.period / self.num_dir_bins
self.single_head = self.model_cfg.get('SingleHead', 'OneNetSeqFusionHead')
self.post_cfg = model_cfg.TEST_CONFIG
self.in_channels = input_channels
self.predict_boxes_when_training = predict_boxes_when_training
self.grid_size = grid_size
self.point_cloud_range = point_cloud_range
self.out_size_factor = model_cfg.OUT_SIZE_FACTOR
self._generate_offset_grid()
self.num_classes = [t["num_class"] for t in model_cfg.TASKS]
self.class_names = [t["class_names"] for t in model_cfg.TASKS]
self.template_boxes = [t["template_box"] for t in model_cfg.TASKS]
self.total_classes = sum(self.num_classes)
box_coder_config = self.model_cfg.CODER_CONFIG.get('BOX_CODER_CONFIG', {})
box_coder_config['period'] = self.period
box_coder = getattr(box_coder_utils, self.model_cfg.CODER_CONFIG.BOX_CODER)(**box_coder_config)
set_crit_settings = model_cfg.SET_CRIT_CONFIG
matcher_settings = model_cfg.MATCHER_CONFIG
self.matcher_weight_dict = matcher_settings['weight_dict']
self.use_focal_loss = model_cfg.USE_FOCAL_LOSS
self.box_coder = box_coder
matcher_settings['box_coder'] = box_coder
matcher_settings['period'] = self.period
self.matcher_weight_dict = matcher_settings['weight_dict']
self.matcher = getattr(matcher, self.model_cfg.MATCHER)(**matcher_settings)
set_crit_settings['box_coder'] = box_coder
set_crit_settings['matcher'] = self.matcher
self.set_crit = SetCriterion(**set_crit_settings)
self.aux_loss_weights = self.model_cfg.AUX_LOSS_WEIGHTS
self.loss_center = loss_utils.CenterNetFocalLoss()
self.loss_corner = loss_utils.CenterNetFocalLoss()
self.loss_foreground = loss_utils.ForegroundFocalLoss()
self.target_assigner = MergedAssigner(model_cfg.TARGET_ASSIGNER_CONFIG, num_classes=sum(self.num_classes),
no_log=self.no_log, grid_size=grid_size, pc_range=point_cloud_range,
voxel_size=voxel_size)
# self.box_n_dim = 9 if self.dataset == 'nuscenes' else 7
# self.bev_only = True if model_cfg.MODE == "bev" else False
shared_ch = model_cfg.PARAMETERS.shared_ch
self.shared_conv = nn.Sequential(
nn.Conv2d(self.in_channels, shared_ch, kernel_size=3, padding=1, bias=True),
nn.BatchNorm2d(shared_ch),
nn.ReLU(inplace=True)
)
self.common_heads = model_cfg.PARAMETERS.common_heads
self.output_box_attrs = [k for k in self.common_heads]
self.tasks = nn.ModuleList()
for num_cls, template_box in zip(self.num_classes, self.template_boxes):
heads = copy.deepcopy(self.common_heads)
heads.update(
dict(
num_classes=num_cls,
template_box=template_box,
pc_range=self.point_cloud_range,
offset_grid=self.offset_grid,
voxel_size=self.voxel_size
)
)
self.tasks.append(
SingleHeadDict[self.single_head](shared_ch, heads)
)
def _nms_gpu_3d(self, boxes, scores, thresh, pre_maxsize=None, post_max_size=None):
"""
:param boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
:param scores: (N)
        :param thresh: IoU threshold above which overlapping boxes are suppressed
        :return: indices of the kept boxes, sorted by descending score
"""
assert boxes.shape[1] == 7
order = scores.sort(0, descending=True)[1]
if pre_maxsize is not None:
order = order[:pre_maxsize]
boxes = boxes[order].contiguous()
keep = torch.LongTensor(boxes.size(0))
num_out = iou3d_nms_cuda.nms_gpu(boxes, keep, thresh)
selected = order[keep[:num_out].cuda()].contiguous()
if post_max_size is not None:
selected = selected[:post_max_size]
return selected
def _generate_offset_grid(self):
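        # Build two metric-coordinate grids over the BEV feature map:
        # (xvp, yvp) are cell-center coordinates (index + 0.5, scaled by the
        # cell size and shifted by the range minimum), while (xvc, yvc) are
        # cell-corner coordinates; both are registered as buffers so they
        # move with the module across devices.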
x, y = self.grid_size[:2] // self.out_size_factor
xmin, ymin, zmin, xmax, ymax, zmax = self.point_cloud_range
xoffset = (xmax - xmin) / x
yoffset = (ymax - ymin) / y
yv, xv = torch.meshgrid([torch.arange(0, y), torch.arange(0, x)])
yvp = (yv.float() + 0.5) * yoffset + ymin
xvp = (xv.float() + 0.5) * xoffset + xmin
yvc = yv.float() * yoffset + ymin
xvc = xv.float() * xoffset + xmin
# size (1, 2, h, w)
self.register_buffer('offset_grid', torch.stack([xvp, yvp], dim=0)[None])
self.register_buffer('xy_offset', torch.stack([xvc, yvc], dim=0)[None])
def forward(self, data_dict):
multi_head_features = []
spatial_features_2d = data_dict['spatial_features_2d']
spatial_features_2d = self.shared_conv(spatial_features_2d)
for task in self.tasks:
multi_head_features.append(task(spatial_features_2d))
self.forward_ret_dict['multi_head_features'] = multi_head_features
        final_feat = torch.cat([head_out['final_feat'] for head_out in multi_head_features] + [spatial_features_2d, ], dim=1)
data_dict['final_feat'] = final_feat
if self.training:
self.forward_ret_dict['gt_dicts'] = self.target_assigner.assign_targets_cuda(data_dict['gt_boxes'])
if not self.training and not self.predict_boxes_when_training:
data_dict = self.generate_predicted_boxes(data_dict)
# else:
# data_dict = self.generate_predicted_boxes_for_roi_head(data_dict)
return data_dict
def get_proper_xy(self, pred_boxes):
tmp, res = pred_boxes[:, :2, :, :], pred_boxes[:, 2:, :, :]
tmp = tmp + self.offset_grid
return torch.cat([tmp, res], dim=1)
def _reshape_corner_map(self, corner_map):
bs, c, h, w = corner_map.size()
return corner_map.view(bs, c // 4, 4, h, w)
def get_loss(self, curr_epoch, **kwargs):
tb_dict = {}
pred_dicts = self.forward_ret_dict['multi_head_features']
losses = []
self.forward_ret_dict['pred_box_encoding'] = {}
for task_id, pred_dict in enumerate(pred_dicts):
task_pred_boxes = self.get_proper_xy(pred_dict['pred_boxes'])
bs, code, h, w = task_pred_boxes.size()
task_pred_boxes = task_pred_boxes.permute(0, 2, 3, 1).view(bs, h * w, code)
task_pred_logits = pred_dict['pred_logits']
_, cls, _, _ = task_pred_logits.size()
task_pred_logits = task_pred_logits.permute(0, 2, 3, 1).view(bs, h * w, cls)
task_pred_dicts = {
'pred_logits': task_pred_logits,
'pred_boxes': task_pred_boxes
}
task_gt_dicts = self.forward_ret_dict['gt_dicts'][task_id]
task_loss_dicts = self.set_crit(task_pred_dicts, task_gt_dicts, curr_epoch)
aux_loss_dict = {}
pred_dict['center_map'] = _sigmoid(pred_dict['center_map'])
pred_dict['corner_map'] = _sigmoid(self._reshape_corner_map(pred_dict['corner_map']))
pred_dict['foreground_map'] = pred_dict['foreground_map']
aux_loss_dict['loss_center'] = self.loss_center(
pred_dict['center_map'],
self.forward_ret_dict['gt_dicts']['center_map'][task_id]
)
aux_loss_dict['loss_corner'] = self.loss_corner(
pred_dict['corner_map'],
self.forward_ret_dict['gt_dicts']['corner_map'][task_id]
)
aux_loss_dict['loss_foreground'] = self.loss_foreground(
pred_dict['foreground_map'],
self.forward_ret_dict['gt_dicts']['foreground_map'][task_id]
)
tmp_loss = task_loss_dicts['loss']
for k in self.aux_loss_weights:
tmp_loss = tmp_loss + self.aux_loss_weights[k] * aux_loss_dict[k]
task_loss_dicts.update(aux_loss_dict)
task_loss_dicts['loss'] = tmp_loss
tb_key = 'task_' + str(task_id) + '/'
tb_dict.update({
tb_key + 'loss_x': task_loss_dicts['loc_loss_elem'][0].item(),
tb_key + 'loss_y': task_loss_dicts['loc_loss_elem'][1].item(),
tb_key + 'loss_z': task_loss_dicts['loc_loss_elem'][2].item(),
tb_key + 'loss_w': task_loss_dicts['loc_loss_elem'][3].item(),
tb_key + 'loss_l': task_loss_dicts['loc_loss_elem'][4].item(),
tb_key + 'loss_h': task_loss_dicts['loc_loss_elem'][5].item(),
tb_key + 'loss_sin': task_loss_dicts['loc_loss_elem'][6].item(),
tb_key + 'loss_cos': task_loss_dicts['loc_loss_elem'][7].item(),
tb_key + 'loss_ce': task_loss_dicts['loss_ce'],
tb_key + 'loss_bbox': task_loss_dicts['loss_bbox'],
tb_key + 'loss_center': task_loss_dicts['loss_center'],
tb_key + 'loss_corner': task_loss_dicts['loss_corner'],
tb_key + 'loss_foreground': task_loss_dicts['loss_foreground'],
})
losses.append(task_loss_dicts['loss'])
return sum(losses), tb_dict
@torch.no_grad()
def generate_predicted_boxes_for_roi_head(self, data_dict):
pred_dicts = self.forward_ret_dict['multi_head_features']
task_box_preds = {}
task_score_preds = {}
k_list = self.post_cfg.k_list
for task_id, pred_dict in enumerate(pred_dicts):
tmp = {}
tmp.update(pred_dict)
_pred_boxes = self.get_proper_xy(tmp['pred_boxes'])
if self.use_focal_loss:
_pred_score = tmp['pred_logits'].sigmoid()
else:
_pred_score = tmp['pred_logits'].softmax(2)
_pred_score = _pred_score.flatten(2).permute(0, 2, 1)
_pred_boxes = self.box_coder.decode_torch(_pred_boxes.flatten(2).permute(0, 2, 1))
task_box_preds[task_id] = _pred_boxes
task_score_preds[task_id] = _pred_score
batch_cls_preds = []
batch_box_preds = []
bs = len(task_box_preds[0])
for idx in range(bs):
cls_offset = 1
pred_boxes, pred_scores, pred_labels = [], [], []
for task_id, class_name in enumerate(self.class_names):
raw_scores = task_score_preds[task_id][idx]
raw_boxes = task_box_preds[task_id][idx]
cls_num = raw_scores.size(1)
tmp_scores, tmp_cat_inds = torch.topk(raw_scores, k=k_list[task_id], dim=0)
final_score_task, tmp_inds = torch.topk(tmp_scores.reshape(-1), k=k_list[task_id])
final_label = (tmp_inds % cls_num) + cls_offset
topk_boxes_cat = raw_boxes[tmp_cat_inds.reshape(-1), :]
final_box = topk_boxes_cat[tmp_inds, :]
raw_scores = raw_scores[tmp_cat_inds.reshape(-1), :]
final_score = final_score_task.new_zeros((final_box.shape[0], self.total_classes))
final_score[:, cls_offset - 1: cls_offset - 1 + cls_num] = raw_scores
pred_boxes.append(final_box)
pred_scores.append(final_score)
pred_labels.append(final_label)
cls_offset += len(class_name)
batch_box_preds.append(torch.cat(pred_boxes))
batch_cls_preds.append(torch.cat(pred_scores))
data_dict['batch_cls_preds'] = torch.stack(batch_cls_preds, dim=0)
data_dict['batch_box_preds'] = torch.stack(batch_box_preds, dim=0)
if self.training:
data_dict['gt_dicts'] = self.forward_ret_dict['gt_dicts']
return data_dict
@torch.no_grad()
def generate_predicted_boxes(self, data_dict):
cur_epoch = data_dict['cur_epoch']
pred_dicts = self.forward_ret_dict['multi_head_features']
task_box_preds = {}
task_score_preds = {}
k_list = self.post_cfg.k_list
thresh_list = self.post_cfg.thresh_list
num_queries = self.post_cfg.num_queries
# use_nms = self.post_cfg.use_nms
# vis_dir = getattr(self.post_cfg, 'bev_vis_dir', None)
for task_id, pred_dict in enumerate(pred_dicts):
tmp = {}
tmp.update(pred_dict)
_pred_boxes = self.get_proper_xy(tmp['pred_boxes'])
if self.use_focal_loss:
_pred_score = tmp['pred_logits'].sigmoid()
else:
_pred_score = tmp['pred_logits'].softmax(2)
_pred_score = _pred_score.flatten(2).permute(0, 2, 1)
_pred_boxes = self.box_coder.decode_torch(_pred_boxes.flatten(2).permute(0, 2, 1))
task_box_preds[task_id] = _pred_boxes
task_score_preds[task_id] = _pred_score
pred_dicts = []
bs = len(task_box_preds[0])
for idx in range(bs):
cls_offset = 1
final_boxes, final_scores, final_labels = [], [], []
for task_id, class_name in enumerate(self.class_names):
task_scores = task_score_preds[task_id][idx]
task_boxes = task_box_preds[task_id][idx]
cls_num = task_scores.size(1)
topk_scores_cat, topk_inds_cat = torch.topk(task_scores, k=k_list[task_id], dim=0)
topk_scores, topk_inds = torch.topk(topk_scores_cat.reshape(-1), k=k_list[task_id])
topk_labels = (topk_inds % cls_num) + cls_offset
topk_boxes_cat = task_boxes[topk_inds_cat.reshape(-1), :]
topk_boxes = topk_boxes_cat[topk_inds, :]
mask = topk_scores >= thresh_list[task_id]
task_boxes = topk_boxes[mask]
task_scores = topk_scores[mask]
task_labels = topk_labels[mask]
final_boxes.append(task_boxes)
final_scores.append(task_scores)
final_labels.append(task_labels)
cls_offset += len(class_name)
final_boxes = torch.cat(final_boxes)
final_scores = torch.cat(final_scores)
final_labels = torch.cat(final_labels)
end = min(final_scores.size(0), num_queries)
record_dict = {
"pred_boxes": final_boxes[:end],
"pred_scores": final_scores[:end],
"pred_labels": final_labels[:end]
}
pred_dicts.append(record_dict)
# import pdb; pdb.set_trace()
data_dict['pred_dicts'] = pred_dicts
data_dict['has_class_labels'] = True # Force to be true
return data_dict
| [
"torch.cat",
"torch.stack",
"torch.nn.ModuleList",
"torch.arange",
"torch.no_grad",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.topk"
] | 1.1 | ocNflag/point2seq | 710686f576b3df5469a06c66860758b25f852dbd |
1.10 | import torch
from torch import nn
from pytti.Image.differentiable_image import DifferentiableImage
class EMAImage(DifferentiableImage):
"""
Base class for differentiable images with Exponential Moving Average filtering
Based on code by Katherine Crowson
"""
def __init__(self, width, height, tensor, decay):
super().__init__(width, height)
self.tensor = nn.Parameter(tensor)
self.register_buffer("biased", torch.zeros_like(tensor))
self.register_buffer("average", torch.zeros_like(tensor))
self.decay = decay
self.register_buffer("accum", torch.tensor(1.0))
self.update()
@torch.no_grad()
def update(self):
if not self.training:
raise RuntimeError("update() should only be called during training")
self.accum.mul_(self.decay)
self.biased.mul_(self.decay)
self.biased.add_((1 - self.decay) * self.tensor)
self.average.copy_(self.biased)
self.average.div_(1 - self.accum)
@torch.no_grad()
def reset(self):
if not self.training:
raise RuntimeError("reset() should only be called during training")
self.biased.set_(torch.zeros_like(self.biased))
self.average.set_(torch.zeros_like(self.average))
self.accum.set_(torch.ones_like(self.accum))
self.update()
def decode_training_tensor(self):
return self.decode(self.tensor)
def decode_tensor(self):
return self.decode(self.average)
def decode(self, tensor):
raise NotImplementedError
| [
"torch.no_grad",
"torch.nn.Parameter",
"torch.tensor",
"torch.ones_like",
"torch.zeros_like"
] | 1.10.1 | wizardhead/pytti-core | 6030f6154ad7d17b93cf76e2d42905d4231a0abd |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from models.ResNet50Encoder import Bottleneck
class DeepLabClassificationHead(nn.Module):
def __init__(self, num_classes):
super().__init__()
self.aspp = ASPP(2048, 256)
self.low_level_feature_reducer = nn.Sequential(
nn.Conv2d(256, 48, 1),
nn.BatchNorm2d(48, momentum=0.0003),
nn.ReLU(),
)
self.decoder = nn.Sequential(
nn.Conv2d(256 + 48, 256, 3, padding=1),
nn.BatchNorm2d(256, momentum=0.0003),
nn.ReLU(),
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256, momentum=0.0003),
nn.ReLU(),
nn.Conv2d(256, num_classes, 3, padding=1),
)
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(7*7*256, num_classes),
)
def forward(self, x):
# l2_size = tuple(x["block1"].shape[-2:])
# label_size = tuple(x["img"].shape[-2:])
x_backbone = x["block4"].float()
x_aspp = self.aspp(x_backbone)
# x_aspp = nn.Upsample(l2_size, mode='bilinear', align_corners=True)(x_aspp)
x = self.classifier(x_aspp)
# x = torch.cat((self.low_level_feature_reducer(x["block1"]), x_aspp), dim=1)
# x = self.decoder(x)
# x = nn.Upsample(label_size, mode='bilinear', align_corners=True)(x)
return x
def eval(self):
# self.block4.eval()
self.aspp.eval()
self.decoder.eval()
return self
def train(self, mode=True):
# self.block4.eval()
self.aspp.train(mode)
self.decoder.train(mode)
return self
def required_encoding(self):
return ["block4"]
class ASPP(nn.Module):
def __init__(self, C, depth, conv=nn.Conv2d, norm=nn.BatchNorm2d, momentum=0.0003, mult=1):
super(ASPP, self).__init__()
self._C = C
self._depth = depth
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.relu = nn.ReLU(inplace=True)
self.aspp1 = conv(C, depth, kernel_size=1, stride=1, bias=False)
self.aspp2 = conv(C, depth, kernel_size=3, stride=1,
dilation=int(6*mult), padding=int(6*mult),
bias=False)
self.aspp3 = conv(C, depth, kernel_size=3, stride=1,
dilation=int(12*mult), padding=int(12*mult),
bias=False)
self.aspp4 = conv(C, depth, kernel_size=3, stride=1,
dilation=int(18*mult), padding=int(18*mult),
bias=False)
self.aspp5 = conv(C, depth, kernel_size=1, stride=1, bias=False)
        self.aspp1_bn = norm(depth, momentum=momentum)
        self.aspp2_bn = norm(depth, momentum=momentum)
        self.aspp3_bn = norm(depth, momentum=momentum)
        self.aspp4_bn = norm(depth, momentum=momentum)
        self.aspp5_bn = norm(depth, momentum=momentum)
self.conv2 = conv(depth * 5, depth, kernel_size=1, stride=1,
bias=False)
        self.bn2 = norm(depth, momentum=momentum)
def forward(self, x):
x1 = self.aspp1(x)
x1 = self.aspp1_bn(x1)
x1 = self.relu(x1)
x2 = self.aspp2(x)
x2 = self.aspp2_bn(x2)
x2 = self.relu(x2)
x3 = self.aspp3(x)
x3 = self.aspp3_bn(x3)
x3 = self.relu(x3)
x4 = self.aspp4(x)
x4 = self.aspp4_bn(x4)
x4 = self.relu(x4)
x5 = self.global_pooling(x)
x5 = self.aspp5(x5)
x5 = self.aspp5_bn(x5)
x5 = self.relu(x5)
x5 = nn.Upsample((x.shape[2], x.shape[3]), mode='bilinear',
align_corners=True)(x5)
x = torch.cat((x1, x2, x3, x4, x5), 1)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
return x
class CascadeBlock(nn.Module):
def __init__(self, block, planes, inplanes, blocks, stride=1, dilation=1):
super(CascadeBlock, self).__init__()
self.conv = nn.Conv2d
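        # NOTE: at this point self.conv is the nn.Conv2d *class*, used as the
        # `conv` factory for the blocks below; it is rebound to an
        # nn.Sequential only at the end, after the right-hand side (which
        # still sees the factory) has been evaluated.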
# downsample = None
# if stride != 1 or dilation != 1 or inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# self.conv(inplanes, planes * block.expansion,
# kernel_size=1, stride=stride, dilation=max(1, dilation // 2), bias=False),
# self._make_norm(planes * block.expansion),
# )
#
# layers = []
# self.upsample_layer = block(inplanes, planes, stride, downsample, dilation=max(1, dilation // 2),
# conv=self.conv, norm=self._make_norm)
# inplanes = planes * block.expansion
# for i in range(1, blocks):
# layers.append(block(inplanes, planes, dilation=dilation, conv=self.conv, norm=self._make_norm))
# self.conv = nn.Sequential(*layers)
downsample = nn.Sequential(
self.conv(inplanes, planes*block.expansion, kernel_size=1, stride=stride,
dilation=dilation, bias=False),
self._make_norm(planes * block.expansion),
)
self.upsample_layer = block(inplanes, planes, stride, downsample, dilation=dilation,
conv=self.conv, norm=self._make_norm)
inplanes = planes * block.expansion
self.conv = nn.Sequential(
block(inplanes, planes, dilation=dilation*2, conv=self.conv, norm=self._make_norm),
block(inplanes, planes, dilation=dilation, conv=self.conv, norm=self._make_norm)
)
def forward(self, x, backbone=None):
out = self.upsample_layer(x)
if backbone is not None:
out = out + backbone
out = self.conv(out)
return out
def _make_norm(self, planes, momentum=0.05):
return nn.BatchNorm2d(planes, momentum=momentum)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.Flatten"
] | 1.7.0 | allenai/ViRB | fbe1c42571ce0994b1e41bc4bdf88cf9658ae48b |
1.4 | import geoopt
import torch
import pytest
@pytest.mark.parametrize(
"line_search_params",
[dict(), dict(c1=1e-3, c2=0.99), dict(amax=1, amin=1e-12), dict(stabilize=10)],
)
@pytest.mark.parametrize("batch_size", [None, 1, 16])
@pytest.mark.parametrize("line_search_method", ["armijo", "wolfe"])
@pytest.mark.parametrize("cg_method", ["steepest", "fr", "pr"])
def test_rwolfe_stiefel(line_search_params, batch_size, line_search_method, cg_method):
# Use line search to solve orthogonal procrustes
stiefel = geoopt.manifolds.Stiefel()
torch.manual_seed(42)
(n, m) = (10, 20)
A = torch.randn(n, m, dtype=torch.float64)
Q = stiefel.random((n, n), dtype=torch.float64)
B = Q @ A
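    # With B = Q @ A and A full row rank, this is an orthogonal Procrustes
    # problem: min ||X @ A - B||^2 over orthogonal X has optimum 0 at X = Q,
    # so the loss threshold below certifies convergence of the line search.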
with torch.no_grad():
if batch_size is None:
X = stiefel.random((n, n), dtype=torch.float64)
else:
X = stiefel.random((batch_size, n, n), dtype=torch.float64)
X.requires_grad = True
def closure():
optim.zero_grad()
loss = (X @ A - B).norm() ** 2
loss.backward()
return loss.item()
optim = geoopt.optim.RiemannianLineSearch(
[X],
line_search_method=line_search_method,
line_search_params=line_search_params,
cg_method=cg_method,
)
loss = None
for i in range(1000):
loss = optim.step(closure)
# Stop when no new step can be found, or goal reached
if optim.last_step_size is None or loss < 1e-4:
break
assert loss < 1e-4
| [
"torch.manual_seed",
"torch.no_grad",
"torch.randn"
] | 1.4.0 | tao-harald/geoopt | d6fea4d44f146877c5a430e9fd6ba0fb7e821b92 |
1.9 | """Copyright (c) 2018, Haavard Kvamme
2021, Schrod Stefan"""
import numpy as np
from torch import nn
class DenseVanillaBlock(nn.Module):
def __init__(self, in_features, out_features, bias=True, batch_norm=True, dropout=0., activation=nn.ReLU,
w_init_=lambda w: nn.init.kaiming_normal_(w, nonlinearity='relu')):
super().__init__()
self.linear = nn.Linear(in_features, out_features, bias)
if w_init_:
w_init_(self.linear.weight.data)
self.activation = activation()
self.batch_norm = nn.BatchNorm1d(out_features) if batch_norm else None
self.dropout = nn.Dropout(dropout) if dropout else None
def forward(self, input):
input = self.activation(self.linear(input))
if self.batch_norm:
input = self.batch_norm(input)
if self.dropout:
input = self.dropout(input)
return input
class MLPVanilla(nn.Module):
def __init__(self, in_features, num_nodes, out_features, batch_norm=True, dropout=None, activation=nn.ReLU,
output_activation=None, output_bias=True,
w_init_=lambda w: nn.init.kaiming_normal_(w, nonlinearity='relu')):
super().__init__()
num_nodes=np.append(in_features, num_nodes)
if not hasattr(dropout, '__iter__'):
dropout = [dropout for _ in range(len(num_nodes) - 1)]
net = []
for n_in, n_out, p in zip(num_nodes[:-1], num_nodes[1:], dropout):
net.append(DenseVanillaBlock(n_in, n_out, True, batch_norm, p, activation, w_init_))
net.append(nn.Linear(num_nodes[-1], out_features, output_bias))
if output_activation:
net.append(output_activation)
self.net = nn.Sequential(*net)
def forward(self, input):
return self.net(input) | [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.Sequential",
"torch.nn.init.kaiming_normal_",
"torch.nn.BatchNorm1d"
] | 1.9.1 | sschrod/BITES | 64c76feebd8b0869e74938f79d93b1946dcf88b5 |
1.7 | import os
import numpy as np
import pandas as pd
import torch
import torchvision
import tqdm
from PIL import Image
from matplotlib import pyplot as plt
from torch.utils.data import Dataset
from torchvision import transforms
class Flowers(Dataset):
def __init__(self, root, train=True, download=False, transform=None, rand_number=0, imb_factor=1, imb_type='exp'):
np.random.seed(rand_number)
root = os.path.join(root, 'flowers')
if train:
excel_file = os.path.join(root, 'train.txt')
else:
excel_file = os.path.join(root, 'valid.txt')
self.samples = pd.read_csv(excel_file, delimiter=' ')
self.root_dir = root
self.transform = transform
self.targets = self.samples['TARGET'].array
self.classes = np.unique(self.targets)
self.cls_num = len(self.classes)
self.samples = np.array(self.samples)
self.targets = np.array(self.targets, dtype=np.int64)
num_in_class = []
for class_idx in np.unique(self.targets):
num_in_class.append(len(np.where(self.targets == class_idx)[0]))
self.num_in_class = num_in_class
if train:
img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
self.gen_imbalanced_data(img_num_list)
def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
img_max = len(self.samples) / cls_num
img_num_per_cls = []
if imb_type == 'exp':
for cls_idx in range(cls_num):
num = img_max * (imb_factor ** (cls_idx / (cls_num - 1.0)))
img_num_per_cls.append(int(num))
elif imb_type == 'step':
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max))
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max * imb_factor))
else:
img_num_per_cls.extend([int(img_max)] * cls_num)
return img_num_per_cls
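    # Worked example for imb_type='exp': with img_max=100, cls_num=5 and
    # imb_factor=0.1, class c keeps int(100 * 0.1 ** (c / 4)) samples,
    # i.e. [100, 56, 31, 17, 10] -- exponential decay from head to tail class.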
def gen_imbalanced_data(self, img_num_per_cls):
new_data = []
new_targets = []
classes = np.unique(self.targets)
# np.random.shuffle(classes)
self.num_per_cls_dict = dict()
for the_class, the_img_num in zip(classes, img_num_per_cls):
self.num_per_cls_dict[the_class] = the_img_num
idx = np.where(self.targets == the_class)[0]
np.random.shuffle(idx)
selec_idx = idx[:the_img_num]
self.num_per_cls_dict[the_class] = len(selec_idx)
new_data.append(self.samples[selec_idx])
new_targets.extend([the_class, ] * the_img_num)
new_data = np.vstack(new_data)
self.samples = new_data
self.targets = new_targets
self.labels = new_targets
def get_cls_num_list(self):
cls_num_list = []
for i in range(self.cls_num):
cls_num_list.append(self.num_per_cls_dict[i])
return cls_num_list
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
img_path = os.path.join(self.root_dir, self.samples[index, 0])
y_label = torch.tensor(self.samples[index, 1]).long()
image = Image.open(img_path)
if self.transform:
if isinstance(self.transform, list):
sample1 = self.transform[0](image)
sample2 = self.transform[1](image)
image = [sample1, sample2]
else:
image = self.transform(image)
return image, y_label
if __name__ == '__main__':
train_transform = transforms.Compose([
transforms.ToTensor(),
])
# train_dataset = Flowers(root='/data', train=True, download=False, transform=train_transform, imb_factor=1)
# train_loader = torch.utils.data.DataLoader(
# train_dataset, batch_size=1, shuffle=False,
# num_workers=0, persistent_workers=False, pin_memory=True)
# for i in range(len(train_dataset.get_cls_num_list())):
# images = torch.empty(train_dataset.get_cls_num_list()[0], 3, 224, 224)
# idx = 0
# for image, y in train_loader:
# if y == i:
# images[idx] = image
# idx += 1
#
# plt.figure()
# plt.title(f'{i}')
# plt.clf()
# plt.imshow(torchvision.utils.make_grid(images, normalize=True).permute(1, 2, 0))
# plt.savefig(f'Flowers_{i}.png')
train_dataset = Flowers('/data', train=True, download=False, transform=train_transform, imb_factor=0.1)
test_dataset = Flowers('/data', train=False, download=False, transform=train_transform)
# train_loader = torch.utils.data.DataLoader(
# train_dataset, batch_size=128, shuffle=False,
# num_workers=0, persistent_workers=False, pin_memory=True)
# for images, y in train_loader:
# print(y)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=1, shuffle=False,
num_workers=0, persistent_workers=False, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=1, shuffle=False,
num_workers=0, persistent_workers=False, pin_memory=True)
# classes_freq = np.zeros(102)
# for x, y in tqdm.tqdm(train_loader):
# classes_freq[np.array(y)] += 1
# print(classes_freq)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=1, shuffle=False,
num_workers=0, persistent_workers=False, pin_memory=True)
# classes_freq = np.zeros(102)
# for x, y in tqdm.tqdm(test_loader):
# classes_freq[np.array(y)] += 1
# print(classes_freq)
# print(train_dataset.get_cls_num_list())
mean = 0.
std = 0.
classes_freq = np.zeros(102)
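    # Per-channel statistics: view each batch as (N, C, H*W), take per-image
    # channel means/stds over the flattened spatial axis, accumulate, and
    # divide by the dataset size at the end. Note this averages per-image
    # stds rather than computing a true global std -- a common approximation
    # for normalization constants.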
for images, y in train_loader:
batch_samples = images.size(0) # batch size (the last batch can have smaller size!)
images = images.view(batch_samples, images.size(1), -1)
mean += images.mean(2).sum(0)
std += images.std(2).sum(0)
classes_freq[np.array(y)] += 1
mean /= len(train_loader.dataset)
std /= len(train_loader.dataset)
print(classes_freq)
print(mean, std)
# classes_freq = np.zeros(102)
# for images, y in test_loader:
# classes_freq[np.array(y)] += 1
# print(classes_freq) | [
"torch.utils.data.DataLoader",
"torch.tensor"
] | 1.7 | caisarl76/TADE-AgnosticLT | 8a23f6609622dd30feb22101067e644666810400 |
1.9 | # Copyright 2021 Dakewe Biotech Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# ============================================================================
# File description: Realize the model training function.
# ============================================================================
from torch.utils.data import DataLoader
from config import *
from dataset import BaseDataset
def train_generator(train_dataloader, epoch) -> None:
"""Training the generator network.
Args:
train_dataloader (torch.utils.data.DataLoader): The loader of the training dataset.
epoch (int): number of training cycles.
"""
# Calculate how many iterations there are under epoch.
batches = len(train_dataloader)
# Set generator network in training mode.
generator.train()
for index, (lr, hr) in enumerate(train_dataloader):
# Copy the data to the specified device.
lr = lr.to(device)
hr = hr.to(device)
# Initialize the gradient of the generator model.
generator.zero_grad()
# Generate super-resolution images.
sr = generator(lr)
# Calculate the difference between the super-resolution image and the high-resolution image at the pixel level.
pixel_loss = pixel_criterion(sr, hr)
# Update the weights of the generator model.
pixel_loss.backward()
p_optimizer.step()
# Write the loss during training into Tensorboard.
iters = index + epoch * batches + 1
writer.add_scalar("Train_Generator/Loss", pixel_loss.item(), iters)
# Print the loss function every ten iterations and the last iteration in this epoch.
if (index + 1) % 10 == 0 or (index + 1) == batches:
print(f"Train Epoch[{epoch + 1:04d}/{p_epochs:04d}]({index + 1:05d}/{batches:05d}) "
f"Loss: {pixel_loss.item():.6f}.")
def train_adversarial(train_dataloader, epoch) -> None:
"""Training the adversarial network.
Args:
train_dataloader (torch.utils.data.DataLoader): The loader of the training dataset.
epoch (int): number of training cycles.
"""
# Calculate how many iterations there are under Epoch.
batches = len(train_dataloader)
# Set adversarial network in training mode.
discriminator.train()
generator.train()
for index, (lr, hr) in enumerate(train_dataloader):
# Copy the data to the specified device.
lr = lr.to(device)
hr = hr.to(device)
label_size = lr.size(0)
# Create label. Set the real sample label to 1, and the false sample label to 0.
real_label = torch.full([label_size, 1], 1.0, dtype=lr.dtype, device=device)
fake_label = torch.full([label_size, 1], 0.0, dtype=lr.dtype, device=device)
# Initialize the gradient of the discriminator model.
discriminator.zero_grad()
# Generate super-resolution images.
sr = generator(lr)
# Calculate the loss of the discriminator model on the high-resolution image.
hr_output = discriminator(hr)
sr_output = discriminator(sr.detach())
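        # Relativistic average discriminator (ESRGAN): rather than D(x), the
        # model scores D_Ra(x_r, x_f) = sigma(C(x_r) - E[C(x_f)]) -- how much
        # more realistic the real batch looks than the average fake -- which
        # the adversarial criterion (BCE-with-logits in the reference ESRGAN,
        # assumed here) applied to (hr_output - mean(sr_output)) realizes.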
d_loss_hr = adversarial_criterion(hr_output - torch.mean(sr_output), real_label)
d_loss_hr.backward()
d_hr = hr_output.mean().item()
# Calculate the loss of the discriminator model on the super-resolution image.
hr_output = discriminator(hr)
sr_output = discriminator(sr.detach())
d_loss_sr = adversarial_criterion(sr_output - torch.mean(hr_output), fake_label)
d_loss_sr.backward()
d_sr1 = sr_output.mean().item()
# Update the weights of the discriminator model.
d_loss = d_loss_hr + d_loss_sr
d_optimizer.step()
# Initialize the gradient of the generator model.
generator.zero_grad()
# Generate super-resolution images.
sr = generator(lr)
# Calculate the loss of the discriminator model on the super-resolution image.
hr_output = discriminator(hr.detach())
sr_output = discriminator(sr)
# Perceptual loss=0.01 * pixel loss + 1.0 * content loss + 0.005 * adversarial loss.
pixel_loss = pixel_weight * pixel_criterion(sr, hr.detach())
content_loss = content_weight * content_criterion(sr, hr.detach())
adversarial_loss = adversarial_weight * adversarial_criterion(sr_output - torch.mean(hr_output), real_label)
# Update the weights of the generator model.
g_loss = pixel_loss + content_loss + adversarial_loss
g_loss.backward()
g_optimizer.step()
d_sr2 = sr_output.mean().item()
# Write the loss during training into Tensorboard.
iters = index + epoch * batches + 1
writer.add_scalar("Train_Adversarial/D_Loss", d_loss.item(), iters)
writer.add_scalar("Train_Adversarial/G_Loss", g_loss.item(), iters)
writer.add_scalar("Train_Adversarial/D_HR", d_hr, iters)
writer.add_scalar("Train_Adversarial/D_SR1", d_sr1, iters)
writer.add_scalar("Train_Adversarial/D_SR2", d_sr2, iters)
# Print the loss function every ten iterations and the last iteration in this epoch.
if (index + 1) % 10 == 0 or (index + 1) == batches:
print(f"Train stage: adversarial "
f"Epoch[{epoch + 1:04d}/{epochs:04d}]({index + 1:05d}/{batches:05d}) "
f"D Loss: {d_loss.item():.6f} G Loss: {g_loss.item():.6f} "
f"D(HR): {d_hr:.6f} D(SR1)/D(SR2): {d_sr1:.6f}/{d_sr2:.6f}.")
def validate(valid_dataloader, epoch, stage) -> float:
"""Verify the generator model.
Args:
valid_dataloader (torch.utils.data.DataLoader): loader for validating dataset.
epoch (int): number of training cycles.
stage (str): In which stage to verify, one is `generator`, the other is `adversarial`.
Returns:
PSNR value(float).
"""
# Calculate how many iterations there are under epoch.
batches = len(valid_dataloader)
# Set generator model in verification mode.
generator.eval()
# Initialize the evaluation index.
total_psnr_value = 0.0
with torch.no_grad():
for index, (lr, hr) in enumerate(valid_dataloader):
# Copy the data to the specified device.
lr = lr.to(device)
hr = hr.to(device)
# Generate super-resolution images.
sr = generator(lr)
# Calculate the PSNR indicator.
mse_loss = psnr_criterion(sr, hr)
psnr_value = 10 * torch.log10(1 / mse_loss).item()
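            # PSNR = 10 * log10(MAX**2 / MSE); MAX = 1 here because images
            # are assumed scaled to [0, 1], so the expression reduces to
            # 10 * log10(1 / mse).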
total_psnr_value += psnr_value
avg_psnr_value = total_psnr_value / batches
# Write the value of each round of verification indicators into Tensorboard.
if stage == "generator":
writer.add_scalar("Val_Generator/PSNR", avg_psnr_value, epoch + 1)
elif stage == "adversarial":
writer.add_scalar("Val_Adversarial/PSNR", avg_psnr_value, epoch + 1)
# Print evaluation indicators.
print(f"Valid stage: {stage} Epoch[{epoch + 1:04d}] avg PSNR: {avg_psnr_value:.2f}.\n")
return avg_psnr_value
def main() -> None:
# Create a super-resolution experiment result folder.
if not os.path.exists(exp_dir1):
os.makedirs(exp_dir1)
if not os.path.exists(exp_dir2):
os.makedirs(exp_dir2)
# Load the dataset.
train_dataset = BaseDataset(train_dir, image_size, upscale_factor, "train")
valid_dataset = BaseDataset(valid_dir, image_size, upscale_factor, "valid")
train_dataloader = DataLoader(train_dataset, batch_size, True, pin_memory=True)
valid_dataloader = DataLoader(valid_dataset, batch_size, False, pin_memory=True)
# Check whether the training progress of the last abnormal end is restored, for example, the power is
# cut off in the middle of the training.
if resume:
print("Resuming...")
if resume_p_weight != "":
generator.load_state_dict(torch.load(resume_p_weight))
else:
discriminator.load_state_dict(torch.load(resume_d_weight))
generator.load_state_dict(torch.load(resume_g_weight))
# Initialize the evaluation indicators for the training stage of the generator model.
best_psnr_value = 0.0
# Train the generative network stage.
for epoch in range(start_p_epoch, p_epochs):
# Train each epoch for generator network.
train_generator(train_dataloader, epoch)
# Verify each epoch for generator network.
psnr_value = validate(valid_dataloader, epoch, "generator")
# Determine whether the performance of the generator network under epoch is the best.
is_best = psnr_value > best_psnr_value
best_psnr_value = max(psnr_value, best_psnr_value)
# Save the weight of the generator network under epoch. If the performance of the generator network under epoch
# is best, save a file ending with `-best.pth` in the `results` directory.
torch.save(generator.state_dict(), os.path.join(exp_dir1, f"p_epoch{epoch + 1}.pth"))
if is_best:
torch.save(generator.state_dict(), os.path.join(exp_dir2, "p-best.pth"))
# Adjust the learning rate of the generator model.
p_scheduler.step()
# Save the weight of the last generator network under epoch in this stage.
torch.save(generator.state_dict(), os.path.join(exp_dir2, "p-last.pth"))
# Initialize the evaluation index of the adversarial network training phase.
best_psnr_value = 0.0
# Load the model weights with the best indicators in the previous round of training.
generator.load_state_dict(torch.load(os.path.join(exp_dir2, "p-best.pth")))
# Training the adversarial network stage.
for epoch in range(start_epoch, epochs):
# Train each epoch for adversarial network.
train_adversarial(train_dataloader, epoch)
# Verify each epoch for adversarial network.
psnr_value = validate(valid_dataloader, epoch, "adversarial")
# Determine whether the performance of the adversarial network under epoch is the best.
is_best = psnr_value > best_psnr_value
best_psnr_value = max(psnr_value, best_psnr_value)
# Save the weight of the adversarial network under epoch. If the performance of the adversarial network
# under epoch is the best, it will save two additional files ending with `-best.pth` in the `results` directory.
torch.save(discriminator.state_dict(), os.path.join(exp_dir1, f"d_epoch{epoch + 1}.pth"))
torch.save(generator.state_dict(), os.path.join(exp_dir1, f"g_epoch{epoch + 1}.pth"))
if is_best:
torch.save(discriminator.state_dict(), os.path.join(exp_dir2, "d-best.pth"))
torch.save(generator.state_dict(), os.path.join(exp_dir2, "g-best.pth"))
# Adjust the learning rate of the adversarial model.
d_scheduler.step()
g_scheduler.step()
# Save the weight of the adversarial model under the last Epoch in this stage.
torch.save(discriminator.state_dict(), os.path.join(exp_dir2, "d-last.pth"))
torch.save(generator.state_dict(), os.path.join(exp_dir2, "g-last.pth"))
if __name__ == "__main__":
main()
| [
"torch.utils.data.DataLoader"
] | 1.9.0 | peaceofmind123/esrgan_modified | 33a0f2478185eff90a7233b968b7901f7cf3a04a |
1.5 | """Extra generative modeling benchmark datasets not provided by PyTorch."""
import os
import urllib
import PIL
import numpy as np
import torch
from torch.utils import data
from torchvision.datasets import utils
from torchvision.datasets import vision
def _read_image_file(path, shape):
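    # Each .amat row holds 784 space-separated {0, 1} pixel values; scaling
    # by 255 maps them to {0, 255} so they round-trip through PIL mode 'L',
    # and view(-1, *shape) restores the image geometry (28x28 for MNIST).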
with open(path, 'rb') as f:
images = np.loadtxt(f, delimiter=" ", dtype=np.uint8) * 255
return torch.from_numpy(images).view(-1, *shape)
class BinarizedMNIST(vision.VisionDataset):
"""A specific binarization of the MNIST images.
Originally used in Salakhutdinov & Murray (2008). This dataset is used to
evaluate generative models of images, so labels are not provided.
NOTE: The evaluation split is merged into the training set.
"""
_URL = ('http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/'
'binarized_mnist_')
resources = [_URL + "train.amat", _URL + "valid.amat", _URL + "test.amat"]
train_file = 'train.pt'
valid_file = 'valid.pt'
test_file = 'test.pt'
def __init__(self, root, split='train', transform=None):
"""Initializes a new BinarizedMNIST instance.
Args:
root: The directory containing the data. If the data does not exist, it
will be download to this directory.
split: Which split to use. Must be one of 'train', 'valid', or 'test'.
transform: A torchvision.transform to apply to the data.
"""
super().__init__(root, transform=transform)
assert split in ('train', 'valid', 'test')
self._raw_folder = os.path.join(self.root, 'BinarizedMNIST', 'raw')
self._folder = os.path.join(self.root, 'BinarizedMNIST')
        self.train = split == 'train'
if not self._check_exists():
self.download()
self.data = torch.load(os.path.join(self._folder, split + '.pt'))
def __getitem__(self, index):
"""Returns the tuple (img, None) with the given index."""
img = self.data[index]
        # Return PIL images to be consistent with other datasets.
img = PIL.Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
return img
def __len__(self):
return len(self.data)
def _check_exists(self):
return (os.path.exists(os.path.join(self._folder, self.train_file)) and
os.path.exists(os.path.join(self._folder, self.test_file)))
def download(self):
"""Download the MNIST data if it doesn't exist in the root folder."""
if self._check_exists():
return
# Download files.
os.makedirs(self._folder, exist_ok=True)
os.makedirs(self._raw_folder, exist_ok=True)
for url in self.resources:
filename = url.rpartition('/')[-1]
utils.download_url(url, root=self._raw_folder, filename=filename)
# Process and save.
shape = 28, 28
train_set = _read_image_file(
os.path.join(self._raw_folder, 'binarized_mnist_train.amat'), shape)
with open(os.path.join(self._folder, self.train_file), 'wb') as f:
torch.save(train_set, f)
valid_set = _read_image_file(
os.path.join(self._raw_folder, 'binarized_mnist_valid.amat'), shape)
with open(os.path.join(self._folder, self.valid_file), 'wb') as f:
torch.save(valid_set, f)
test_set = _read_image_file(
os.path.join(self._raw_folder, 'binarized_mnist_test.amat'), shape)
with open(os.path.join(self._folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
def extra_repr(self):
return "Split: {}".format("Train" if self.train else "Test")
| [
"torch.save",
"torch.from_numpy"
] | 1.5.1 | eyalbetzalel/pytorch-generative-1 | d491fa0a8ab37ad3b8aa1092b24ff7d863c9fbd8 |
1.0 | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import json
import os
import sys
import torch
#=====START: ADDED FOR DISTRIBUTED======
from distributed import init_distributed, apply_gradient_allreduce, reduce_tensor
from torch.utils.data.distributed import DistributedSampler
#=====END: ADDED FOR DISTRIBUTED======
from torch.utils.data import DataLoader
from glow import WaveGlow, WaveGlowLoss
# from mel2samp import Mel2Samp
from data_utils import TextMelLoader, TextMelCollate
from hparams import create_hparams
from utils import to_gpu
from logger import waveglowLogger
#os.environ["CUDA_VISIBLE_DEVICES"] = "3"
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
iteration = checkpoint_dict['iteration']
optimizer.load_state_dict(checkpoint_dict['optimizer'])
model_for_loading = checkpoint_dict['model']
model.load_state_dict(model_for_loading.state_dict())
print("Loaded checkpoint '{}' (iteration {})" .format(
checkpoint_path, iteration))
return model, optimizer, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
model_for_saving = WaveGlow(hparams).cuda()
model_for_saving.load_state_dict(model.state_dict())
torch.save({'model': model_for_saving,
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def parse_batch(batch):
text_padded, input_lengths, mel_padded, gate_padded, output_lengths = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
output_lengths = to_gpu(output_lengths).long()
return text_padded, input_lengths, mel_padded, max_len, output_lengths
def prepare_directories_and_logger(output_directory, log_directory):
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = waveglowLogger(os.path.join(output_directory, log_directory))
return logger
def load_pretrained_taco(taco2_path, hparams):
    assert os.path.isfile(taco2_path)
    print("Loading pretrained tacotron2 model '{}'".format(taco2_path))
    # NOTE: Tacotron2 is not imported in this file; it is assumed to come from
    # the accompanying tacotron2 codebase (the same place data_utils/hparams do).
    checkpoint_dict = torch.load(taco2_path, map_location='cpu')
    Taco2 = Tacotron2(hparams).cuda()
    Taco2.load_state_dict(checkpoint_dict['state_dict'])
    print("Loaded pretrained tacotron2 model '{}'".format(taco2_path))
return Taco2
def train(num_gpus, rank, group_name, output_directory, log_directory, checkpoint_path, hparams):
torch.manual_seed(hparams.seed)
torch.cuda.manual_seed(hparams.seed)
#=====START: ADDED FOR DISTRIBUTED======
    if num_gpus > 1:
        # NOTE: dist_config is not defined in this file; it was originally read
        # from the JSON config (see the commented-out '--config' option below).
        init_distributed(rank, num_gpus, group_name, **dist_config)
#=====END: ADDED FOR DISTRIBUTED======
criterion = WaveGlowLoss(hparams.sigma)
model = WaveGlow(hparams).cuda()
#=====START: ADDED FOR DISTRIBUTED======
if num_gpus > 1:
model = apply_gradient_allreduce(model)
#=====END: ADDED FOR DISTRIBUTED======
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
# Load checkpoint if one exists
iteration = 0
if checkpoint_path:
model, optimizer, iteration = load_checkpoint(checkpoint_path, model, optimizer)
iteration += 1 # next iteration is iteration + 1
trainset = TextMelLoader(hparams.training_files, hparams)
collate_fn = TextMelCollate()
# =====START: ADDED FOR DISTRIBUTED======
train_sampler = DistributedSampler(trainset) if num_gpus > 1 else None
# =====END: ADDED FOR DISTRIBUTED======
batch_size = hparams.batch_size
train_loader = DataLoader(trainset, num_workers=0, shuffle=False,
sampler=train_sampler,
batch_size=batch_size,
pin_memory=False,
drop_last=True, collate_fn=collate_fn)
    # Get shared output_directory ready
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
print("output directory", output_directory)
if hparams.with_tensorboard and rank == 0:
logger = prepare_directories_and_logger(output_directory, log_directory)
model.train()
epoch_offset = max(0, int(iteration / len(train_loader)))
print ("Total Epochs: {}".format(hparams.epochs))
print ("Batch Size: {}".format(hparams.batch_size))
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
model.zero_grad()
text_padded, input_lengths, mel_padded, max_len, output_lengths = parse_batch(batch)
# mel_padded = mel_padded.transpose(1, 2)
src_pos = torch.arange(hparams.n_position)
src_pos = to_gpu(src_pos).long().unsqueeze(0)
src_pos = src_pos.expand(hparams.batch_size, -1)
z, log_s_list, log_det_w_list, enc_slf_attn, dec_enc_attn, out_mel = model(mel_padded, text_padded, src_pos)
outputs = (z, log_s_list, log_det_w_list, out_mel)
loss = criterion(outputs, mel_padded, iteration)
if num_gpus > 1:
reduced_loss = reduce_tensor(loss.data, num_gpus).item()
else:
reduced_loss = loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
print("{}:\t{:.9f}".format(iteration, reduced_loss))
if hparams.with_tensorboard and rank == 0:
logger.log_training(reduced_loss, grad_norm, learning_rate, iteration)
if (iteration % hparams.iters_per_checkpoint == 0):
if rank == 0:
logger.log_alignment(model, enc_slf_attn, dec_enc_attn, out_mel, mel_padded, iteration)
checkpoint_path = "{}/waveglow_{}".format(
output_directory, iteration)
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str, help='directory to save checkpoints')
parser.add_argument('-l', '--log_directory', type=str,
help='directory to save tensorboard logs')
    # parser.add_argument('-c', '--config', type=str,
    #                     help='JSON file for configuration')
parser.add_argument('-p', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('-r', '--rank', type=int, default=0,
help='rank of process for distributed')
parser.add_argument('-g', '--group_name', type=str, default='',
help='name of group for distributed')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
    num_gpus = 1  # hard-coded to a single GPU, so the distributed branch below never triggers
if num_gpus > 1:
if args.group_name == '':
print("WARNING: Multiple GPUs detected but no distributed group set")
print("Only running 1 GPU. Use distributed.py for multiple GPUs")
num_gpus = 1
if num_gpus == 1 and args.rank != 0:
raise Exception("Doing single GPU training on rank > 0")
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
train(num_gpus, args.rank, args.group_name, args.output_directory, args.log_directory, args.checkpoint_path, hparams)
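# Example invocation (a sketch; flag names come from the argparse setup above and
# hyperparameter names assume the accompanying hparams module):
#   python train.py -o checkpoints -l logs --hparams "batch_size=8,learning_rate=1e-4"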
| [
"torch.cuda.manual_seed",
"torch.arange",
"torch.max",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.utils.data.distributed.DistributedSampler",
"torch.load"
] | 1.0 | dodohow1011/waveglow | 53d9883a73f7f0569b25eb665788ca59368f9413 |
1.8 | import math
import random
import torch
class KMeans:
"""Test"""
def __init__(self, k: int, distance_fn=None, dim_size: int = 2, nstart: int = 2):
# TODO use nstart
assert distance_fn is not None, "please provide a distance function"
self._params = torch.empty(k, dim_size, dtype=torch.float32)
self._distance_fn = distance_fn
self._best_distance_score = math.inf
def fit(self, points: torch.Tensor):
assert (
len(points.shape) == 2
), "shape length of the points \
must be 2 but found {}".format(
len(points.shape)
)
assert isinstance(
points, torch.Tensor
), "points must be torch.tensor but found {}".format(type(points))
sample_size = points.size(0)
k = self._params.size(0)
self._params = points[random.sample(range(sample_size), k=k), :]
# self._params: torch.Tensor(k, dim_size)
latest_cluster = torch.zeros(sample_size, dtype=torch.long)
# latest_cluster: torch.Tensor(sample_size)
        while True:
# points: torch.Tensor(sample_size, dim_size)
# self._params: torch.Tensor(k, dim_size)
dists = self._distance_fn(points, self._params)
# dists: torch.Tensor(sample_size, k)
assigned_clusters = torch.argmin(dists, dim=1)
# assigned_clusters: torch.Tensor(sample_size)
if (latest_cluster == assigned_clusters).all():
# break if converged
break
for i in range(k):
self._params[i] = points[assigned_clusters == i, :].median(dim=0)[0]
latest_cluster = assigned_clusters
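# Minimal usage sketch (not part of the original module). The distance function is
# assumed to map (points: (n, d), centers: (k, d)) -> (n, k) distances; torch.cdist
# with p=1 pairs naturally with the median-based center update above.
if __name__ == "__main__":
    points = torch.cat([torch.randn(50, 2) - 2.0, torch.randn(50, 2) + 2.0])
    km = KMeans(k=2, distance_fn=lambda a, b: torch.cdist(a, b, p=1), dim_size=2)
    km.fit(points)
    print(km._params)  # two centers, roughly near (-2, -2) and (2, 2)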
| [
"torch.zeros",
"torch.empty",
"torch.argmin"
] | 1.8.1 | mdornseif/fastface | 72772db1fae4af17e829cd5479c4848fe5eb8948 |
1.9 | #!/usr/bin/env python3
import torch
from torch.fft import fft, ifft
from ..utils import broadcasting
def toeplitz(toeplitz_column, toeplitz_row):
"""
    Constructs a dense tensor representation of a Toeplitz matrix from its first column and first row.
    Args:
        - toeplitz_column (vector n) - first column of the Toeplitz matrix
        - toeplitz_row (vector n) - first row of the Toeplitz matrix
Returns:
- Matrix (n x n) - matrix representation
"""
if toeplitz_column.ndimension() != 1:
raise RuntimeError("toeplitz_column must be a vector.")
if toeplitz_row.ndimension() != 1:
raise RuntimeError("toeplitz_row must be a vector.")
if toeplitz_column[0] != toeplitz_row[0]:
raise RuntimeError(
"The first column and first row of the Toeplitz matrix should have "
"the same first otherwise the value of T[0,0] is ambiguous. "
"Got: c[0]={} and r[0]={}".format(toeplitz_column[0], toeplitz_row[0])
)
if len(toeplitz_column) != len(toeplitz_row):
raise RuntimeError("c and r should have the same length " "(Toeplitz matrices are necessarily square).")
if type(toeplitz_column) != type(toeplitz_row):
raise RuntimeError("toeplitz_column and toeplitz_row should be the same type.")
if len(toeplitz_column) == 1:
return toeplitz_column.view(1, 1)
res = torch.empty(
len(toeplitz_column), len(toeplitz_column), dtype=toeplitz_column.dtype, device=toeplitz_column.device
)
for i, val in enumerate(toeplitz_column):
for j in range(len(toeplitz_column) - i):
res[j + i, j] = val
for i, val in list(enumerate(toeplitz_row))[1:]:
for j in range(len(toeplitz_row) - i):
res[j, j + i] = val
return res
def sym_toeplitz(toeplitz_column):
"""
Constructs tensor version of symmetric toeplitz matrix from column vector
Args:
- toeplitz_column (vector n) - column of Toeplitz matrix
Returns:
- Matrix (n x n) - matrix representation
"""
return toeplitz(toeplitz_column, toeplitz_column)
def toeplitz_getitem(toeplitz_column, toeplitz_row, i, j):
"""
Gets the (i,j)th entry of a Toeplitz matrix T.
Args:
- toeplitz_column (vector n) - column of Toeplitz matrix
- toeplitz_row (vector n) - row of Toeplitz matrix
- i (scalar) - row of entry to get
- j (scalar) - column of entry to get
Returns:
- T[i,j], where T is the Toeplitz matrix specified by c and r.
"""
index = i - j
if index < 0:
return toeplitz_row[abs(index)]
else:
return toeplitz_column[index]
def sym_toeplitz_getitem(toeplitz_column, i, j):
"""
Gets the (i,j)th entry of a symmetric Toeplitz matrix T.
Args:
- toeplitz_column (vector n) - column of symmetric Toeplitz matrix
- i (scalar) - row of entry to get
- j (scalar) - column of entry to get
Returns:
- T[i,j], where T is the Toeplitz matrix specified by c and r.
"""
return toeplitz_getitem(toeplitz_column, toeplitz_column, i, j)
def toeplitz_matmul(toeplitz_column, toeplitz_row, tensor):
"""
Performs multiplication T * M where the matrix T is Toeplitz.
Args:
- toeplitz_column (vector n or b x n) - First column of the Toeplitz matrix T.
- toeplitz_row (vector n or b x n) - First row of the Toeplitz matrix T.
- tensor (matrix n x p or b x n x p) - Matrix or vector to multiply the Toeplitz matrix with.
Returns:
- tensor (n x p or b x n x p) - The result of the matrix multiply T * M.
"""
if toeplitz_column.size() != toeplitz_row.size():
raise RuntimeError("c and r should have the same length (Toeplitz matrices are necessarily square).")
toeplitz_shape = torch.Size((*toeplitz_column.shape, toeplitz_row.size(-1)))
output_shape = broadcasting._matmul_broadcast_shape(toeplitz_shape, tensor.shape)
broadcasted_t_shape = output_shape[:-1] if tensor.dim() > 1 else output_shape
if tensor.ndimension() == 1:
tensor = tensor.unsqueeze(-1)
toeplitz_column = toeplitz_column.expand(*broadcasted_t_shape)
toeplitz_row = toeplitz_row.expand(*broadcasted_t_shape)
tensor = tensor.expand(*output_shape)
if not torch.equal(toeplitz_column[..., 0], toeplitz_row[..., 0]):
raise RuntimeError(
"The first column and first row of the Toeplitz matrix should have "
"the same first element, otherwise the value of T[0,0] is ambiguous. "
"Got: c[0]={} and r[0]={}".format(toeplitz_column[0], toeplitz_row[0])
)
if type(toeplitz_column) != type(toeplitz_row) or type(toeplitz_column) != type(tensor):
raise RuntimeError("The types of all inputs to ToeplitzMV must match.")
*batch_shape, orig_size, num_rhs = tensor.size()
r_reverse = toeplitz_row[..., 1:].flip(dims=(-1,))
c_r_rev = torch.zeros(*batch_shape, orig_size + r_reverse.size(-1), dtype=tensor.dtype, device=tensor.device)
c_r_rev[..., :orig_size] = toeplitz_column
c_r_rev[..., orig_size:] = r_reverse
temp_tensor = torch.zeros(
*batch_shape, 2 * orig_size - 1, num_rhs, dtype=toeplitz_column.dtype, device=toeplitz_column.device
)
temp_tensor[..., :orig_size, :] = tensor
fft_M = fft(temp_tensor.transpose(-1, -2).contiguous())
fft_c = fft(c_r_rev).unsqueeze(-2).expand_as(fft_M)
fft_product = fft_M.mul_(fft_c)
output = ifft(fft_product).real.transpose(-1, -2)
output = output[..., :orig_size, :]
return output
def sym_toeplitz_matmul(toeplitz_column, tensor):
"""
Performs a matrix-matrix multiplication TM where the matrix T is symmetric Toeplitz.
Args:
- toeplitz_column (vector n) - First column of the symmetric Toeplitz matrix T.
- matrix (matrix n x p) - Matrix or vector to multiply the Toeplitz matrix with.
Returns:
        - tensor (n x p) - The result of the matrix multiply T * M.
"""
return toeplitz_matmul(toeplitz_column, toeplitz_column, tensor)
def sym_toeplitz_derivative_quadratic_form(left_vectors, right_vectors):
r"""
Given a left vector v1 and a right vector v2, computes the quadratic form:
v1'*(dT/dc_i)*v2
for all i, where dT/dc_i is the derivative of the Toeplitz matrix with respect to
the ith element of its first column. Note that dT/dc_i is the same for any symmetric
Toeplitz matrix T, so we do not require it as an argument.
In particular, dT/dc_i is given by:
[0 0; I_{m-i+1} 0] + [0 I_{m-i+1}; 0 0]
where I_{m-i+1} is the (m-i+1) dimensional identity matrix. In other words, dT/dc_i
for i=1..m is the matrix with ones on the ith sub- and superdiagonal.
Args:
- left_vectors (vector m or matrix s x m) - s left vectors u[j] in the quadratic form.
- right_vectors (vector m or matrix s x m) - s right vectors v[j] in the quadratic form.
Returns:
- vector m - a vector so that the ith element is the result of \sum_j(u[j]*(dT/dc_i)*v[j])
"""
if left_vectors.ndimension() == 1:
left_vectors = left_vectors.unsqueeze(1)
right_vectors = right_vectors.unsqueeze(1)
batch_shape = left_vectors.shape[:-2]
toeplitz_size = left_vectors.size(-2)
num_vectors = left_vectors.size(-1)
left_vectors = left_vectors.transpose(-1, -2).contiguous()
right_vectors = right_vectors.transpose(-1, -2).contiguous()
columns = torch.zeros_like(left_vectors)
columns[..., 0] = left_vectors[..., 0]
res = toeplitz_matmul(columns, left_vectors, right_vectors.unsqueeze(-1))
rows = left_vectors.flip(dims=(-1,))
columns[..., 0] = rows[..., 0]
res += toeplitz_matmul(columns, rows, torch.flip(right_vectors, dims=(-1,)).unsqueeze(-1))
res = res.reshape(*batch_shape, num_vectors, toeplitz_size).sum(-2)
res[..., 0] -= (left_vectors * right_vectors).view(*batch_shape, -1).sum(-1)
return res
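# Minimal self-check sketch (not part of the original module): the FFT-based
# toeplitz_matmul should agree with a dense matmul against the explicit matrix.
if __name__ == "__main__":
    c = torch.randn(5)
    r = torch.randn(5)
    r[0] = c[0]  # first column and first row must share T[0, 0]
    T = toeplitz(c, r)
    M = torch.randn(5, 3)
    assert torch.allclose(T @ M, toeplitz_matmul(c, r, M), atol=1e-4)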
| [
"torch.zeros",
"torch.fft.ifft",
"torch.fft.fft",
"torch.zeros_like",
"torch.equal",
"torch.flip"
] | 1.9 | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 |
1.9 | #!/usr/bin/env python3
import torch
from ..utils.broadcasting import _pad_with_singletons
from ..utils.getitem import _noop_index
from .block_lazy_tensor import BlockLazyTensor
class SumBatchLazyTensor(BlockLazyTensor):
"""
Represents a lazy tensor that is actually the sum of several lazy tensors blocks.
The :attr:`block_dim` attribute specifies which dimension of the base LazyTensor
specifies the blocks.
    For example (with `block_dim=-3`), a `k x n x n` tensor represents `k` `n x n` blocks (summing to an `n x n` matrix).
    A `b x k x n x n` tensor represents `k` `b x n x n` blocks (summing to a `b x n x n` batch matrix).
Args:
:attr:`base_lazy_tensor` (LazyTensor):
A `k x n x n` LazyTensor, or a `b x k x n x n` LazyTensor.
:attr:`block_dim` (int):
The dimension that specifies the blocks.
"""
def _add_batch_dim(self, other):
shape = list(other.shape)
expand_shape = list(other.shape)
shape.insert(-2, 1)
expand_shape.insert(-2, self.base_lazy_tensor.size(-3))
other = other.reshape(*shape).expand(*expand_shape)
return other
def _get_indices(self, row_index, col_index, *batch_indices):
# Create an extra index for the summed dimension
sum_index = torch.arange(0, self.base_lazy_tensor.size(-3), device=self.device)
sum_index = _pad_with_singletons(sum_index, row_index.dim(), 0)
row_index = row_index.unsqueeze(-1)
col_index = col_index.unsqueeze(-1)
batch_indices = [index.unsqueeze(-1) for index in batch_indices]
res = self.base_lazy_tensor._get_indices(row_index, col_index, *batch_indices, sum_index)
return res.sum(-1)
def _getitem(self, row_index, col_index, *batch_indices):
res = self.base_lazy_tensor._getitem(row_index, col_index, *batch_indices, _noop_index)
return self.__class__(res, **self._kwargs)
def _remove_batch_dim(self, other):
return other.sum(-3)
def _size(self):
shape = list(self.base_lazy_tensor.shape)
del shape[-3]
return torch.Size(shape)
def diag(self):
diag = self.base_lazy_tensor.diag().sum(-2)
return diag
def evaluate(self):
return self.base_lazy_tensor.evaluate().sum(dim=-3) # BlockLazyTensors always use dim3 for the block_dim
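# Conceptual sketch (plain torch, not part of the gpytorch API): for a dense base
# tensor of shape (k, n, n), this lazy tensor behaves like base.sum(-3); e.g. its
# diag() matches base.diagonal(dim1=-2, dim2=-1).sum(-2).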
| [
"torch.Size"
] | 1.9 | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 |
1.10 | import os
import torch
import colossalai
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, '{} is not divisible by {}'.format(numerator, denominator)
def set_missing_distributed_environ(key, value):
if key not in os.environ:
os.environ[str(key)] = str(value)
def init_dap(tensor_model_parallel_size_=None):
colossalai.logging.disable_existing_loggers()
    if tensor_model_parallel_size_ is None:
if 'WORLD_SIZE' in os.environ:
tensor_model_parallel_size_ = int(os.environ['WORLD_SIZE'])
else:
tensor_model_parallel_size_ = 1
if torch.distributed.is_initialized():
_logger = colossalai.logging.get_dist_logger()
_logger.error(
"use fastfold.distributed.init_dap instead of torch.distributed.init_process_group!")
exit(-1)
# set distributed environ for single device launch
set_missing_distributed_environ('WORLD_SIZE', 1)
set_missing_distributed_environ('RANK', 0)
set_missing_distributed_environ('LOCAL_RANK', 0)
set_missing_distributed_environ('MASTER_ADDR', "localhost")
set_missing_distributed_environ('MASTER_PORT', -1)
colossalai.launch_from_torch(
config={"parallel": dict(tensor=dict(size=tensor_model_parallel_size_))})
| [
"torch.distributed.is_initialized"
] | 1.10 | hpcaitech/FastFold | a65d5009279ef84c1518081344db5c02213c387a |
1.4 | import math
from typing import List, Optional, Tuple
import torch
__all__ = ["Emformer"]
def _lengths_to_padding_mask(lengths: torch.Tensor) -> torch.Tensor:
batch_size = lengths.shape[0]
max_length = int(torch.max(lengths).item())
padding_mask = torch.arange(
max_length, device=lengths.device, dtype=lengths.dtype
).expand(batch_size, max_length) >= lengths.unsqueeze(1)
return padding_mask
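# Example: _lengths_to_padding_mask(torch.tensor([2, 4])) yields
#   [[False, False,  True,  True],
#    [False, False, False, False]]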
def _gen_padding_mask(
utterance: torch.Tensor,
right_context: torch.Tensor,
summary: torch.Tensor,
lengths: torch.Tensor,
mems: torch.Tensor,
left_context_key: Optional[torch.Tensor] = None,
) -> Optional[torch.Tensor]:
T = right_context.size(0) + utterance.size(0) + summary.size(0)
B = right_context.size(1)
if B == 1:
padding_mask = None
else:
right_context_blocks_length = T - torch.max(lengths).int() - summary.size(0)
left_context_blocks_length = (
left_context_key.size(0) if left_context_key is not None else 0
)
klengths = (
lengths
+ mems.size(0)
+ right_context_blocks_length
+ left_context_blocks_length
)
padding_mask = _lengths_to_padding_mask(lengths=klengths)
return padding_mask
def _get_activation_module(activation: str) -> torch.nn.Module:
if activation == "relu":
return torch.nn.ReLU()
elif activation == "gelu":
return torch.nn.GELU()
elif activation == "silu":
return torch.nn.SiLU()
else:
raise ValueError(f"Unsupported activation {activation}")
def _get_weight_init_gains(
weight_init_scale_strategy: Optional[str], num_layers: int
) -> List[Optional[float]]:
if weight_init_scale_strategy is None:
return [None for _ in range(num_layers)]
elif weight_init_scale_strategy == "depthwise":
return [1.0 / math.sqrt(layer_idx + 1) for layer_idx in range(num_layers)]
elif weight_init_scale_strategy == "constant":
        return [1.0 / math.sqrt(2) for _ in range(num_layers)]
else:
raise ValueError(
f"Unsupported weight_init_scale_strategy value {weight_init_scale_strategy}"
)
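# Example: _get_weight_init_gains("depthwise", 3) -> [1.0, 1/sqrt(2), 1/sqrt(3)],
# i.e. deeper layers get smaller attention-weight initialization scales.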
def _gen_attention_mask_block(
col_widths: List[int], col_mask: List[bool], num_rows: int, device: torch.device
) -> torch.Tensor:
assert len(col_widths) == len(
col_mask
), "Length of col_widths must match that of col_mask"
mask_block = [
torch.ones(num_rows, col_width, device=device)
if is_ones_col
else torch.zeros(num_rows, col_width, device=device)
for col_width, is_ones_col in zip(col_widths, col_mask)
]
return torch.cat(mask_block, dim=1)
class _EmformerAttention(torch.nn.Module):
r"""Emformer layer attention module.
Args:
input_dim (int): input dimension.
num_heads (int): number of attention heads in each Emformer layer.
dropout (float, optional): dropout probability. (Default: 0.0)
weight_init_gain (float or None, optional): scale factor to apply when initializing
attention module parameters. (Default: ``None``)
tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``)
negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8)
"""
def __init__(
self,
input_dim: int,
num_heads: int,
dropout: float = 0.0,
weight_init_gain: Optional[float] = None,
tanh_on_mem: bool = False,
negative_inf: float = -1e8,
):
super().__init__()
if input_dim % num_heads != 0:
raise ValueError(
f"input_dim ({input_dim}) is not a multiple of num_heads ({num_heads})."
)
self.input_dim = input_dim
self.num_heads = num_heads
self.dropout = dropout
self.tanh_on_mem = tanh_on_mem
self.negative_inf = negative_inf
self.scaling = (self.input_dim // self.num_heads) ** -0.5
self.emb_to_key_value = torch.nn.Linear(input_dim, 2 * input_dim, bias=True)
self.emb_to_query = torch.nn.Linear(input_dim, input_dim, bias=True)
self.out_proj = torch.nn.Linear(input_dim, input_dim, bias=True)
if weight_init_gain:
torch.nn.init.xavier_uniform_(
self.emb_to_key_value.weight, gain=weight_init_gain
)
torch.nn.init.xavier_uniform_(
self.emb_to_query.weight, gain=weight_init_gain
)
def _gen_key_value(
self, input: torch.Tensor, mems: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
T, _, _ = input.shape
summary_length = mems.size(0) + 1
right_ctx_utterance_block = input[: T - summary_length]
mems_right_ctx_utterance_block = torch.cat([mems, right_ctx_utterance_block])
key, value = self.emb_to_key_value(mems_right_ctx_utterance_block).chunk(
chunks=2, dim=2
)
return key, value
def _gen_attention_probs(
self,
attention_weights: torch.Tensor,
attention_mask: torch.Tensor,
padding_mask: Optional[torch.Tensor],
) -> torch.Tensor:
attention_weights_float = attention_weights.float()
attention_weights_float = attention_weights_float.masked_fill(
attention_mask.unsqueeze(0), self.negative_inf
)
T = attention_weights.size(1)
B = attention_weights.size(0) // self.num_heads
if padding_mask is not None:
attention_weights_float = attention_weights_float.view(
B, self.num_heads, T, -1
)
attention_weights_float = attention_weights_float.masked_fill(
padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), self.negative_inf
)
attention_weights_float = attention_weights_float.view(
B * self.num_heads, T, -1
)
attention_probs = torch.nn.functional.softmax(
attention_weights_float, dim=-1
).type_as(attention_weights)
return torch.nn.functional.dropout(
attention_probs, p=float(self.dropout), training=self.training
)
def _forward_impl(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
summary: torch.Tensor,
mems: torch.Tensor,
attention_mask: torch.Tensor,
left_context_key: Optional[torch.Tensor] = None,
left_context_val: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
B = utterance.size(1)
T = right_context.size(0) + utterance.size(0) + summary.size(0)
# Compute query with [right context, utterance, summary].
query = self.emb_to_query(torch.cat([right_context, utterance, summary]))
# Compute key and value with [mems, right context, utterance].
key, value = self.emb_to_key_value(
torch.cat([mems, right_context, utterance])
).chunk(chunks=2, dim=2)
if left_context_key is not None and left_context_val is not None:
right_context_blocks_length = T - torch.max(lengths).int() - summary.size(0)
key = torch.cat(
[
key[: mems.size(0) + right_context_blocks_length],
left_context_key,
key[mems.size(0) + right_context_blocks_length:],
],
)
value = torch.cat(
[
value[: mems.size(0) + right_context_blocks_length],
left_context_val,
value[mems.size(0) + right_context_blocks_length:],
],
)
# Compute attention weights from query, key, and value.
reshaped_query, reshaped_key, reshaped_value = [
tensor.contiguous()
.view(-1, B * self.num_heads, self.input_dim // self.num_heads)
.transpose(0, 1)
for tensor in [query, key, value]
]
attention_weights = torch.bmm(
reshaped_query * self.scaling, reshaped_key.transpose(1, 2)
)
# Compute padding mask.
padding_mask = _gen_padding_mask(
utterance, right_context, summary, lengths, mems, left_context_key
)
# Compute attention probabilities.
attention_probs = self._gen_attention_probs(
attention_weights, attention_mask, padding_mask
)
# Compute attention.
attention = torch.bmm(attention_probs, reshaped_value)
assert attention.shape == (
B * self.num_heads,
T,
self.input_dim // self.num_heads,
)
attention = attention.transpose(0, 1).contiguous().view(T, B, self.input_dim)
# Apply output projection.
output_right_context_mems = self.out_proj(attention)
summary_length = summary.size(0)
output_right_context = output_right_context_mems[: T - summary_length]
output_mems = output_right_context_mems[T - summary_length:]
if self.tanh_on_mem:
output_mems = torch.tanh(output_mems)
else:
output_mems = torch.clamp(output_mems, min=-10, max=10)
return output_right_context, output_mems, key, value
def forward(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
summary: torch.Tensor,
mems: torch.Tensor,
attention_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Forward pass for training.
B: batch size;
D: feature dimension of each frame;
T: number of utterance frames;
R: number of right context frames;
S: number of summary elements;
M: number of memory elements.
Args:
utterance (torch.Tensor): utterance frames, with shape (T, B, D).
lengths (torch.Tensor): with shape (B,) and i-th element representing
number of valid frames for i-th batch element in ``utterance``.
right_context (torch.Tensor): right context frames, with shape (R, B, D).
summary (torch.Tensor): summary elements, with shape (S, B, D).
mems (torch.Tensor): memory elements, with shape (M, B, D).
attention_mask (torch.Tensor): attention mask for underlying attention module.
Returns:
torch.Tensor and torch.Tensor:
torch.Tensor
output frames corresponding to utterance and right_context, with shape (T + R, B, D).
torch.Tensor
updated memory elements, with shape (M, B, D).
"""
output, output_mems, _, _ = self._forward_impl(
utterance, lengths, right_context, summary, mems, attention_mask
)
return output, output_mems[:-1]
@torch.jit.export
def infer(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
summary: torch.Tensor,
mems: torch.Tensor,
left_context_key: torch.Tensor,
left_context_val: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Forward pass for inference.
B: batch size;
D: feature dimension of each frame;
T: number of utterance frames;
R: number of right context frames;
S: number of summary elements;
M: number of memory elements.
Args:
utterance (torch.Tensor): utterance frames, with shape (T, B, D).
lengths (torch.Tensor): with shape (B,) and i-th element representing
number of valid frames for i-th batch element in ``utterance``.
right_context (torch.Tensor): right context frames, with shape (R, B, D).
summary (torch.Tensor): summary elements, with shape (S, B, D).
mems (torch.Tensor): memory elements, with shape (M, B, D).
left_context_key (torch.Tensor): left context attention key computed from preceding invocation.
left_context_val (torch.Tensor): left context attention value computed from preceding invocation.
Returns:
torch.Tensor, torch.Tensor, torch.Tensor, and torch.Tensor:
torch.Tensor
output frames corresponding to utterance and right_context, with shape (T + R, B, D).
torch.Tensor
updated memory elements, with shape (M, B, D).
torch.Tensor
attention key computed for left context and utterance.
torch.Tensor
attention value computed for left context and utterance.
"""
query_dim = right_context.size(0) + utterance.size(0) + summary.size(0)
key_dim = (
right_context.size(0)
+ utterance.size(0)
+ mems.size(0)
+ left_context_key.size(0)
)
attention_mask = torch.zeros(query_dim, key_dim).to(
dtype=torch.bool, device=utterance.device
)
attention_mask[-1, : mems.size(0)] = True
output, output_mems, key, value = self._forward_impl(
utterance,
lengths,
right_context,
summary,
mems,
attention_mask,
left_context_key=left_context_key,
left_context_val=left_context_val,
)
return (
output,
output_mems,
key[mems.size(0) + right_context.size(0):],
value[mems.size(0) + right_context.size(0):],
)
class _EmformerLayer(torch.nn.Module):
r"""Emformer layer that constitutes Emformer.
Args:
input_dim (int): input dimension.
num_heads (int): number of attention heads.
ffn_dim: (int): hidden layer dimension of feedforward network.
dropout (float, optional): dropout probability. (Default: 0.0)
activation (str, optional): activation function to use in feedforward network.
Must be one of ("relu", "gelu", "silu"). (Default: "relu")
left_context_length (int, optional): length of left context. (Default: 0)
segment_length (int, optional): length of each input segment. (Default: 128)
max_memory_size (int, optional): maximum number of memory elements to use. (Default: 0)
weight_init_gain (float or None, optional): scale factor to apply when initializing
attention module parameters. (Default: ``None``)
tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``)
negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8)
"""
def __init__(
self,
input_dim: int,
num_heads: int,
ffn_dim: int,
dropout: float = 0.0,
activation: str = "relu",
left_context_length: int = 0,
segment_length: int = 128,
max_memory_size: int = 0,
weight_init_gain: Optional[float] = None,
tanh_on_mem: bool = False,
negative_inf: float = -1e8,
):
super().__init__()
self.attention = _EmformerAttention(
input_dim=input_dim,
num_heads=num_heads,
dropout=dropout,
weight_init_gain=weight_init_gain,
tanh_on_mem=tanh_on_mem,
negative_inf=negative_inf,
)
self.dropout = torch.nn.Dropout(dropout)
self.memory_op = torch.nn.AvgPool1d(
kernel_size=segment_length, stride=segment_length, ceil_mode=True
)
activation_module = _get_activation_module(activation)
self.pos_ff = torch.nn.Sequential(
torch.nn.LayerNorm(input_dim),
torch.nn.Linear(input_dim, ffn_dim),
activation_module,
torch.nn.Dropout(dropout),
torch.nn.Linear(ffn_dim, input_dim),
torch.nn.Dropout(dropout),
)
self.layer_norm_input = torch.nn.LayerNorm(input_dim)
self.layer_norm_output = torch.nn.LayerNorm(input_dim)
self.left_context_length = left_context_length
self.segment_length = segment_length
self.max_memory_size = max_memory_size
self.input_dim = input_dim
self.use_mem = max_memory_size > 0
def _init_state(
self, batch_size: int, device: Optional[torch.device]
) -> List[torch.Tensor]:
empty_memory = torch.zeros(
self.max_memory_size, batch_size, self.input_dim, device=device
)
left_context_key = torch.zeros(
self.left_context_length, batch_size, self.input_dim, device=device
)
left_context_val = torch.zeros(
self.left_context_length, batch_size, self.input_dim, device=device
)
past_length = torch.zeros(1, batch_size, dtype=torch.int32, device=device)
return [empty_memory, left_context_key, left_context_val, past_length]
def _unpack_state(
self, utterance: torch.Tensor, mems: torch.Tensor, state: List[torch.Tensor]
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
past_length = state[3][0][0].item()
past_left_context_length = min(self.left_context_length, past_length)
past_mem_length = min(
self.max_memory_size, math.ceil(past_length / self.segment_length)
)
pre_mems = state[0][self.max_memory_size - past_mem_length:]
lc_key = state[1][self.left_context_length - past_left_context_length:]
lc_val = state[2][self.left_context_length - past_left_context_length:]
return pre_mems, lc_key, lc_val
def _pack_state(
self,
next_k: torch.Tensor,
next_v: torch.Tensor,
update_length: int,
mems: torch.Tensor,
state: List[torch.Tensor],
) -> List[torch.Tensor]:
new_k = torch.cat([state[1], next_k])
new_v = torch.cat([state[2], next_v])
state[0] = torch.cat([state[0], mems])[-self.max_memory_size:]
state[1] = new_k[new_k.shape[0] - self.left_context_length:]
state[2] = new_v[new_v.shape[0] - self.left_context_length:]
state[3] = state[3] + update_length
return state
def _process_attention_output(
self,
rc_output: torch.Tensor,
utterance: torch.Tensor,
right_context: torch.Tensor,
) -> torch.Tensor:
result = self.dropout(rc_output) + torch.cat([right_context, utterance])
result = self.pos_ff(result) + result
result = self.layer_norm_output(result)
return result
def _apply_pre_attention_layer_norm(
self, utterance: torch.Tensor, right_context: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
layer_norm_input = self.layer_norm_input(torch.cat([right_context, utterance]))
return (
layer_norm_input[right_context.size(0):],
layer_norm_input[: right_context.size(0)],
)
def _apply_post_attention_ffn(
self, rc_output: torch.Tensor, utterance: torch.Tensor, right_context: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
rc_output = self._process_attention_output(rc_output, utterance, right_context)
return rc_output[right_context.size(0):], rc_output[: right_context.size(0)]
def _apply_attention_forward(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
mems: torch.Tensor,
attention_mask: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
if attention_mask is None:
raise ValueError(
"attention_mask must be not None when for_inference is False"
)
if self.use_mem:
summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
else:
summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device)
rc_output, next_m = self.attention(
utterance=utterance,
lengths=lengths,
right_context=right_context,
summary=summary,
mems=mems,
attention_mask=attention_mask,
)
return rc_output, next_m
def _apply_attention_infer(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
mems: torch.Tensor,
state: Optional[List[torch.Tensor]],
) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:
if state is None:
state = self._init_state(utterance.size(1), device=utterance.device)
pre_mems, lc_key, lc_val = self._unpack_state(utterance, mems, state)
if self.use_mem:
summary = self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
summary = summary[:1]
else:
summary = torch.empty(0).to(dtype=utterance.dtype, device=utterance.device)
rc_output, next_m, next_k, next_v = self.attention.infer(
utterance=utterance,
lengths=lengths,
right_context=right_context,
summary=summary,
mems=pre_mems,
left_context_key=lc_key,
left_context_val=lc_val,
)
state = self._pack_state(next_k, next_v, utterance.size(0), mems, state)
return rc_output, next_m, state
def forward(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
mems: torch.Tensor,
attention_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
r"""Forward pass for training.
B: batch size;
D: feature dimension of each frame;
T: number of utterance frames;
R: number of right context frames;
M: number of memory elements.
Args:
utterance (torch.Tensor): utterance frames, with shape (T, B, D).
lengths (torch.Tensor): with shape (B,) and i-th element representing
number of valid frames for i-th batch element in ``utterance``.
right_context (torch.Tensor): right context frames, with shape (R, B, D).
mems (torch.Tensor): memory elements, with shape (M, B, D).
attention_mask (torch.Tensor): attention mask for underlying attention module.
Returns:
torch.Tensor, torch.Tensor, and torch.Tensor:
torch.Tensor
encoded utterance frames, with shape (T, B, D).
torch.Tensor
updated right context frames, with shape (R, B, D).
torch.Tensor
updated memory elements, with shape (M, B, D).
"""
(
layer_norm_utterance,
layer_norm_right_context,
) = self._apply_pre_attention_layer_norm(utterance, right_context)
rc_output, output_mems = self._apply_attention_forward(
layer_norm_utterance,
lengths,
layer_norm_right_context,
mems,
attention_mask,
)
output_utterance, output_right_context = self._apply_post_attention_ffn(
rc_output, utterance, right_context
)
return output_utterance, output_right_context, output_mems
@torch.jit.export
def infer(
self,
utterance: torch.Tensor,
lengths: torch.Tensor,
right_context: torch.Tensor,
state: Optional[List[torch.Tensor]],
mems: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor], torch.Tensor]:
r"""Forward pass for inference.
B: batch size;
D: feature dimension of each frame;
T: number of utterance frames;
R: number of right context frames;
M: number of memory elements.
Args:
utterance (torch.Tensor): utterance frames, with shape (T, B, D).
lengths (torch.Tensor): with shape (B,) and i-th element representing
number of valid frames for i-th batch element in ``utterance``.
right_context (torch.Tensor): right context frames, with shape (R, B, D).
state (List[torch.Tensor] or None): list of tensors representing layer internal state
generated in preceding invocation of ``infer``.
mems (torch.Tensor): memory elements, with shape (M, B, D).
Returns:
torch.Tensor, torch.Tensor, List[torch.Tensor], and torch.Tensor:
torch.Tensor
encoded utterance frames, with shape (T, B, D).
torch.Tensor
updated right context frames, with shape (R, B, D).
List[torch.Tensor]
list of tensors representing layer internal state
generated in current invocation of ``infer``.
torch.Tensor
updated memory elements, with shape (M, B, D).
"""
(
layer_norm_utterance,
layer_norm_right_context,
) = self._apply_pre_attention_layer_norm(utterance, right_context)
rc_output, output_mems, output_state = self._apply_attention_infer(
layer_norm_utterance, lengths, layer_norm_right_context, mems, state
)
output_utterance, output_right_context = self._apply_post_attention_ffn(
rc_output, utterance, right_context
)
return output_utterance, output_right_context, output_state, output_mems
class Emformer(torch.nn.Module):
r"""Implements the Emformer architecture introduced in
*Emformer: Efficient Memory Transformer Based Acoustic Model for Low Latency Streaming Speech Recognition*
[:footcite:`shi2021emformer`].
Args:
input_dim (int): input dimension.
num_heads (int): number of attention heads in each Emformer layer.
ffn_dim (int): hidden layer dimension of each Emformer layer's feedforward network.
num_layers (int): number of Emformer layers to instantiate.
dropout (float, optional): dropout probability. (Default: 0.0)
activation (str, optional): activation function to use in each Emformer layer's
feedforward network. Must be one of ("relu", "gelu", "silu"). (Default: "relu")
left_context_length (int, optional): length of left context. (Default: 0)
right_context_length (int, optional): length of right context. (Default: 0)
segment_length (int, optional): length of each input segment. (Default: 128)
max_memory_size (int, optional): maximum number of memory elements to use. (Default: 0)
weight_init_scale_strategy (str, optional): per-layer weight initialization scaling
strategy. Must be one of ("depthwise", "constant", ``None``). (Default: "depthwise")
tanh_on_mem (bool, optional): if ``True``, applies tanh to memory elements. (Default: ``False``)
negative_inf (float, optional): value to use for negative infinity in attention weights. (Default: -1e8)
Examples:
>>> emformer = Emformer(512, 8, 2048, 20)
>>> input = torch.rand(128, 400, 512) # batch, num_frames, feature_dim
>>> lengths = torch.randint(1, 200, (128,)) # batch
>>> output = emformer(input, lengths)
>>> output, lengths, states = emformer.infer(input, lengths, None)
"""
def __init__(
self,
input_dim: int,
num_heads: int,
ffn_dim: int,
num_layers: int,
dropout: float = 0.0,
activation: str = "relu",
left_context_length: int = 0,
right_context_length: int = 0,
segment_length: int = 128,
max_memory_size: int = 0,
weight_init_scale_strategy: str = "depthwise",
tanh_on_mem: bool = False,
negative_inf: float = -1e8,
):
super().__init__()
self.use_mem = max_memory_size > 0
self.memory_op = torch.nn.AvgPool1d(
kernel_size=segment_length, stride=segment_length, ceil_mode=True,
)
weight_init_gains = _get_weight_init_gains(
weight_init_scale_strategy, num_layers
)
self.emformer_layers = torch.nn.ModuleList(
[
_EmformerLayer(
input_dim,
num_heads,
ffn_dim,
dropout=dropout,
activation=activation,
left_context_length=left_context_length,
segment_length=segment_length,
max_memory_size=max_memory_size,
weight_init_gain=weight_init_gains[layer_idx],
tanh_on_mem=tanh_on_mem,
negative_inf=negative_inf,
)
for layer_idx in range(num_layers)
]
)
self.left_context_length = left_context_length
self.right_context_length = right_context_length
self.segment_length = segment_length
self.max_memory_size = max_memory_size
def _gen_right_context(self, input: torch.Tensor) -> torch.Tensor:
        T, B, D = input.shape
        num_segs = math.ceil((T - self.right_context_length) / self.segment_length)
        right_context_blocks = []
for seg_idx in range(num_segs - 1):
start = (seg_idx + 1) * self.segment_length
end = start + self.right_context_length
right_context_blocks.append(input[start:end])
right_context_blocks.append(input[T - self.right_context_length:])
return torch.cat(right_context_blocks)
def _gen_attention_mask_col_widths(
self, seg_idx: int, utterance_length: int
) -> List[int]:
num_segs = math.ceil(utterance_length / self.segment_length)
rc = self.right_context_length
lc = self.left_context_length
rc_start = seg_idx * rc
rc_end = rc_start + rc
seg_start = max(seg_idx * self.segment_length - lc, 0)
seg_end = min((seg_idx + 1) * self.segment_length, utterance_length)
rc_length = self.right_context_length * num_segs
if self.use_mem:
m_start = max(seg_idx - self.max_memory_size, 0)
mem_length = num_segs - 1
col_widths = [
m_start, # before memory
seg_idx - m_start, # memory
mem_length - seg_idx, # after memory
rc_start, # before right context
rc, # right context
rc_length - rc_end, # after right context
seg_start, # before query segment
seg_end - seg_start, # query segment
utterance_length - seg_end, # after query segment
]
else:
col_widths = [
rc_start, # before right context
rc, # right context
rc_length - rc_end, # after right context
seg_start, # before query segment
seg_end - seg_start, # query segment
utterance_length - seg_end, # after query segment
]
return col_widths
def _gen_attention_mask(self, input: torch.Tensor) -> torch.Tensor:
utterance_length, batch_size, _ = input.shape
num_segs = math.ceil(utterance_length / self.segment_length)
rc_mask = []
query_mask = []
summary_mask = []
if self.use_mem:
num_cols = 9
# memory, right context, query segment
rc_q_cols_mask = [idx in [1, 4, 7] for idx in range(num_cols)]
# right context, query segment
s_cols_mask = [idx in [4, 7] for idx in range(num_cols)]
masks_to_concat = [rc_mask, query_mask, summary_mask]
else:
num_cols = 6
# right context, query segment
rc_q_cols_mask = [idx in [1, 4] for idx in range(num_cols)]
s_cols_mask = None
masks_to_concat = [rc_mask, query_mask]
for seg_idx in range(num_segs):
col_widths = self._gen_attention_mask_col_widths(seg_idx, utterance_length)
rc_mask_block = _gen_attention_mask_block(
col_widths, rc_q_cols_mask, self.right_context_length, input.device
)
rc_mask.append(rc_mask_block)
query_mask_block = _gen_attention_mask_block(
col_widths,
rc_q_cols_mask,
min(
self.segment_length,
utterance_length - seg_idx * self.segment_length,
),
input.device,
)
query_mask.append(query_mask_block)
if s_cols_mask is not None:
summary_mask_block = _gen_attention_mask_block(
col_widths, s_cols_mask, 1, input.device
)
summary_mask.append(summary_mask_block)
attention_mask = (
1 - torch.cat([torch.cat(mask) for mask in masks_to_concat])
).to(torch.bool)
return attention_mask
def forward(
self, input: torch.Tensor, lengths: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
r"""Forward pass for training.
B: batch size;
T: number of frames;
D: feature dimension of each frame.
Args:
input (torch.Tensor): utterance frames right-padded with right context frames, with
shape (B, T, D).
lengths (torch.Tensor): with shape (B,) and i-th element representing
number of valid frames for i-th batch element in ``input``.
Returns:
torch.Tensor and torch.Tensor:
torch.Tensor
output frames, with shape (B, T - ``right_context_length``, D).
torch.Tensor
output lengths, with shape (B,) and i-th element representing
number of valid frames for i-th batch element in output frames.
"""
input = input.permute(1, 0, 2)
right_context = self._gen_right_context(input)
utterance = input[: input.size(0) - self.right_context_length]
attention_mask = self._gen_attention_mask(utterance)
mems = (
self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)[:-1]
if self.use_mem
else torch.empty(0).to(dtype=input.dtype, device=input.device)
)
output = utterance
for layer in self.emformer_layers:
output, right_context, mems = layer(
output, lengths, right_context, mems, attention_mask
)
return output.permute(1, 0, 2), lengths
@torch.jit.export
def infer(
self,
input: torch.Tensor,
lengths: torch.Tensor,
states: Optional[List[List[torch.Tensor]]] = None,
) -> Tuple[torch.Tensor, torch.Tensor, List[List[torch.Tensor]]]:
r"""Forward pass for inference.
B: batch size;
T: number of frames;
D: feature dimension of each frame.
Args:
input (torch.Tensor): utterance frames right-padded with right context frames, with
shape (B, T, D).
lengths (torch.Tensor): with shape (B,) and i-th element representing
number of valid frames for i-th batch element in ``input``.
states (List[List[torch.Tensor]] or None, optional): list of lists of tensors
representing Emformer internal state generated in preceding invocation of ``infer``. (Default: ``None``)
Returns:
torch.Tensor, torch.Tensor, and List[List[torch.Tensor]]:
torch.Tensor
output frames, with shape (B, T - ``right_context_length``, D).
torch.Tensor
output lengths, with shape (B,) and i-th element representing
number of valid frames for i-th batch element in output frames.
List[List[torch.Tensor]]
output states; list of lists of tensors representing Emformer internal state
generated in current invocation of ``infer``.
"""
input = input.permute(1, 0, 2)
right_context_start_idx = input.size(0) - self.right_context_length
right_context = input[right_context_start_idx:]
utterance = input[:right_context_start_idx]
output_lengths = torch.clamp(lengths - self.right_context_length, min=0)
mems = (
self.memory_op(utterance.permute(1, 2, 0)).permute(2, 0, 1)
if self.use_mem
else torch.empty(0).to(dtype=input.dtype, device=input.device)
)
output = utterance
output_states: List[List[torch.Tensor]] = []
for layer_idx, layer in enumerate(self.emformer_layers):
output, right_context, output_state, mems = layer.infer(
output,
output_lengths,
right_context,
None if states is None else states[layer_idx],
mems,
)
output_states.append(output_state)
return output.permute(1, 0, 2), output_lengths, output_states
| [
"torch.nn.Linear",
"torch.zeros",
"torch.cat",
"torch.nn.Dropout",
"torch.nn.AvgPool1d",
"torch.nn.LayerNorm",
"torch.arange",
"torch.max",
"torch.nn.SiLU",
"torch.nn.init.xavier_uniform_",
"torch.bmm",
"torch.clamp",
"torch.nn.ReLU",
"torch.ones",
"torch.empty",
"torch.nn.functional.softmax",
"torch.tanh",
"torch.nn.GELU"
] | 1.4.0 | videodanchik/audio | c22962d125e929dd8d4c171e76f5aa48baac3d49 |
1.3 | #!/usr/bin/env python3
import os
import sys
import torch
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from core.logger import Logger
from core.filter.zfilter import ZFilter
from core.algorithm.mcpo import mcpo_step
from core.agent import Agent_sync as Agent
from core.model import PolicyWithValue as Policy
from core.common import ParamDict, ARGConfig, ARG
from core.utilities import running_time, model_dir, loadInitConfig
from environment import FakeRLBench
default_config = ARGConfig(
"PyTorch MC-PO example",
ARG("env name", "ReachTarget", critical=True, fields=["naming"],
desc="name of the environment to run"),
ARG("action mode", "delta joint position", critical=True, fields=["naming"],
desc="name of the action mode, (default: {})"),
ARG("tag", "new_r", fields=["naming"],
desc="tag of this experiment"),
ARG("short", "mcpo", critical=True, fields=["naming"],
desc="short name of this method"),
ARG("seed", 1, critical=True, fields=["naming"], desc="random seed (default: {})"),
ARG("load name", "~final.pkl", desc="name of pre-trained model"),
ARG("demo path", "RLBench/1000_djp_ReachTarget.demo.pkl", desc="demo package path"),
# ---- model parameters ---- #
ARG("activation", "tanh", critical=True,
desc="activation function name('tanh', 'sigmoid', 'relu')"),
ARG("gamma", 0.99, critical=True, key_name="advantage gamma", fields=["policy init"],
desc="discount factor (default: {})"),
ARG("tau", 0.95, critical=True, key_name="advantage tau", fields=["policy init"],
desc="gae (default: {})"),
ARG("damping", 1.e-2, critical=True, desc="damping (default: {})"),
ARG("l2 reg", 1.e-3, critical=True, desc="l2 regularization regression (default: {})"),
ARG("lr", 1.e-4, critical=True, desc="Learning rate (default: {})"),
ARG("max kl", 1.e-2, critical=True, desc="max kl value (default: {})"),
ARG("bc method", "l2", critical=True, desc="method for determining distance (default: {})"),
ARG("constraint", -6.e-3, critical=True, desc="constraint limit of behavior discrepancy (default: {})"),
ARG("constraint factor", 1.e-3, critical=True, desc="constraint limit growth along iter (default: {})"),
ARG("constraint max", 10., critical=True, desc="constraint max growth along iter (default: {})"),
ARG("use zfilter", True, critical=True, desc="filter the state when running (default {})"),
# ---- program config ---- #
ARG("batch size", 32, desc="batch size per update (default: {})"),
ARG("max iter", 5000, desc="maximal number of training iterations (default: {})"),
ARG("eval batch size", 4, desc="batch size used for evaluations (default: {})"),
ARG("eval interval", 1, desc="interval between evaluations (default: {})"),
ARG("save interval", 50, desc="interval between saving (default: {}, 0, means never save)"),
ARG("threads", 4, desc="number of threads for agent (default: {})"),
ARG("gpu threads", 2, desc="number of threads for agent (default: {})"),
ARG("gpu", (0, 1, 2, 3), desc="tuple of available GPUs, empty for cpu only"),
)
def train_loop(cfg, agent, logger):
curr_iter, max_iter, eval_iter, eval_batch_sz, batch_sz, save_iter, demo_loader =\
cfg.require("current training iter", "max iter", "eval interval",
"eval batch size", "batch size", "save interval", "demo loader")
training_cfg = ParamDict({
"policy state dict": agent.policy().getStateDict(),
"filter state dict": agent.filter().getStateDict(),
"trajectory max step": 64,
"batch size": batch_sz,
"fixed environment": False,
"fixed policy": False,
"fixed filter": False
})
validate_cfg = ParamDict({
"policy state dict": None,
"filter state dict": None,
"trajectory max step": 64,
"batch size": eval_batch_sz,
"fixed environment": False,
"fixed policy": True,
"fixed filter": True
})
# we use the entire demo set without sampling
demo_trajectory = demo_loader.generate_all()
if demo_trajectory is None:
print("Warning: No demo loaded, fall back compatible with TRPO method")
else:
print("Info: Demo loaded successfully")
demo_actions = []
demo_states = []
for p in demo_trajectory:
demo_actions.append(torch.as_tensor([t['a'] for t in p], dtype=torch.float32, device=agent.policy().device))
demo_states.append(torch.as_tensor([t['s'] for t in p], dtype=torch.float32, device=agent.policy().device))
demo_states = torch.cat(demo_states, dim=0)
demo_actions = torch.cat(demo_actions, dim=0)
demo_trajectory = [demo_states, demo_actions]
for i_iter in range(curr_iter, max_iter):
s_time = float(running_time(fmt=False))
"""sample new batch and perform MCPO update"""
batch_train, info_train = agent.rollout(training_cfg)
demo_batch = None
if demo_trajectory is not None:
filter_dict = agent.filter().getStateDict()
errsum, mean, n_step = filter_dict["zfilter errsum"], filter_dict["zfilter mean"], filter_dict["zfilter n_step"]
errsum = torch.as_tensor(errsum, dtype=torch.float32, device=agent.policy().device)
mean = torch.as_tensor(mean, dtype=torch.float32, device=agent.policy().device)
std = torch.sqrt(errsum / (n_step - 1)) if n_step > 1 else mean
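            # Normalize demo states with the agent's running ZFilter statistics so
            # they match the filtered on-policy observations fed to the policy.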
demo_batch = ((demo_trajectory[0] - mean) / (std + 1e-8), demo_trajectory[1])
mcpo_step(cfg, batch_train, agent.policy(), demo_batch)
e_time = float(running_time(fmt=False))
logger.train()
info_train["duration"] = e_time - s_time
info_train["epoch"] = i_iter
logger(info_train)
cfg["current training iter"] = i_iter + 1
cfg["policy state dict"] = training_cfg["policy state dict"] = validate_cfg["policy state dict"] = agent.policy().getStateDict()
cfg["filter state dict"] = training_cfg["filter state dict"] = validate_cfg["filter state dict"] = agent.filter().getStateDict()
if i_iter % eval_iter == 0:
batch_eval, info_eval = agent.rollout(validate_cfg)
logger.train(False)
info_eval["duration"] = e_time - s_time
info_eval["epoch"] = i_iter
logger(info_eval)
if i_iter != 0 and i_iter % save_iter == 0:
file_name = os.path.join(model_dir(cfg), f"I_{i_iter}.pkl")
cfg.save(file_name)
print(f"Saving current step at {file_name}")
    file_name = os.path.join(model_dir(cfg), "final.pkl")
cfg.save(file_name)
print(f"Total running time: {running_time(fmt=True)}, result saved at {file_name}")
def main(cfg):
env_name, action_mode, use_zf, gamma, tau, policy_state, filter_state =\
cfg.require("env name", "action mode", "use zfilter", "advantage gamma", "advantage tau", "policy state dict", "filter state dict")
logger = Logger()
logger.init(cfg)
filter_op = ZFilter(gamma, tau, enable=use_zf)
env = FakeRLBench(env_name, action_mode=action_mode)
policy = Policy(cfg, env.info())
agent = Agent(cfg, env, policy, filter_op)
# ---- start training ---- #
if policy_state is not None:
agent.policy().reset(policy_state)
if filter_state is not None:
agent.filter().reset(filter_state)
train_loop(cfg, agent, logger)
print("Done")
if __name__ == '__main__':
import torch.multiprocessing as multiprocessing
multiprocessing.set_start_method('spawn')
default_config.parser()
train_cfg = loadInitConfig(default_config)
main(train_cfg)
exit(0)
| [
"torch.cat",
"torch.sqrt",
"torch.multiprocessing.set_start_method"
] | 1.3.0 | id9502/RLFrame | a6fe99c6578e74f74767720b9212365e10f0cefd |
1.3 | ########################
# Importing libraries
########################
# System libraries
import os
import random
from time import gmtime, strftime
import numpy as np
import pickle
# Tensorboard for PyTorch logging and visualization
from torch.utils.tensorboard import SummaryWriter
# Torch libraries
import torch
import torch.backends.cudnn as cudnn
# Custom library
import lib.Models.architectures as architectures
from lib.Models.pixelcnn import PixelCNN
import lib.Datasets.datasets as datasets
from lib.Models.initialization import WeightInit
from lib.Models.architectures import grow_classifier
from lib.cmdparser import parser
from lib.Training.train import train
from lib.Training.validate import validate
from lib.Training.loss_functions import unified_loss_function as criterion
from lib.Utility.utils import save_checkpoint, save_task_checkpoint
from lib.Training.evaluate import get_latent_embedding
from lib.Utility.visualization import args_to_tensorboard
from lib.Utility.visualization import visualize_dataset_in_2d_embedding
# Comment this if CUDNN benchmarking is not desired
cudnn.benchmark = True
def main():
# Command line options
args = parser.parse_args()
print("Command line options:")
for arg in vars(args):
print(arg, getattr(args, arg))
if args.cross_dataset and not args.incremental_data:
        raise ValueError('cross-dataset training is possible only if the incremental-data flag is set')
# Check whether GPU is available and can be used
# if CUDA is found then device is set accordingly
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Launch a writer for the tensorboard summary writer instance
save_path = 'runs/' + strftime("%Y-%m-%d_%H-%M-%S", gmtime()) + '_' + args.dataset + '_' + args.architecture +\
'_variational_samples_' + str(args.var_samples) + '_latent_dim_' + str(args.var_latent_dim)
# add option specific naming to separate tensorboard log files later
if args.autoregression:
save_path += '_pixelcnn'
if args.incremental_data:
save_path += '_incremental'
if args.train_incremental_upper_bound:
save_path += '_upper_bound'
if args.generative_replay:
save_path += '_genreplay'
if args.openset_generative_replay:
save_path += '_opensetreplay'
if args.cross_dataset:
save_path += '_cross_dataset_' + args.dataset_order
# if we are resuming a previous training, note it in the name
if args.resume:
save_path = save_path + '_resumed'
writer = SummaryWriter(save_path)
# saving the parsed args to file
log_file = os.path.join(save_path, "stdout")
log = open(log_file, "a")
for arg in vars(args):
log.write(arg + ':' + str(getattr(args, arg)) + '\n')
# Dataset loading
data_init_method = getattr(datasets, args.dataset)
dataset = data_init_method(torch.cuda.is_available(), args)
# get the number of classes from the class dictionary
num_classes = dataset.num_classes
# we set an epoch multiplier to 1 for isolated training and increase it proportional to amount of tasks in CL
epoch_multiplier = 1
if args.incremental_data:
from lib.Datasets.incremental_dataset import get_incremental_dataset
# get the method to create the incremental dataset (inherits from the chosen data loader)
inc_dataset_init_method = get_incremental_dataset(data_init_method, args)
# different options for class incremental vs. cross-dataset experiments
if args.cross_dataset:
# if a task order file is specified, load the task order from it
if args.load_task_order:
# check if file exists and if file ends with extension '.txt'
if os.path.isfile(args.load_task_order) and len(args.load_task_order) >= 4\
and args.load_task_order[-4:] == '.txt':
print("=> loading task order from '{}'".format(args.load_task_order))
with open(args.load_task_order, 'rb') as fp:
task_order = pickle.load(fp)
# if no file is found default to cmd line task order
else:
# parse and split string at commas
task_order = args.dataset_order.split(',')
for i in range(len(task_order)):
# remove blank spaces in dataset names
task_order[i] = task_order[i].replace(" ", "")
# use task order as specified in command line
else:
# parse and split string at commas
task_order = args.dataset_order.split(',')
for i in range(len(task_order)):
# remove blank spaces in dataset names
task_order[i] = task_order[i].replace(" ", "")
# just for getting the number of classes in the first dataset
num_classes = 0
for i in range(args.num_base_tasks):
temp_dataset_init_method = getattr(datasets, task_order[i])
temp_dataset = temp_dataset_init_method(torch.cuda.is_available(), args)
num_classes += temp_dataset.num_classes
del temp_dataset
# multiply epochs by number of tasks
if args.num_increment_tasks:
epoch_multiplier = ((len(task_order) - args.num_base_tasks) / args.num_increment_tasks) + 1
else:
# this branch is taken when num_increment_tasks is set to zero. This is useful when training
# any isolated upper bound with all datasets present from the start.
epoch_multiplier = 1.0
else:
# class incremental
# if specified load task order from file
if args.load_task_order:
if os.path.isfile(args.load_task_order):
print("=> loading task order from '{}'".format(args.load_task_order))
task_order = np.load(args.load_task_order).tolist()
else:
# if no file is found a random task order is created
print("=> no task order found. Creating randomized task order")
task_order = np.random.permutation(num_classes).tolist()
else:
# if randomize task order is specified create a random task order, else task order is sequential
task_order = []
for i in range(dataset.num_classes):
task_order.append(i)
if args.randomize_task_order:
task_order = np.random.permutation(num_classes).tolist()
# save the task order
np.save(os.path.join(save_path, 'task_order.npy'), task_order)
# set the number of classes to base tasks + 1 because base tasks is always one less.
# E.g. if you have 2 classes it's one task. This is a little inconsistent from the naming point of view
# but we wanted a single variable to work for both class incremental as well as cross-dataset experiments
num_classes = args.num_base_tasks + 1
# multiply epochs by number of tasks
epoch_multiplier = ((len(task_order) - (args.num_base_tasks + 1)) / args.num_increment_tasks) + 1
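# Worked example with hypothetical settings: 10 classes, num_base_tasks=1 (i.e. 2 base classes)
# and num_increment_tasks=2 give epoch_multiplier = ((10 - 2) / 2) + 1 = 5 training phases.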
print("Task order: ", task_order)
# log the task order into the text file
log.write('task_order:' + str(task_order) + '\n')
args.task_order = task_order
# this is a little weird, but it needs to be here because the below method pops items from task_order
args_to_tensorboard(writer, args)
assert epoch_multiplier.is_integer(), "uneven task division, make sure number of tasks are integers."
# Get the incremental dataset
dataset = inc_dataset_init_method(torch.cuda.is_available(), device, task_order, args)
else:
# add command line options to TensorBoard
args_to_tensorboard(writer, args)
log.close()
# Get a sample input from the data loader to infer color channels/size
net_input, _ = next(iter(dataset.train_loader))
# get the amount of color channels in the input images
num_colors = net_input.size(1)
# import model from architectures class
net_init_method = getattr(architectures, args.architecture)
# if we are not building an autoregressive model the number of output channels of the model is equivalent to
# the amount of input channels. For an autoregressive models we set the number of output channels of the
# non-autoregressive decoder portion according to the command line option below
if not args.autoregression:
args.out_channels = num_colors
# build the model
model = net_init_method(device, num_classes, num_colors, args)
# optionally add the autoregressive decoder
if args.autoregression:
model.pixelcnn = PixelCNN(device, num_colors, args.out_channels, args.pixel_cnn_channels,
num_layers=args.pixel_cnn_layers, k=args.pixel_cnn_kernel_size,
padding=args.pixel_cnn_kernel_size//2)
# Parallel container for multi GPU use and cast to available device
model = torch.nn.DataParallel(model).to(device)
print(model)
# Initialize the weights of the model, by default according to He et al.
print("Initializing network with: " + args.weight_init)
WeightInitializer = WeightInit(args.weight_init)
WeightInitializer.init_model(model)
# Define optimizer and loss function (criterion)
optimizer = torch.optim.Adam(model.parameters(), args.learning_rate)
epoch = 0
best_prec = 0
best_loss = random.getrandbits(128)  # arbitrarily large sentinel so the first validation loss becomes the new best
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
epoch = checkpoint['epoch']
best_prec = checkpoint['best_prec']
best_loss = checkpoint['best_loss']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# optimize until the final amount of epochs is reached. The final amount of epochs is determined through the epoch multiplier, i.e. the number of tasks
while epoch < (args.epochs * epoch_multiplier):
# visualize the latent space before each task increment and at the end of training if it is 2-D
if (epoch % args.epochs == 0 and epoch > 0) or ((epoch + 1) % (args.epochs * epoch_multiplier) == 0):
if model.module.latent_dim == 2:
print("Calculating and visualizing dataset embedding")
# infer the number of current tasks to plot the different classes in the embedding
if args.incremental_data:
if args.cross_dataset:
num_tasks = sum(dataset.num_classes_per_task[:len(dataset.seen_tasks)])
else:
num_tasks = len(dataset.seen_tasks)
else:
num_tasks = num_classes
zs = get_latent_embedding(model, dataset.train_loader, num_tasks, device)
visualize_dataset_in_2d_embedding(writer, zs, args.dataset, save_path, task=num_tasks)
# continual learning specific part
if args.incremental_data:
# at the end of each task increment
if epoch % args.epochs == 0 and epoch > 0:
print('Saving the last checkpoint from the previous task ...')
save_task_checkpoint(save_path, epoch // args.epochs)
print("Incrementing dataset ...")
dataset.increment_tasks(model, args.batch_size, args.workers, writer, save_path,
is_gpu=torch.cuda.is_available(),
upper_bound_baseline=args.train_incremental_upper_bound,
generative_replay=args.generative_replay,
openset_generative_replay=args.openset_generative_replay,
openset_threshold=args.openset_generative_replay_threshold,
openset_tailsize=args.openset_weibull_tailsize,
autoregression=args.autoregression)
# grow the classifier and increment the variable for number of overall classes so we can use it later
if args.cross_dataset:
grow_classifier(device, model.module.classifier,
sum(dataset.num_classes_per_task[:len(dataset.seen_tasks)])
- model.module.num_classes, WeightInitializer)
model.module.num_classes = sum(dataset.num_classes_per_task[:len(dataset.seen_tasks)])
else:
model.module.num_classes += args.num_increment_tasks
grow_classifier(device, model.module.classifier, args.num_increment_tasks, WeightInitializer)
# reset moving averages etc. of the optimizer
optimizer = torch.optim.Adam(model.parameters(), args.learning_rate)
# change the number of seen classes
if epoch % args.epochs == 0:
model.module.seen_tasks = dataset.seen_tasks
# train
train(dataset, model, criterion, epoch, optimizer, writer, device, args)
# evaluate on validation set
prec, loss = validate(dataset, model, criterion, epoch, writer, device, save_path, args)
# remember best prec@1 and save checkpoint
is_best = loss < best_loss
best_loss = min(loss, best_loss)
best_prec = max(prec, best_prec)
save_checkpoint({'epoch': epoch,
'arch': args.architecture,
'state_dict': model.state_dict(),
'best_prec': best_prec,
'best_loss': best_loss,
'optimizer': optimizer.state_dict()},
is_best, save_path)
# increment epoch counters
epoch += 1
# if a new task begins, reset the best prec and loss so that a new best model can be stored.
if args.incremental_data and epoch % args.epochs == 0:
best_prec = 0
best_loss = random.getrandbits(128)
writer.close()
if __name__ == '__main__':
main()
| [
"torch.cuda.is_available",
"torch.load",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.DataParallel"
] | 1.3.1 | MrtnMndt/OCDVAEContinualLearning | 2c5f778dc7f94ff696b923a84246a56c391a8eff |
0.4 | import numpy as np
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import build_model_with_cfg
from timm.models.layers import ClassifierHead, AvgPool2dSame, ConvBnAct, SEModule, DropPath
def _mcfg(**kwargs):
cfg = dict(
se_ratio=0.,
bottle_ratio=1.,
stem_width=32)
cfg.update(**kwargs)
return cfg
# Model FLOPS = three trailing digits * 10^8
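# e.g. regnetx_002 -> 002 * 10^8 = 2e8 FLOPs (the "RegNetX-200MF" variant), regnetx_016 -> 1.6 GFLOPs.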
model_cfgs = dict(
regnetx_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13),
regnetx_004=_mcfg(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22),
regnetx_006=_mcfg(w0=48, wa=36.97, wm=2.24, group_w=24, depth=16),
regnetx_008=_mcfg(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16),
regnetx_016=_mcfg(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18),
regnetx_032=_mcfg(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25),
regnetx_040=_mcfg(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23),
regnetx_064=_mcfg(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17),
regnetx_080=_mcfg(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23),
regnetx_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19),
regnetx_160=_mcfg(w0=216, wa=55.59, wm=2.1, group_w=128, depth=22),
regnetx_320=_mcfg(w0=320, wa=69.86, wm=2.0, group_w=168, depth=23),
regnety_002=_mcfg(w0=24, wa=36.44, wm=2.49, group_w=8, depth=13, se_ratio=0.25),
regnety_004=_mcfg(w0=48, wa=27.89, wm=2.09, group_w=8, depth=16, se_ratio=0.25),
regnety_006=_mcfg(w0=48, wa=32.54, wm=2.32, group_w=16, depth=15, se_ratio=0.25),
regnety_008=_mcfg(w0=56, wa=38.84, wm=2.4, group_w=16, depth=14, se_ratio=0.25),
regnety_016=_mcfg(w0=48, wa=20.71, wm=2.65, group_w=24, depth=27, se_ratio=0.25),
regnety_032=_mcfg(w0=80, wa=42.63, wm=2.66, group_w=24, depth=21, se_ratio=0.25),
regnety_040=_mcfg(w0=96, wa=31.41, wm=2.24, group_w=64, depth=22, se_ratio=0.25),
regnety_064=_mcfg(w0=112, wa=33.22, wm=2.27, group_w=72, depth=25, se_ratio=0.25),
regnety_080=_mcfg(w0=192, wa=76.82, wm=2.19, group_w=56, depth=17, se_ratio=0.25),
regnety_120=_mcfg(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, se_ratio=0.25),
regnety_160=_mcfg(w0=200, wa=106.23, wm=2.48, group_w=112, depth=18, se_ratio=0.25),
regnety_320=_mcfg(w0=232, wa=115.89, wm=2.53, group_w=232, depth=20, se_ratio=0.25),
)
def _cfg(url=''):
return {
'url': url,
'num_classes': 1000,
'input_size': (3, 224, 224),
'pool_size': (7, 7),
'crop_pct': 0.875,
'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv',
'classifier': 'head.fc',
}
default_cfgs = dict(
regnetx_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_002-e7e85e5c.pth'),
regnetx_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_004-7d0e9424.pth'),
regnetx_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_006-85ec1baa.pth'),
regnetx_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_008-d8b470eb.pth'),
regnetx_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_016-65ca972a.pth'),
regnetx_032=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_032-ed0c7f7e.pth'),
regnetx_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_040-73c2a654.pth'),
regnetx_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_064-29278baa.pth'),
regnetx_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_080-7c7fcab1.pth'),
regnetx_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_120-65d5521e.pth'),
regnetx_160=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_160-c98c4112.pth'),
regnetx_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnetx_320-8ea38b93.pth'),
regnety_002=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_002-e68ca334.pth'),
regnety_004=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_004-0db870e6.pth'),
regnety_006=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_006-c67e57ec.pth'),
regnety_008=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_008-dc900dbe.pth'),
regnety_016=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_016-54367f74.pth'),
regnety_032=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/regnety_032_ra-7f2439f9.pth'),
regnety_040=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_040-f0d569f9.pth'),
regnety_064=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_064-0a48325c.pth'),
regnety_080=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_080-e7f3eb93.pth'),
regnety_120=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_120-721ba79a.pth'),
regnety_160=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_160-d64013cd.pth'),
regnety_320=_cfg(url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-regnet/regnety_320-ba464b29.pth'),
)
def quantize_float(f, q):
"""Converts a float to closest non-zero int divisible by q."""
return int(round(f / q) * q)
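# Example (illustrative values): quantize_float(27.3, 8) rounds 27.3 / 8 = 3.41 to 3 and returns 3 * 8 = 24.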
def adjust_widths_groups_comp(widths, bottle_ratios, groups):
"""Adjusts the compatibility of widths and groups."""
bottleneck_widths = [int(w * b) for w, b in zip(widths, bottle_ratios)]
groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_widths)]
bottleneck_widths = [quantize_float(w_bot, g) for w_bot, g in zip(bottleneck_widths, groups)]
widths = [int(w_bot / b) for w_bot, b in zip(bottleneck_widths, bottle_ratios)]
return widths, groups
def generate_regnet(width_slope, width_initial, width_mult, depth, q=8):
"""Generates per block widths from RegNet parameters."""
assert width_slope >= 0 and width_initial > 0 and width_mult > 1 and width_initial % q == 0
widths_cont = np.arange(depth) * width_slope + width_initial
width_exps = np.round(np.log(widths_cont / width_initial) / np.log(width_mult))
widths = width_initial * np.power(width_mult, width_exps)
widths = np.round(np.divide(widths, q)) * q
num_stages, max_stage = len(np.unique(widths)), width_exps.max() + 1
widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
return widths, num_stages, max_stage, widths_cont
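# Example (illustrative parameters): generate_regnet(8., 24, 2., 4) produces per-block widths
# [24, 24, 48, 48], i.e. num_stages=2 stages of depth 2, since the continuous widths
# [24, 32, 40, 48] snap to the nearest power of width_mult=2 times width_initial=24.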
class Bottleneck(nn.Module):
""" RegNet Bottleneck
This is almost exactly the same as a ResNet Bottleneck. The main difference is that the SE block is moved from
after conv3 to after conv2. Otherwise, it's just redefining the arguments for groups/bottleneck channels.
"""
def __init__(self, in_chs, out_chs, stride=1, dilation=1, bottleneck_ratio=1, group_width=1, se_ratio=0.25,
downsample=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, aa_layer=None,
drop_block=None, drop_path=None):
super(Bottleneck, self).__init__()
bottleneck_chs = int(round(out_chs * bottleneck_ratio))
groups = bottleneck_chs // group_width
cargs = dict(act_layer=act_layer, norm_layer=norm_layer, aa_layer=aa_layer, drop_block=drop_block)
self.conv1 = ConvBnAct(in_chs, bottleneck_chs, kernel_size=1, **cargs)
self.conv2 = ConvBnAct(
bottleneck_chs, bottleneck_chs, kernel_size=3, stride=stride, dilation=dilation,
groups=groups, **cargs)
if se_ratio:
se_channels = int(round(in_chs * se_ratio))
self.se = SEModule(bottleneck_chs, reduction_channels=se_channels)
else:
self.se = None
cargs['act_layer'] = None
self.conv3 = ConvBnAct(bottleneck_chs, out_chs, kernel_size=1, **cargs)
self.act3 = act_layer(inplace=True)
self.downsample = downsample
self.drop_path = drop_path
def zero_init_last_bn(self):
nn.init.zeros_(self.conv3.bn.weight)
def forward(self, x):
shortcut = x
x = self.conv1(x)
x = self.conv2(x)
if self.se is not None:
x = self.se(x)
x = self.conv3(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.downsample is not None:
shortcut = self.downsample(shortcut)
x += shortcut
x = self.act3(x)
return x
def downsample_conv(
in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None):
norm_layer = norm_layer or nn.BatchNorm2d
kernel_size = 1 if stride == 1 and dilation == 1 else kernel_size
dilation = dilation if kernel_size > 1 else 1
return ConvBnAct(
in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, norm_layer=norm_layer, act_layer=None)
def downsample_avg(
in_chs, out_chs, kernel_size, stride=1, dilation=1, norm_layer=None):
""" AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment."""
norm_layer = norm_layer or nn.BatchNorm2d
avg_stride = stride if dilation == 1 else 1
pool = nn.Identity()
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
return nn.Sequential(*[
pool, ConvBnAct(in_chs, out_chs, 1, stride=1, norm_layer=norm_layer, act_layer=None)])
class RegStage(nn.Module):
"""Stage (sequence of blocks w/ the same output shape)."""
def __init__(self, in_chs, out_chs, stride, dilation, depth, bottle_ratio, group_width,
block_fn=Bottleneck, se_ratio=0., drop_path_rates=None, drop_block=None):
super(RegStage, self).__init__()
block_kwargs = {} # FIXME setup to pass various aa, norm, act layer common args
first_dilation = 1 if dilation in (1, 2) else 2
for i in range(depth):
block_stride = stride if i == 0 else 1
block_in_chs = in_chs if i == 0 else out_chs
block_dilation = first_dilation if i == 0 else dilation
if drop_path_rates is not None and drop_path_rates[i] > 0.:
drop_path = DropPath(drop_path_rates[i])
else:
drop_path = None
if (block_in_chs != out_chs) or (block_stride != 1):
proj_block = downsample_conv(block_in_chs, out_chs, 1, block_stride, block_dilation)
else:
proj_block = None
name = "b{}".format(i + 1)
self.add_module(
name, block_fn(
block_in_chs, out_chs, block_stride, block_dilation, bottle_ratio, group_width, se_ratio,
downsample=proj_block, drop_block=drop_block, drop_path=drop_path, **block_kwargs)
)
def forward(self, x):
for block in self.children():
x = block(x)
return x
class RegNet(nn.Module):
"""RegNet model.
Paper: https://arxiv.org/abs/2003.13678
Original Impl: https://github.com/facebookresearch/pycls/blob/master/pycls/models/regnet.py
"""
def __init__(self, cfg, in_chans=3, num_classes=1000, output_stride=32, global_pool='avg', drop_rate=0.,
drop_path_rate=0., zero_init_last_bn=True, in_channels=3, in_size=(224, 224)):
super().__init__()
# TODO add drop block, drop path, anti-aliasing, custom bn/act args
self.in_size = in_size
self.num_classes = num_classes
self.drop_rate = drop_rate
assert output_stride in (8, 16, 32)
# Construct the stem
stem_width = cfg['stem_width']
self.stem = ConvBnAct(in_chans, stem_width, 3, stride=2)
self.feature_info = [dict(num_chs=stem_width, reduction=2, module='stem')]
# Construct the stages
prev_width = stem_width
curr_stride = 2
stage_params = self._get_stage_params(cfg, output_stride=output_stride, drop_path_rate=drop_path_rate)
se_ratio = cfg['se_ratio']
for i, stage_args in enumerate(stage_params):
stage_name = "s{}".format(i + 1)
self.add_module(stage_name, RegStage(prev_width, se_ratio=se_ratio, **stage_args))
prev_width = stage_args['out_chs']
curr_stride *= stage_args['stride']
self.feature_info += [dict(num_chs=prev_width, reduction=curr_stride, module=stage_name)]
# Construct the head
self.num_features = prev_width
self.head = ClassifierHead(
in_chs=prev_width, num_classes=num_classes, pool_type=global_pool, drop_rate=drop_rate)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0.0, std=0.01)
nn.init.zeros_(m.bias)
if zero_init_last_bn:
for m in self.modules():
if hasattr(m, 'zero_init_last_bn'):
m.zero_init_last_bn()
def _get_stage_params(self, cfg, default_stride=2, output_stride=32, drop_path_rate=0.):
# Generate RegNet ws per block
w_a, w_0, w_m, d = cfg['wa'], cfg['w0'], cfg['wm'], cfg['depth']
widths, num_stages, _, _ = generate_regnet(w_a, w_0, w_m, d)
# Convert to per stage format
stage_widths, stage_depths = np.unique(widths, return_counts=True)
# Use the same group width, bottleneck mult and stride for each stage
stage_groups = [cfg['group_w'] for _ in range(num_stages)]
stage_bottle_ratios = [cfg['bottle_ratio'] for _ in range(num_stages)]
stage_strides = []
stage_dilations = []
net_stride = 2
dilation = 1
for _ in range(num_stages):
if net_stride >= output_stride:
dilation *= default_stride
stride = 1
else:
stride = default_stride
net_stride *= stride
stage_strides.append(stride)
stage_dilations.append(dilation)
stage_dpr = np.split(np.linspace(0, drop_path_rate, d), np.cumsum(stage_depths[:-1]))
# Adjust the compatibility of ws and gws
stage_widths, stage_groups = adjust_widths_groups_comp(stage_widths, stage_bottle_ratios, stage_groups)
param_names = ['out_chs', 'stride', 'dilation', 'depth', 'bottle_ratio', 'group_width', 'drop_path_rates']
stage_params = [
dict(zip(param_names, params)) for params in
zip(stage_widths, stage_strides, stage_dilations, stage_depths, stage_bottle_ratios, stage_groups,
stage_dpr)]
return stage_params
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
for block in list(self.children())[:-1]:
x = block(x)
return x
def forward(self, x):
for block in self.children():
x = block(x)
return x
def _create_regnet(variant, pretrained, **kwargs):
return build_model_with_cfg(
model_cls=RegNet,
variant=variant,
pretrained=pretrained,
default_cfg=default_cfgs[variant],
model_cfg=model_cfgs[variant],
**kwargs)
def regnetx_002(pretrained=False, **kwargs):
"""RegNetX-200MF"""
return _create_regnet('regnetx_002', pretrained, **kwargs)
def regnetx_004(pretrained=False, **kwargs):
"""RegNetX-400MF"""
return _create_regnet('regnetx_004', pretrained, **kwargs)
def regnetx_006(pretrained=False, **kwargs):
"""RegNetX-600MF"""
return _create_regnet('regnetx_006', pretrained, **kwargs)
def regnetx_008(pretrained=False, **kwargs):
"""RegNetX-800MF"""
return _create_regnet('regnetx_008', pretrained, **kwargs)
def regnetx_016(pretrained=False, **kwargs):
"""RegNetX-1.6GF"""
return _create_regnet('regnetx_016', pretrained, **kwargs)
def regnetx_032(pretrained=False, **kwargs):
"""RegNetX-3.2GF"""
return _create_regnet('regnetx_032', pretrained, **kwargs)
def regnetx_040(pretrained=False, **kwargs):
"""RegNetX-4.0GF"""
return _create_regnet('regnetx_040', pretrained, **kwargs)
def regnetx_064(pretrained=False, **kwargs):
"""RegNetX-6.4GF"""
return _create_regnet('regnetx_064', pretrained, **kwargs)
def regnetx_080(pretrained=False, **kwargs):
"""RegNetX-8.0GF"""
return _create_regnet('regnetx_080', pretrained, **kwargs)
def regnetx_120(pretrained=False, **kwargs):
"""RegNetX-12GF"""
return _create_regnet('regnetx_120', pretrained, **kwargs)
def regnetx_160(pretrained=False, **kwargs):
"""RegNetX-16GF"""
return _create_regnet('regnetx_160', pretrained, **kwargs)
def regnetx_320(pretrained=False, **kwargs):
"""RegNetX-32GF"""
return _create_regnet('regnetx_320', pretrained, **kwargs)
def regnety_002(pretrained=False, **kwargs):
"""RegNetY-200MF"""
return _create_regnet('regnety_002', pretrained, **kwargs)
def regnety_004(pretrained=False, **kwargs):
"""RegNetY-400MF"""
return _create_regnet('regnety_004', pretrained, **kwargs)
def regnety_006(pretrained=False, **kwargs):
"""RegNetY-600MF"""
return _create_regnet('regnety_006', pretrained, **kwargs)
def regnety_008(pretrained=False, **kwargs):
"""RegNetY-800MF"""
return _create_regnet('regnety_008', pretrained, **kwargs)
def regnety_016(pretrained=False, **kwargs):
"""RegNetY-1.6GF"""
return _create_regnet('regnety_016', pretrained, **kwargs)
def regnety_032(pretrained=False, **kwargs):
"""RegNetY-3.2GF"""
return _create_regnet('regnety_032', pretrained, **kwargs)
def regnety_040(pretrained=False, **kwargs):
"""RegNetY-4.0GF"""
return _create_regnet('regnety_040', pretrained, **kwargs)
def regnety_064(pretrained=False, **kwargs):
"""RegNetY-6.4GF"""
return _create_regnet('regnety_064', pretrained, **kwargs)
def regnety_080(pretrained=False, **kwargs):
"""RegNetY-8.0GF"""
return _create_regnet('regnety_080', pretrained, **kwargs)
def regnety_120(pretrained=False, **kwargs):
"""RegNetY-12GF"""
return _create_regnet('regnety_120', pretrained, **kwargs)
def regnety_160(pretrained=False, **kwargs):
"""RegNetY-16GF"""
return _create_regnet('regnety_160', pretrained, **kwargs)
def regnety_320(pretrained=False, **kwargs):
"""RegNetY-32GF"""
return _create_regnet('regnety_320', pretrained, **kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
regnetx_002,
regnetx_004,
regnetx_006,
regnetx_008,
regnetx_016,
regnetx_032,
regnetx_040,
regnetx_064,
regnetx_080,
regnetx_120,
regnetx_160,
regnetx_320,
regnety_002,
regnety_004,
regnety_006,
regnety_008,
regnety_016,
regnety_032,
regnety_040,
regnety_064,
regnety_080,
regnety_120,
regnety_160,
regnety_320,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != regnetx_002 or weight_count == 2684792)
assert (model != regnetx_004 or weight_count == 5157512)
assert (model != regnetx_006 or weight_count == 6196040)
assert (model != regnetx_008 or weight_count == 7259656)
assert (model != regnetx_016 or weight_count == 9190136)
assert (model != regnetx_032 or weight_count == 15296552)
assert (model != regnetx_040 or weight_count == 22118248)
assert (model != regnetx_064 or weight_count == 26209256)
assert (model != regnetx_080 or weight_count == 39572648)
assert (model != regnetx_120 or weight_count == 46106056)
assert (model != regnetx_160 or weight_count == 54278536)
assert (model != regnetx_320 or weight_count == 107811560)
assert (model != regnety_002 or weight_count == 3162996)
assert (model != regnety_004 or weight_count == 4344144)
assert (model != regnety_006 or weight_count == 6055160)
assert (model != regnety_008 or weight_count == 6263168)
assert (model != regnety_016 or weight_count == 11202430)
assert (model != regnety_032 or weight_count == 19436338)
assert (model != regnety_040 or weight_count == 20646656)
assert (model != regnety_064 or weight_count == 30583252)
assert (model != regnety_080 or weight_count == 39180068)
assert (model != regnety_120 or weight_count == 51822544)
assert (model != regnety_160 or weight_count == 83590140)
assert (model != regnety_320 or weight_count == 145046770)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
| [
"torch.nn.Identity",
"torch.nn.init.kaiming_normal_",
"torch.nn.init.ones_",
"torch.nn.init.normal_",
"torch.nn.init.zeros_",
"torch.randn"
] | 0.4.0 | sahilparekh/imgclsmob | 74d52457b4bf00c82d063b3f4a1a73fb6ba3863a |
1.7 | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from .helper import *
###############################################################################
# Functions
###############################################################################
def define_G(
input_nc,
output_nc,
ngf,
netG,
n_downsample_global=3,
n_blocks_global=9,
n_local_enhancers=1,
n_blocks_local=3,
norm='instance',
init_type='normal',
init_gain=0.02,
device='cpu',
gpu_ids=[]
):
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'global':
netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
elif netG == 'local':
netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, n_local_enhancers, n_blocks_local, norm_layer)
elif netG == 'encoder':
netG = Encoder(input_nc, output_nc, ngf, n_downsample_global, norm_layer)
else:
raise NotImplementedError('generator [%s] is not implemented' % netG)
return init_net(netG, init_type, init_gain, device, gpu_ids)
def define_D(
input_nc,
ndf,
n_layers_D,
num_D=1,
norm='instance',
init_type='normal',
init_gain=0.02,
use_sigmoid=False,
getIntermFeat=False,
device='cpu',
gpu_ids=[],
):
norm_layer = get_norm_layer(norm_type=norm)
netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, norm_layer, use_sigmoid, num_D, getIntermFeat)
return init_net(netD, init_type, init_gain, device, gpu_ids)
##############################################################################
# Losses
##############################################################################
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0, device=None):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.device = device
self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()
def forward(self, input, target_is_real):
if isinstance(input[0], list):
loss = 0
for input_i in input:
pred = input_i[-1]
target_tensor = self.get_target_tensor(pred, target_is_real)
loss += self.loss(pred, target_tensor)
return loss
else:
target_tensor = self.get_target_tensor(input[-1], target_is_real)
return self.loss(input[-1], target_tensor)
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or (self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = torch.FloatTensor(input.size()).fill_(self.real_label).to(self.device)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or (self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = torch.FloatTensor(input.size()).fill_(self.fake_label).to(self.device)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
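# Illustrative usage sketch (netD and the real/fake input pairs are assumed to exist elsewhere):
# criterion = GANLoss(use_lsgan=True, device=device)
# loss_D_real = criterion(netD(real_pair), True)
# loss_D_fake = criterion(netD(fake_pair.detach()), False)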
class VGGLoss(nn.Module):
def __init__(self, device):
super(VGGLoss, self).__init__()
self.vgg = Vgg19()
self.vgg.to(device)
self.criterion = nn.L1Loss()
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
def forward(self, x, y):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
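# Illustrative usage sketch (fake_images/real_images are assumed (N, 3, H, W) batches):
# perceptual = VGGLoss(device)
# loss_G_VGG = perceptual(fake_images, real_images)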
##############################################################################
# Generator
##############################################################################
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False):
super().__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [
nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
activation
]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [
nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)
]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class GlobalGenerator(nn.Module):
def __init__(
self,
input_nc,
output_nc,
ngf=64,
n_downsampling=3,
n_blocks=9,
norm_layer=nn.BatchNorm2d,
padding_type='reflect'
):
assert(n_blocks >= 0)
super().__init__()
model = [
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
norm_layer(ngf),
nn.ReLU(True)
]
### downsample
for i in range(n_downsampling):
mult = 2**i
model += [
nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2),
nn.ReLU(True)
]
### resnet blocks
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=nn.ReLU(True), norm_layer=norm_layer)]
### upsample
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [
nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)
]
model += [
nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
nn.Tanh()
]
self.model = nn.Sequential(*model)
def forward(self, input):
return self.model(input)
class LocalEnhancer(nn.Module):
def __init__(
self,
input_nc,
output_nc,
ngf=32,
n_downsample_global=3,
n_blocks_global=9,
n_local_enhancers=1,
n_blocks_local=3,
norm_layer=nn.BatchNorm2d,
padding_type='reflect'
):
super().__init__()
self.n_local_enhancers = n_local_enhancers
###### global generator model #####
ngf_global = ngf * (2**n_local_enhancers)
model_global = GlobalGenerator(input_nc, output_nc, ngf_global, n_downsample_global, n_blocks_global, norm_layer).model
model_global = [model_global[i] for i in range(len(model_global)-3)] # get rid of final convolution layers
self.globalG = nn.Sequential(*model_global)
###### local enhancer layers #####
for n in range(1, n_local_enhancers+1):
### downsample
ngf_global = ngf * (2**(n_local_enhancers-n))
model_downsample = [
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf_global, kernel_size=7, padding=0),
norm_layer(ngf_global),
nn.ReLU(True),
nn.Conv2d(ngf_global, ngf_global * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf_global * 2),
nn.ReLU(True)
]
### residual blocks
model_upsample = []
for i in range(n_blocks_local):
model_upsample += [ResnetBlock(ngf_global * 2, padding_type=padding_type, norm_layer=norm_layer)]
### upsample
model_upsample += [
nn.ConvTranspose2d(ngf_global * 2, ngf_global, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(ngf_global),
nn.ReLU(True)
]
### final convolution
if n == n_local_enhancers:
model_upsample += [
nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
nn.Tanh()
]
setattr(self, f'localG_{n}_F', nn.Sequential(*model_downsample))
setattr(self, f'localG_{n}_B', nn.Sequential(*model_upsample))
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def forward(self, input):
### create input pyramid
input_downsampled = [input]
for i in range(self.n_local_enhancers):
input_downsampled.append(self.downsample(input_downsampled[-1]))
### output at the coarsest level
output_prev = self.globalG(input_downsampled[-1])
### build up one layer at a time
for n_local_enhancers in range(1, self.n_local_enhancers+1):
model_downsample = getattr(self, f'localG_{n_local_enhancers}_F')
model_upsample = getattr(self, f'localG_{n_local_enhancers}_B')
input_i = input_downsampled[self.n_local_enhancers-n_local_enhancers]
output_prev = model_upsample(model_downsample(input_i) + output_prev)
return output_prev
class Encoder(nn.Module):
def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
super(Encoder, self).__init__()
self.output_nc = output_nc
model = [
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
norm_layer(ngf),
nn.ReLU(True)
]
### downsample
for i in range(n_downsampling):
mult = 2**i
model += [
nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2),
nn.ReLU(True)
]
### upsample
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [
nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)
]
model += [
nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
nn.Tanh()
]
self.model = nn.Sequential(*model)
def forward(self, image, inst):
outputs = self.model(image)
# instance-wise average pooling
outputs_mean = outputs.clone()
inst_list = np.unique(inst.cpu().numpy().astype(int))
for i in inst_list:
for b in range(image.size()[0]):
indices = (inst[b:b+1] == int(i)).nonzero() # n x 4
for j in range(self.output_nc):
output_ins = outputs[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]]
mean_feat = torch.mean(output_ins).expand_as(output_ins)
outputs_mean[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]] = mean_feat
return outputs_mean
##############################################################################
# Discriminator
##############################################################################
class MultiscaleDiscriminator(nn.Module):
def __init__(
self,
input_nc,
ndf=64,
n_layers=3,
norm_layer=nn.BatchNorm2d,
use_sigmoid=False,
num_D=3,
getIntermFeat=False
):
super().__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getIntermFeat
for i in range(num_D):
netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
if getIntermFeat:
for j in range(n_layers+2):
setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))
else:
setattr(self, 'layer'+str(i), netD.model)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def singleD_forward(self, model, input):
if self.getIntermFeat:
result = [input]
for i in range(len(model)):
result.append(model[i](result[-1]))
return result[1:]
else:
return [model(input)]
def forward(self, input):
num_D = self.num_D
result = []
input_downsampled = input
for i in range(num_D):
if self.getIntermFeat:
model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)]
else:
model = getattr(self, 'layer'+str(num_D-1-i))
result.append(self.singleD_forward(model, input_downsampled))
if i != (num_D-1):
input_downsampled = self.downsample(input_downsampled)
return result
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(
self,
input_nc,
ndf=64,
n_layers=3,
norm_layer=nn.BatchNorm2d,
use_sigmoid=False,
getIntermFeat=False
):
super().__init__()
self.getIntermFeat = getIntermFeat
self.n_layers = n_layers
kw = 4
padw = int(np.ceil((kw-1.0)/2))
sequence = [[
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)
]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
if use_sigmoid:
sequence += [[nn.Sigmoid()]]
if getIntermFeat:
for n in range(len(sequence)):
setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
else:
sequence_stream = []
for n in range(len(sequence)):
sequence_stream += sequence[n]
self.model = nn.Sequential(*sequence_stream)
def forward(self, input):
if self.getIntermFeat:
res = [input]
for n in range(self.n_layers+2):
model = getattr(self, 'model'+str(n))
res.append(model(res[-1]))
return res[1:]
else:
return self.model(input)
from torchvision import models
class Vgg19(torch.nn.Module):
def __init__(self, requires_grad=False):
super().__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
| [
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.nn.ReplicationPad2d",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.Tanh",
"torch.autograd.Variable",
"torch.nn.ConvTranspose2d",
"torch.nn.LeakyReLU",
"torch.nn.Sigmoid",
"torch.nn.L1Loss",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.ReflectionPad2d",
"torch.nn.BCELoss",
"torch.mean"
] | 1.7.1 | bruceli-rw0/edge2pic-generation | e9ee6f89361d1a12b044c0ab665a09fca4a47089 |
1.2 | from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
import numpy as np
import networkx as nx
import random
import pickle as pk
import torch
import torch.nn.functional as F
class Node:
def __init__(self, node, embedding, features, walk):
self.node = node
self.embedding = embedding
self.features = features
self.walk = walk
class InstagramDataset(Dataset):
def __init__(self, graph, features):
self.graph = graph
self.features = features
def __getitem__(self, index):
nodes = random.choice(self.features)
return torch.tensor(nodes[0]), torch.tensor(nodes[1]).float(), torch.tensor(nodes[2]), torch.tensor(nodes[3]).float(), nodes[4]
def __len__(self):
return len(self.graph)
def split_dataset(dataset, batch_size, validation_split):
"""
Generates training and validation splits
Arguments:
dataset -- an InstagramDataset object, based on torch's Dataset class
batch_size -- size of the batches produced by the dataloaders
validation_split -- fraction of the dataset that will be used for validation.
Return:
train_dataloader -- training torch dataloader
validation_dataloader -- validation torch dataloader
"""
# Creating data indexes for training and validation splits:
dataset_size = len(dataset)
indexes = list(range(dataset_size))
split = int(np.floor(validation_split * dataset_size))
np.random.shuffle(indexes)
train_indexes, val_indexes = indexes[split:], indexes[:split]
# Creating data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indexes)
valid_sampler = SubsetRandomSampler(val_indexes)
train_dataloader = DataLoader(dataset, batch_size=batch_size,
sampler=train_sampler, num_workers=8,)
validation_dataloader = DataLoader(dataset, batch_size=1,
sampler=valid_sampler, num_workers=8)
return train_dataloader, validation_dataloader
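# Example usage (illustrative numbers): an 80/20 train/validation split with batches of 32.
# train_loader, val_loader = split_dataset(dataset, batch_size=32, validation_split=0.2)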
def get_features(node_features, no_features):
"""
For a given node, returns its features shaped to the convolutional matrix features
Arguments:
node_features -- list of lists containing the features of a node
no_features -- Which set of features will be used
Return:
np array containing the features of a node
"""
if(no_features==1):
return node_features.embedding
features = np.concatenate((node_features.features))
if(no_features==2):
return np.concatenate((node_features.embedding, features))
else:
walk = np.concatenate((node_features.walk[0], node_features.walk[1], node_features.walk[2]))
return np.concatenate((node_features.embedding, features, walk))
def sample_graph_features(graph, graph_features, no_edges, no_features=1, siamese=0):
"""
Generates sampled nodes to train the models.
Arguments:
graph -- graph file used.
graph_features -- A list where the indexes are the node's id and the values are the node's representation
no_edges -- number of pairs of each class (adjacent / non-adjacent) to be sampled
no_features -- Which set of features will be used.
siamese -- 1 If the dataset is for a siamese network, else 0
Return:
sampled_graph -- a list with 2*no_edges pairs of nodes (no_edges adjacent and no_edges non-adjacent pairs)
"""
sampled_graph = []
edges = list(graph.edges)
nodes = list(graph.nodes)
for i in range(no_edges):
r = np.random.randint(0, len(edges))  # randint's upper bound is exclusive, so this covers every edge
node1_pos = edges[r][0]
node2_pos = edges[r][1]
node1_pos_features = get_features(graph_features[node1_pos], no_features)
node2_pos_features = get_features(graph_features[node2_pos], no_features)
sampled_graph.append([node1_pos, node1_pos_features, node2_pos, node2_pos_features, 1])
node1_neg = nodes[np.random.randint(0, len(nodes))]
node2_neg = nodes[np.random.randint(0, len(nodes))]
while(graph.has_edge(node1_neg, node2_neg)):
node1_neg = nodes[np.random.randint(0, len(nodes))]
node2_neg = nodes[np.random.randint(0, len(nodes))]
node1_neg_features = get_features(graph_features[node1_neg], no_features)
node2_neg_features = get_features(graph_features[node2_neg], no_features)
neg_edge = -1 if (siamese == 1) else 0
sampled_graph.append([node1_neg, node1_neg_features, node2_neg, node2_neg_features, neg_edge])
return sampled_graph
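# Note: the returned list interleaves one adjacent pair (label 1) with one non-adjacent pair
# (label 0, or -1 when siamese=1) per iteration, giving 2 * no_edges entries in total.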
def gcn_features(graph, graph_features, no_features, size):
"""
Generates the matrix features used on convolutional models.
Arguments:
graph -- graph file used.
graph_features -- A list where the indexes are the node's id and the values are the node's representation
no_features -- Which set of features will be used.
size -- size of the feature array
Return:
features -- A special matrix, made of numpy arrays, of features used on convolutional models, similar to the graph_features
"""
nodes = list(graph.nodes)
features = np.zeros((len(nodes),size))
for i in nodes:
features[i] = get_features(graph_features[i], no_features)
return features
def generate_dataset(graph_name, no_edges, no_features, siamese=0):
"""
Generates all the necessary data to train the models.
Arguments:
graph_name -- Name of the graph file used.
no_edges -- No. of edges that will be sampled to the dataset
no_features -- Which set of features will be used.
siamese -- 1 If the dataset is for a siamese network, else 0
Return:
dataset -- A InstagramDataset object dataset, based torch's class Dataset
graph_features -- A list where the indexes are the node's id and the values are a list of lists with the node's representation
edge_index -- A COO adjacency matrix of the graph
features -- A special matrix of features used on convolutional models, similar to the graph_features
"""
print('Generating dataset... ', end='\r')
file = open(graph_name, 'rb')
graph = pk.load(file)
file.close()
file = open(graph_name+'_features', 'rb')
full_graph_features = pk.load(file)
file.close()
graph = graph.to_directed()
graph = nx.convert_node_labels_to_integers(graph)
graph_features = sample_graph_features(graph, full_graph_features, no_edges, no_features, siamese)
dataset = InstagramDataset(graph, graph_features)
edge_index = torch.tensor(list(graph.edges)).t().contiguous()
features = gcn_features(graph, full_graph_features, no_features, len(graph_features[0][1]))
print('Dataset ok! ')
return dataset, graph_features, edge_index, features | [
"torch.utils.data.SubsetRandomSampler",
"torch.tensor",
"torch.utils.data.DataLoader"
] | 1.2.0 | ggapp1/microinfl-instagram | e3fefbc09f9ee1bc5010618ccae647e4d763f503 |
1.8 | import numpy as np
import torch
from torch import nn
import os
## Network functions
# Model
class Net(nn.Module):
"""
A class for a deep neural net architecture.
Parameters
----------
in_dim: int
Input dimension.
out_dim: int
Output dimension.
hidden_size: int, default = 10
Number of nodes in every hidden layer.
n_hidden: int, default = 2
Number of hidden layers
activation: ACTIVATION, default = torch.nn.ReLU()
Activation function to be used by the hidden layers.
bias: bool, default = False
A boolean indicating if a bias shall be added.
bn: bool, default = False
A boolean indicating if batch norm shall be applied.
"""
def __init__(
self,
in_dim,
out_dim,
hidden_size=10,
n_hidden=2,
activation=torch.nn.ReLU(),
bias=False,
bn=False,
):
super(Net, self).__init__()
module = nn.ModuleList()
module.append(nn.Linear(in_dim, hidden_size, bias=bias))
for ll in range(n_hidden):
module.append(activation)
if bn:
module.append(nn.BatchNorm1d(hidden_size))
module.append(nn.Linear(hidden_size, hidden_size, bias=bias))
module.append(activation)
if bn:
module.append(nn.BatchNorm1d(hidden_size))
module.append(nn.Linear(hidden_size, out_dim, bias=bias))
self.sequential = nn.Sequential(*module)
def forward(self, x):
return self.sequential(x)
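# Example (illustrative): a 2-input binary classifier with 5 hidden layers of width 20.
# net = Net(in_dim=2, out_dim=1, hidden_size=20, n_hidden=5, bias=True)
# logits = net(torch.randn(8, 2))  # shape (8, 1), fed to BCEWithLogitsLoss during training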
## functions
def weight_reset(m):
"""
Reinitializes parameters of a model [m] according to default initialization scheme.
"""
if isinstance(m, torch.nn.Conv2d) or isinstance(m, torch.nn.Linear):
m.reset_parameters()
def train_model(model, train_x, train_y, multi_label=False, verbose=False):
"""
Performs training of a model given training data.
Parameters
----------
model : Net
A deep neural net model to be trained
train_x : Tensor
Training features
train_y: Tensor
Training labels
multi_label: bool, default = False
A boolean indicating if it is a multi-label classification
verbose: bool, default = False
A boolean indicating the production of detailed logging information during training
Returns
-------
losses : Tensor
The accumulated losses during training.
"""
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
loss_func = torch.nn.BCEWithLogitsLoss()
losses = []
for step in range(1000):
optimizer.zero_grad()
outputs = model(train_x)
if multi_label:
train_y = train_y.type_as(outputs)
loss = loss_func(outputs, train_y)
trainL = loss.detach().item()
if verbose and (step % 500 == 0):
print("train loss = ", trainL)
losses.append(trainL)
loss.backward()
optimizer.step()
return losses
def get_model(
in_dim=2,
out_dim=1,
hidden_size=20,
n_hidden=5,
activation=torch.nn.ReLU(),
bias=True,
bn=False,
use_gpu=True,
):
"""
Initializes the deep neural net model and sends it to the GPU if requested
Parameters
----------
in_dim: int
Input dimension.
out_dim: int
Output dimension.
hidden_size: int, default = 10
Number of nodes in every hidden layer
n_hidden: int, default = 2
Number of hidden layers
activation: ACTIVATION, default = torch.nn.ReLU()
Activation function to be used by the hidden layers
bias: bool, default = False
A boolean indicating if a bias shall be added
bn: bool, default = False
A boolean indicating if batch norm shall be applied
use_gpu: bool, default = True
A boolean indicating if a gpu is available
Returns
-------
model : Net
A deep neural net model
"""
model = Net(
in_dim,
out_dim,
n_hidden=n_hidden,
hidden_size=hidden_size,
activation=activation,
bias=bias,
bn=bn,
)
if use_gpu:
model = model.cuda()
return model
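# Illustrative end-to-end sketch (train_x/train_y are assumed pre-built tensors):
# model = get_model(in_dim=2, out_dim=1, use_gpu=False)
# losses = train_model(model, train_x, train_y, verbose=True)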
| [
"torch.nn.Linear",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.nn.BCEWithLogitsLoss"
] | 1.8.1 | rflperry/double_descent | 5001613791b3bbfa77c86f8426458253e8989bea |
1.3 | ''' DQN agent
The code is derived from https://github.com/dennybritz/reinforcement-learning/blob/master/DQN/dqn.py
Copyright (c) 2019 Matthew Judell
Copyright (c) 2019 DATA Lab at Texas A&M University
Copyright (c) 2016 Denny Britz
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import numpy as np
import torch
import torch.nn as nn
from collections import namedtuple
from copy import deepcopy
from rlcard3.agents.dqn_agent import Memory
from rlcard3.utils.utils import remove_illegal
Transition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'done'])
class DQNAgent(object):
'''
Approximate clone of rlcard3.agents.dqn_agent.DQNAgent
that depends on PyTorch instead of Tensorflow
'''
def __init__(self,
scope,
replay_memory_size=20000,
replay_memory_init_size=100,
update_target_estimator_every=1000,
discount_factor=0.99,
epsilon_start=1.0,
epsilon_end=0.1,
epsilon_decay_steps=20000,
batch_size=32,
action_num=2,
state_shape=None,
train_every=1,
mlp_layers=None,
learning_rate=0.00005,
device=None):
'''
Q-Learning algorithm for off-policy TD control using Function Approximation.
Finds the optimal greedy policy while following an epsilon-greedy policy.
Args:
scope (str): The name of the DQN agent
replay_memory_size (int): Size of the replay memory
replay_memory_init_size (int): Number of random experiences to sample when initializing
the replay memory.
update_target_estimator_every (int): Copy parameters from the Q estimator to the
target estimator every N steps
discount_factor (float): Gamma discount factor
            epsilon_start (float): Chance to sample a random action when taking an action.
                Epsilon is decayed over time and this is the start value
            epsilon_end (float): The final minimum value of epsilon after decaying is done
epsilon_decay_steps (int): Number of steps to decay epsilon over
batch_size (int): Size of batches to sample from the replay memory
            action_num (int): The number of the actions
            state_shape (list): The shape of the state vector
            train_every (int): Train the network every X timesteps
mlp_layers (list): The layer number and the dimension of each layer in MLP
learning_rate (float): The learning rate of the DQN agent.
device (torch.device): whether to use the cpu or gpu
'''
self.use_raw = False
self.scope = scope
self.replay_memory_init_size = replay_memory_init_size
self.update_target_estimator_every = update_target_estimator_every
self.discount_factor = discount_factor
self.epsilon_decay_steps = epsilon_decay_steps
self.batch_size = batch_size
self.action_num = action_num
self.train_every = train_every
# Torch device
if device is None:
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
else:
self.device = device
# Total timesteps
self.total_t = 0
# Total training step
self.train_t = 0
# The epsilon decay scheduler
self.epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)
# Create estimators
self.q_estimator = Estimator(action_num=action_num, learning_rate=learning_rate, state_shape=state_shape, \
mlp_layers=mlp_layers, device=self.device)
self.target_estimator = Estimator(action_num=action_num, learning_rate=learning_rate, state_shape=state_shape, \
mlp_layers=mlp_layers, device=self.device)
# Create replay memory
self.memory = Memory(replay_memory_size, batch_size)
def feed(self, ts):
        ''' Store data into the replay buffer and train the agent. There are two stages.
            In stage 1, populate the memory without training.
            In stage 2, train the agent every `train_every` timesteps.
Args:
ts (list): a list of 5 elements that represent the transition
'''
(state, action, reward, next_state, done) = tuple(ts)
self.feed_memory(state['obs'], action, reward, next_state['obs'], done)
self.total_t += 1
tmp = self.total_t - self.replay_memory_init_size
if tmp>=0 and tmp%self.train_every == 0:
self.train()
def step(self, state):
        ''' Predict the action for generating training data but
            have the predictions disconnected from the computation graph
Args:
state (numpy.array): current state
Returns:
action (int): an action id
'''
A = self.predict(state['obs'])
A = remove_illegal(A, state['legal_actions'])
action = np.random.choice(np.arange(len(A)), p=A)
return action
def eval_step(self, state):
''' Predict the action for evaluation purpose.
Args:
state (numpy.array): current state
Returns:
action (int): an action id
'''
q_values = self.q_estimator.predict_nograd(np.expand_dims(state['obs'], 0))[0]
probs = remove_illegal(np.exp(q_values), state['legal_actions'])
best_action = np.argmax(probs)
return best_action, probs
def predict(self, state):
''' Predict the action probabilities but have them
disconnected from the computation graph
Args:
state (numpy.array): current state
Returns:
q_values (numpy.array): a 1-d array where each entry represents a Q value
'''
epsilon = self.epsilons[min(self.total_t, self.epsilon_decay_steps-1)]
A = np.ones(self.action_num, dtype=float) * epsilon / self.action_num
q_values = self.q_estimator.predict_nograd(np.expand_dims(state, 0))[0]
best_action = np.argmax(q_values)
A[best_action] += (1.0 - epsilon)
return A
def train(self):
''' Train the network
Returns:
loss (float): The loss of the current batch.
'''
state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample()
# Calculate best next actions using Q-network (Double DQN)
q_values_next = self.q_estimator.predict_nograd(next_state_batch)
best_actions = np.argmax(q_values_next, axis=1)
# Evaluate best next actions using Target-network (Double DQN)
q_values_next_target = self.target_estimator.predict_nograd(next_state_batch)
target_batch = reward_batch + np.invert(done_batch).astype(np.float32) * \
self.discount_factor * q_values_next_target[np.arange(self.batch_size), best_actions]
# Perform gradient descent update
state_batch = np.array(state_batch)
loss = self.q_estimator.update(state_batch, action_batch, target_batch)
print('\rINFO - Agent {}, step {}, rl-loss: {}'.format(self.scope, self.total_t, loss), end='')
# Update the target estimator
if self.train_t % self.update_target_estimator_every == 0:
self.target_estimator = deepcopy(self.q_estimator)
print("\nINFO - Copied model parameters to target network.")
self.train_t += 1
def feed_memory(self, state, action, reward, next_state, done):
''' Feed transition to memory
Args:
state (numpy.array): the current state
action (int): the performed action ID
reward (float): the reward received
next_state (numpy.array): the next state after performing the action
done (boolean): whether the episode is finished
'''
self.memory.save(state, action, reward, next_state, done)
def get_state_dict(self):
''' Get the state dict to save models
Returns:
(dict): A dict of model states
'''
q_key = self.scope + '_q_estimator'
q_value = self.q_estimator.qnet.state_dict()
target_key = self.scope + '_target_estimator'
target_value = self.target_estimator.qnet.state_dict()
return {q_key: q_value, target_key: target_value}
def load(self, checkpoint):
''' Load model
Args:
checkpoint (dict): the loaded state
'''
q_key = self.scope + '_q_estimator'
self.q_estimator.qnet.load_state_dict(checkpoint[q_key])
target_key = self.scope + '_target_estimator'
self.target_estimator.qnet.load_state_dict(checkpoint[target_key])
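# --- Hedged illustration (editor's addition, not part of the original file) ---
# Toy walk-through of the epsilon-greedy distribution built in `DQNAgent.predict`:
# with 4 actions and epsilon = 0.2, every action keeps 0.05 probability and the
# greedy action receives the remaining 0.8. Names here are local to this sketch.
def _epsilon_greedy_demo():
    epsilon, action_num = 0.2, 4
    q_values = np.array([0.1, 0.5, 0.3, 0.2])
    A = np.ones(action_num, dtype=float) * epsilon / action_num  # exploration mass
    A[np.argmax(q_values)] += 1.0 - epsilon                      # exploitation mass
    return A  # -> [0.05, 0.85, 0.05, 0.05], sums to 1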
class Estimator(object):
'''
Approximate clone of rlcard3.agents.dqn_agent.Estimator that
uses PyTorch instead of Tensorflow. All methods input/output np.ndarray.
Q-Value Estimator neural network.
This network is used for both the Q-Network and the Target Network.
'''
def __init__(self, action_num=2, learning_rate=0.001, state_shape=None, mlp_layers=None, device=None):
        ''' Initialize an Estimator object.
        Args:
            action_num (int): the number of output actions
            learning_rate (float): the learning rate of the optimizer
            state_shape (list): the shape of the state space
            mlp_layers (list): size of outputs of mlp layers
            device (torch.device): whether to use the cpu or gpu
'''
self.action_num = action_num
self.learning_rate=learning_rate
self.state_shape = state_shape
self.mlp_layers = mlp_layers
self.device = device
# set up Q model and place it in eval mode
qnet = EstimatorNetwork(action_num, state_shape, mlp_layers)
qnet = qnet.to(self.device)
self.qnet = qnet
self.qnet.eval()
# initialize the weights using Xavier init
for p in self.qnet.parameters():
if len(p.data.shape) > 1:
nn.init.xavier_uniform_(p.data)
# set up loss function
self.mse_loss = nn.MSELoss(reduction='mean')
# set up optimizer
self.optimizer = torch.optim.Adam(self.qnet.parameters(), lr=self.learning_rate)
def predict_nograd(self, s):
''' Predicts action values, but prediction is not included
in the computation graph. It is used to predict optimal next
actions in the Double-DQN algorithm.
Args:
s (np.ndarray): (batch, state_len)
Returns:
np.ndarray of shape (batch_size, NUM_VALID_ACTIONS) containing the estimated
action values.
'''
with torch.no_grad():
s = torch.from_numpy(s).float().to(self.device)
q_as = self.qnet(s).cpu().numpy()
return q_as
def update(self, s, a, y):
''' Updates the estimator towards the given targets.
            In this case y is the target-network estimated
            value of the Q-network optimal actions, which
            is labeled y in Algorithm 1 of Mnih et al. (2015)
Args:
s (np.ndarray): (batch, state_shape) state representation
a (np.ndarray): (batch,) integer sampled actions
y (np.ndarray): (batch,) value of optimal actions according to Q-target
Returns:
The calculated loss on the batch.
'''
self.optimizer.zero_grad()
self.qnet.train()
s = torch.from_numpy(s).float().to(self.device)
a = torch.from_numpy(a).long().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
# (batch, state_shape) -> (batch, action_num)
q_as = self.qnet(s)
# (batch, action_num) -> (batch, )
Q = torch.gather(q_as, dim=-1, index=a.unsqueeze(-1)).squeeze(-1)
# update model
batch_loss = self.mse_loss(Q, y)
batch_loss.backward()
self.optimizer.step()
batch_loss = batch_loss.item()
self.qnet.eval()
return batch_loss
class EstimatorNetwork(nn.Module):
''' The function approximation network for Estimator
It is just a series of tanh layers. All in/out are torch.tensor
'''
def __init__(self, action_num=2, state_shape=None, mlp_layers=None):
''' Initialize the Q network
Args:
action_num (int): number of legal actions
state_shape (list): shape of state tensor
mlp_layers (list): output size of each fc layer
'''
super(EstimatorNetwork, self).__init__()
self.action_num = action_num
self.state_shape = state_shape
self.mlp_layers = mlp_layers
# build the Q network
layer_dims = [np.prod(self.state_shape)] + self.mlp_layers
fc = [nn.Flatten()]
fc.append(nn.BatchNorm1d(layer_dims[0]))
for i in range(len(layer_dims)-1):
fc.append(nn.Linear(layer_dims[i], layer_dims[i+1], bias=True))
fc.append(nn.Tanh())
fc.append(nn.Linear(layer_dims[-1], self.action_num, bias=True))
self.fc_layers = nn.Sequential(*fc)
def forward(self, s):
''' Predict action values
Args:
s (Tensor): (batch, state_shape)
'''
return self.fc_layers(s)
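# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# One gradient step with the Estimator defined above; all shapes and
# hyper-parameters here are illustrative assumptions.
if __name__ == "__main__":
    est = Estimator(action_num=4, learning_rate=0.001,
                    state_shape=[6], mlp_layers=[32, 32],
                    device=torch.device('cpu'))
    s = np.random.rand(8, 6).astype(np.float32)  # batch of 8 states
    a = np.random.randint(0, 4, size=8)          # sampled action ids
    y = np.random.rand(8).astype(np.float32)     # TD targets
    print('batch loss:', est.update(s, a, y))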
| [
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.Flatten",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.MSELoss",
"torch.no_grad",
"torch.nn.init.xavier_uniform_",
"torch.from_numpy",
"torch.nn.BatchNorm1d"
] | 1.3 | cogitoergoread/muszi-macrohard.hu | e9bbd36b789e670f96622a3a2ba8327f0d897561 |
1.4 | #!/usr/bin/env python
""" Translator Class and builder """
from __future__ import print_function
import codecs
import os
import time
import numpy as np
from itertools import count, zip_longest
import torch
import onmt.model_builder
import onmt.inputters as inputters
import onmt.decoders.ensemble
from onmt.translate.beam_search import BeamSearch
from onmt.translate.greedy_search import GreedySearch
from onmt.utils.misc import tile, set_random_seed, report_matrix
from onmt.utils.alignment import extract_alignment, build_align_pharaoh
from onmt.modules.copy_generator import collapse_copy_scores
def build_translator(opt, report_score=True, logger=None, out_file=None):
if out_file is None:
out_file = codecs.open(opt.output, 'w+', 'utf-8')
load_test_model = onmt.decoders.ensemble.load_test_model \
if len(opt.models) > 1 else onmt.model_builder.load_test_model
fields, model, model_opt = load_test_model(opt)
scorer = onmt.translate.GNMTGlobalScorer.from_opt(opt)
translator = Translator.from_opt(
model,
fields,
opt,
model_opt,
global_scorer=scorer,
out_file=out_file,
report_align=opt.report_align,
report_score=report_score,
logger=logger
)
model.decoder.set_eval_status(True)
return translator
def max_tok_len(new, count, sofar):
"""
    In the token batching scheme, the number of sequences is limited
such that the total number of src/tgt tokens (including padding)
in a batch <= batch_size
"""
# Maintains the longest src and tgt length in the current batch
global max_src_in_batch # this is a hack
# Reset current longest length at a new batch (count=1)
if count == 1:
max_src_in_batch = 0
# max_tgt_in_batch = 0
# Src: [<bos> w1 ... wN <eos>]
max_src_in_batch = max(max_src_in_batch, len(new.src[0]) + 2)
# Tgt: [w1 ... wM <eos>]
src_elements = count * max_src_in_batch
return src_elements
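# --- Hedged illustration (editor's addition, not part of the original file) ---
# Worked example of the cap enforced via `max_tok_len`: if `batch_size` is read
# as a budget of 64 tokens and the longest padded src so far is 20 tokens, at
# most 64 // 20 = 3 sequences fit before `src_elements` exceeds the budget.
def _token_batching_demo():
    token_budget, longest_src = 64, 20
    return token_budget // longest_src  # -> 3 sequences per batch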
class Translator(object):
"""Translate a batch of sentences with a saved model.
Args:
model (onmt.modules.NMTModel): NMT model to use for translation
fields (dict[str, torchtext.data.Field]): A dict
mapping each side to its list of name-Field pairs.
src_reader (onmt.inputters.DataReaderBase): Source reader.
tgt_reader (onmt.inputters.TextDataReader): Target reader.
gpu (int): GPU device. Set to negative for no GPU.
n_best (int): How many beams to wait for.
min_length (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
max_length (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
beam_size (int): Number of beams.
random_sampling_topk (int): See
:class:`onmt.translate.greedy_search.GreedySearch`.
random_sampling_temp (int): See
:class:`onmt.translate.greedy_search.GreedySearch`.
stepwise_penalty (bool): Whether coverage penalty is applied every step
or not.
dump_beam (bool): Debugging option.
block_ngram_repeat (int): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
ignore_when_blocking (set or frozenset): See
:class:`onmt.translate.decode_strategy.DecodeStrategy`.
replace_unk (bool): Replace unknown token.
data_type (str): Source data type.
verbose (bool): Print/log every translation.
report_time (bool): Print/log total time/frequency.
copy_attn (bool): Use copy attention.
global_scorer (onmt.translate.GNMTGlobalScorer): Translation
scoring/reranking object.
out_file (TextIO or codecs.StreamReaderWriter): Output file.
report_score (bool) : Whether to report scores
logger (logging.Logger or NoneType): Logger.
"""
def __init__(
self,
model,
fields,
src_reader,
tgt_reader,
gpu=-1,
n_best=1,
min_length=0,
max_length=100,
ratio=0.,
beam_size=30,
random_sampling_topk=1,
random_sampling_temp=1,
stepwise_penalty=None,
dump_beam=False,
block_ngram_repeat=0,
ignore_when_blocking=frozenset(),
replace_unk=False,
phrase_table="",
data_type="text",
verbose=False,
report_time=False,
copy_attn=False,
global_scorer=None,
out_file=None,
report_align=False,
report_score=True,
logger=None,
seed=-1):
self.model = model
self.fields = fields
tgt_field = dict(self.fields)["tgt"].base_field
self._tgt_vocab = tgt_field.vocab
self._tgt_eos_idx = self._tgt_vocab.stoi[tgt_field.eos_token]
self._tgt_pad_idx = self._tgt_vocab.stoi[tgt_field.pad_token]
self._tgt_bos_idx = self._tgt_vocab.stoi[tgt_field.init_token]
self._tgt_unk_idx = self._tgt_vocab.stoi[tgt_field.unk_token]
self._tgt_vocab_len = len(self._tgt_vocab)
self._gpu = gpu
self._use_cuda = gpu > -1
self._dev = torch.device("cuda", self._gpu) \
if self._use_cuda else torch.device("cpu")
self.n_best = n_best
self.max_length = max_length
self.beam_size = beam_size
self.random_sampling_temp = random_sampling_temp
self.sample_from_topk = random_sampling_topk
self.min_length = min_length
self.ratio = ratio
self.stepwise_penalty = stepwise_penalty
self.dump_beam = dump_beam
self.block_ngram_repeat = block_ngram_repeat
self.ignore_when_blocking = ignore_when_blocking
self._exclusion_idxs = {
self._tgt_vocab.stoi[t] for t in self.ignore_when_blocking}
self.src_reader = src_reader
self.tgt_reader = tgt_reader
self.replace_unk = replace_unk
if self.replace_unk and not self.model.decoder.attentional:
raise ValueError(
"replace_unk requires an attentional decoder.")
self.phrase_table = phrase_table
self.data_type = data_type
self.verbose = verbose
self.report_time = report_time
self.copy_attn = copy_attn
self.global_scorer = global_scorer
if self.global_scorer.has_cov_pen and \
not self.model.decoder.attentional:
raise ValueError(
"Coverage penalty requires an attentional decoder.")
self.out_file = out_file
self.report_align = report_align
self.report_score = report_score
self.logger = logger
self.use_filter_pred = False
self._filter_pred = None
# for debugging
self.beam_trace = self.dump_beam != ""
self.beam_accum = None
if self.beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
set_random_seed(seed, self._use_cuda)
@classmethod
def from_opt(
cls,
model,
fields,
opt,
model_opt,
global_scorer=None,
out_file=None,
report_align=False,
report_score=True,
logger=None):
"""Alternate constructor.
Args:
model (onmt.modules.NMTModel): See :func:`__init__()`.
fields (dict[str, torchtext.data.Field]): See
:func:`__init__()`.
opt (argparse.Namespace): Command line options
model_opt (argparse.Namespace): Command line options saved with
the model checkpoint.
global_scorer (onmt.translate.GNMTGlobalScorer): See
:func:`__init__()`..
out_file (TextIO or codecs.StreamReaderWriter): See
:func:`__init__()`.
report_align (bool) : See :func:`__init__()`.
report_score (bool) : See :func:`__init__()`.
logger (logging.Logger or NoneType): See :func:`__init__()`.
"""
src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = inputters.str2reader["text"].from_opt(opt)
return cls(
model,
fields,
src_reader,
tgt_reader,
gpu=opt.gpu,
n_best=opt.n_best,
min_length=opt.min_length,
max_length=opt.max_length,
ratio=opt.ratio,
beam_size=opt.beam_size,
random_sampling_topk=opt.random_sampling_topk,
random_sampling_temp=opt.random_sampling_temp,
stepwise_penalty=opt.stepwise_penalty,
dump_beam=opt.dump_beam,
block_ngram_repeat=opt.block_ngram_repeat,
ignore_when_blocking=set(opt.ignore_when_blocking),
replace_unk=opt.replace_unk,
phrase_table=opt.phrase_table,
data_type=opt.data_type,
verbose=opt.verbose,
report_time=opt.report_time,
copy_attn=model_opt.copy_attn,
global_scorer=global_scorer,
out_file=out_file,
report_align=report_align,
report_score=report_score,
logger=logger,
seed=opt.seed)
def _log(self, msg):
if self.logger:
self.logger.info(msg)
else:
print(msg)
def _gold_score(self, batch, memory_bank, src_lengths, src_vocabs,
use_src_map, enc_states, batch_size, src):
if "tgt" in batch.__dict__:
gs = self._score_target(
batch, memory_bank, src_lengths, src_vocabs,
batch.src_map if use_src_map else None)
self.model.decoder.init_state(src, memory_bank, enc_states)
else:
gs = [0] * batch_size
return gs
def translate(
self,
src,
tgt=None,
src_dir=None,
batch_size=None,
batch_type="sents",
attn_debug=False,
align_debug=False,
phrase_table=""):
"""Translate content of ``src`` and get gold scores from ``tgt``.
Args:
src: See :func:`self.src_reader.read()`.
tgt: See :func:`self.tgt_reader.read()`.
src_dir: See :func:`self.src_reader.read()` (only relevant
for certain types of data).
batch_size (int): size of examples per mini-batch
attn_debug (bool): enables the attention logging
align_debug (bool): enables the word alignment logging
Returns:
(`list`, `list`)
* all_scores is a list of `batch_size` lists of `n_best` scores
* all_predictions is a list of `batch_size` lists
of `n_best` predictions
"""
if batch_size is None:
raise ValueError("batch_size must be set")
src_data = {"reader": self.src_reader, "data": src, "dir": src_dir}
tgt_data = {"reader": self.tgt_reader, "data": tgt, "dir": None}
_readers, _data, _dir = inputters.Dataset.config(
[('src', src_data), ('tgt', tgt_data)])
data = inputters.Dataset(
self.fields, readers=_readers, data=_data, dirs=_dir,
sort_key=inputters.str2sortkey[self.data_type],
filter_pred=self._filter_pred
)
data_iter = inputters.OrderedIterator(
dataset=data,
device=self._dev,
batch_size=batch_size,
batch_size_fn=max_tok_len if batch_type == "tokens" else None,
train=False,
sort=False,
sort_within_batch=True,
shuffle=False
)
xlation_builder = onmt.translate.TranslationBuilder(
data, self.fields, self.n_best, self.replace_unk, tgt,
self.phrase_table
)
# Statistics
counter = count(1)
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
all_scores = []
all_predictions = []
start_time = time.time()
for batch in data_iter:
batch_data = self.translate_batch(
batch, data.src_vocabs, attn_debug
)
translations = xlation_builder.from_batch(batch_data)
for trans in translations:
all_scores += [trans.pred_scores[:self.n_best]]
pred_score_total += trans.pred_scores[0]
pred_words_total += len(trans.pred_sents[0])
if tgt is not None:
gold_score_total += trans.gold_score
gold_words_total += len(trans.gold_sent) + 1
n_best_preds = [" ".join(pred)
for pred in trans.pred_sents[:self.n_best]]
if self.report_align:
align_pharaohs = [build_align_pharaoh(align) for align
in trans.word_aligns[:self.n_best]]
n_best_preds_align = [" ".join(align) for align
in align_pharaohs]
n_best_preds = [pred + " ||| " + align
for pred, align in zip(
n_best_preds, n_best_preds_align)]
all_predictions += [n_best_preds]
self.out_file.write('\n'.join(n_best_preds) + '\n')
self.out_file.flush()
if self.verbose:
sent_number = next(counter)
output = trans.log(sent_number)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
if attn_debug:
preds = trans.pred_sents[0]
preds.append('</s>')
attns = trans.attns[0].tolist()
if self.data_type == 'text':
srcs = trans.src_raw
else:
srcs = [str(item) for item in range(len(attns[0]))]
output = report_matrix(srcs, preds, attns)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
if align_debug:
if trans.gold_sent is not None:
tgts = trans.gold_sent
else:
tgts = trans.pred_sents[0]
align = trans.word_aligns[0].tolist()
if self.data_type == 'text':
srcs = trans.src_raw
else:
srcs = [str(item) for item in range(len(align[0]))]
output = report_matrix(srcs, tgts, align)
if self.logger:
self.logger.info(output)
else:
os.write(1, output.encode('utf-8'))
end_time = time.time()
if self.report_score:
msg = self._report_score('PRED', pred_score_total,
pred_words_total)
self._log(msg)
if tgt is not None:
msg = self._report_score('GOLD', gold_score_total,
gold_words_total)
self._log(msg)
if self.report_time:
total_time = end_time - start_time
self._log("Total translation time (s): %f" % total_time)
self._log("Average translation time (s): %f" % (
total_time / len(all_predictions)))
self._log("Tokens per second: %f" % (
pred_words_total / total_time))
if self.dump_beam:
import json
json.dump(self.translator.beam_accum,
codecs.open(self.dump_beam, 'w', 'utf-8'))
return all_scores, all_predictions
def _align_pad_prediction(self, predictions, bos, pad):
"""
        Pad predictions in batch and add BOS.
        Args:
            predictions (List[List[Tensor]]): `(batch, n_best,)`, where each src
                sequence contains n_best tgt predictions, all of which end with
                the eos id.
bos (int): bos index to be used.
pad (int): pad index to be used.
Return:
batched_nbest_predict (torch.LongTensor): `(batch, n_best, tgt_l)`
"""
dtype, device = predictions[0][0].dtype, predictions[0][0].device
flatten_tgt = [best.tolist() for bests in predictions
for best in bests]
        padded_tgt = torch.tensor(
            list(zip_longest(*flatten_tgt, fillvalue=pad)),
            dtype=dtype, device=device).T
        bos_tensor = torch.full([padded_tgt.size(0), 1], bos,
                                dtype=dtype, device=device)
        full_tgt = torch.cat((bos_tensor, padded_tgt), dim=-1)
batched_nbest_predict = full_tgt.view(
len(predictions), -1, full_tgt.size(-1)) # (batch, n_best, tgt_l)
return batched_nbest_predict
def _align_forward(self, batch, predictions):
"""
        For a batch of inputs and their predictions, return a list of predicted
        alignment src-index Tensors, each of size ``(batch, n_best,)``.
"""
# (0) add BOS and padding to tgt prediction
if hasattr(batch, 'tgt'):
batch_tgt_idxs = batch.tgt.transpose(1, 2).transpose(0, 2)
else:
batch_tgt_idxs = self._align_pad_prediction(
predictions, bos=self._tgt_bos_idx, pad=self._tgt_pad_idx)
tgt_mask = (batch_tgt_idxs.eq(self._tgt_pad_idx) |
batch_tgt_idxs.eq(self._tgt_eos_idx) |
batch_tgt_idxs.eq(self._tgt_bos_idx))
n_best = batch_tgt_idxs.size(1)
# (1) Encoder forward.
src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)
# (2) Repeat src objects `n_best` times.
# We use batch_size x n_best, get ``(src_len, batch * n_best, nfeat)``
src = tile(src, n_best, dim=1)
enc_states = tile(enc_states, n_best, dim=1)
if isinstance(memory_bank, tuple):
memory_bank = tuple(tile(x, n_best, dim=1) for x in memory_bank)
else:
memory_bank = tile(memory_bank, n_best, dim=1)
src_lengths = tile(src_lengths, n_best) # ``(batch * n_best,)``
# (3) Init decoder with n_best src,
self.model.decoder.init_state(src, memory_bank, enc_states)
# reshape tgt to ``(len, batch * n_best, nfeat)``
tgt = batch_tgt_idxs.view(-1, batch_tgt_idxs.size(-1)).T.unsqueeze(-1)
dec_in = tgt[:-1] # exclude last target from inputs
_, attns = self.model.decoder(
dec_in, memory_bank, memory_lengths=src_lengths, with_align=True)
alignment_attn = attns["align"] # ``(B, tgt_len-1, src_len)``
# masked_select
align_tgt_mask = tgt_mask.view(-1, tgt_mask.size(-1))
prediction_mask = align_tgt_mask[:, 1:] # exclude bos to match pred
# get aligned src id for each prediction's valid tgt tokens
        alignment = extract_alignment(
            alignment_attn, prediction_mask, src_lengths, n_best)
        return alignment
def translate_batch(self, batch, src_vocabs, attn_debug):
#self.model.decoder.set_eval_status(True)
"""Translate a batch of sentences."""
with torch.no_grad():
if self.beam_size == 1:
decode_strategy = GreedySearch(
pad=self._tgt_pad_idx,
bos=self._tgt_bos_idx,
eos=self._tgt_eos_idx,
batch_size=batch.batch_size,
min_length=self.min_length, max_length=self.max_length,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=self._exclusion_idxs,
return_attention=attn_debug or self.replace_unk,
sampling_temp=self.random_sampling_temp,
keep_topk=self.sample_from_topk)
else:
# TODO: support these blacklisted features
assert not self.dump_beam
decode_strategy = BeamSearch(
self.beam_size,
batch_size=batch.batch_size,
pad=self._tgt_pad_idx,
bos=self._tgt_bos_idx,
eos=self._tgt_eos_idx,
n_best=self.n_best,
global_scorer=self.global_scorer,
min_length=self.min_length, max_length=self.max_length,
return_attention=attn_debug or self.replace_unk,
block_ngram_repeat=self.block_ngram_repeat,
exclusion_tokens=self._exclusion_idxs,
stepwise_penalty=self.stepwise_penalty,
ratio=self.ratio)
#self.model.decoder.set_eval_status(False)
return self._translate_batch_with_strategy(batch, src_vocabs,
decode_strategy)
def _run_encoder(self, batch):
src, src_lengths = batch.src if isinstance(batch.src, tuple) \
else (batch.src, None)
enc_states, memory_bank, src_lengths = self.model.encoder(
src, src_lengths)
if src_lengths is None:
assert not isinstance(memory_bank, tuple), \
'Ensemble decoding only supported for text data'
src_lengths = torch.Tensor(batch.batch_size) \
.type_as(memory_bank) \
.long() \
.fill_(memory_bank.size(0))
return src, enc_states, memory_bank, src_lengths
def _decode_and_generate(
self,
decoder_in,
memory_bank,
batch,
src_vocabs,
memory_lengths,
src_map=None,
step=None,
batch_offset=None):
if self.copy_attn:
# Turn any copied words into UNKs.
decoder_in = decoder_in.masked_fill(
decoder_in.gt(self._tgt_vocab_len - 1), self._tgt_unk_idx
)
# Decoder forward, takes [tgt_len, batch, nfeats] as input
# and [src_len, batch, hidden] as memory_bank
# in case of inference tgt_len = 1, batch = beam times batch_size
# in case of Gold Scoring tgt_len = actual length, batch = 1 batch
self.model.decoder.set_copy_info(batch, self._tgt_vocab)
dec_out, dec_attn = self.model.decoder(
decoder_in, memory_bank, memory_lengths=memory_lengths, step=step
)
# Generator forward.
if not self.copy_attn:
if "std" in dec_attn:
attn = dec_attn["std"]
else:
attn = None
log_probs = self.model.generator(dec_out.squeeze(0))
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
else:
attn = dec_attn["copy"]
#print("DEC_OUT: ", dec_out.size())
#print("ATTN: ", attn.size())
scores = self.model.generator(dec_out.view(-1, dec_out.size(2)),
attn.view(-1, attn.size(2)),
src_map)
# here we have scores [tgt_lenxbatch, vocab] or [beamxbatch, vocab]
if batch_offset is None:
scores = scores.view(-1, batch.batch_size, scores.size(-1))
scores = scores.transpose(0, 1).contiguous()
else:
scores = scores.view(-1, self.beam_size, scores.size(-1))
#print("TGT_VOCAB: ", self._tgt_vocab)
scores = collapse_copy_scores(
scores,
batch,
self._tgt_vocab,
src_vocabs,
batch_dim=0,
batch_offset=batch_offset
)
scores = scores.view(decoder_in.size(0), -1, scores.size(-1))
log_probs = scores.squeeze(0).log()
#print(log_probs.size())
# returns [(batch_size x beam_size) , vocab ] when 1 step
# or [ tgt_len, batch_size, vocab ] when full sentence
return log_probs, attn
def _translate_batch_with_strategy(
self,
batch,
src_vocabs,
decode_strategy):
"""Translate a batch of sentences step by step using cache.
Args:
batch: a batch of sentences, yield by data iterator.
src_vocabs (list): list of torchtext.data.Vocab if can_copy.
decode_strategy (DecodeStrategy): A decode strategy to use for
generate translation step by step.
Returns:
results (dict): The translation results.
"""
# (0) Prep the components of the search.
use_src_map = self.copy_attn
parallel_paths = decode_strategy.parallel_paths # beam_size
batch_size = batch.batch_size
# (1) Run the encoder on the src.
src, enc_states, memory_bank, src_lengths = self._run_encoder(batch)
self.model.decoder.init_state(src, memory_bank, enc_states)
results = {
"predictions": None,
"scores": None,
"attention": None,
"batch": batch,
"gold_score": self._gold_score(
batch, memory_bank, src_lengths, src_vocabs, use_src_map,
enc_states, batch_size, src)}
# (2) prep decode_strategy. Possibly repeat src objects.
src_map = batch.src_map if use_src_map else None
fn_map_state, memory_bank, memory_lengths, src_map = \
decode_strategy.initialize(memory_bank, src_lengths, src_map)
if fn_map_state is not None:
self.model.decoder.map_state(fn_map_state)
# (3) Begin decoding step by step:
for step in range(decode_strategy.max_length):
decoder_input = decode_strategy.current_predictions.view(1, -1, 1)
log_probs, attn = self._decode_and_generate(
decoder_input,
memory_bank,
batch,
src_vocabs,
memory_lengths=memory_lengths,
src_map=src_map,
step=step,
batch_offset=decode_strategy.batch_offset)
decode_strategy.advance(log_probs, attn)
any_finished = decode_strategy.is_finished.any()
if any_finished:
decode_strategy.update_finished()
if decode_strategy.done:
break
select_indices = decode_strategy.select_indices
if any_finished:
# Reorder states.
if isinstance(memory_bank, tuple):
memory_bank = tuple(x.index_select(1, select_indices)
for x in memory_bank)
else:
memory_bank = memory_bank.index_select(1, select_indices)
memory_lengths = memory_lengths.index_select(0, select_indices)
if src_map is not None:
src_map = src_map.index_select(1, select_indices)
if parallel_paths > 1 or any_finished:
self.model.decoder.map_state(
lambda state, dim: state.index_select(dim, select_indices))
results["scores"] = decode_strategy.scores
results["predictions"] = decode_strategy.predictions
results["attention"] = decode_strategy.attention
if self.report_align:
results["alignment"] = self._align_forward(
batch, decode_strategy.predictions)
else:
results["alignment"] = [[] for _ in range(batch_size)]
return results
def _score_target(self, batch, memory_bank, src_lengths,
src_vocabs, src_map):
tgt = batch.tgt
tgt_in = tgt[:-1]
log_probs, attn = self._decode_and_generate(
tgt_in, memory_bank, batch, src_vocabs,
memory_lengths=src_lengths, src_map=src_map)
log_probs[:, :, self._tgt_pad_idx] = 0
gold = tgt[1:]
gold_scores = log_probs.gather(2, gold)
gold_scores = gold_scores.sum(dim=0).view(-1)
return gold_scores
def _report_score(self, name, score_total, words_total):
if words_total == 0:
msg = "%s No words predicted" % (name,)
else:
avg_score = score_total / words_total
ppl = np.exp(-score_total.item() / words_total)
msg = ("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, avg_score,
name, ppl))
return msg
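# --- Hedged illustration (editor's addition, not part of the original file) ---
# Relation between the summed log-probability and the PPL printed by
# `_report_score`: a total log-prob of -120.0 over 100 words gives
# exp(120.0 / 100) ~= 3.32. Values are illustrative.
def _ppl_demo():
    score_total, words_total = -120.0, 100
    return float(np.exp(-score_total / words_total))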
| [
"torch.device",
"torch.cat",
"torch.Tensor",
"torch.no_grad"
] | 1.4.0 | GarrettNicolai/OpenNMT-py | 9491d900ac1b50fe39da417bacc0b9d610331888 |
1.1 | import numpy as np
from scipy.misc import imsave
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
import torch.nn.functional as F
import torchvision
from torchvision import models
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision.transforms as Transforms
from dataloader import TrainDataset, DevDataset, TestDataset
from networks.unet import UNet, unet_weight_init
from networks.hed import HED, HED_1L, hed_weight_init
from networks.resnet import ResnetGenerator, Upscale4xResnetGenerator, Upscale2xResnetGenerator
from networks.resnet_wdsr import WDSRResnetGenerator
from networks.discriminators import NLayerDiscriminator
from networks.vggfeature import VGGFeatureMap
from utils.visualizer import Visualizer
from utils.loss import BCE2d
from utils.normalize import norm, denorm, weights_init_normal
from utils.target import PSNR, SSIM, batch_compare_filter, batch_SSIM
USE_GPU = torch.cuda.is_available()
NORM = 'batch'
def save_img(img, save_fn=''):
if not os.path.exists(os.path.split(save_fn)[0]):
os.makedirs(os.path.split(save_fn)[0])
if list(img.shape)[0] == 3:
# save_image = img * 125.0
save_image = img
save_image = save_image.clamp(0, 1).numpy().transpose(1, 2, 0)
else:
save_image = img.squeeze().clamp(0, 1).numpy().transpose(1, 2, 0)
imsave(save_fn, save_image)
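# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# `save_img` above expects a CHW float tensor in [0, 1]; the output path is an
# illustrative assumption.
def _save_img_demo():
    img = torch.rand(3, 64, 64)
    save_img(img, save_fn='results/demo/rand.png')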
class Model(object):
def __init__(self, cfg):
# parameter init
self.env = cfg.env
self.train_dataset = cfg.train_dataset
self.valid_dataset = cfg.valid_dataset
self.test_dataset = cfg.test_dataset
self.data_dir = cfg.data_dir
self.save_dir = cfg.save_dir
self.num_threads = int(cfg.num_threads)
self.num_epochs = int(cfg.num_epochs)
self.save_epochs = int(cfg.save_epochs)
self.pretrain_epochs = int(cfg.pretrain_epochs)
self.batch_size = int(cfg.batch_size)
self.valid_batch_size = int(cfg.valid_batch_size)
self.test_batch_size = int(cfg.test_batch_size)
self.plot_iter = int(cfg.plot_iter)
self.crop_size = int(cfg.crop_size)
self.scale_factor = int(cfg.scale_factor)
self.lr = float(cfg.lr)
def load_dataset(self, mode='train', random_scale=True, rotate=True, fliplr=True, fliptb=True):
if mode == 'train':
train_set = TrainDataset(os.path.join(self.data_dir, self.train_dataset),
crop_size=self.crop_size, scale_factor=self.scale_factor,
random_scale=random_scale, rotate=rotate, fliplr=fliplr, fliptb=fliptb)
return DataLoader(dataset=train_set, num_workers=self.num_threads,
batch_size=self.batch_size, shuffle=True)
elif mode == 'valid':
valid_set = DevDataset(os.path.join(
self.data_dir, self.valid_dataset))
return DataLoader(dataset=valid_set, num_workers=self.num_threads,
batch_size=self.valid_batch_size, shuffle=True)
elif mode == 'test':
test_set = TestDataset(os.path.join(
self.data_dir, self.test_dataset))
return DataLoader(dataset=test_set, num_workers=self.num_threads,
batch_size=self.test_batch_size, shuffle=False)
def train(self, edgenetpath=None, sr2x1_path=None, sr2x2_path=None, srcnn_path=None, srresnet_path=None,
is_fine_tune=False, random_scale=True, rotate=True, fliplr=True, fliptb=True):
vis = Visualizer(self.env)
print('================ Loading datasets =================')
# load training dataset
print('## Current Mode: Train')
# train_data_loader = self.load_dataset(mode='valid')
train_data_loader = self.load_dataset(
mode='train', random_scale=random_scale, rotate=rotate, fliplr=fliplr, fliptb=fliptb)
##########################################################
##################### build network ######################
##########################################################
print('Building Networks and initialize parameters\' weights....')
# init sr resnet
# srresnet2x1 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
# norm=NORM, activation='prelu', learn_residual=True)
# srresnet2x2 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
# norm=NORM, activation='prelu',learn_residual=True)
srresnet2x1 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)
srresnet2x2 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)
srresnet2x1.apply(weights_init_normal)
srresnet2x2.apply(weights_init_normal)
# init discriminator
discnet = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=5)
# init edgenet
edgenet = HED_1L()
if edgenetpath is None or not os.path.exists(edgenetpath):
raise Exception('Invalid edgenet model')
else:
pretrained_dict = torch.load(edgenetpath)
model_dict = edgenet.state_dict()
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
edgenet.load_state_dict(model_dict)
# init vgg feature
featuremapping = VGGFeatureMap(models.vgg19(pretrained=True))
# load pretrained srresnet or just initialize
if sr2x1_path is None or not os.path.exists(sr2x1_path):
print('===> initialize the srresnet2x1')
print('======> No pretrained model')
else:
print('======> loading the weight from pretrained model')
pretrained_dict = torch.load(sr2x1_path)
model_dict = srresnet2x1.state_dict()
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
srresnet2x1.load_state_dict(model_dict)
if sr2x2_path is None or not os.path.exists(sr2x2_path):
print('===> initialize the srresnet2x2')
print('======> No pretrained model')
else:
print('======> loading the weight from pretrained model')
pretrained_dict = torch.load(sr2x2_path)
model_dict = srresnet2x2.state_dict()
pretrained_dict = {k: v for k,
v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
srresnet2x2.load_state_dict(model_dict)
# optimizer init
# different learning rate
lr = self.lr
srresnet2x1_optimizer = optim.Adam(
srresnet2x1.parameters(), lr=lr, betas=(0.9, 0.999))
srresnet2x2_optimizer = optim.Adam(
srresnet2x2.parameters(), lr=lr, betas=(0.9, 0.999))
disc_optimizer = optim.Adam(
discnet.parameters(), lr=lr/10, betas=(0.9, 0.999))
# loss function init
MSE_loss = nn.MSELoss()
BCE_loss = nn.BCELoss()
# cuda accelerate
if USE_GPU:
edgenet.cuda()
srresnet2x1.cuda()
srresnet2x2.cuda()
discnet.cuda()
featuremapping.cuda()
MSE_loss.cuda()
BCE_loss.cuda()
print('\tCUDA acceleration is available.')
##########################################################
##################### train network ######################
##########################################################
import torchnet as tnt
from tqdm import tqdm
from PIL import Image
# batchnorm = nn.BatchNorm2d(1).cuda()
edge_avg_loss = tnt.meter.AverageValueMeter()
total_avg_loss = tnt.meter.AverageValueMeter()
disc_avg_loss = tnt.meter.AverageValueMeter()
# psnr_2x_avg = tnt.meter.AverageValueMeter()
# ssim_2x_avg = tnt.meter.AverageValueMeter()
# psnr_4x_avg = tnt.meter.AverageValueMeter()
# ssim_4x_avg = tnt.meter.AverageValueMeter()
srresnet2x1.train()
srresnet2x2.train()
discnet.train()
itcnt = 0
for epoch in range(self.num_epochs):
edge_avg_loss.reset()
total_avg_loss.reset()
disc_avg_loss.reset()
# psnr_2x_avg.reset()
# ssim_2x_avg.reset()
# psnr_4x_avg.reset()
# ssim_4x_avg.reset()
            # learning rate is decayed by a factor of 0.5 every 5 epochs
if (epoch + 1) % 5 == 0:
for param_group in srresnet2x1_optimizer.param_groups:
param_group["lr"] *= 0.5
print("Learning rate decay for srresnet2x1: lr={}".format(
srresnet2x1_optimizer.param_groups[0]["lr"]))
for param_group in srresnet2x2_optimizer.param_groups:
param_group["lr"] *= 0.5
print("Learning rate decay for srresnet2x2: lr={}".format(
srresnet2x2_optimizer.param_groups[0]["lr"]))
for param_group in disc_optimizer.param_groups:
param_group["lr"] *= 0.5
print("Learning rate decay for discnet: lr={}".format(
disc_optimizer.param_groups[0]["lr"]))
itbar = tqdm(enumerate(train_data_loader))
for ii, (hr, lr2x, lr4x, bc2x, bc4x) in itbar:
mini_batch = hr.size()[0]
hr_ = Variable(hr)
lr2x_ = Variable(lr2x)
lr4x_ = Variable(lr4x)
bc2x_ = Variable(bc2x)
bc4x_ = Variable(bc4x)
real_label = Variable(torch.ones(mini_batch))
fake_label = Variable(torch.zeros(mini_batch))
# cuda mode setting
if USE_GPU:
hr_ = hr_.cuda()
lr2x_ = lr2x_.cuda()
lr4x_ = lr4x_.cuda()
bc2x_ = bc2x_.cuda()
bc4x_ = bc4x_.cuda()
real_label = real_label.cuda()
fake_label = fake_label.cuda()
# =============================================================== #
# ================ Edge-based srresnet training ================= #
# =============================================================== #
sr2x_ = srresnet2x1(lr4x_)
sr4x_ = srresnet2x2(lr2x_)
'''===================== Train Discriminator ====================='''
if epoch + 1 > self.pretrain_epochs:
disc_optimizer.zero_grad()
#===== 2x disc loss =====#
real_decision_2x = discnet(lr2x_)
real_loss_2x = BCE_loss(
real_decision_2x, real_label.detach())
fake_decision_2x = discnet(sr2x_.detach())
fake_loss_2x = BCE_loss(
fake_decision_2x, fake_label.detach())
disc_loss_2x = real_loss_2x + fake_loss_2x
disc_loss_2x.backward()
disc_optimizer.step()
#===== 4x disc loss =====#
real_decision_4x = discnet(hr_)
real_loss_4x = BCE_loss(
real_decision_4x, real_label.detach())
fake_decision_4x = discnet(sr4x_.detach())
fake_loss_4x = BCE_loss(
fake_decision_4x, fake_label.detach())
disc_loss_4x = real_loss_4x + fake_loss_4x
disc_loss_4x.backward()
disc_optimizer.step()
disc_avg_loss.add(
(disc_loss_2x + disc_loss_4x).data.item())
'''=================== Train srresnet Generator ==================='''
edge_trade_off = [0.7, 0.2, 0.1, 0.05, 0.01, 0.3]
if epoch + 1 > self.pretrain_epochs:
a1, a2, a3 = 0.75, 0.1, 0.65
else:
a1, a2, a3 = 0.75, 0.0, 0.7
if not is_fine_tune:
#============ calculate 2x loss ==============#
srresnet2x1_optimizer.zero_grad()
#### Edgenet Loss ####
                    pred = edgenet(sr2x_)
                    real = edgenet(lr2x_)
                    # keep gradients flowing through `pred` so the edge loss can train the generator
                    edge_loss_2x = BCE_loss(pred, real.detach())
# for i in range(6):
# edge_loss_2x += edge_trade_off[i] * \
# BCE_loss(pred[i].detach(), real[i].detach())
# edge_loss = 0.7 * BCE2d(pred[0], real[i]) + 0.3 * BCE2d(pred[5], real[i])
#### Content Loss ####
content_loss_2x = MSE_loss(sr2x_, lr2x_) #+ 0.1*BCE_loss(1-sr2x_, 1-lr2x_)
#### Perceptual Loss ####
real_feature = featuremapping(lr2x_)
fake_feature = featuremapping(sr2x_)
vgg_loss_2x = MSE_loss(fake_feature, real_feature.detach())
#### Adversarial Loss ####
advs_loss_2x = BCE_loss(discnet(sr2x_), real_label) if epoch + 1 > self.pretrain_epochs else 0
# advs_loss_2x = 0
#============== loss backward ===============#
total_loss_2x = a1 * edge_loss_2x + a2 * advs_loss_2x + \
a3 * content_loss_2x + (1.0 - a3) * vgg_loss_2x
# total_loss_2x = 1.0 * content_loss_2x + 0.25 * vgg_loss_2x
total_loss_2x.backward()
srresnet2x1_optimizer.step()
#============ calculate scores ==============#
# psnr_2x_score_process = batch_compare_filter(
# sr2x_.cpu().data, lr2x, PSNR)
# psnr_2x_avg.add(psnr_2x_score_process)
# ssim_2x_score_process = batch_compare_filter(
# sr2x_.cpu().data, lr2x, SSIM)
# ssim_2x_avg.add(ssim_2x_score_process)
#============ calculate 4x loss ==============#
if is_fine_tune:
sr4x_ = srresnet2x2(srresnet2x1(lr4x_))
srresnet2x2_optimizer.zero_grad()
#### Edgenet Loss ####
                pred = edgenet(sr4x_)
                real = edgenet(hr_)
                # edge_loss_4x = 0
                # keep gradients flowing through `pred` so the edge loss can train the generator
                edge_loss_4x = BCE_loss(pred, real.detach())
# for i in range(6):
# edge_loss_4x += edge_trade_off[i] * \
# BCE_loss(pred[i].detach(), real[i].detach())
# edge_loss = 0.7 * BCE2d(pred[0], real[i]) + 0.3 * BCE2d(pred[5], real[i])
#### Content Loss ####
content_loss_4x = MSE_loss(sr4x_, hr_) #+ 0.1*BCE_loss(1-sr4x_, 1-hr_)
#### Perceptual Loss ####
real_feature = featuremapping(hr_)
fake_feature = featuremapping(sr4x_)
vgg_loss_4x = MSE_loss(fake_feature, real_feature.detach())
#### Adversarial Loss ####
advs_loss_4x = BCE_loss(discnet(sr4x_), real_label) if epoch + 1 > self.pretrain_epochs else 0
# advs_loss_4x = 0
#============== loss backward ===============#
total_loss_4x = a1 * edge_loss_4x + a2 * advs_loss_4x + \
a3 * content_loss_4x + (1.0 - a3) * vgg_loss_4x
# total_loss_4x = 1.0 * content_loss_4x + 0.25 * vgg_loss_4x
total_loss_4x.backward()
srresnet2x2_optimizer.step()
#============ calculate scores ==============#
# psnr_4x_score_process = batch_compare_filter(
# sr4x_.cpu().data, hr, PSNR)
# psnr_4x_avg.add(psnr_4x_score_process)
# ssim_4x_score_process = batch_compare_filter(
# sr4x_.cpu().data, hr, SSIM)
# ssim_4x_avg.add(ssim_4x_score_process)
if is_fine_tune:
total_avg_loss.add(total_loss_4x.data.item())
edge_avg_loss.add(edge_loss_4x.data.item())
else:
total_avg_loss.add((total_loss_2x+total_loss_4x).data.item())
edge_avg_loss.add((edge_loss_2x+edge_loss_4x).data.item())
if epoch + 1 > self.pretrain_epochs:
disc_avg_loss.add((advs_loss_2x+advs_loss_4x).data.item())
if (ii+1) % self.plot_iter == self.plot_iter-1:
res = {'edge loss': edge_avg_loss.value()[0],
'generate loss': total_avg_loss.value()[0],
'discriminate loss': disc_avg_loss.value()[0]}
vis.plot_many(res, 'Deblur net Loss')
# psnr_2x_score_origin = batch_compare_filter(
# bc2x, lr2x, PSNR)
# psnr_4x_score_origin = batch_compare_filter(bc4x, hr, PSNR)
# res_psnr = {'2x_origin_psnr': psnr_2x_score_origin,
# '2x_sr_psnr': psnr_2x_score_process,
# '4x_origin_psnr': psnr_4x_score_origin,
# '4x_sr_psnr': psnr_4x_score_process}
# vis.plot_many(res_psnr, 'PSNR Score')
# ssim_2x_score_origin = batch_compare_filter(
# bc2x, lr2x, SSIM)
# ssim_4x_score_origin = batch_compare_filter(bc4x, hr, SSIM)
# res_ssim = {'2x_origin_ssim': ssim_2x_score_origin,
# '2x_sr_ssim': ssim_2x_score_process,
# '4x_origin_ssim': ssim_4x_score_origin,
# '4x_sr_ssim': ssim_4x_score_process}
# vis.plot_many(res_ssim, 'SSIM Score')
#======================= Output result of total training processing =======================#
itcnt += 1
# itbar.set_description("Epoch: [%2d] [%d/%d] PSNR_2x_Avg: %.6f, SSIM_2x_Avg: %.6f, PSNR_4x_Avg: %.6f, SSIM_4x_Avg: %.6f"
# % ((epoch + 1), (ii + 1), len(train_data_loader),
# psnr_2x_avg.value()[0], ssim_2x_avg.value()[
# 0],
# psnr_4x_avg.value()[0], ssim_4x_avg.value()[0]))
itbar.set_description("Epoch: [%2d] [%d/%d]"
% ((epoch + 1), (ii + 1), len(train_data_loader)))
if (ii+1) % self.plot_iter == self.plot_iter-1:
# test_ = deblurnet(torch.cat([y_.detach(), x_edge], 1))
hr_edge = edgenet(hr_)
sr2x_edge = edgenet(sr2x_)
sr4x_edge = edgenet(sr4x_)
vis.images(hr_edge.cpu().data, win='HR edge predict', opts=dict(
title='HR edge predict'))
vis.images(sr2x_edge.cpu().data, win='SR2X edge predict', opts=dict(
title='SR2X edge predict'))
vis.images(sr4x_edge.cpu().data, win='SR4X edge predict', opts=dict(
title='SR4X edge predict'))
vis.images(lr2x, win='LR2X image',
opts=dict(title='LR2X image'))
vis.images(lr4x, win='LR4X image',
opts=dict(title='LR4X image'))
vis.images(bc2x, win='BC2X image',
opts=dict(title='BC2X image'))
vis.images(bc4x, win='BC4X image',
opts=dict(title='BC4X image'))
vis.images(sr2x_.cpu().data, win='SR2X image',
opts=dict(title='SR2X image'))
vis.images(sr4x_.cpu().data, win='SR4X image',
opts=dict(title='SR4X image'))
vis.images(hr, win='HR image',
opts=dict(title='HR image'))
t_save_dir = 'results/train_result/'+self.train_dataset
if not os.path.exists(t_save_dir):
os.makedirs(t_save_dir)
if (epoch + 1) % self.save_epochs == 0 and (ii+1) % 200 == 0:
self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, epoch+1))
self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, epoch+1))
if (epoch + 1) % self.save_epochs == 0:
self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, epoch+1))
self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, epoch+1))
# Save final trained model and results
vis.save([self.env])
self.save_model(srresnet2x1, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x1_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, self.num_epochs))
self.save_model(srresnet2x2, os.path.join(self.save_dir, 'checkpoints', 'srunitnet'), 'srnet2x2_param_batch{}_lr{}_epoch{}'.
format(self.batch_size, self.lr, self.num_epochs))
def test(self, sr2x1_path=None, sr2x2_path=None):
test_data_dir = os.path.join(self.data_dir, self.test_dataset)
result_data_dir = os.path.join(self.save_dir, "test_results", "2x2UnitNet_SR_"+self.test_dataset)
if not os.path.exists(result_data_dir):
os.makedirs(result_data_dir)
# judge whether model exists
if not os.path.exists(sr2x1_path):
raise Exception('sr2x1 resnet model not exists')
if not os.path.exists(sr2x2_path):
raise Exception('sr2x2 resnet model not exists')
# load network params
# srresnet2x1 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
# norm=NORM, activation='prelu', learn_residual=True)
# srresnet2x2 = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
# norm=NORM, activation='prelu', learn_residual=True)
srresnet2x1 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)
srresnet2x2 = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)
srresnet2x1.load_state_dict(torch.load(sr2x1_path))
srresnet2x2.load_state_dict(torch.load(sr2x2_path))
if USE_GPU:
srresnet2x1.cuda()
srresnet2x2.cuda()
import torchnet as tnt
from tqdm import tqdm
from PIL import Image
import time
psnr_4x_avg = tnt.meter.AverageValueMeter()
ssim_4x_avg = tnt.meter.AverageValueMeter()
time_avg = tnt.meter.AverageValueMeter()
srresnet2x1.eval()
srresnet2x2.eval()
# processing test data
iterbar = tqdm(os.listdir(test_data_dir))
import cv2
import numpy as np
for img_name in iterbar:
try:
img = cv2.imread(os.path.join(test_data_dir, img_name), cv2.IMREAD_COLOR)
img = cv2.resize(img, None, None, 0.5, 0.5, interpolation=cv2.INTER_AREA)
h, w, c = img.shape[0], img.shape[1], img.shape[2]
w_lr4x, h_lr4x = int(
w // self.scale_factor), int(h // self.scale_factor)
w_lr2x, h_lr2x = w_lr4x * 2, h_lr4x * 2
w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * self.scale_factor
w_num, h_num = w // self.crop_size, h // self.crop_size
w_num += 1 if w % self.crop_size != 0 else 0
h_num += 1 if h % self.crop_size != 0 else 0
res = np.zeros((h*2, w*2, c), dtype=np.uint8)
for i in range(w_num):
l = i * self.crop_size
l_new = l * 2
r = min(l+self.crop_size, w)
r_new = w * 2 if r == w else l_new + self.crop_size * 2
for j in range(h_num):
t = j * self.crop_size
t_new = t * 2
b = min(t+self.crop_size, h)
b_new = h * 2 if b == h else t_new + self.crop_size * 2
lr = img[t:b, l:r]
lr = Transforms.ToTensor()(lr).unsqueeze(0)
if USE_GPU:
lr = lr.cuda()
sr = srresnet2x1(lr).squeeze()
res_sr = sr.cpu().data.clamp(0, 1).numpy().transpose(1, 2, 0)*255
res[t_new:b_new, l_new:r_new] = res_sr
cv2.imwrite(os.path.join(result_data_dir, img_name), res)
except IOError:
pass
finally:
pass
# for img_name in iterbar:
# try:
# img = Image.open(os.path.join(test_data_dir, img_name)).convert("RGB")
# transform = Transforms.RandomCrop(self.crop_size)
# img = transform(img)
# w, h = img.size[0], img.size[1]
# w_lr4x, h_lr4x = int(
# w // self.scale_factor), int(h // self.scale_factor)
# w_lr2x, h_lr2x = w_lr4x * 2, h_lr4x * 2
# # w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * self.scale_factor
# # transform tensor
# # hr = img.resize((w_hr, h_hr), Image.ANTIALIAS)
# # lr2x = img.resize((w_lr2x, h_lr2x), Image.ANTIALIAS)
# lr4x = img.resize((w_lr4x, h_lr4x), Image.ANTIALIAS)
# lr4x = img.resize((w_lr2x, h_lr2x), Image.ANTIALIAS)
# # hr_ = Transforms.ToTensor()(hr).unsqueeze(0)
# # lr2x_ = Transforms.ToTensor()(lr2x).unsqueeze(0)
# lr4x_ = Transforms.ToTensor()(lr4x).unsqueeze(0)
# if USE_GPU:
# # hr_ = hr_.cuda()
# # lr2x_ = lr2x_.cuda()
# lr4x_ = lr4x_.cuda()
# torch.cuda.synchronize()
# start = time.time()
# sr4x_ = srresnet2x2(srresnet2x1(lr4x_))
# # sr4x_ = srresnet2x1(lr4x_)
# torch.cuda.synchronize()
# end = time.time()
# time_avg.add(end-start)
# except IOError:
# pass
# finally:
# pass
# # calculate PSNR & SSIM
# psnr_4x_score = batch_compare_filter(
# sr4x_.cpu().data, hr_, PSNR)
# ssim_4x_score = batch_compare_filter(
# sr4x_.cpu().data, hr_, SSIM)
# psnr_4x_avg.add(psnr_4x_score)
# ssim_4x_avg.add(ssim_4x_score)
# # save image
# save_img(sr4x_.cpu().data, os.path.join(result_data_dir, img_name))
print(time_avg.value()[0])
print("final PSNR score: {}".format(psnr_4x_avg.value()[0]))
print("final SSIM score: {}".format(ssim_4x_avg.value()[0]))
def test_t(self, sr2x1_1_path=None, sr2x2_1_path=None, sr2x1_2_path=None, sr2x2_2_path=None):
test_data_dir = os.path.join(self.data_dir, self.test_dataset)
sr_edge_dir = os.path.join(self.save_dir, "show_results", "2x2UnitNet_Edge_SR_"+self.test_dataset)
if not os.path.exists(sr_edge_dir):
os.makedirs(sr_edge_dir)
sr_none_dir = os.path.join(self.save_dir, "show_results", "2x2UnitNet_none_SR_"+self.test_dataset)
if not os.path.exists(sr_none_dir):
os.makedirs(sr_none_dir)
bc_dir = os.path.join(self.save_dir, "show_results", "Bicubic_SR_"+self.test_dataset)
if not os.path.exists(bc_dir):
os.makedirs(bc_dir)
hr_dir = os.path.join(self.save_dir, "show_results", "HR_"+self.test_dataset)
if not os.path.exists(hr_dir):
os.makedirs(hr_dir)
lr_dir = os.path.join(self.save_dir, "show_results", "LR_"+self.test_dataset)
if not os.path.exists(lr_dir):
os.makedirs(lr_dir)
# judge whether model exists
if not os.path.exists(sr2x1_1_path):
raise Exception('sr2x1 resnet model not exists')
if not os.path.exists(sr2x2_1_path):
raise Exception('sr2x2 resnet model not exists')
if not os.path.exists(sr2x1_2_path):
raise Exception('sr2x1 resnet model not exists')
if not os.path.exists(sr2x2_2_path):
raise Exception('sr2x2 resnet model not exists')
# load network params
srresnet2x1_edge = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
norm=NORM, activation='prelu', learn_residual=True)
srresnet2x2_edge = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
norm=NORM, activation='prelu', learn_residual=True)
srresnet2x1_none = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
norm=NORM, activation='prelu', learn_residual=True)
srresnet2x2_none = Upscale2xResnetGenerator(input_nc=3, output_nc=3, n_blocks=5,
norm=NORM, activation='prelu', learn_residual=True)
srresnet2x1_edge.load_state_dict(torch.load(sr2x1_1_path))
srresnet2x2_edge.load_state_dict(torch.load(sr2x2_1_path))
srresnet2x1_none.load_state_dict(torch.load(sr2x1_2_path))
srresnet2x2_none.load_state_dict(torch.load(sr2x2_2_path))
if USE_GPU:
srresnet2x1_edge.cuda()
srresnet2x2_edge.cuda()
srresnet2x1_none.cuda()
srresnet2x2_none.cuda()
import torchnet as tnt
from tqdm import tqdm
from PIL import Image
psnr_edge_4x_avg = tnt.meter.AverageValueMeter()
ssim_edge_4x_avg = tnt.meter.AverageValueMeter()
psnr_none_4x_avg = tnt.meter.AverageValueMeter()
ssim_none_4x_avg = tnt.meter.AverageValueMeter()
# srresnet2x1_edge.eval()
# srresnet2x2_edge.eval()
# srresnet2x1_none.eval()
# srresnet2x2_none.eval()
# processing test data
iterbar = tqdm(os.listdir(test_data_dir))
for img_name in iterbar:
img = Image.open(os.path.join(test_data_dir, img_name)).convert("RGB")
transform = Transforms.RandomCrop(self.crop_size)
img = transform(img)
w, h = img.size[0], img.size[1]
w_lr4x, h_lr4x = int(
w // self.scale_factor), int(h // self.scale_factor)
w_hr, h_hr = w_lr4x * self.scale_factor, h_lr4x * self.scale_factor
# transform tensor
hr = img.resize((w_hr, h_hr), Image.ANTIALIAS)
lr4x = img.resize((w_lr4x, h_lr4x), Image.ANTIALIAS)
bc4x = lr4x.resize((w_hr, h_hr), Image.BICUBIC)
hr_ = Transforms.ToTensor()(hr).unsqueeze(0)
bc4x_ = Transforms.ToTensor()(bc4x).unsqueeze(0)
lr4x_ = Transforms.ToTensor()(lr4x).unsqueeze(0)
if USE_GPU:
hr_ = hr_.cuda()
lr4x_ = lr4x_.cuda()
sr4x_edge_ = srresnet2x2_edge(srresnet2x1_edge(lr4x_))
sr4x_none_ = srresnet2x2_none(srresnet2x1_none(lr4x_))
# calculate PSNR & SSIM
psnr_edge_4x_score = batch_compare_filter(
sr4x_edge_.cpu().data, hr_, PSNR)
ssim_edge_4x_score = batch_compare_filter(
sr4x_edge_.cpu().data, hr_, SSIM)
psnr_edge_4x_avg.add(psnr_edge_4x_score)
ssim_edge_4x_avg.add(ssim_edge_4x_score)
psnr_none_4x_score = batch_compare_filter(
sr4x_none_.cpu().data, hr_, PSNR)
ssim_none_4x_score = batch_compare_filter(
sr4x_none_.cpu().data, hr_, SSIM)
psnr_none_4x_avg.add(psnr_none_4x_score)
ssim_none_4x_avg.add(ssim_none_4x_score)
# save image
save_img(sr4x_edge_.cpu().data, os.path.join(sr_edge_dir, img_name))
save_img(sr4x_none_.cpu().data, os.path.join(sr_none_dir, img_name))
save_img(bc4x_.cpu().data, os.path.join(bc_dir, img_name))
save_img(hr_.cpu().data, os.path.join(hr_dir, img_name))
save_img(lr4x_.cpu().data, os.path.join(lr_dir, img_name))
print("final edge PSNR score: {}".format(psnr_edge_4x_avg.value()[0]))
print("final edge SSIM score: {}".format(ssim_edge_4x_avg.value()[0]))
print("final none PSNR score: {}".format(psnr_none_4x_avg.value()[0]))
print("final none SSIM score: {}".format(ssim_none_4x_avg.value()[0]))
def save_model(self, model, save_dir, model_name, mtype='pkl'):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if mtype == 'pkl':
save_path = os.path.join(save_dir, model_name+'.pkl')
torch.save(model.state_dict(), save_path)
elif mtype == 'pth':
save_path = os.path.join(save_dir, model_name+'.pth')
torch.save(model.state_dict(), save_path)
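# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Round-tripping generator weights with `save_model` above; `cfg` is assumed to
# be a config object carrying the attributes read in `Model.__init__`, and the
# paths are illustrative.
def _save_load_demo(cfg):
    model = Model(cfg)
    net = WDSRResnetGenerator(input_nc=3, output_nc=3, n_blocks=5)
    model.save_model(net, 'checkpoints/demo', 'srnet_demo')  # writes srnet_demo.pkl
    net.load_state_dict(torch.load('checkpoints/demo/srnet_demo.pkl'))
    return net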
| [
"torch.zeros",
"torch.nn.MSELoss",
"torch.autograd.Variable",
"torch.ones",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"torch.load"
] | 1.1.0 | JinGyeSetBirdsFree/FudanOCR | fd79b679044ea23fd9eb30691453ed0805d2e98b |
1.1 | from roi_align.roi_align import RoIAlign # RoIAlign module
from roi_align.roi_align import CropAndResize # crop_and_resize module
from torchvision import transforms
import torch
import cv2
import numpy as np
from torch.autograd import Variable
def to_variable(data, requires_grad, is_cuda):
    if is_cuda:
        data = data.cuda()
    data = Variable(data, requires_grad=requires_grad)
    return data
# input data
is_cuda = torch.cuda.is_available()
# image_data = cv2.imread('/data/2019AAAI/data/ctw15/test/text_image/1002.jpg')
image_data = np.ones((100,100,3))
image_data = image_data.transpose((2, 0, 1)).astype(np.float32)
image_data = torch.from_numpy((image_data))
boxes_data = torch.Tensor([[0, 0, 200, 200], [0, 0, 200, 200]])
box_index_data = torch.IntTensor([0, 0])  # one batch index per box
image = to_variable(image_data, requires_grad=True, is_cuda=is_cuda)
image = image.unsqueeze(0)
print(image.size())
boxes = to_variable(boxes_data, requires_grad=False, is_cuda=is_cuda)
box_index = to_variable(box_index_data, requires_grad=False, is_cuda=is_cuda)
print(image, boxes, box_index)
# RoIAlign layer
roi_align = RoIAlign(7, 7,extrapolation_value=0)
crops = roi_align(image, boxes, box_index)
print(crops)
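# Sanity check (sketch): RoIAlign returns one crop per box, so for two boxes and a
# 3-channel image the result should have shape (num_boxes, channels, 7, 7) == (2, 3, 7, 7).
print(crops.size())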
| [
"torch.IntTensor",
"torch.autograd.Variable",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.Tensor"
] | 1.1.0 | JinGyeSetBirdsFree/FudanOCR | fd79b679044ea23fd9eb30691453ed0805d2e98b |
1.8 | """Policies: abstract base class and concrete implementations."""
import collections
import copy
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
ConditionalCategoricalDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
create_mlp,
)
from stable_baselines3.common.type_aliases import Schedule
from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor
class BaseModel(nn.Module, ABC):
"""
The base model object: makes predictions in response to observations.
In the case of policies, the prediction is an action. In the case of critics, it is the
estimated value of the observation.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
features_extractor: Optional[nn.Module] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(BaseModel, self).__init__()
if optimizer_kwargs is None:
optimizer_kwargs = {}
if features_extractor_kwargs is None:
features_extractor_kwargs = {}
self.observation_space = observation_space
self.action_space = action_space
self.features_extractor = features_extractor
self.normalize_images = normalize_images
self.optimizer_class = optimizer_class
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = None # type: Optional[th.optim.Optimizer]
self.features_extractor_class = features_extractor_class
self.features_extractor_kwargs = features_extractor_kwargs
@abstractmethod
def forward(self, *args, **kwargs):
pass
def _update_features_extractor(
self,
net_kwargs: Dict[str, Any],
features_extractor: Optional[BaseFeaturesExtractor] = None,
) -> Dict[str, Any]:
"""
Update the network keyword arguments and create a new features extractor object if needed.
If a ``features_extractor`` object is passed, then it will be shared.
:param net_kwargs: the base network keyword arguments, without the ones
related to features extractor
:param features_extractor: a features extractor object.
If None, a new object will be created.
:return: The updated keyword arguments
"""
net_kwargs = net_kwargs.copy()
if features_extractor is None:
# The features extractor is not shared, create a new one
features_extractor = self.make_features_extractor()
net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))
return net_kwargs
def make_features_extractor(self) -> BaseFeaturesExtractor:
"""Helper method to create a features extractor."""
return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No features extractor was set"
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
return self.features_extractor(preprocessed_obs)
def _get_constructor_parameters(self) -> Dict[str, Any]:
"""
Get data that need to be saved in order to re-create the model when loading it from disk.
:return: The dictionary to pass as kwargs to the constructor when reconstructing this model.
"""
return dict(
observation_space=self.observation_space,
action_space=self.action_space,
# Passed to the constructor by child class
# squash_output=self.squash_output,
# features_extractor=self.features_extractor
normalize_images=self.normalize_images,
)
@property
def device(self) -> th.device:
"""Infer which device this policy lives on by inspecting its parameters.
If it has no parameters, the 'cpu' device is used as a fallback.
:return:"""
for param in self.parameters():
return param.device
return get_device("cpu")
def save(self, path: str) -> None:
"""
Save model to a given location.
:param path:
"""
th.save({"state_dict": self.state_dict(), "data": self._get_constructor_parameters()}, path)
@classmethod
def load(cls, path: str, device: Union[th.device, str] = "auto") -> "BaseModel":
"""
Load model from path.
:param path:
:param device: Device on which the policy should be loaded.
:return:
"""
device = get_device(device)
saved_variables = th.load(path, map_location=device)
# Allow to load policy saved with older version of SB3
if "sde_net_arch" in saved_variables["data"]:
warnings.warn(
"sde_net_arch is deprecated, please downgrade to SB3 v1.2.0 if you need such parameter.",
DeprecationWarning,
)
del saved_variables["data"]["sde_net_arch"]
# Create policy object
model = cls(**saved_variables["data"]) # pytype: disable=not-instantiable
# Load weights
model.load_state_dict(saved_variables["state_dict"])
model.to(device)
return model
def load_from_vector(self, vector: np.ndarray) -> None:
"""
Load parameters from a 1D vector.
:param vector:
"""
th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())
def parameters_to_vector(self) -> np.ndarray:
"""
Convert the parameters to a 1D vector.
:return:
"""
return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()
def set_training_mode(self, mode: bool) -> None:
"""
Put the policy in either training or evaluation mode.
This affects certain modules, such as batch normalisation and dropout.
:param mode: if true, set to training mode, else set to evaluation mode
"""
self.train(mode)
def obs_to_tensor(self, observation: Union[np.ndarray, Dict[str, np.ndarray]]) -> Tuple[th.Tensor, bool]:
"""
Convert an input observation to a PyTorch tensor that can be fed to a model.
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:return: The observation as PyTorch tensor
and whether the observation is vectorized or not
"""
vectorized_env = False
if isinstance(observation, dict):
# need to copy the dict as the dict in VecFrameStack will become a torch tensor
observation = copy.deepcopy(observation)
for key, obs in observation.items():
obs_space = self.observation_space.spaces[key]
if is_image_space(obs_space):
obs_ = maybe_transpose(obs, obs_space)
else:
obs_ = np.array(obs)
vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)
# Add batch dimension if needed
observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape)
elif is_image_space(self.observation_space):
# Handle the different cases for images
# as PyTorch use channel first format
observation = maybe_transpose(observation, self.observation_space)
else:
observation = np.array(observation)
if not isinstance(observation, dict):
# Dict obs need to be handled separately
vectorized_env = is_vectorized_observation(observation, self.observation_space)
# Add batch dimension if needed
observation = observation.reshape((-1,) + self.observation_space.shape)
observation = obs_as_tensor(observation, self.device)
return observation, vectorized_env
class BasePolicy(BaseModel):
"""The base policy object.
Parameters are mostly the same as `BaseModel`; additions are documented below.
:param args: positional arguments passed through to `BaseModel`.
:param kwargs: keyword arguments passed through to `BaseModel`.
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function.
"""
def __init__(self, *args, squash_output: bool = False, **kwargs):
super(BasePolicy, self).__init__(*args, **kwargs)
self._squash_output = squash_output
@staticmethod
def _dummy_schedule(progress_remaining: float) -> float:
"""(float) Useful for pickling policy."""
del progress_remaining
return 0.0
@property
def squash_output(self) -> bool:
"""(bool) Getter for squash_output."""
return self._squash_output
@staticmethod
def init_weights(module: nn.Module, gain: float = 1) -> None:
"""
Orthogonal initialization (used in PPO and A2C)
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.orthogonal_(module.weight, gain=gain)
if module.bias is not None:
module.bias.data.fill_(0.0)
@abstractmethod
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
By default provides a dummy implementation -- not all BasePolicy classes
implement this, e.g. if they are a Critic in an Actor-Critic method.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action from an observation (and optional hidden state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last hidden states (can be None, used in recurrent policies)
:param episode_start: The last masks (can be None, used in recurrent policies)
this corresponds to the beginning of episodes,
where the hidden states of the RNN must be reset.
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next hidden state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if episode_start is None:
# episode_start = [False for _ in range(self.n_envs)]
# Switch to eval mode (this affects batch norm / dropout)
self.set_training_mode(False)
observation, vectorized_env = self.obs_to_tensor(observation)
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic)
# Convert to numpy
actions = actions.cpu().numpy()
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
# Remove batch dimension if needed
if not vectorized_env:
actions = actions[0]
return actions, state
def scale_action(self, action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: Action to scale
:return: Scaled action
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: Action to un-scale
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
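# Example (sketch, not part of the SB3 API): for a Box space with low=-2 and
# high=2, scale_action maps 2.0 -> 1.0 and -2.0 -> -1.0, and unscale_action is
# its exact inverse:
#   policy.scale_action(np.array([2.0]))    # -> array([1.0])
#   policy.unscale_action(np.array([1.0]))  # -> array([2.0])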
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super(ActorCriticPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": False,
}
if sde_net_arch is not None:
warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, (ConditionalCategoricalDistribution)):
self.action_net, self.embedding, self.other = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
if isinstance(self.action_dist, ConditionalCategoricalDistribution):
# mean_actions = self.action_net[0](latent_pi)
mean_actions = self.action_net(latent_pi)
# mean_actions = F.relu(mean_actions)
# distribution = self.action_dist.proba_distribution(mean_actions)
actions, distribution = self.action_dist.sample_all(mean_actions)
else:
distribution = self._get_action_dist_from_latent(latent_pi)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
return self.get_distribution(observation).get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
if isinstance(self.action_dist, ConditionalCategoricalDistribution):
mean_actions = self.action_net(latent_pi)
_, distribution = self.action_dist.sample_all(mean_actions)
else:
distribution = self._get_action_dist_from_latent(latent_pi)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def get_distribution(self, obs: th.Tensor) -> Distribution:
"""
Get the current policy distribution given the observations.
:param obs:
:return: the action distribution.
"""
features = self.extract_features(obs)
latent_pi = self.mlp_extractor.forward_actor(features)
return self._get_action_dist_from_latent(latent_pi)
def predict_values(self, obs: th.Tensor) -> th.Tensor:
"""
Get the estimated values according to the current policy given the observations.
:param obs:
:return: the estimated values.
"""
features = self.extract_features(obs)
latent_vf = self.mlp_extractor.forward_critic(features)
return self.value_net(latent_vf)
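# Example rollout step (sketch; `obs_tensor` is a preprocessed batch of observations):
#   actions, values, log_prob = policy(obs_tensor)                     # sample actions
#   values, log_prob, entropy = policy.evaluate_actions(obs_tensor, actions)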
class ActorCriticCnnPolicy(ActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(ActorCriticCnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MultiInputActorCriticPolicy(ActorCriticPolicy):
"""
Multi-input policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space (Dict)
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Uses the CombinedExtractor
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Dict,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(MultiInputActorCriticPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class ContinuousCritic(BaseModel):
"""
Critic network(s) for DDPG/SAC/TD3.
It represents the action-state value function (Q-value function).
Compared to A2C/PPO critics, this one represents the Q-value
and takes the continuous action as input. It is concatenated with the state
and then fed to the network which outputs a single value: Q(s, a).
For more recent algorithms like SAC/TD3, multiple networks
are created to give different estimates.
By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
:param observation_space: Observation space
:param action_space: Action space
:param net_arch: Network architecture
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param features_dim: Number of features
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param n_critics: Number of critic networks to create.
:param share_features_extractor: Whether the features extractor is shared or not
between the actor and the critic (this saves computation time)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
net_arch: List[int],
features_extractor: nn.Module,
features_dim: int,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
n_critics: int = 2,
share_features_extractor: bool = True,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
action_dim = get_action_dim(self.action_space)
self.share_features_extractor = share_features_extractor
self.n_critics = n_critics
self.q_networks = []
for idx in range(n_critics):
q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)
q_net = nn.Sequential(*q_net)
self.add_module(f"qf{idx}", q_net)
self.q_networks.append(q_net)
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
# when the features_extractor is shared with the actor
with th.set_grad_enabled(not self.share_features_extractor):
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
"""
Only predict the Q-value using the first network.
This allows to reduce computation when all the estimates are not needed
(e.g. when updating the policy in TD3).
"""
with th.no_grad():
features = self.extract_features(obs)
return self.q_networks[0](th.cat([features, actions], dim=1))
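# Sketch of the clipped double-Q target mentioned above (TD3-style): take the
# minimum over the critic ensemble to reduce overestimation.
#   q_values = critic(next_obs, next_actions)            # tuple of n_critics tensors
#   target_q, _ = th.min(th.cat(q_values, dim=1), dim=1, keepdim=True)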
_policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]]
def get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]:
"""
Returns the registered policy from the base type and name.
See `register_policy` for registering policies and explanation.
:param base_policy_type: the base policy class
:param name: the policy name
:return: the policy
"""
if base_policy_type not in _policy_registry:
raise KeyError(f"Error: the policy type {base_policy_type} is not registered!")
if name not in _policy_registry[base_policy_type]:
raise KeyError(
f"Error: unknown policy type {name},"
f"the only registed policy type are: {list(_policy_registry[base_policy_type].keys())}!"
)
return _policy_registry[base_policy_type][name]
def register_policy(name: str, policy: Type[BasePolicy]) -> None:
"""
Register a policy, so it can be called using its name.
e.g. SAC('MlpPolicy', ...) instead of SAC(MlpPolicy, ...).
The goal here is to standardize policy naming, e.g.
all algorithms can call upon "MlpPolicy" or "CnnPolicy",
and they receive respective policies that work for them.
Consider following:
OnlinePolicy
-- OnlineMlpPolicy ("MlpPolicy")
-- OnlineCnnPolicy ("CnnPolicy")
OfflinePolicy
-- OfflineMlpPolicy ("MlpPolicy")
-- OfflineCnnPolicy ("CnnPolicy")
Two policies have name "MlpPolicy" and two have "CnnPolicy".
In `get_policy_from_name`, the parent class (e.g. OnlinePolicy)
is given and used to select and return the correct policy.
:param name: the policy name
:param policy: the policy class
"""
sub_class = None
for cls in BasePolicy.__subclasses__():
if issubclass(policy, cls):
sub_class = cls
break
if sub_class is None:
raise ValueError(f"Error: the policy {policy} is not of any known subclasses of BasePolicy!")
if sub_class not in _policy_registry:
_policy_registry[sub_class] = {}
if name in _policy_registry[sub_class]:
# Check if the registered policy is same
# we try to register. If not so,
# do not override and complain.
if _policy_registry[sub_class][name] != policy:
raise ValueError(f"Error: the name {name} is already registered for a different policy, will not override.")
_policy_registry[sub_class][name] = policy
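# Example usage (sketch; `MyMlpPolicy` is a hypothetical ActorCriticPolicy subclass):
#   register_policy("MyMlpPolicy", MyMlpPolicy)
#   policy_cls = get_policy_from_name(ActorCriticPolicy, "MyMlpPolicy")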
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.Sequential",
"torch.no_grad",
"torch.FloatTensor",
"torch.load",
"torch.nn.init.orthogonal_",
"torch.set_grad_enabled"
] | 1.8.1 | adbelniak/stable-baselines3 | 61e3b9c3fc4b113b5de65dd3b083de7550676018 |
1.1 | import numpy as np
import os
import cv2
from models import Wav2Lip
import face_detection
import torch
def get_smoothened_boxes(boxes, T):
for i in range(len(boxes)):
if i + T > len(boxes):
window = boxes[len(boxes) - T:]
else:
window = boxes[i: i + T]
boxes[i] = np.mean(window, axis=0)
return boxes
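# Example (sketch): get_smoothened_boxes replaces each box, in place, with the mean
# of the window of up to T consecutive detections starting at that frame, so T=5
# damps per-frame jitter in the face detector output.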
def face_detect(images, device, face_det_batch_size, pads, nosmooth):
detector = face_detection.FaceAlignment(face_detection.LandmarksType._2D,
flip_input=False, device=device)
batch_size = face_det_batch_size
while 1:
predictions = []
try:
for i in range(0, len(images), batch_size):
predictions.extend(detector.get_detections_for_batch(np.array(images[i:i + batch_size])))
except RuntimeError:
if batch_size == 1:
raise RuntimeError(
'Image too big to run face detection on GPU. Please use the --resize_factor argument')
batch_size //= 2
print('Recovering from OOM error; New batch size: {}'.format(batch_size))
continue
break
results = []
pady1, pady2, padx1, padx2 = pads
for rect, image in zip(predictions, images):
if rect is None:
cv2.imwrite('temp/faulty_frame.jpg', image) # check this frame where the face was not detected.
raise ValueError('Face not detected! Ensure the video contains a face in all the frames.')
y1 = max(0, rect[1] - pady1)
y2 = min(image.shape[0], rect[3] + pady2)
x1 = max(0, rect[0] - padx1)
x2 = min(image.shape[1], rect[2] + padx2)
results.append([x1, y1, x2, y2])
boxes = np.array(results)
if not nosmooth: boxes = get_smoothened_boxes(boxes, T=5)
results = [[image[y1: y2, x1:x2], (y1, y2, x1, x2)] for image, (x1, y1, x2, y2) in zip(images, boxes)]
del detector
return results
def face_detect_wrapper(frames, device, face_det_batch_size, pads, nosmooth, box, static):
if box[0] == -1:
if not static:
face_det_results = face_detect(frames,
device, face_det_batch_size, pads, nosmooth) # BGR2RGB for CNN face detection
else:
face_det_results = face_detect([frames[0]],
device, face_det_batch_size, pads, nosmooth)
else:
print('Using the specified bounding box instead of face detection...')
y1, y2, x1, x2 = box
face_det_results = [[f[y1: y2, x1:x2], (y1, y2, x1, x2)] for f in frames]
return face_det_results
def datagen(frames, face_det_results, mels, start_frame_idx, static, img_size, wav2lip_batch_size):
# start frame idx is the current frame idx in the output video
# we start from this point
img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
start_frame_idx = start_frame_idx % len(frames) # loop back
num_frames = len(mels)
# take frames from start_frame_idx to start_frame_idx+num_frames
# wrapping around if necessary
if not static:
if len(frames) == 1:
    frames_current = frames
    face_det_results_current = face_det_results
elif start_frame_idx + num_frames > len(frames):
frames_current = frames[start_frame_idx:] + frames[:start_frame_idx + num_frames - len(frames)]
face_det_results_current = face_det_results[start_frame_idx:] + face_det_results[
:start_frame_idx + num_frames - len(frames)]
else:
frames_current = frames[start_frame_idx:start_frame_idx + num_frames]
face_det_results_current = face_det_results[start_frame_idx:start_frame_idx + num_frames]
else:
frames_current = frames
face_det_results_current = face_det_results
for i, m in enumerate(mels):
idx = 0 if static else i % len(frames_current)
frame_to_save = frames_current[idx].copy()
face, coords = face_det_results_current[idx].copy()
face = cv2.resize(face, (img_size, img_size))
img_batch.append(face)
mel_batch.append(m)
frame_batch.append(frame_to_save)
coords_batch.append(coords)
if len(img_batch) >= wav2lip_batch_size:
img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
img_masked = img_batch.copy()
img_masked[:, img_size // 2:] = 0
img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
yield img_batch, mel_batch, frame_batch, coords_batch
img_batch, mel_batch, frame_batch, coords_batch = [], [], [], []
if len(img_batch) > 0:
img_batch, mel_batch = np.asarray(img_batch), np.asarray(mel_batch)
img_masked = img_batch.copy()
img_masked[:, img_size // 2:] = 0
img_batch = np.concatenate((img_masked, img_batch), axis=3) / 255.
mel_batch = np.reshape(mel_batch, [len(mel_batch), mel_batch.shape[1], mel_batch.shape[2], 1])
yield img_batch, mel_batch, frame_batch, coords_batch
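# Example consumer (sketch; batch sizes are illustrative):
#   gen = datagen(frames, face_det_results, mel_chunks, start_frame_idx=0,
#                 static=False, img_size=96, wav2lip_batch_size=128)
#   for img_batch, mel_batch, frame_batch, coords_batch in gen:
#       ...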
def _load(checkpoint_path, device):
if device == 'cuda':
checkpoint = torch.load(checkpoint_path)
else:
checkpoint = torch.load(checkpoint_path,
map_location=lambda storage, loc: storage)
return checkpoint
def load_model(path, device):
model = Wav2Lip()
print("Load checkpoint from: {}".format(path))
checkpoint = _load(path, device)
s = checkpoint["state_dict"]
new_s = {}
for k, v in s.items():
new_s[k.replace('module.', '')] = v
model.load_state_dict(new_s)
model = model.to(device)
return model.eval()
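# Example (sketch; the checkpoint path is hypothetical):
#   device = 'cuda' if torch.cuda.is_available() else 'cpu'
#   model = load_model('checkpoints/wav2lip_gan.pth', device)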
def preprocess_video(face, fps, resize_factor, rotate, crop):
if not os.path.isfile(face):
raise ValueError('--face argument must be a valid path to video/image file')
elif face.split('.')[-1].lower() in ['jpg', 'png', 'jpeg']:
full_frames = [cv2.imread(face)]
fps = fps
else:
video_stream = cv2.VideoCapture(face)
fps = video_stream.get(cv2.CAP_PROP_FPS)
print('Reading video frames...')
full_frames = []
while 1:
still_reading, frame = video_stream.read()
if not still_reading:
video_stream.release()
break
if resize_factor > 1:
frame = cv2.resize(frame, (frame.shape[1] // resize_factor, frame.shape[0] // resize_factor))
if rotate:
frame = cv2.rotate(frame, cv2.cv2.ROTATE_90_CLOCKWISE)
y1, y2, x1, x2 = crop
if x2 == -1: x2 = frame.shape[1]
if y2 == -1: y2 = frame.shape[0]
frame = frame[y1:y2, x1:x2]
full_frames.append(frame)
print("Number of frames available for inference: " + str(len(full_frames)))
return full_frames | [
"torch.load"
] | 1.1.0 | tpulkit/txt2vid | 679b1672fb3221c6b5fe576a158974556047c201 |
1.7 | import numpy as np
from glumpy import app, gl, glm, gloo
import torch
import module.gpu_work as gw
class mesh():
def __init__(self, motion):
# plane
self.motion = motion
self.N = N = motion.N[:2]
self.dx = dx = motion.dx
# vertices
X = [dx * (np.arange(N[i]) - N[i] * 0.5) for i in range(2)]
x, y = X
x, y = np.meshgrid(x, y)
z = motion.update_numpy()
vertices = np.transpose([x, y, z], (1, 2, 0)).reshape(-1, 3)
# colors
colors = np.random.randn(len(vertices), 4).astype(np.float32)
# outline
idx = []
for i in np.arange(N[1]-1):
for j in np.arange(N[0]-1):
offset = i * N[0] + j
idx.append([offset, offset+1, offset+1+N[0], offset+N[0]] +
[offset, offset+N[0], offset+1, offset+1+N[0]])
outline = np.array(idx).reshape(-1)
# point indices (one per vertex)
idx = np.arange(N[0]*N[1])
point_idx = np.array(idx).reshape(-1)
############################################################
# glumpy Vertex Buffer
dtype = [("position", np.float32, 3),
("color", np.float32, 4)]
VertexBuffer = np.zeros(len(vertices), dtype)
VertexBuffer["position"] = vertices
VertexBuffer["color"] = colors
VertexBuffer = VertexBuffer.view(gloo.VertexBuffer)
# glumpy Index Buffer
outline = outline.astype(np.uint32).view(gloo.IndexBuffer)
# glumpy Index Buffer (points)
point_idx = point_idx.astype(np.uint32).view(gloo.IndexBuffer)
############################################################
# self
self.VertexBuffer = VertexBuffer
self.outline = outline
self.point_idx = point_idx
############################################################
# torch
v = torch.from_numpy(np.transpose(vertices, (1, 0)).reshape(1, 3, N[0], N[1]).astype(np.float32)).cuda()
c = torch.from_numpy(np.transpose(colors, (1, 0)).reshape(1, 4, N[0], N[1]).astype(np.float32)).cuda()
self.v = v
self.c = c
def update(self, dt=0):
motion = self.motion
v = self.v
c = self.c
z = motion.update(dt)
zc = 0.5 * z
c[0, 0] = 0 + 2*zc
c[0, 1] = 0.5 - zc
c[0, 2] = 1.0 + 2*zc
c[0, 3] = 1
v[0, 2] = z*0.3
class plot3d():
def __init__(self, obj):
self.obj = obj
self.phi, self.theta = 0, 0
# init
self.init_window()
self.bind_obj(obj)
self.update_VertexBuffer()
app.run()
def init_window(self):
window = app.Window(width=1920, height=1080,
color=(0.30, 0.30, 0.35, 1.00))
@window.event
def on_init():
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glPolygonOffset(1, 1)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glLineWidth(0.55)
@window.event
def on_draw(dt):
window.clear()
self.on_draw(dt)
@window.event
def on_resize(width, height):
program = self.program
program['projection'] = glm.perspective(
45.0, width / float(height), 0.1, 100.0)
self.window = window
def bind_obj(self, obj):
# make obj
vertex = """
uniform vec4 ucolor;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
attribute vec3 position;
attribute vec4 color;
varying vec4 v_color;
void main()
{
v_color = ucolor * color;
gl_Position = projection * view * model * vec4(position,1.0);
}
"""
fragment = """
varying vec4 v_color;
void main()
{
gl_FragColor = v_color;
}
"""
VertexBuffer = obj.VertexBuffer
outline = obj.outline
point_idx = obj.point_idx
program = gloo.Program(vertex, fragment)
program.bind(VertexBuffer)
program['model'] = np.eye(4, dtype=np.float32)
program['view'] = glm.translation(0, 0, -5)
VertexBuffer.activate()
VertexBuffer.deactivate()
self.RegisteredBuffer = gw.make_RegisteredBuffer(VertexBuffer)
self.program = program
self.outline = outline
self.point_idx = point_idx
def update_VertexBuffer(self, dt=0):
# torch
self.obj.update(dt)
v = self.obj.v
c = self.obj.c
V_ = torch.cat((v, c), dim=1)
V_ = V_.contiguous(memory_format=torch.channels_last)
# copy
gw.copy_torch2RegisteredBuffer(self.RegisteredBuffer, V_[0])
def on_draw(self, dt):
program = self.program
window = self.window
# set title
window.set_title(str(
window.fps).encode("ascii"))
self.update_VertexBuffer(dt)
# # Point
# gl.glDisable(gl.GL_BLEND)
# gl.glEnable(gl.GL_DEPTH_TEST)
# gl.glPointSize(5)
# program['ucolor'] = 1, 1, 1, 1
# program.draw(gl.GL_POINTS, self.point_idx)
# Fill
gl.glDisable(gl.GL_BLEND)
gl.glEnable(gl.GL_DEPTH_TEST)
gl.glEnable(gl.GL_POLYGON_OFFSET_FILL)
program['ucolor'] = 1, 1, 1, 1
program.draw(gl.GL_QUADS, self.outline)
# Outlined program
# gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)
# gl.glEnable(gl.GL_BLEND)
# gl.glDepthMask(gl.GL_FALSE)
# program['ucolor'] = 0, 0, 0, 1
# program.draw(gl.GL_LINES, self.outline)
# gl.glDepthMask(gl.GL_TRUE)
# Make program rotate
self.theta += 0*dt # degrees
self.phi += 2*dt # degrees
model = np.eye(4, dtype=np.float32)
glm.rotate(model, -90, 1, 0, 0)
glm.rotate(model, self.theta, 0, 0, 1)
glm.rotate(model, self.phi, 0, 1, 0)
glm.rotate(model, 45, 1, 0, 0)
program['model'] = model
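# Hypothetical driver (sketch; a `motion` object must come from the accompanying
# simulation module):
#   m = mesh(motion)
#   plot3d(m)  # opens the glumpy window and starts the app event loop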
| [
"torch.cat"
] | 1.7.0 | dego1985/wave_simulation | 05f5119aab158e0958170d90066c2b87b998e658 |
1.4 | import pickle
from collections import OrderedDict
from distutils.version import LooseVersion
import cloudpickle
import numpy as np
import pytest
import torch
from torch import nn
from pytorch_lightning.metrics.metric import Metric, MetricCollection
torch.manual_seed(42)
class Dummy(Metric):
name = "Dummy"
def __init__(self):
super().__init__()
self.add_state("x", torch.tensor(0.0), dist_reduce_fx=None)
def update(self):
pass
def compute(self):
pass
class DummyList(Metric):
name = "DummyList"
def __init__(self):
super().__init__()
self.add_state("x", list(), dist_reduce_fx=None)
def update(self):
pass
def compute(self):
pass
def test_inherit():
Dummy()
def test_add_state():
a = Dummy()
a.add_state("a", torch.tensor(0), "sum")
assert a._reductions["a"](torch.tensor([1, 1])) == 2
a.add_state("b", torch.tensor(0), "mean")
assert np.allclose(a._reductions["b"](torch.tensor([1.0, 2.0])).numpy(), 1.5)
a.add_state("c", torch.tensor(0), "cat")
assert a._reductions["c"]([torch.tensor([1]), torch.tensor([1])]).shape == (2, )
with pytest.raises(ValueError):
a.add_state("d1", torch.tensor(0), 'xyz')
with pytest.raises(ValueError):
a.add_state("d2", torch.tensor(0), 42)
with pytest.raises(ValueError):
a.add_state("d3", [torch.tensor(0)], 'sum')
with pytest.raises(ValueError):
a.add_state("d4", 42, 'sum')
def custom_fx(x):
return -1
a.add_state("e", torch.tensor(0), custom_fx)
assert a._reductions["e"](torch.tensor([1, 1])) == -1
def test_add_state_persistent():
a = Dummy()
a.add_state("a", torch.tensor(0), "sum", persistent=True)
assert "a" in a.state_dict()
a.add_state("b", torch.tensor(0), "sum", persistent=False)
if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
assert "b" not in a.state_dict()
def test_reset():
class A(Dummy):
pass
class B(DummyList):
pass
a = A()
assert a.x == 0
a.x = torch.tensor(5)
a.reset()
assert a.x == 0
b = B()
assert isinstance(b.x, list) and len(b.x) == 0
b.x = torch.tensor(5)
b.reset()
assert isinstance(b.x, list) and len(b.x) == 0
def test_update():
class A(Dummy):
def update(self, x):
self.x += x
a = A()
assert a.x == 0
assert a._computed is None
a.update(1)
assert a._computed is None
assert a.x == 1
a.update(2)
assert a.x == 3
assert a._computed is None
def test_compute():
class A(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
a = A()
assert 0 == a.compute()
assert 0 == a.x
a.update(1)
assert a._computed is None
assert a.compute() == 1
assert a._computed == 1
a.update(2)
assert a._computed is None
assert a.compute() == 3
assert a._computed == 3
# called without update, should return cached value
a._computed = 5
assert a.compute() == 5
def test_forward():
class A(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
a = A()
assert a(5) == 5
assert a._forward_cache == 5
assert a(8) == 8
assert a._forward_cache == 8
assert a.compute() == 13
class DummyMetric1(Dummy):
def update(self, x):
self.x += x
def compute(self):
return self.x
class DummyMetric2(Dummy):
def update(self, y):
self.x -= y
def compute(self):
return self.x
def test_pickle(tmpdir):
# doesn't test DDP behaviour
a = DummyMetric1()
a.update(1)
metric_pickled = pickle.dumps(a)
metric_loaded = pickle.loads(metric_pickled)
assert metric_loaded.compute() == 1
metric_loaded.update(5)
assert metric_loaded.compute() == 6
metric_pickled = cloudpickle.dumps(a)
metric_loaded = cloudpickle.loads(metric_pickled)
assert metric_loaded.compute() == 1
def test_state_dict(tmpdir):
""" test that metric states can be removed and added to state dict """
metric = Dummy()
assert metric.state_dict() == OrderedDict()
metric.persistent(True)
assert metric.state_dict() == OrderedDict(x=0)
metric.persistent(False)
assert metric.state_dict() == OrderedDict()
def test_child_metric_state_dict():
""" test that child metric states will be added to parent state dict """
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.metric = Dummy()
self.metric.add_state('a', torch.tensor(0), persistent=True)
self.metric.add_state('b', [], persistent=True)
self.metric.register_buffer('c', torch.tensor(0))
module = TestModule()
expected_state_dict = {
'metric.a': torch.tensor(0),
'metric.b': [],
'metric.c': torch.tensor(0),
}
assert module.state_dict() == expected_state_dict
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
def test_device_and_dtype_transfer(tmpdir):
metric = DummyMetric1()
assert metric.x.is_cuda is False
assert metric.x.dtype == torch.float32
metric = metric.to(device='cuda')
assert metric.x.is_cuda
metric = metric.double()
assert metric.x.dtype == torch.float64
metric = metric.half()
assert metric.x.dtype == torch.float16
def test_metric_collection(tmpdir):
m1 = DummyMetric1()
m2 = DummyMetric2()
metric_collection = MetricCollection([m1, m2])
# Test correct dict structure
assert len(metric_collection) == 2
assert metric_collection['DummyMetric1'] == m1
assert metric_collection['DummyMetric2'] == m2
# Test correct initialization
for name, metric in metric_collection.items():
assert metric.x == 0, f'Metric {name} not initialized correctly'
# Test every metric gets updated
metric_collection.update(5)
for name, metric in metric_collection.items():
assert metric.x.abs() == 5, f'Metric {name} not updated correctly'
# Test compute on each metric
metric_collection.update(-5)
metric_vals = metric_collection.compute()
assert len(metric_vals) == 2
for name, metric_val in metric_vals.items():
assert metric_val == 0, f'Metric {name}.compute not called correctly'
# Test that everything is reset
for name, metric in metric_collection.items():
assert metric.x == 0, f'Metric {name} not reset correctly'
# Test pickable
metric_pickled = pickle.dumps(metric_collection)
metric_loaded = pickle.loads(metric_pickled)
assert isinstance(metric_loaded, MetricCollection)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Test requires GPU.")
def test_device_and_dtype_transfer_metriccollection(tmpdir):
m1 = DummyMetric1()
m2 = DummyMetric2()
metric_collection = MetricCollection([m1, m2])
for _, metric in metric_collection.items():
assert metric.x.is_cuda is False
assert metric.x.dtype == torch.float32
metric_collection = metric_collection.to(device='cuda')
for _, metric in metric_collection.items():
assert metric.x.is_cuda
metric_collection = metric_collection.double()
for _, metric in metric_collection.items():
assert metric.x.dtype == torch.float64
metric_collection = metric_collection.half()
for _, metric in metric_collection.items():
assert metric.x.dtype == torch.float16
def test_metric_collection_wrong_input(tmpdir):
""" Check that errors are raised on wrong input """
m1 = DummyMetric1()
# Not all input are metrics (list)
with pytest.raises(ValueError):
_ = MetricCollection([m1, 5])
# Not all input are metrics (dict)
with pytest.raises(ValueError):
_ = MetricCollection({'metric1': m1, 'metric2': 5})
# Same metric passed in multiple times
with pytest.raises(ValueError, match='Encountered two metrics both named *.'):
_ = MetricCollection([m1, m1])
# Not a list or dict passed in
with pytest.raises(ValueError, match='Unknown input to MetricCollection.'):
_ = MetricCollection(m1)
def test_metric_collection_args_kwargs(tmpdir):
""" Check that args and kwargs gets passed correctly in metric collection,
Checks both update and forward method
"""
m1 = DummyMetric1()
m2 = DummyMetric2()
metric_collection = MetricCollection([m1, m2])
# args gets passed to all metrics
metric_collection.update(5)
assert metric_collection['DummyMetric1'].x == 5
assert metric_collection['DummyMetric2'].x == -5
metric_collection.reset()
_ = metric_collection(5)
assert metric_collection['DummyMetric1'].x == 5
assert metric_collection['DummyMetric2'].x == -5
metric_collection.reset()
# kwargs gets only passed to metrics that it matches
metric_collection.update(x=10, y=20)
assert metric_collection['DummyMetric1'].x == 10
assert metric_collection['DummyMetric2'].x == -20
metric_collection.reset()
_ = metric_collection(x=10, y=20)
assert metric_collection['DummyMetric1'].x == 10
assert metric_collection['DummyMetric2'].x == -20
| [
"torch.manual_seed",
"torch.cuda.is_available",
"torch.tensor"
] | 1.4 | javierlorenzod/pytorch-lightning | 6dba26666aa564db414eb238d99a4213006d8220 |
1.5 | import os
import hydra
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]='1'
import logging
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from hydra import utils
from torch.utils.data import DataLoader
from deepke.name_entity_re.few_shot.models.model import PromptBartModel, PromptGeneratorModel
from deepke.name_entity_re.few_shot.module.datasets import ConllNERProcessor, ConllNERDataset
from deepke.name_entity_re.few_shot.module.train import Trainer
from deepke.name_entity_re.few_shot.module.metrics import Seq2SeqSpanMetric
from deepke.name_entity_re.few_shot.utils.util import get_loss, set_seed
from deepke.name_entity_re.few_shot.module.mapping_type import mit_movie_mapping, mit_restaurant_mapping, atis_mapping
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from tensorboardX import SummaryWriter
writer = SummaryWriter(log_dir='logs')
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
DATASET_CLASS = {
'conll2003': ConllNERDataset,
'mit-movie': ConllNERDataset,
'mit-restaurant': ConllNERDataset,
'atis': ConllNERDataset
}
DATA_PROCESS = {
'conll2003': ConllNERProcessor,
'mit-movie': ConllNERProcessor,
'mit-restaurant': ConllNERProcessor,
'atis': ConllNERProcessor
}
DATA_PATH = {
'conll2003': {'train': 'data/conll2003/train.txt',
'dev': 'data/conll2003/dev.txt',
'test': 'data/conll2003/test.txt'},
'mit-movie': {'train': 'data/mit-movie/20-shot-train.txt',
'dev': 'data/mit-movie/test.txt'},
'mit-restaurant': {'train': 'data/mit-restaurant/10-shot-train.txt',
'dev': 'data/mit-restaurant/test.txt'},
'atis': {'train': 'data/atis/20-shot-train.txt',
'dev': 'data/atis/test.txt'}
}
MAPPING = {
'conll2003': {'loc': '<<location>>',
'per': '<<person>>',
'org': '<<organization>>',
'misc': '<<others>>'},
'mit-movie': mit_movie_mapping,
'mit-restaurant': mit_restaurant_mapping,
'atis': atis_mapping
}
@hydra.main(config_path="conf/config.yaml")
def main(cfg):
cwd = utils.get_original_cwd()
cfg.cwd = cwd
print(cfg)
data_path = DATA_PATH[cfg.dataset_name]
for mode, path in data_path.items():
data_path[mode] = os.path.join(cfg.cwd, path)
dataset_class, data_process = DATASET_CLASS[cfg.dataset_name], DATA_PROCESS[cfg.dataset_name]
mapping = MAPPING[cfg.dataset_name]
set_seed(cfg.seed) # set seed, default is 1
if cfg.save_path is not None: # make save_path dir
cfg.save_path = os.path.join(cfg.save_path, cfg.dataset_name+"_"+str(cfg.batch_size)+"_"+str(cfg.learning_rate)+cfg.notes)
if not os.path.exists(cfg.save_path):
os.makedirs(cfg.save_path, exist_ok=True)
process = data_process(data_path=data_path, mapping=mapping, bart_name=cfg.bart_name, learn_weights=cfg.learn_weights)
train_dataset = dataset_class(data_processor=process, mode='train')
train_dataloader = DataLoader(train_dataset, collate_fn=train_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)
dev_dataset = dataset_class(data_processor=process, mode='dev')
dev_dataloader = DataLoader(dev_dataset, collate_fn=dev_dataset.collate_fn, batch_size=cfg.batch_size, num_workers=4)
label_ids = list(process.mapping2id.values())
prompt_model = PromptBartModel(tokenizer=process.tokenizer, label_ids=label_ids, args=cfg)
model = PromptGeneratorModel(prompt_model=prompt_model, bos_token_id=0,
eos_token_id=1,
                                 max_length=cfg.tgt_max_len, max_len_a=cfg.src_seq_ratio, num_beams=cfg.num_beams, do_sample=False,
repetition_penalty=1, length_penalty=cfg.length_penalty, pad_token_id=1,
restricter=None)
metrics = Seq2SeqSpanMetric(eos_token_id=1, num_labels=len(label_ids), target_type='word')
loss = get_loss
trainer = Trainer(train_data=train_dataloader, dev_data=dev_dataloader, test_data=None, model=model, args=cfg, logger=logger, loss=loss,
metrics=metrics, writer=writer)
trainer.train()
writer.close()
if __name__ == "__main__":
main()
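# A hypothetical invocation (the override keys below are cfg fields read in this
# script; their defaults live in conf/config.yaml, which is not shown here):
#
#   python run.py dataset_name=conll2003 batch_size=16 learning_rate=2e-5 seed=1
#
# Hydra applies key=value overrides on top of conf/config.yaml before main() runs.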
| [
"torch.utils.data.DataLoader"
] | 1.5 | hphphp123321/DeepKE | f7efd3fc87d3bf88783a41efc3c09dca7a986013 |
1.7 | import torch
import torch.nn as nn
def safe_norm(x, eps=1e-5, dim=-1, keepdim=True):
return torch.sqrt(torch.sum(torch.square(x), dim=dim, keepdim=keepdim) + eps)
class LayerNormTf(nn.Module):
def __init__(self, hidden_size: int, eps: float = 1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
Link: https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/pytorch_pretrained_bert/modeling.py#L234
"""
super(LayerNormTf, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Calculate Layernorm the tf way"""
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class ScaleNorm(nn.Module):
def __init__(self, hidden_size: int, eps=1e-5):
super(ScaleNorm, self).__init__()
self.eps = eps
self.g = nn.Parameter(torch.tensor(hidden_size ** 0.5))
def forward(self, x: torch.Tensor):
scaled_norm = self.g / safe_norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
return scaled_norm * x
# Default to pytorch layernorm
LayerNorm = nn.LayerNorm
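# A minimal usage sketch (illustrative only). ScaleNorm follows Nguyen & Salazar,
# "Transformers without Tears" (2019): it rescales activations to a single learned
# length g (initialized to sqrt(hidden_size)) rather than learning per-feature
# affine parameters as LayerNorm does.
if __name__ == "__main__":
    x = torch.randn(2, 4, 8)              # (batch, seq_len, hidden_size)
    print(LayerNormTf(8)(x).shape)        # torch.Size([2, 4, 8])
    print(ScaleNorm(8)(x).shape)          # torch.Size([2, 4, 8])
    print(safe_norm(torch.zeros(1, 8)))   # ~sqrt(eps): stays finite at x == 0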
| [
"torch.zeros",
"torch.sqrt",
"torch.square",
"torch.ones",
"torch.tensor"
] | 1.7.1 | PansoK/slp | ac55154f063245e0e4ed584c59f16370d228d8a7 |
0.4 | import os
import torch
from collections import OrderedDict
from abc import ABC, abstractmethod
from . import networks
class BaseModel(ABC):
"""This class is an abstract base class (ABC) for models.
To create a subclass, you need to implement the following five functions:
-- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
-- <set_input>: unpack data from dataset and apply preprocessing.
-- <forward>: produce intermediate results.
-- <optimize_parameters>: calculate losses, gradients, and update network weights.
-- <modify_commandline_options>: (optionally) add model-specific options and set default options.
"""
def __init__(self, opt):
"""Initialize the BaseModel class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        When creating your custom class, you need to implement your own initialization.
        In this function, you should first call <BaseModel.__init__(self, opt)>
        Then, you need to define four lists:
            -- self.loss_names (str list): specify the training losses that you want to plot and save.
            -- self.model_names (str list): define networks used in our training.
            -- self.visual_names (str list): specify the images that you want to display and save.
-- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
"""
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
torch.backends.cudnn.benchmark = True
self.loss_names = []
self.model_names = []
self.visual_names = []
self.optimizers = []
self.image_paths = []
self.metric = 0 # used for learning rate policy 'plateau'
@staticmethod
def modify_commandline_options(parser, is_train):
"""Add new model-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
"""
return parser
@abstractmethod
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): includes the data itself and its metadata information.
"""
pass
@abstractmethod
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
pass
@abstractmethod
def optimize_parameters(self):
"""Calculate losses, gradients, and update network weights; called in every training iteration"""
pass
def setup(self, opt):
"""Load and print networks; create schedulers
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
if self.isTrain:
self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
if not self.isTrain or opt.continue_train:
load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
aux = self.load_networks(load_suffix)
else:
aux = None
self.print_networks(opt.verbose)
return aux
def eval(self):
"""Make models eval mode during test time"""
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
net.eval()
def test(self):
"""Forward function used in test time.
This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
It also calls <compute_visuals> to produce additional visualization results
"""
with torch.no_grad():
self.forward()
self.compute_visuals()
def compute_visuals(self):
"""Calculate additional output images for visdom and HTML visualization"""
pass
def get_image_paths(self):
""" Return image paths that are used to load current data"""
return self.image_paths
def update_learning_rate(self):
"""Update learning rates for all the networks; called at the end of every epoch"""
for scheduler in self.schedulers:
if self.opt.lr_policy == 'plateau':
scheduler.step(self.metric)
else:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
def get_current_visuals(self):
"""Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
visual_ret[name] = getattr(self, name)
return visual_ret
def get_current_losses(self):
"""Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
return errors_ret
def save_networks(self, epoch, aux):
"""Save all the networks to the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net' + name)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda(self.gpu_ids[0])
else:
torch.save(net.cpu().state_dict(), save_path)
save_filename = '%s_aux.pth' % (epoch)
save_path = os.path.join(self.save_dir, save_filename)
        torch.save(aux, save_path)
def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
"""Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
key = keys[i]
if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'running_mean' or key == 'running_var'):
if getattr(module, key) is None:
state_dict.pop('.'.join(keys))
if module.__class__.__name__.startswith('InstanceNorm') and \
(key == 'num_batches_tracked'):
state_dict.pop('.'.join(keys))
else:
self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
def load_networks(self, epoch):
"""Load all the networks from the disk.
Parameters:
epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
"""
for name in self.model_names:
if isinstance(name, str):
load_filename = '%s_net_%s.pth' % (epoch, name)
load_path = os.path.join(self.save_dir, load_filename)
net = getattr(self, 'net' + name)
if isinstance(net, torch.nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
# if you are using PyTorch newer than 0.4 (e.g., built from
# GitHub source), you can remove str() on self.device
state_dict = torch.load(load_path, map_location=str(self.device))
if hasattr(state_dict, '_metadata'):
del state_dict._metadata
# patch InstanceNorm checkpoints prior to 0.4
for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
net.load_state_dict(state_dict)
load_filename = '%s_aux.pth' % (epoch)
load_path = os.path.join(self.save_dir, load_filename)
if os.path.exists(load_path):
return torch.load(load_path)
else:
return None
def print_networks(self, verbose):
"""Print the total number of parameters in the network and (if verbose) network architecture
Parameters:
verbose (bool) -- if verbose: print the network architecture
"""
print('---------- Networks initialized -------------')
for name in self.model_names:
if isinstance(name, str):
net = getattr(self, 'net' + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
if verbose:
print(net)
print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
print('-----------------------------------------------')
def set_requires_grad(self, nets, requires_grad=False):
"""Set requies_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
nets (network list) -- a list of networks
requires_grad (bool) -- whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
    def get_optimizer_states(self):
        return [o.state_dict() for o in self.optimizers]
    def set_optimizer_states(self, states):
        for i, s in enumerate(states):
            self.optimizers[i].load_state_dict(s)
    def get_scheduler_states(self):
        return [o.state_dict() for o in self.schedulers]
    def set_scheduler_states(self, states):
        for i, s in enumerate(states):
            self.schedulers[i].load_state_dict(s)
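# A minimal subclass sketch (illustrative only; the option name `opt.lr` and the
# single-network setup are assumptions, not part of this repository). It wires the
# naming conventions the base class relies on: 'l1' in loss_names maps to
# self.loss_l1, 'G' in model_names maps to self.netG.
class ToyModel(BaseModel):
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['l1']              # read back as self.loss_l1 by get_current_losses()
        self.model_names = ['G']              # read back as self.netG by save/load/print helpers
        self.visual_names = ['real', 'fake']  # read back by get_current_visuals()
        self.netG = torch.nn.Linear(3, 3).to(self.device)
        if self.isTrain:
            self.criterion = torch.nn.L1Loss()
            self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
            self.optimizers.append(self.optimizer)
    def set_input(self, input):
        self.real = input['A'].to(self.device)
        self.target = input['B'].to(self.device)
    def forward(self):
        self.fake = self.netG(self.real)
    def optimize_parameters(self):
        self.forward()
        self.optimizer.zero_grad()
        self.loss_l1 = self.criterion(self.fake, self.target)
        self.loss_l1.backward()
        self.optimizer.step()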
| [
"torch.device",
"torch.save",
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
] | 0.4.0 | herobd/GAN_aug | b240da32d4f3ae9a00a9d395ac8f29728623f6b4 |