version (string, 21 distinct values) | code (string, 225-174k chars) | apis (sequence) | full_version (string, 1-6 chars) | repo_name (string, 10-107 chars) | hexsha (string, 40 chars)
---|---|---|---|---|---
1.8 | # coding=utf-8
#
# Copyright 2020 Heinrich Heine University Duesseldorf
#
# Part of this code is based on the source code of BERT-DST
# (arXiv:1907.03040)
# Part of this code is based on the source code of Transformers
# (arXiv:1910.03771)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import sys
from typing import Dict, Union
import hydra
import numpy as np
import torch
import transformers
from fairscale.nn.data_parallel.fully_sharded_data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap.auto_wrap import auto_wrap
from fairscale.optim.grad_scaler import ShardedGradScaler
from omegaconf import DictConfig, OmegaConf
from torch import distributed as dist
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from transformers import (get_linear_schedule_with_warmup, AutoTokenizer, PreTrainedTokenizer)
from general_util.logger import setting_logger
from general_util.training_utils import batch_to_device, unwrap_model, set_seed, note_best_checkpoint, initialize_optimizer
logger: logging.Logger
# transformers.logging.set_verbosity_error()
def save_model(model: Union[torch.nn.Module, FullyShardedDDP], cfg: DictConfig, output_dir: str, tokenizer: PreTrainedTokenizer = None):
# Save model checkpoint.
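# Under distributed (FSDP) training every rank calls state_dict() so the sharded parameters
# can be gathered, but only rank 0 actually writes the full checkpoint to disk.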
if cfg.local_rank != -1:
state_dict = model.state_dict()
if cfg.local_rank == 0:
unwrap_model(model).save_pretrained(output_dir, state_dict=state_dict)
else:
model.save_pretrained(output_dir)
# Save tokenizer and training args.
if cfg.local_rank in [-1, 0]:
if tokenizer is not None:
tokenizer.save_pretrained(output_dir)
OmegaConf.save(cfg, os.path.join(output_dir, "training_config.yaml"))
logger.info("Saving model checkpoint to %s", output_dir)
def forward_step(model, inputs: Dict[str, torch.Tensor], cfg, scaler):
if cfg.fp16:
with torch.cuda.amp.autocast():
outputs = model(**inputs)
loss = outputs["loss"] # model outputs are always tuple in transformers (see doc)
else:
outputs = model(**inputs)
loss = outputs["loss"] # model outputs are always tuple in pytorch-transformers (see doc)
if cfg.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if cfg.gradient_accumulation_steps > 1:
loss = loss / cfg.gradient_accumulation_steps
if cfg.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
return loss.item()
def train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step=0):
""" Train the model """
if cfg.local_rank in [-1, 0]:
_dir_splits = cfg.output_dir.split('/')
_log_dir = '/'.join([_dir_splits[0], 'runs'] + _dir_splits[1:])
tb_writer = SummaryWriter(log_dir=_log_dir)
else:
tb_writer = None
cfg.train_batch_size = cfg.per_gpu_train_batch_size * max(1, cfg.n_gpu)
train_sampler = RandomSampler(train_dataset) if cfg.local_rank == -1 else DistributedSampler(train_dataset)
train_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
train_dataloader = DataLoader(dataset=train_dataset, sampler=train_sampler, batch_size=cfg.train_batch_size,
collate_fn=train_collator, num_workers=cfg.num_workers, pin_memory=True,
prefetch_factor=cfg.prefetch_factor)
if "extended_vocab" in cfg and cfg.extended_vocab:
logger.info(f"Extended extra vocab size: {cfg.extended_vocab}")
model.resize_token_embeddings(model.config.vocab_size + cfg.extended_vocab)
if cfg.max_steps > 0:
t_total = cfg.max_steps
cfg.num_train_epochs = cfg.max_steps // (len(train_dataloader) // cfg.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // cfg.gradient_accumulation_steps * cfg.num_train_epochs
num_warmup_steps = int(t_total * cfg.warmup_proportion) if cfg.warmup_proportion else cfg.warmup_steps
optimizer = scheduler = None
# Prepare optimizer and schedule (linear warmup and decay)
if cfg.local_rank == -1:
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
optimizer_grouped_parameters = [
{
'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': cfg.weight_decay
},
{
'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': 0.0
}
]
optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
if cfg.fp16:
if cfg.local_rank != -1:
scaler = ShardedGradScaler()
else:
from torch.cuda.amp.grad_scaler import GradScaler
scaler = GradScaler()
else:
scaler = None
# multi-gpu training (should happen after fp16/AMP initialization)
model_single_gpu = model
if cfg.n_gpu > 1:
model = torch.nn.DataParallel(model_single_gpu)
# Distributed training (should happen after fp16/AMP initialization)
if cfg.local_rank != -1:
model = auto_wrap(model)
model = FullyShardedDDP(model,
mixed_precision=cfg.fp16,
flatten_parameters=getattr(cfg, "flatten_parameters", True),
reshard_after_forward=cfg.reshard_after_forward,
move_grads_to_cpu=cfg.move_grads_to_cpu,
move_params_to_cpu=cfg.move_params_to_cpu)
if not cfg.move_params_to_cpu:
model = model.to(cfg.device)
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
optimizer_grouped_parameters = [
{
'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': cfg.weight_decay
},
{
'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': 0.0
}
]
optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
logger.info(optimizer)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", cfg.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", cfg.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
cfg.train_batch_size * cfg.gradient_accumulation_steps * (dist.get_world_size() if cfg.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", cfg.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Warmup steps = %d", num_warmup_steps)
if continue_from_global_step > 0:
logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...", continue_from_global_step)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(cfg.num_train_epochs), desc="Epoch", disable=cfg.local_rank not in [-1, 0])
set_seed(cfg) # Added here for reproducibility (even between python 2 and 3)
for epoch in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=cfg.local_rank not in [-1, 0], dynamic_ncols=True)
if cfg.local_rank != -1:
train_dataloader.sampler.set_epoch(epoch)
for step, batch in enumerate(epoch_iterator):
# If training is continued from a checkpoint, fast forward
# to the state of that checkpoint.
if global_step < continue_from_global_step:
if (step + 1) % cfg.gradient_accumulation_steps == 0:
scheduler.step() # Update learning rate schedule
global_step += 1
continue
model.train()
batch = batch_to_device(batch, cfg.device)
if (step + 1) % cfg.gradient_accumulation_steps != 0 and cfg.local_rank != -1:
# Avoid unnecessary gradient synchronization: on accumulation steps the gradients are not applied yet, so no_sync() skips the all-reduce.
with model.no_sync():
loss = forward_step(model, batch, cfg, scaler)
else:
loss = forward_step(model, batch, cfg, scaler)
tr_loss += loss
if (step + 1) % cfg.gradient_accumulation_steps == 0:
if cfg.fp16:
scaler.unscale_(optimizer)
if cfg.max_grad_norm:
if hasattr(optimizer, "clip_grad_norm"):
optimizer.clip_grad_norm(cfg.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
model.clip_grad_norm_(cfg.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)
if cfg.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad(set_to_none=True)
global_step += 1
# Log metrics
if cfg.local_rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0:
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / cfg.logging_steps, global_step)
logging_loss = tr_loss
# Save model checkpoint
if cfg.save_steps > 0 and global_step % cfg.save_steps == 0:
output_dir = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
if cfg.local_rank in [-1, 0] and not os.path.exists(output_dir):
os.makedirs(output_dir)
save_model(model, cfg, output_dir, tokenizer)
# Evaluation
if cfg.evaluate_during_training and cfg.eval_steps > 0 and global_step % cfg.eval_steps == 0:
state_dict = model.state_dict()
if cfg.local_rank in [-1, 0]:
results = evaluate(cfg, model, tokenizer, prefix=str(global_step), _split="dev")
for key, value in results.items():
tb_writer.add_scalar(f"eval/{key}", value, global_step)
sub_path = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
flag = note_best_checkpoint(cfg, results, sub_path)
if cfg.save_best and flag:
if cfg.local_rank == 0:
unwrap_model(model).save_pretrained(cfg.output_dir, state_dict=state_dict)
else:
model.save_pretrained(cfg.output_dir)
tokenizer.save_pretrained(cfg.output_dir)
OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
logger.info("Saving best model checkpoint to %s", cfg.output_dir)
if 0 < cfg.max_steps < global_step:
epoch_iterator.close()
break
if 0 < cfg.max_steps < global_step:
train_iterator.close()
break
if cfg.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(cfg, model, tokenizer: PreTrainedTokenizer, prefix="", _split="dev"):
dataset, features = load_and_cache_examples(cfg, tokenizer, _split=_split)
if not os.path.exists(os.path.join(cfg.output_dir, prefix)):
os.makedirs(os.path.join(cfg.output_dir, prefix))
cfg.eval_batch_size = cfg.per_gpu_eval_batch_size
eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly
eval_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=cfg.eval_batch_size,
collate_fn=eval_collator)
single_model_gpu = unwrap_model(model)
single_model_gpu.get_eval_log(reset=True)
# Eval!
torch.cuda.empty_cache()
logger.info("***** Running evaluation {}.{} *****".format(_split, prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", cfg.eval_batch_size)
# FSDP does not seem to require unwrapping the model for evaluation.
model.eval()
pred_list = []
prob_list = []
for batch in tqdm(eval_dataloader, desc="Evaluating", dynamic_ncols=True):
batch = batch_to_device(batch, cfg.device)
with torch.cuda.amp.autocast():
with torch.no_grad():
outputs = model(**batch)
probs = outputs["logits"].softmax(dim=-1).detach().float().cpu()
prob, pred = probs.max(dim=-1)
pred_list.extend(pred.tolist())
prob_list.extend(prob.tolist())
metric_log, results = single_model_gpu.get_eval_log(reset=True)
logger.info("****** Evaluation Results ******")
logger.info(f"Global Steps: {prefix}")
logger.info(metric_log)
prediction_file = os.path.join(cfg.output_dir, prefix, "eval_predictions.npy")
np.save(prediction_file, pred_list)
json.dump(prob_list, open(os.path.join(cfg.output_dir, prefix, "eval_probs.json"), "w"))
return results
def load_and_cache_examples(cfg, tokenizer: PreTrainedTokenizer, _split="train"):
if cfg.local_rank not in [-1, 0] and _split == "train":
dist.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
if _split == "train":
input_file = cfg.train_file
elif _split == "dev":
input_file = cfg.dev_file
elif _split == "test":
input_file = cfg.test_file
else:
raise RuntimeError(_split)
examples, features, tensors = hydra.utils.call(cfg.read_tensor, file_path=input_file, tokenizer=tokenizer)
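# The reader configured under cfg.read_tensor is expected to return (examples, features, tensors);
# the tensor list is wrapped into a TensorDataset below.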
if cfg.local_rank == 0 and _split == "train":
dist.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
dataset = TensorDataset(*tensors)
return dataset, features
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig):
if cfg.local_rank == -1 or cfg.no_cuda:
device = str(torch.device("cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu"))
cfg.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(cfg.local_rank)
device = str(torch.device("cuda", cfg.local_rank))
dist.init_process_group(backend='nccl')
cfg.n_gpu = 1
cfg.world_size = dist.get_world_size()
cfg.device = device
global logger
logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
cfg.local_rank, device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)
# Set seed
set_seed(cfg)
# Load pre-trained model and tokenizer
if cfg.local_rank not in [-1, 0]:
dist.barrier() # Make sure only the first process in distributed training will download model & vocab
if cfg.pretrain:
pretrain_state_dict = torch.load(cfg.pretrain, map_location='cpu')
else:
pretrain_state_dict = None
tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path)
model = hydra.utils.call(cfg.model, cfg.model_name_or_path, state_dict=pretrain_state_dict)
if cfg.local_rank == 0:
dist.barrier() # Make sure only the first process in distributed training will download model & vocab
if cfg.local_rank == -1: # For FullyShardedDDP, place the model on cpu first.
model.to(cfg.device)
# logger.info("Training/evaluation parameters %s", OmegaConf.to_yaml(cfg))
if cfg.local_rank in [-1, 0] and cfg.do_train:
if not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
# Training
if cfg.do_train:
# TODO: Add option for continuously training from checkpoint.
# The operation should be introduced in ``train`` method since both the state dict
# of schedule and optimizer (and scaler, if any) should be loaded.
# If output files already exist, assume we continue training from the latest checkpoint (unless overwrite_output_dir is set)
continue_from_global_step = 0 # If set to 0, start training from the beginning
# if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
# checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/*/' + WEIGHTS_NAME, recursive=True)))
# if len(checkpoints) > 0:
# checkpoint = checkpoints[-1]
# logger.info("Resuming training from the latest checkpoint: %s", checkpoint)
# continue_from_global_step = int(checkpoint.split('-')[-1])
# model = model_class.from_pretrained(checkpoint)
# model.to(args.device)
train_dataset, features = load_and_cache_examples(cfg, tokenizer, _split="train")
global_step, tr_loss = train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Test
results = {}
if cfg.do_eval and cfg.local_rank in [-1, 0]:
checkpoints = [cfg.output_dir]
if cfg.save_best:
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
elif cfg.prediction_cfg.best_checkpoint and os.path.exists(cfg.prediction_cfg.best_checkpoint):
checkpoints = [cfg.prediction_cfg.best_checkpoint]
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
elif cfg.eval_sub_path:
checkpoints = list(
os.path.dirname(c) for c in
sorted(glob.glob(cfg.output_dir + f"/{cfg.eval_sub_path}/" + "pytorch_model.bin", recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info(" the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
split = "dev"
model = hydra.utils.call(cfg.model, checkpoint)
model.to(device)
if cfg.test_file:
prefix = f'test' + (f'-{prefix}' if prefix != "" else "")
split = "test"
result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
hydra_formatted_args = []
# convert the cli params added by torch.distributed.launch into Hydra format
for arg in sys.argv:
if arg.startswith("--"):
hydra_formatted_args.append(arg[len("--"):])
else:
hydra_formatted_args.append(arg)
sys.argv = hydra_formatted_args
main()
| [
"torch.cuda.amp.grad_scaler.GradScaler",
"torch.distributed.get_world_size",
"torch.utils.data.RandomSampler",
"torch.cuda.amp.autocast",
"torch.cuda.is_available",
"torch.load",
"torch.nn.DataParallel",
"torch.distributed.init_process_group",
"torch.utils.data.DataLoader",
"torch.utils.tensorboard.SummaryWriter",
"torch.device",
"torch.utils.data.SequentialSampler",
"torch.cuda.device_count",
"torch.cuda.set_device",
"torch.cuda.empty_cache",
"torch.distributed.barrier",
"torch.utils.data.TensorDataset",
"torch.no_grad",
"torch.utils.data.distributed.DistributedSampler"
] | 1.8.1 | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 |
1.8 | from datetime import datetime
import os
import pickle
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from mcmc_unlearner import sgmcmcUnlearner
import utils
import models
class myUnlearner(sgmcmcUnlearner):
def _apply_sample(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
lo = -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n
self.optimizer.zero_grad()
lo.backward()
self.optimizer.step()
def _fun(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
return -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n
def _z_fun(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
return F.cross_entropy(self.model(x), y, reduction='sum')
def get_args():
parser = argparse.ArgumentParser()
utils.add_shared_args(parser)
parser.add_argument('--rm-idx-path', type=str, default=None)
parser.add_argument('--save-freq', type=int, default=-1)
return parser.parse_args()
def get_forget_idx(dataset, kill_num):
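# Select `kill_num` random indices of samples whose label equals kill_val (class 0);
# these are the samples that will be unlearned below.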
kill_val = 0
if 'targets' in vars(dataset).keys():
labels = np.array(dataset.targets)
elif 'labels' in vars(dataset).keys():
labels = np.array(dataset.labels)
else:
raise NotImplementedError
randidx = np.random.permutation( np.where(labels==kill_val)[0] )
return randidx[:kill_num]
def evaluate(model, loader, cpu):
''' average loss and accuracy over the loader '''
loss = utils.AverageMeter()
acc = utils.AverageMeter()
n = len(loader.sampler.indices)
model.eval()
for x, y in loader:
if not cpu: x, y = x.cuda(), y.cuda()
with torch.no_grad():
_y = model(x)
lo = - model.log_prior() + F.cross_entropy(_y,y) * n
lo = lo.item()
ac = (_y.argmax(dim=1) == y).sum().item() / len(y)
loss.update(lo, len(y))
acc.update(ac, len(y))
return loss.average(), acc.average()
def forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log):
remain_train_loss, remain_train_acc = evaluate(model, train_loader, args.cpu)
forgetted_train_loss, forgetted_train_acc = evaluate(model, forgetted_train_loader, args.cpu)
test_loss, test_acc = evaluate(model, test_loader, args.cpu)
utils.add_log(log, 'remain_train_loss', remain_train_loss)
utils.add_log(log, 'remain_train_acc', remain_train_acc)
utils.add_log(log,'forgetted_train_loss', forgetted_train_loss)
utils.add_log(log,'forgetted_train_acc', forgetted_train_acc)
utils.add_log(log, 'test_loss', test_loss)
utils.add_log(log, 'test_acc', test_acc)
logger.info('remaining train loss {:.2e} \t train acc {:.2%}'
.format(remain_train_loss, remain_train_acc))
logger.info('forgetted train loss {:.2e} \t train acc {:.2%}'
.format(forgetted_train_loss, forgetted_train_acc))
logger.info('test loss {:.2e} \t test acc {:.2%}'
.format(test_loss, test_acc))
logger.info('')
def save_checkpoint(save_dir, save_name, log, model, optimizer):
with open('{}/{}-log.pkl'.format(save_dir, save_name), 'wb') as f:
pickle.dump(log, f)
torch.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, '{}/{}-model.pkl'.format(save_dir, save_name))
def main(args, logger):
''' retrieve lots of data '''
trainset, testset = utils.get_dataset(args.dataset)
if args.rm_idx_path is not None:
with open(args.rm_idx_path, 'rb') as f:
forgetted_idx = pickle.load(f)
else:
forgetted_idx = get_forget_idx(trainset, args.ifs_kill_num)
forgetted_idx_loader = utils.IndexBatchSampler(
batch_size=args.ifs_rm_bs, indices=forgetted_idx)
train_sampler = utils.DataSampler(trainset, args.batch_size)
train_loader = utils.DataLoader(trainset, args.batch_size)
train_loader.remove(forgetted_idx)
forgetted_train_loader = utils.DataLoader(trainset, args.batch_size)
forgetted_train_loader.set_sampler_indices(forgetted_idx)
test_loader = utils.DataLoader(testset, args.batch_size)
''' end of retrieving data '''
model = utils.get_mcmc_bnn_arch(args.arch, args.dataset, args.prior_sig)
if not args.cpu:
model.cuda()
args.lr /= len(trainset)
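# Note: the loss used by the unlearner scales with the dataset size (cross_entropy * n), so the
# learning rate is divided by len(trainset), presumably to keep the effective step size independent of n.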
optimizer = utils.get_optim(model.parameters(), args.optim,
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, sghmc_alpha=args.sghmc_alpha)
model.n = len(train_sampler)
''' restore model / sampler '''
state_dict = torch.load(args.resume_path)
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
''' for backward compatibility '''
for group in optimizer.param_groups:
if 'lr_decay' in group:
group['lr'] *= group['lr_decay']
group.pop('lr_decay')
del state_dict
unlearner = myUnlearner(
model = model,
optimizer = optimizer,
params = model.parameters(),
cpu = args.cpu,
iter_T = args.ifs_iter_T,
scaling = args.ifs_scaling,
samp_T = args.ifs_samp_T,)
log = dict()
log['user_time'] = 0
utils.add_log(log, 'forgetted_idx', forgetted_idx)
forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)
removed_nums = 0
freq_counter = 0
for ii in forgetted_idx_loader:
''' create forget-batch '''
xx, yy = [], []
for i in ii:
x, y = trainset[i]
if len(x.shape) == 3: x = x.reshape(1, *x.shape)
xx.append(x)
yy.append(y)
xx, yy = torch.cat(xx), torch.tensor(yy)
''' end '''
scaling = args.ifs_scaling / len(train_sampler)
unlearner.param_dict['scaling'] = scaling
''' start calculation of time '''
start_time = datetime.now()
unlearner.remove([xx,yy], train_sampler)
torch.cuda.synchronize()
end_time = datetime.now()
user_time = (end_time - start_time).total_seconds()
''' end calculation of time '''
log['user_time'] += user_time
train_sampler.remove(ii)
''' after removal, update the number of remaining datums '''
unlearner.model.n = len(train_sampler)
removed_nums += len(ii)
freq_counter += len(ii)
''' update mcmc sampler '''
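# Rescale the learning rate by old_size / new_size: since lr was divided by the dataset size
# above, this keeps lr * dataset_size constant as samples are removed.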
for group in unlearner.optimizer.param_groups:
group['lr'] *= (len(train_sampler) + len(ii)) / len(train_sampler)
logger.info('remaining trainset size {}'.format(len(train_sampler)))
logger.info('user time {:.3f} sec \t'
'cumulated user time {:.3f} mins'
.format(user_time, log['user_time']/60) )
if (args.save_freq > 0) and (freq_counter >= args.save_freq):
freq_counter = 0
save_checkpoint(args.save_dir, '{}-ckpt-{}'.format(args.save_name, removed_nums), log, model, optimizer)
forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)
save_checkpoint(args.save_dir, args.save_name, log, model, optimizer)
return
if __name__ == '__main__':
args = get_args()
logger = utils.generic_init(args)
try:
main(args, logger)
except Exception as e:
logger.exception('Unexpected exception! %s', e)
| [
"torch.cat",
"torch.cuda.synchronize",
"torch.no_grad",
"torch.nn.functional.cross_entropy",
"torch.tensor",
"torch.load"
] | 1.8.1 | fshp971/mcmc-unlearning | 3113dedca6de33bcaf316b804cb9c1e636db7fd5 |
1.9 | import time
import hashlib
import torch
from torch_geometric.data import DataLoader
from cgl.utils.params import ParamDict
from cgl.data.graph_data import CircuitInMemDataset, CircuitGraphDataset
# from cgl.models.gnn import DeepGENNet
s = time.time()
print('Loading the dataset ...')
root = '/store/nosnap/results/ngspice_biased_pmos_gain/two_stage_biased_pmos'
cir_dset = CircuitGraphDataset(root=root, mode='train', circuit_type='opamp_biased_pmos')
node_output_idx = next(iter(cir_dset.graph_nodes.values()))['V_net6']
vout_idx = torch.where((torch.where(cir_dset[0].output_node_mask)[0] == node_output_idx))[0].item()
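# vout_idx is the position of node 'V_net6' within the subset of nodes selected by
# output_node_mask; it is later used to index vac_mag as the (assumed) output-voltage entry.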
# gain mean and variance
gmean, gstd = -1.1057, 0.6559
def transform_fn(data):
data.gain = (data.vac_mag[vout_idx, 0].float() - gmean) / gstd
return data
dset = CircuitInMemDataset(root=root, mode='train', transform=transform_fn)
print(f'Dataset was loaded in {time.time() - s:.6f} seconds.')
sample_data = dset[0]
fract = 0.05
splits = dset.splits
train_idx = int(fract * len(splits['train']))
train_dset = dset[splits['train'][:train_idx]]
valid_dset = dset[splits['valid']]
test_dset = dset[splits['test']]
backbone_config = 'configs/opamp/dc/deep_gen_net/15-layer/config.py'
bb_id = hashlib.sha256(backbone_config.encode('utf-8')).hexdigest()[:6]
lr = 1e-3
activation = 'relu'
hidden_channels = 128
num_layers = 15
train_batch_size = min(256, len(train_dset))
valid_batch_size = min(256, len(valid_dset))
test_batch_size = min(256, len(test_dset))
exp_name = f'GAIN_PMOS_FT_Pool_{fract*10:.1f}_DeepGEN_h{hidden_channels}_nl{num_layers}_bs{train_batch_size}_lr{lr:.0e}_{activation}'
mdl_config = ParamDict(
exp_name=exp_name,
num_nodes=sample_data.vdc.shape[0],
in_channels=sample_data.x.shape[-1] + sample_data.type_tens.shape[-1],
hidden_channels=hidden_channels,
num_layers=num_layers,
dropout=0,
activation=activation,
bins=50,
lr=lr,
freeze_backbone=False,
use_pooling=True,
output_label='gain',
output_sigmoid=False,
lr_warmup={'peak_lr': lr, 'weight_decay': 0,
'warmup_updates': 50, 'tot_updates': 20000, 'end_lr': 5e-5},
)
train_dloader = DataLoader(train_dset, batch_size=train_batch_size, shuffle=True, num_workers=0)
valid_dloader = DataLoader(valid_dset, batch_size=valid_batch_size, num_workers=0)
test_dloader = DataLoader(test_dset, batch_size=test_batch_size, num_workers=0)
# .to converts the weight dtype to match input
# model = DeepGENNet(mdl_config).to(sample_data.x.dtype)
| [
"torch.where"
] | 1.9.0 | kouroshHakha/circuit-fewshot-code | 32007e119da30632736868a3f643027624bf08d2 |
1.0 | # File: rigidbody.py
import abc
import torch
from mechamodlearn import nn, utils
from mechamodlearn.models import CholeskyMMNet, PotentialNet, GeneralizedForceNet
class AbstractRigidBody:
@property
@abc.abstractmethod
def thetamask(self):
"""Returns theta mask for configuration q.
These should use utils.diffangles to compute differences
"""
@abc.abstractmethod
def mass_matrix(self, q):
"""Return mass matrix for configuration q"""
@abc.abstractmethod
def potential(self, q):
"""Return potential for configuration q"""
@abc.abstractmethod
def generalized_force(self, q, v, u):
"""Return generalized force for configuration q, velocity v, external torque u"""
def kinetic_energy(self, q, v):
mass_matrix = self.mass_matrix(q)
# TODO(jkg): Check if this works correctly for batched
kenergy = 0.5 * (v.unsqueeze(1) @ (mass_matrix @ v.unsqueeze(2))).squeeze(2)
return kenergy
def lagrangian(self, q, v):
""" Returns the Lagrangian of a mechanical system
"""
kenergy = self.kinetic_energy(q, v)
pot = self.potential(q)
lag = kenergy - pot
return lag
def hamiltonian(self, q, v):
""" Returns the Hamiltonian of a mechanical system
"""
kenergy = self.kinetic_energy(q, v)
pot = self.potential(q)
ham = kenergy + pot
return ham
def corriolisforce(self, q, v, mass_matrix=None):
""" Computes the corriolis matrix times v
"""
with torch.enable_grad():
if mass_matrix is None:
mass_matrix = self.mass_matrix(q)
Mv = mass_matrix @ v.unsqueeze(2)
KE = 0.5 * v.unsqueeze(1) @ Mv
Cv_KE = torch.autograd.grad(KE.sum(), q, retain_graph=True, create_graph=True)[0]
gMv = torch.stack([
torch.autograd.grad(Mv[:, i].sum(), q, retain_graph=True, create_graph=True)[0]
for i in range(q.size(1))
], dim=1)
Cv = gMv @ v.unsqueeze(2) - Cv_KE.unsqueeze(2)
return Cv
def corriolis(self, q, v, mass_matrix=None):
""" Computes the corriolis matrix
"""
with torch.enable_grad():
if mass_matrix is None:
mass_matrix = self.mass_matrix(q)
qdim = q.size(1)
B = mass_matrix.size(0)
mass_matrix = mass_matrix.reshape(-1, qdim, qdim)
# TODO vectorize
rows = []
for i in range(qdim):
cols = []
for j in range(qdim):
qgrad = torch.autograd.grad(
torch.sum(mass_matrix[:, i, j]), q, retain_graph=True, create_graph=True)[0]
cols.append(qgrad)
rows.append(torch.stack(cols, dim=1))
dMijk = torch.stack(rows, dim=1)
corriolis = 0.5 * ((dMijk + dMijk.transpose(2, 3) - dMijk.transpose(1, 3)
) @ v.reshape(B, 1, qdim, 1)).squeeze(3)
return corriolis
def gradpotential(self, q):
""" Returns the conservative forces acting on the system
"""
with torch.enable_grad():
pot = self.potential(q)
gvec = torch.autograd.grad(torch.sum(pot), q, retain_graph=True, create_graph=True)[0]
return gvec
def solve_euler_lagrange(self, q, v, u=None):
""" Computes `qddot` (generalized acceleration) by solving
the Euler-Lagrange equation (Eq 7 in the paper)
\qddot = M^-1 (F - Cv - G)
"""
with torch.enable_grad():
with utils.temp_require_grad((q, v)):
M = self.mass_matrix(q)
Cv = self.corriolisforce(q, v, M)
G = self.gradpotential(q)
F = torch.zeros_like(Cv)
if u is not None:
F = self.generalized_force(q, v, u)
# Solve M \qddot = F - Cv - G
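# Note: torch.gesv is the linear solver available in torch 1.0 (this row's pinned version);
# later PyTorch releases superseded it with torch.solve and eventually torch.linalg.solve.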
qddot = torch.gesv(F - Cv - G.unsqueeze(2), M)[0].squeeze(2)
return qddot
class LearnedRigidBody(AbstractRigidBody, torch.nn.Module):
def __init__(self, qdim: int, udim: int, thetamask: torch.tensor, mass_matrix=None,
potential=None, generalized_force=None, hidden_sizes=None):
"""
Arguments:
- `qdim`:
- `udim`: [int]
- `thetamask`: [torch.Tensor (1, qdim)] 1 if angle, 0 otherwise
- `mass_matrix`: [torch.nn.Module]
- `potential`: [torch.nn.Module]
- `generalized_force`: [torch.nn.Module]
- hidden_sizes: [list]
"""
self._qdim = qdim
self._udim = udim
self._thetamask = thetamask
super().__init__()
if mass_matrix is None:
mass_matrix = CholeskyMMNet(qdim, hidden_sizes=hidden_sizes)
self._mass_matrix = mass_matrix
if potential is None:
potential = PotentialNet(qdim, hidden_sizes=hidden_sizes)
self._potential = potential
if generalized_force is None:
generalized_force = GeneralizedForceNet(qdim, udim, hidden_sizes)
self._generalized_force = generalized_force
def mass_matrix(self, q):
return self._mass_matrix(q)
def potential(self, q):
return self._potential(q)
def generalized_force(self, q, v, u):
return self._generalized_force(q, v, u)
@property
def thetamask(self):
return self._thetamask
def forward(self, q, v, u=None):
return self.solve_euler_lagrange(q, v, u)
| [
"torch.zeros_like",
"torch.enable_grad",
"torch.stack",
"torch.sum"
] | 1.0 | sisl/mechamodlearn | ed514b5d1193ce546b0221ba9222b0228d6c319a |
1.6 | """
Experiment config to evaluate a PointNav RGB policy
trained with Nav. Loss + Rotation Prediction
Supports "Clean" and the following visual corruptions
- Defocus Blur
- Motion Blur
- Spatter
- Low Lighting
- Speckle
"""
# Required imports
import glob
import os
from abc import ABC
from math import ceil
from typing import Dict, Any, List, Optional, Sequence, Union
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torchvision import models
from allenact.base_abstractions.experiment_config import ExperimentConfig
from allenact.base_abstractions.preprocessor import Preprocessor
from allenact.base_abstractions.sensor import Sensor, RotationSensor
from allenact.base_abstractions.experiment_config import MachineParams
from allenact.base_abstractions.preprocessor import SensorPreprocessorGraph
from allenact.base_abstractions.sensor import SensorSuite, ExpertActionSensor
from allenact.base_abstractions.task import TaskSampler
from allenact.utils.experiment_utils import evenly_distribute_count_into_bins
from allenact.utils.system import get_logger
from allenact.utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
)
from allenact_plugins.ithor_plugin.ithor_sensors import RGBSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import DepthSensorThor
from allenact_plugins.robothor_plugin.robothor_sensors import GPSCompassSensorRoboThor
from allenact_plugins.ithor_plugin.ithor_util import horizontal_to_vertical_fov
from allenact_plugins.robothor_plugin.robothor_task_samplers import (
PointNavDatasetTaskSampler,
)
from allenact_plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from allenact_plugins.robothor_plugin.robothor_tasks import PointNavTask
from allenact.embodiedai.preprocessors.resnet import ResNetPreprocessor
from allenact.algorithms.onpolicy_sync.losses import PPO, RotationPred
from allenact.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from allenact.algorithms.onpolicy_sync.losses.rotation_pred import RotPredConfig
from projects.pointnav_baselines.models.point_nav_models import (
PointNavActorCriticSimpleConvRNN,
ResnetTensorPointNavActorCritic,
ResnetTensorAuxPointNavActorCritic,
)
from allenact.base_abstractions.sensor import DepthSensor, RGBSensor
class PointNavS2SRGBResNetDDPPO(ExperimentConfig, ABC):
"""A PointNav Experiment Config using RGB sensors and DDPPO"""
def __init__(self):
super().__init__()
# Task Parameters
self.ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
self.STEP_SIZE = 0.25
self.ROTATION_DEGREES = 30.0
self.DISTANCE_TO_GOAL = 0.2
self.STOCHASTIC = True
self.HORIZONTAL_FIELD_OF_VIEW = 79
self.CAMERA_WIDTH = 400
self.CAMERA_HEIGHT = 300
self.SCREEN_SIZE = 224
self.MAX_STEPS = 300
# Random crop specifications for data augmentations
self.CROP_WIDTH = 320
self.CROP_HEIGHT = 240
self.REWARD_CONFIG = {
"step_penalty": -0.01,
"goal_success_reward": 10.0,
"failed_stop_reward": 0.0,
"reached_max_steps_reward": 0.0,
"shaping_weight": 1.0,
}
self.NUM_PROCESSES = 60
self.TRAIN_GPU_IDS = list(range(torch.cuda.device_count()))
self.VALID_GPU_IDS = [torch.cuda.device_count() - 1]
self.TEST_GPU_IDS = [torch.cuda.device_count() - 1]
self.PREPROCESSORS = [
Builder(
ResNetPreprocessor,
{
"input_height": self.SCREEN_SIZE,
"input_width": self.SCREEN_SIZE,
"output_width": 7,
"output_height": 7,
"output_dims": 512,
"pool": False,
"torchvision_resnet_model": models.resnet18,
"input_uuids": ["rgb_lowres"],
"output_uuid": "rgb_resnet",
},
),
]
OBSERVATIONS = [
"rgb_resnet",
"target_coordinates_ind",
"rot_label",
]
self.ENV_ARGS = dict(
width=self.CAMERA_WIDTH,
height=self.CAMERA_HEIGHT,
continuousMode=True,
applyActionNoise=self.STOCHASTIC,
agentType="stochastic",
rotateStepDegrees=self.ROTATION_DEGREES,
gridSize=self.STEP_SIZE,
snapToGrid=False,
agentMode="locobot",
fieldOfView=horizontal_to_vertical_fov(
horizontal_fov_in_degrees=self.HORIZONTAL_FIELD_OF_VIEW,
width=self.CAMERA_WIDTH,
height=self.CAMERA_HEIGHT,
),
include_private_scenes=False,
renderDepthImage=False,
)
@classmethod
def tag(cls):
return "Pointnav-RoboTHOR-Vanilla-RGB-ResNet-Rot-Pred-DDPPO"
def monkey_patch_datasets(self, train_dataset, val_dataset, test_dataset):
if train_dataset is not None:
self.TRAIN_DATASET_DIR = os.path.join(os.getcwd(), train_dataset)
else:
self.TRAIN_DATASET_DIR = os.path.join(
os.getcwd(), "datasets/robothor-pointnav/train"
)
if val_dataset is not None:
self.VAL_DATASET_DIR = os.path.join(os.getcwd(), val_dataset)
else:
self.VAL_DATASET_DIR = os.path.join(
os.getcwd(), "datasets/robothor-pointnav/robustnav_eval"
)
if test_dataset is not None:
self.TEST_DATASET_DIR = os.path.join(os.getcwd(), test_dataset)
else:
self.TEST_DATASET_DIR = os.path.join(
os.getcwd(), "datasets/robothor-pointnav/robustnav_eval"
)
def monkey_patch_sensor(
self,
corruptions=None,
severities=None,
random_crop=False,
color_jitter=False,
random_shift=False,
):
self.SENSORS = [
RGBSensorThor(
height=self.SCREEN_SIZE,
width=self.SCREEN_SIZE,
use_resnet_normalization=True,
uuid="rgb_lowres",
corruptions=corruptions,
severities=severities,
random_crop=random_crop,
random_translate=random_shift,
crop_height=self.CROP_HEIGHT,
crop_width=self.CROP_WIDTH,
color_jitter=color_jitter,
# rotate=True,
),
GPSCompassSensorRoboThor(),
RotationSensor(uuid="rot_label"),
]
# DD-PPO Base
def training_pipeline(self, **kwargs):
ppo_steps = int(75000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 4
num_steps = 128
save_interval = 5000000
log_interval = 10000 if torch.cuda.is_available() else 1
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={
"ppo_loss": PPO(**PPOConfig),
"rotation_pred_loss": RotationPred(**RotPredConfig),
},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(
loss_names=["ppo_loss", "rotation_pred_loss"],
max_stage_steps=ppo_steps,
loss_weights=[1.0, 0.01],
)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
# Model base requirements
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
rgb_uuid = "rgb_resnet"
goal_sensor_uuid = "target_coordinates_ind"
return ResnetTensorAuxPointNavActorCritic(
action_space=gym.spaces.Discrete(len(PointNavTask.class_action_names())),
observation_space=kwargs["sensor_preprocessor_graph"].observation_spaces,
goal_sensor_uuid=goal_sensor_uuid,
rgb_resnet_preprocessor_uuid=rgb_uuid,
hidden_size=512,
goal_dims=32,
aux_mode=True,
rot_mode=True,
)
def machine_params(self, mode="train", **kwargs):
sampler_devices: Sequence[int] = []
if mode == "train":
workers_per_device = 1
gpu_ids = (
[]
if not torch.cuda.is_available()
else self.TRAIN_GPU_IDS * workers_per_device
)
nprocesses = (
1
if not torch.cuda.is_available()
else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))
)
sampler_devices = self.TRAIN_GPU_IDS
elif mode == "valid":
nprocesses = 1 if torch.cuda.is_available() else 0
gpu_ids = [] if not torch.cuda.is_available() else self.VALID_GPU_IDS
elif mode == "test":
nprocesses = 15
gpu_ids = [] if not torch.cuda.is_available() else self.TEST_GPU_IDS
else:
raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
sensor_preprocessor_graph = (
SensorPreprocessorGraph(
source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
preprocessors=self.PREPROCESSORS,
)
if mode == "train"
or (
(isinstance(nprocesses, int) and nprocesses > 0)
or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
)
else None
)
return MachineParams(
nprocesses=nprocesses,
devices=gpu_ids,
sampler_devices=sampler_devices
if mode == "train"
else gpu_ids, # ignored with > 1 gpu_ids
sensor_preprocessor_graph=sensor_preprocessor_graph,
)
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
return PointNavDatasetTaskSampler(**kwargs)
@staticmethod
def _partition_inds(n: int, num_parts: int):
return np.round(np.linspace(0, n, num_parts + 1, endpoint=True)).astype(
np.int32
)
def _get_sampler_args_for_scene_split(
self,
scenes_dir: str,
process_ind: int,
total_processes: int,
devices: Optional[List[int]],
seeds: Optional[List[int]],
deterministic_cudnn: bool,
include_expert_sensor: bool = True,
) -> Dict[str, Any]:
path = os.path.join(scenes_dir, "*.json.gz")
scenes = [scene.split("/")[-1].split(".")[0] for scene in glob.glob(path)]
if len(scenes) == 0:
raise RuntimeError(
(
"Could find no scene dataset information in directory {}."
" Are you sure you've downloaded them? "
" If not, see https://allenact.org/installation/download-datasets/ information"
" on how this can be done."
).format(scenes_dir)
)
oversample_warning = (
f"Warning: oversampling some of the scenes ({scenes}) to feed all processes ({total_processes})."
" You can avoid this by setting a number of workers divisible by the number of scenes"
)
if total_processes > len(scenes): # oversample some scenes -> bias
if total_processes % len(scenes) != 0:
get_logger().warning(oversample_warning)
scenes = scenes * int(ceil(total_processes / len(scenes)))
scenes = scenes[: total_processes * (len(scenes) // total_processes)]
elif len(scenes) % total_processes != 0:
get_logger().warning(oversample_warning)
inds = self._partition_inds(len(scenes), total_processes)
return {
"scenes": scenes[inds[process_ind] : inds[process_ind + 1]],
"max_steps": self.MAX_STEPS,
"sensors": [
s
for s in self.SENSORS
if (include_expert_sensor or not isinstance(s, ExpertActionSensor))
],
"action_space": gym.spaces.Discrete(len(PointNavTask.class_action_names())),
"seed": seeds[process_ind] if seeds is not None else None,
"deterministic_cudnn": deterministic_cudnn,
"rewards_config": self.REWARD_CONFIG,
"env_args": {
**self.ENV_ARGS,
"x_display": (
f"0.{devices[process_ind % len(devices)]}"
if devices is not None
and len(devices) > 0
and devices[process_ind % len(devices)] >= 0
else None
),
},
}
def train_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
os.path.join(self.TRAIN_DATASET_DIR, "episodes"),
process_ind,
total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
)
res["scene_directory"] = self.TRAIN_DATASET_DIR
res["loop_dataset"] = True
res["allow_flipping"] = True
return res
def valid_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
os.path.join(self.VAL_DATASET_DIR, "episodes"),
process_ind,
total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
include_expert_sensor=False,
)
res["scene_directory"] = self.VAL_DATASET_DIR
res["loop_dataset"] = False
return res
def test_task_sampler_args(
self,
process_ind: int,
total_processes: int,
devices: Optional[List[int]] = None,
seeds: Optional[List[int]] = None,
deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
res = self._get_sampler_args_for_scene_split(
scenes_dir=os.path.join(self.TEST_DATASET_DIR, "episodes"),
process_ind=process_ind,
total_processes=total_processes,
devices=devices,
seeds=seeds,
deterministic_cudnn=deterministic_cudnn,
include_expert_sensor=False,
)
res["scene_directory"] = self.TEST_DATASET_DIR
res["loop_dataset"] = False
return res
| [
"torch.cuda.is_available",
"torch.cuda.device_count"
] | 1.6.0 | DexiongYung/robustnav_AE | f2b1b5bb8780e4e6ae5f81c127b7589cfc949801 |
1.4 | # Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
from collections import OrderedDict
from typing import Union
import numpy as np
import torch
import torch.distributions.constraints as constraints
import pyro
import pyro.distributions as dist
from netcal.scaling import AbstractLogisticRegression
class LogisticCalibration(AbstractLogisticRegression):
"""
On classification, apply the logistic calibration method aka Platt scaling to obtain a
calibration mapping. This method is originally proposed by [1]_.
For the multiclass case, we use the Vector scaling proposed in [2]_.
On detection mode, this calibration method uses multiple independent normal distributions to obtain a
calibration mapping by means of the confidence as well as additional features [3]_. This calibration scheme
assumes independence between all variables.
On detection, it is necessary to provide all data in input parameter ``X`` as an NumPy array
of shape ``(n_samples, n_features)``,
whereas the confidence must be the first feature given in the input array. The ground-truth samples ``y``
must be an array of shape ``(n_samples,)`` consisting of binary labels :math:`y \\in \\{0, 1\\}`. Those
labels indicate if the according sample has matched a ground truth box :math:`\\text{m}=1` or is a false
prediction :math:`\\text{m}=0`.
**Mathematical background:** For confidence calibration in classification tasks, a
confidence mapping :math:`g` is applied on top of a miscalibrated scoring classifier :math:`\\hat{p} = h(x)` to
deliver a calibrated confidence score :math:`\\hat{q} = g(h(x))`.
For detection calibration, we can also use the additional box regression output which we denote as
:math:`\\hat{r} \\in [0, 1]^J` with :math:`J` as the number of dimensions used for the box encoding (e.g.
:math:`J=4` for x position, y position, width and height).
Therefore, the calibration map is not only a function of the confidence score, but also of :math:`\\hat{r}`.
To define a general calibration map for binary problems, we use the logistic function and the combined
input :math:`s = (\\hat{p}, \\hat{r})` of size K by
.. math::
g(s) = \\frac{1}{1 + \\exp(-z(s))} ,
According to [1]_, we can interpret the logit :math:`z` as the logarithm of the posterior odds
.. math::
z(s) = \\log \\frac{f(\\text{m}=1 | s)}{f(\\text{m}=0 | s)} \\approx
\\log \\frac{f(s | \\text{m}=1)}{f(s | \\text{m}=0)} = \\ell r(s)
If we assume independence of all variables given in :math:`s`, we can use multiple univariate probability
density distributions with the same variance to obtain a calibration mapping. Using this formulation, we can
simply extend the scaling factor (from classification logistic calibration) to a scaling
vector :math:`w \\in \\mathbb{R}^K`.
However, instead of using the uncalibrated confidence estimate :math:`\\hat{p}`, we use the logit of the
network as part of :math:`s` to be conform with the original formulation in [1]_ and [2]_. Thus,
the log-likelihood ratio can be expressed as
.. math::
\\ell r(s) = s^T w + c,
with bias :math:`c \\in \\mathbb{R}`.
We utilize standard optimization methods to determine the calibration mapping :math:`g(s)`.
Parameters
----------
temperature_only : bool, default: False
If True, use Temperature Scaling instead of Platt/Vector Scaling.
method : str, default: "mle"
Method that is used to obtain a calibration mapping:
- 'mle': Maximum likelihood estimate without uncertainty using a convex optimizer.
- 'momentum': MLE estimate using Momentum optimizer for non-convex optimization.
- 'variational': Variational Inference with uncertainty.
- 'mcmc': Markov-Chain Monte-Carlo sampling with uncertainty.
momentum_epochs : int, optional, default: 1000
Number of epochs used by momentum optimizer.
mcmc_steps : int, optional, default: 20
Number of weight samples obtained by MCMC sampling.
mcmc_chains : int, optional, default: 1
Number of Markov-chains used in parallel for MCMC sampling (this will result
in mcmc_steps * mcmc_chains samples).
mcmc_warmup_steps : int, optional, default: 100
Warmup steps used for MCMC sampling.
vi_epochs : int, optional, default: 1000
Number of epochs used for ELBO optimization.
detection : bool, default: False
If False, the input array 'X' is treated as multi-class confidence input (softmax)
with shape (n_samples, [n_classes]).
If True, the input array 'X' is treated as a box predictions with several box features (at least
box confidence must be present) with shape (n_samples, [n_box_features]).
independent_probabilities : bool, optional, default: False
Boolean for multi class probabilities.
If set to True, the probability estimates for each
class are treated as independent of each other (sigmoid).
use_cuda : str or bool, optional, default: False
Specify if CUDA should be used. If str, you can also specify the device
number like 'cuda:0', etc.
References
----------
.. [1] Platt, John:
"Probabilistic outputs for support vector machines and comparisons to regularized likelihood methods."
Advances in large margin classifiers 10.3: 61-74, 1999
`Get source online <https://www.researchgate.net/profile/John_Platt/publication/2594015_Probabilistic_Outputs_for_Support_Vector_Machines_and_Comparisons_to_Regularized_Likelihood_Methods/links/004635154cff5262d6000000.pdf>`_
.. [2] Chuan Guo, Geoff Pleiss, Yu Sun and Kilian Q. Weinberger:
"On Calibration of Modern Neural Networks."
Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.
`Get source online <https://arxiv.org/abs/1706.04599>`_
.. [3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:
"Multivariate Confidence Calibration for Object Detection."
The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops.
.. [4] Fabian Küppers, Jan Kronenberger, Jonas Schneider and Anselm Haselhoff:
"Bayesian Confidence Calibration for Epistemic Uncertainty Modelling."
2021 IEEE Intelligent Vehicles Symposium (IV), 2021
"""
def __init__(self, *args, temperature_only: bool = False, **kwargs):
""" Create an instance of `LogisticCalibration`. Detailed parameter description given in class docs. """
super().__init__(*args, **kwargs)
self.temperature_only = temperature_only
# -------------------------------------------------
@property
def intercept(self) -> Union[np.ndarray, float]:
""" Getter for intercept of logistic calibration. """
if self._sites is None:
raise ValueError("Intercept is None. You have to call the method 'fit' first.")
if self.temperature_only:
raise ValueError("There is no intercept for temperature scaling.")
return self._sites['bias']['values']
@property
def weights(self) -> Union[np.ndarray, float]:
""" Getter for weights of logistic calibration. """
if self._sites is None:
raise ValueError("Weights is None. You have to call the method 'fit' first.")
return self._sites['weights']['values']
# -------------------------------------------------
def prepare(self, X: np.ndarray) -> torch.Tensor:
"""
Preprocessing of input data before called at the beginning of the fit-function.
Parameters
----------
X : np.ndarray, shape=(n_samples, [n_classes]) or (n_samples, [n_box_features])
NumPy array with confidence values for each prediction on classification with shapes
1-D for binary classification, 2-D for multi class (softmax).
On detection, this array must have 2 dimensions with number of additional box features in last dim.
Returns
-------
torch.Tensor
Prepared data vector X as torch tensor.
"""
if len(X.shape) == 1:
X = np.reshape(X, (-1, 1))
# on detection mode, convert confidence to sigmoid and append the remaining features
if self.detection:
data_input = np.concatenate((self._inverse_sigmoid(X[:, 0]).reshape(-1, 1), X[:, 1:]), axis=1)
# on binary classification, simply convert the confidences to logits
elif self._is_binary_classification():
data_input = self._inverse_sigmoid(X)
# on multiclass classification, use inverse softmax instead
else:
data_input = self._inverse_softmax(X)
return torch.Tensor(data_input)
def prior(self):
"""
Prior definition of the weights used for logistic regression. This function has to populate
'self._sites' with the prior distribution, initial mean/scale and constraint of each site.
"""
self._sites = OrderedDict()
# on temperature scaling, we only have one single weight for all classes
if self.temperature_only:
self._sites['weights'] = {
'values': None,
'constraint': constraints.real,
'init': {
'mean': torch.ones(1),
'scale': torch.ones(1)
},
'prior': dist.Normal(torch.ones(1), 10 * torch.ones(1), validate_args=True)
}
else:
# on detection mode or binary classification, we have a weight for each given feature (one for binary
# classification) and bias
if self.detection or self._is_binary_classification():
num_bias = 1
num_weights = self.num_features
# on multiclass classification, we have one weight and one bias for each class separately
else:
num_bias = self.num_classes
num_weights = self.num_classes
# set properties for "weights"
self._sites['weights'] = {
'values': None,
'constraint': constraints.real,
'init': {
'mean': torch.ones(num_weights),
'scale': torch.ones(num_weights)
},
'prior': dist.Normal(torch.ones(num_weights), 10 * torch.ones(num_weights), validate_args=True),
}
# set properties for "bias"
self._sites['bias'] = {
'values': None,
'constraint': constraints.real,
'init': {
'mean': torch.zeros(num_bias),
'scale': torch.ones(num_bias)
},
'prior': dist.Normal(torch.zeros(num_bias), 10 * torch.ones(num_bias), validate_args=True),
}
def model(self, X: torch.Tensor = None, y: torch.Tensor = None) -> torch.Tensor:
"""
Definition of the log regression model.
Parameters
----------
X : torch.Tensor, shape=(n_samples, n_log_regression_features)
Input data that has been prepared by "self.prepare" function call.
y : torch.Tensor, shape=(n_samples, [n_classes])
Torch tensor with ground truth labels.
Either as label vector (1-D) or as one-hot encoded ground truth array (2-D) (for multiclass MLE only).
Returns
-------
torch.Tensor, shape=(n_samples, [n_classes])
Logit of the log regression model.
"""
# sample from prior - on MLE, this weight will be set as conditional
weights = pyro.sample("weights", self._sites["weights"]["prior"])
if self.temperature_only:
bias = 0.
else:
bias = pyro.sample("bias", self._sites["bias"]["prior"])
# on detection or binary classification, use dot product to sum up all given features to one logit
if self.detection or self._is_binary_classification():
# we need squeeze to remove last (unnecessary) dim to avoid site-effects
# temperature scaling: sinlge scalar
if self.temperature_only:
def logit_op(x, w, b): return torch.squeeze(torch.sum(torch.mul(x, w), dim=1))
# platt scaling: one weight for each feature given
else:
weights = torch.reshape(weights, (-1, 1))
def logit_op(x, w, b): return torch.squeeze(torch.matmul(x, w) + b)
# define as probabilistic output the sigmoid and a bernoulli distribution
prob_op = torch.sigmoid
dist_op = dist.Bernoulli
else:
# the op for calculating the logit is an element-wise multiplication
# for vector scaling and to keep multinomial output
def logit_op(x, w, b): return torch.mul(x, w) + b
# define as probabilistic output the softmax and a categorical distribution
def prob_op(logit): return torch.softmax(logit, dim=1)
dist_op = dist.Categorical
# the first dimension of the given input data is the "independent" sample dimension
with pyro.plate("data", X.shape[0]):
# calculate logit
logit = logit_op(X, weights, bias)
# if MLE, (slow) sampling is not necessary. However, this is needed for 'variational' and 'mcmc'
if self.method in ['variational', 'mcmc']:
probs = prob_op(logit)
pyro.sample("obs", dist_op(probs=probs, validate_args=True), obs=y)
return logit
| [
"torch.zeros",
"torch.mul",
"torch.softmax",
"torch.ones",
"torch.matmul",
"torch.Tensor",
"torch.reshape"
] | 1.4 | by-liu/calibration-framework | 7b306e4bbe6361d411b209759b7ba3d016bd0d17 |
1.8 | import torch
import torch.distributed as dist
from .parallel_mode import ParallelMode
from typing import Tuple
def _check_sanity():
from colossalai.core import global_context as gpc
if gpc.tensor_parallel_size > 1 or gpc.pipeline_parallel_size > 1:
raise NotImplementedError("Moe is not compatible with tensor or "
"pipeline parallel at present.")
class MoeParallelInfo:
"""Moe parallelism information, storing parallel sizes and groups.
"""
def __init__(self, ep_size: int, dp_size: int):
_check_sanity()
self.ep_size = ep_size
self.dp_size = dp_size
self.ep_group = None
        # data parallel group for experts; since ep_group is different,
        # we may have a dp_group different from get_group(ParallelMode.DATA)
self.dp_group = None
# Here we assume tensor parallel size = 1
# Otherwise, MoE can't be used
# Since TENSOR parallel group and DATA parallel group
# have been created, we can use them directly.
if ep_size == 1:
from colossalai.core import global_context as gpc
self.ep_group = gpc.get_group(ParallelMode.TENSOR)
self.dp_group = gpc.get_group(ParallelMode.DATA)
return
if dp_size == 1:
from colossalai.core import global_context as gpc
self.ep_group = gpc.get_group(ParallelMode.DATA)
self.dp_group = gpc.get_group(ParallelMode.TENSOR)
return
rank = dist.get_rank()
# Create expert parallel group
for i in range(dp_size):
ranks = [i * ep_size + j for j in range(ep_size)]
group = dist.new_group(ranks)
if rank in ranks:
self.ep_group = group
# Create data parallel group
for j in range(ep_size):
ranks = [i * ep_size + j for i in range(dp_size)]
group = dist.new_group(ranks)
if rank in ranks:
self.dp_group = group
class MoeContext:
"""MoE parallel context manager. This class manages different
parallel groups in MoE context and MoE loss in training.
"""
__instance = None
@staticmethod
def get_instance():
if MoeContext.__instance is None:
MoeContext.__instance = MoeContext()
return MoeContext.__instance
def __init__(self):
self.world_size = 1
# Users may want to set maximum expert parallel size smaller than the world size
# since very low bandwidth across nodes may constrain the performance of MoE
# When we have a maximum expert parallel size, we have a minimum data parallel size naturally
self.max_ep_size = 1
self.min_dp_size = 1
self.aux_loss = None
self.use_kernel_optim = True
self.has_setup = False
self._parallel_info_dict = dict()
@property
def parallel_info_dict(self):
return self._parallel_info_dict
@property
def is_initialized(self):
return self.has_setup
def setup(self, seed: int, use_kernel_optim: bool = True):
assert not self.is_initialized, "MoE distributed context shouldn't be set up again"
_check_sanity()
assert torch.cuda.is_available(), "MoE requires to enable CUDA first"
self.world_size = dist.get_world_size()
from colossalai.core import global_context as gpc
self.max_ep_size = gpc.config.get('max_ep_size', self.world_size)
        assert self.world_size % self.max_ep_size == 0, \
            "Maximum expert parallel size must be a factor of the number of GPUs"
self.min_dp_size = self.world_size // self.max_ep_size
# Enabling kernel optimization may raise error in some cases
        # Users can disable kernel optimization manually
self.use_kernel_optim = use_kernel_optim
from .random import moe_set_seed
moe_set_seed(seed)
self.has_setup = True
def get_info(self, num_experts: int) -> Tuple[int, MoeParallelInfo]:
"""Calculate the Data Parallel Group and Expert Parallel Group.
Parameters
----------
num_experts : int
            The number of experts
Returns
-------
int, MoeParallelInfo
number of local experts, the MoeParallelInfo of the current ep_size
"""
        gt_flag = num_experts % self.max_ep_size == 0    # whether num_experts is a multiple of max_ep_size
        lt_flag = self.max_ep_size % num_experts == 0    # whether max_ep_size is a multiple of num_experts
        assert gt_flag or lt_flag, "Automatic expert placement does not support an expert number that" \
            " is not a multiple of the ep size, or vice versa."
        # If the number of experts is greater than the maximum expert parallel size, a.k.a. ep_size,
        # there are multiple experts on each GPU and each GPU has different experts,
        # so its data parallel size is 1.
# Otherwise, there is only one expert in each GPU
# The data parallel size should be calculated
dp_size = 1 if gt_flag else self.max_ep_size // num_experts
ep_size = self.max_ep_size // dp_size
# Calculate the number of experts for each GPU
num_local_experts = 1 if lt_flag else num_experts // self.max_ep_size
        # Don't forget to multiply by the minimum data parallel size
dp_size *= self.min_dp_size
        if ep_size not in self.parallel_info_dict:
self.parallel_info_dict[ep_size] = MoeParallelInfo(ep_size, dp_size)
return num_local_experts, self.parallel_info_dict[ep_size]
def set_kernel_not_use(self):
self.use_kernel_optim = False
def reset_loss(self):
self.aux_loss = 0
def add_loss(self, loss):
self.aux_loss += loss
def get_loss(self):
return self.aux_loss
| [
"torch.distributed.get_world_size",
"torch.distributed.get_rank",
"torch.cuda.is_available",
"torch.distributed.new_group"
] | 1.8 | JunjieChen-2020/ColossalAI | 0e121a256ac4f628f5d26a16dc553cd0024ca2d5 |
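A pure-Python illustration (assumed sizes, no process group initialization) of the rank layout that MoeParallelInfo above builds with dist.new_group: consecutive ranks form an expert parallel group, and ranks holding the same expert index form the corresponding data parallel group.

world_size = 8
ep_size, dp_size = 4, 2                    # assumed; world_size == ep_size * dp_size

# Expert parallel groups: consecutive ranks split the experts among themselves.
ep_groups = [[i * ep_size + j for j in range(ep_size)] for i in range(dp_size)]
# Data parallel groups for experts: ranks that hold the same expert index.
dp_groups = [[i * ep_size + j for i in range(dp_size)] for j in range(ep_size)]

print(ep_groups)   # [[0, 1, 2, 3], [4, 5, 6, 7]]
print(dp_groups)   # [[0, 4], [1, 5], [2, 6], [3, 7]]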
1.6 | """Script to train the Hamiltonian Generative Network
"""
import ast
import argparse
import copy
import pprint
import os
import warnings
import yaml
import numpy as np
import torch
import tqdm
from utilities.integrator import Integrator
from utilities.training_logger import TrainingLogger
from utilities import loader
from utilities.loader import load_hgn, get_online_dataloaders, get_offline_dataloaders
from utilities.losses import reconstruction_loss, kld_loss, geco_constraint
from utilities.statistics import mean_confidence_interval
def _avoid_overwriting(experiment_id):
# This function throws an error if the given experiment data already exists in runs/
logdir = os.path.join('runs', experiment_id)
if os.path.exists(logdir):
assert len(os.listdir(logdir)) == 0,\
            f'Experiment id {experiment_id} already exists in runs/. Remove it or change the name ' \
            f'in the yaml file.'
class HgnTrainer:
def __init__(self, params, resume=False):
"""Instantiate and train the Hamiltonian Generative Network.
Args:
params (dict): Experiment parameters (see experiment_params folder).
"""
self.params = params
self.resume = resume
if not resume: # Fail if experiment_id already exist in runs/
_avoid_overwriting(params["experiment_id"])
# Set device
self.device = params["device"]
if "cuda" in self.device and not torch.cuda.is_available():
warnings.warn(
"Warning! Set to train in GPU but cuda is not available. Device is set to CPU.")
self.device = "cpu"
# Get dtype, will raise a 'module 'torch' has no attribute' if there is a typo
self.dtype = torch.__getattribute__(params["networks"]["dtype"])
        # Load hgn from parameters to device
self.hgn = load_hgn(params=self.params,
device=self.device,
dtype=self.dtype)
if 'load_path' in self.params:
self.load_and_reset(self.params, self.device, self.dtype)
# Either generate data on-the-fly or load the data from disk
if "train_data" in self.params["dataset"]:
print("Training with OFFLINE data...")
self.train_data_loader, self.test_data_loader = get_offline_dataloaders(self.params)
else:
print("Training with ONLINE data...")
self.train_data_loader, self.test_data_loader = get_online_dataloaders(self.params)
# Initialize training logger
self.training_logger = TrainingLogger(
hyper_params=self.params,
loss_freq=100,
rollout_freq=1000,
model_freq=10000
)
        # Path where the trained model will be saved
self.model_save_file = os.path.join(
self.params["model_save_dir"],
self.params["experiment_id"]
)
# Define optimization modules
optim_params = [
{
'params': self.hgn.encoder.parameters(),
'lr': params["optimization"]["encoder_lr"]
},
{
'params': self.hgn.transformer.parameters(),
'lr': params["optimization"]["transformer_lr"]
},
{
'params': self.hgn.hnn.parameters(),
'lr': params["optimization"]["hnn_lr"]
},
{
'params': self.hgn.decoder.parameters(),
'lr': params["optimization"]["decoder_lr"]
},
]
self.optimizer = torch.optim.Adam(optim_params)
def load_and_reset(self, params, device, dtype):
"""Load the HGN from the path specified in params['load_path'] and reset the networks in
params['reset'].
Args:
params (dict): Dictionary with all the necessary parameters to load the networks.
device (str): 'gpu:N' or 'cpu'
dtype (torch.dtype): Data type to be used in computations.
"""
self.hgn.load(params['load_path'])
if 'reset' in params:
if isinstance(params['reset'], list):
for net in params['reset']:
assert net in ['encoder', 'decoder', 'hamiltonian', 'transformer']
else:
assert params['reset'] in ['encoder', 'decoder', 'hamiltonian', 'transformer']
if 'encoder' in params['reset']:
self.hgn.encoder = loader.instantiate_encoder(params, device, dtype)
if 'decoder' in params['reset']:
self.hgn.decoder = loader.instantiate_decoder(params, device, dtype)
if 'transformer' in params['reset']:
self.hgn.transformer = loader.instantiate_transformer(params, device, dtype)
if 'hamiltonian' in params['reset']:
self.hgn.hnn = loader.instantiate_hamiltonian(params, device, dtype)
def training_step(self, rollouts):
"""Perform a training step with the given rollouts batch.
Args:
rollouts (torch.Tensor): Tensor of shape (batch_size, seq_len, channels, height, width)
corresponding to a batch of sampled rollouts.
Returns:
A dictionary of losses and the model's prediction of the rollout. The reconstruction loss and
KL divergence are floats and prediction is the HGNResult object with data of the forward pass.
"""
self.optimizer.zero_grad()
rollout_len = rollouts.shape[1]
input_frames = self.params['optimization']['input_frames']
assert(input_frames <= rollout_len) # optimization.use_steps must be smaller (or equal) to rollout.sequence_length
roll = rollouts[:, :input_frames]
hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)
target = rollouts[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)
prediction = hgn_output.reconstructed_rollout
if self.params["networks"]["variational"]:
tol = self.params["geco"]["tol"]
alpha = self.params["geco"]["alpha"]
lagrange_mult_param = self.params["geco"]["lagrange_multiplier_param"]
C, rec_loss = geco_constraint(target, prediction, tol) # C has gradient
# Compute moving average of constraint C (without gradient)
if self.C_ma is None:
self.C_ma = C.detach()
else:
self.C_ma = alpha * self.C_ma + (1 - alpha) * C.detach()
C_curr = C.detach().item() # keep track for logging
C = C + (self.C_ma - C.detach()) # Move C without affecting its gradient
# Compute KL divergence
mu = hgn_output.z_mean
logvar = hgn_output.z_logvar
kld = kld_loss(mu=mu, logvar=logvar)
# normalize by number of frames, channels and pixels per frame
kld_normalizer = prediction.flatten(1).size(1)
kld = kld / kld_normalizer
# Compute losses
train_loss = kld + self.langrange_multiplier * C
            # update the Lagrange multiplier and clamp it to avoid inf values
self.langrange_multiplier = self.langrange_multiplier * torch.exp(
lagrange_mult_param * C.detach())
self.langrange_multiplier = torch.clamp(self.langrange_multiplier, 1e-10, 1e10)
losses = {
'loss/train': train_loss.item(),
'loss/kld': kld.item(),
'loss/C': C_curr,
'loss/C_ma': self.C_ma.item(),
'loss/rec': rec_loss.item(),
'other/langrange_mult': self.langrange_multiplier.item()
}
else: # not variational
# Compute frame reconstruction error
train_loss = reconstruction_loss(
target=target,
prediction=prediction)
losses = {'loss/train': train_loss.item()}
train_loss.backward()
self.optimizer.step()
return losses, hgn_output
def fit(self):
"""The trainer fits an HGN.
Returns:
(HGN) An HGN model that has been fitted to the data
"""
# Initial values for geco algorithm
if self.params["networks"]["variational"]:
self.langrange_multiplier = self.params["geco"]["initial_lagrange_multiplier"]
self.C_ma = None
# TRAIN
for ep in range(self.params["optimization"]["epochs"]):
print("Epoch %s / %s" % (str(ep + 1), str(self.params["optimization"]["epochs"])))
pbar = tqdm.tqdm(self.train_data_loader)
for batch_idx, rollout_batch in enumerate(pbar):
# Move to device and change dtype
rollout_batch = rollout_batch.to(self.device).type(self.dtype)
# Do an optimization step
losses, prediction = self.training_step(rollouts=rollout_batch)
# Log progress
self.training_logger.step(losses=losses,
rollout_batch=rollout_batch,
prediction=prediction,
model=self.hgn)
# Progress-bar msg
msg = ", ".join([
f"{k}: {v:.2e}" for k, v in losses.items() if v is not None
])
pbar.set_description(msg)
# Save model
self.hgn.save(self.model_save_file)
self.test()
return self.hgn
def compute_reconst_kld_errors(self, dataloader):
"""Computes reconstruction error and KL divergence.
Args:
dataloader (torch.utils.data.DataLoader): DataLoader to retrieve errors from.
Returns:
(reconst_error_mean, reconst_error_h), (kld_mean, kld_h): Tuples where the mean and 95%
            confidence interval is shown.
"""
first = True
pbar = tqdm.tqdm(dataloader)
for _, rollout_batch in enumerate(pbar):
# Move to device and change dtype
rollout_batch = rollout_batch.to(self.device).type(self.dtype)
rollout_len = rollout_batch.shape[1]
input_frames = self.params['optimization']['input_frames']
assert(input_frames <= rollout_len) # optimization.use_steps must be smaller (or equal) to rollout.sequence_length
roll = rollout_batch[:, :input_frames]
hgn_output = self.hgn.forward(rollout_batch=roll, n_steps=rollout_len - input_frames)
target = rollout_batch[:, input_frames-1:] # Fit first input_frames and try to predict the last + the next (rollout_len - input_frames)
prediction = hgn_output.reconstructed_rollout
error = reconstruction_loss(
target=target,
prediction=prediction, mean_reduction=False).detach().cpu(
).numpy()
if self.params["networks"]["variational"]:
kld = kld_loss(mu=hgn_output.z_mean, logvar=hgn_output.z_logvar, mean_reduction=False).detach().cpu(
).numpy()
# normalize by number of frames, channels and pixels per frame
kld_normalizer = prediction.flatten(1).size(1)
kld = kld / kld_normalizer
if first:
first = False
set_errors = error
if self.params["networks"]["variational"]:
set_klds = kld
else:
set_errors = np.concatenate((set_errors, error))
if self.params["networks"]["variational"]:
set_klds = np.concatenate((set_klds, kld))
err_mean, err_h = mean_confidence_interval(set_errors)
if self.params["networks"]["variational"]:
kld_mean, kld_h = mean_confidence_interval(set_klds)
return (err_mean, err_h), (kld_mean, kld_h)
else:
return (err_mean, err_h), None
def test(self):
"""Test after the training is finished and logs result to tensorboard.
"""
print("Calculating final training error...")
(err_mean, err_h), kld = self.compute_reconst_kld_errors(self.train_data_loader)
self.training_logger.log_error("Train reconstruction error", err_mean, err_h)
if kld is not None:
kld_mean, kld_h = kld
self.training_logger.log_error("Train KL divergence", kld_mean, kld_h)
print("Calculating final test error...")
(err_mean, err_h), kld = self.compute_reconst_kld_errors(self.test_data_loader)
self.training_logger.log_error("Test reconstruction error", err_mean, err_h)
if kld is not None:
kld_mean, kld_h = kld
self.training_logger.log_error("Test KL divergence", kld_mean, kld_h)
def _overwrite_config_with_cmd_arguments(config, args):
if args.name is not None:
config['experiment_id'] = args.name[0]
if args.epochs is not None:
config['optimization']['epochs'] = args.epochs[0]
if args.dataset_path is not None:
# Read the parameters.yaml file in the given dataset path
        dataset_config = _read_config(os.path.join(args.dataset_path[0], 'parameters.yaml'))
for key, value in dataset_config.items():
config[key] = value
if args.env is not None:
if 'train_data' in config['dataset']:
raise ValueError(
f'--env was given but configuration is set for offline training: '
f'train_data={config["dataset"]["train_data"]}'
)
env_params = _read_config(DEFAULT_ENVIRONMENTS_PATH + args.env[0] + '.yaml')
config['environment'] = env_params['environment']
if args.params is not None:
for p in args.params:
key, value = p.split('=')
ptr = config
keys = key.split('.')
for i, k in enumerate(keys):
if i == len(keys) - 1:
ptr[k] = ast.literal_eval(value)
else:
ptr = ptr[k]
if args.load is not None:
config['load_path'] = args.load[0]
if args.reset is not None:
config['reset'] = args.reset
def _read_config(config_file):
with open(config_file, 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
return config
def _merge_configs(train_config, dataset_config):
config = copy.deepcopy(train_config)
for key, value in dataset_config.items():
config[key] = value
# If the config specifies a dataset path, we take the rollout from the configuration file
# in the given dataset
if 'dataset' in config and 'train_data' in config['dataset']:
dataset_config = _read_config( # Read parameters.yaml in root of given dataset
os.path.join(os.path.dirname(config['dataset']['train_data']), 'parameters.yaml'))
config['dataset']['rollout'] = dataset_config['dataset']['rollout']
return config
def _ask_confirmation(config):
printer = pprint.PrettyPrinter(indent=4)
    print('The training will be run with the following configuration:')
    printed_config = copy.deepcopy(config)
printed_config.pop('networks')
printer.pprint(printed_config)
print('Proceed? (y/n):')
if input() != 'y':
print('Abort.')
exit()
if __name__ == "__main__":
DEFAULT_TRAIN_CONFIG_FILE = "experiment_params/train_config_default.yaml"
DEFAULT_DATASET_CONFIG_FILE = "experiment_params/dataset_online_default.yaml"
DEFAULT_ENVIRONMENTS_PATH = "experiment_params/default_environments/"
DEFAULT_SAVE_MODELS_DIR = "saved_models/"
parser = argparse.ArgumentParser()
parser.add_argument(
'--train-config', action='store', nargs=1, type=str, required=True,
help=f'Path to the training configuration yaml file.'
)
parser.add_argument(
'--dataset-config', action='store', nargs=1, type=str, required=False,
help=f'Path to the dataset configuration yaml file.'
)
parser.add_argument(
'--name', action='store', nargs=1, required=False,
help='If specified, this name will be used instead of experiment_id of the yaml file.'
)
parser.add_argument(
'--epochs', action='store', nargs=1, type=int, required=False,
help='The number of training epochs. If not specified, optimization.epochs of the '
'training configuration will be used.'
)
parser.add_argument(
'--env', action='store', nargs=1, type=str, required=False,
help='The environment to use (for online training only). Possible values are '
'\'pendulum\', \'spring\', \'two_bodies\', \'three_bodies\', corresponding to '
'environment configurations in experiment_params/default_environments/. If not '
'specified, the environment specified in the given --dataset-config will be used.'
)
parser.add_argument(
'--dataset-path', action='store', nargs=1, type=str, required=False,
help='Path to a stored dataset to use for training. For offline training only. In this '
'case no dataset configuration file will be loaded.'
)
parser.add_argument(
'--params', action='store', nargs='+', required=False,
help='Override one or more parameters in the config. The format of an argument is '
'param_name=param_value. Nested parameters are accessible by using a dot, '
             'i.e. --params dataset.img_size=32. IMPORTANT: lists must be enclosed in double '
             'quotes, i.e. --params environment.mass="[0.5, 0.5]".'
)
parser.add_argument(
        '-y', action='store_true', default=False, required=False,
help='Whether to skip asking for user confirmation before starting the training.'
)
parser.add_argument(
'--resume', action='store', required=False, nargs='?', default=None,
help='NOT IMPLEMENTED YET. Resume the training from a saved model. If a path is provided, '
'the training will be resumed from the given checkpoint. Otherwise, the last '
'checkpoint will be taken from saved_models/<experiment_id>.'
)
parser.add_argument(
'--load', action='store', type=str, required=False, nargs=1,
help='Path from which to load the HGN.'
)
parser.add_argument(
'--reset', action='store', nargs='+', required=False,
        help='Use only in combination with --load; tells the trainer to reinstantiate the given '
'networks. Values: \'encoder\', \'transformer\', \'decoder\', \'hamiltonian\'.'
)
_args = parser.parse_args()
# Read configurations
_train_config = _read_config(_args.train_config[0])
if _args.dataset_path is None: # Will use the dataset config file (or default if not given)
_dataset_config_file = DEFAULT_DATASET_CONFIG_FILE if _args.dataset_config is None else \
_args.dataset_config[0]
_dataset_config = _read_config(_dataset_config_file)
_config = _merge_configs(_train_config, _dataset_config)
else: # Will use the dataset given in the command line arguments
assert _args.dataset_config is None, 'Both --dataset-path and --dataset-config were given.'
_config = _train_config
# Overwrite configuration with command line arguments
_overwrite_config_with_cmd_arguments(_config, _args)
# Show configuration and ask user for confirmation
if not _args.y:
_ask_confirmation(_config)
# Train HGN network
trainer = HgnTrainer(_config)
hgn = trainer.fit()
| [
"torch.__getattribute__",
"torch.optim.Adam",
"torch.clamp",
"torch.cuda.is_available"
] | 1.6.0 | feng-y16/Hamiltonian-Generative-Networks | 702d3ff3aec40eba20e17c5a1612b5b0b1e2f831 |
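A minimal, self-contained sketch (synthetic tensors, assumed hyperparameters) of the GECO-style update performed in training_step above: a moving average of the constraint, the shift that moves C's value without changing its gradient, and the clamped Lagrange multiplier.

import torch

alpha, lagrange_mult_param = 0.99, 0.1     # assumed stand-ins for params["geco"]
lagrange_multiplier = torch.tensor(1.0)
C_ma = None

for step in range(3):
    C = torch.randn(1, requires_grad=True).abs()     # stand-in reconstruction constraint
    kld = torch.rand(1, requires_grad=True)           # stand-in KL term
    C_ma = C.detach() if C_ma is None else alpha * C_ma + (1 - alpha) * C.detach()
    C_shifted = C + (C_ma - C.detach())               # value tracks C_ma, gradient tracks C
    loss = kld + lagrange_multiplier * C_shifted
    loss.backward()
    lagrange_multiplier = torch.clamp(
        lagrange_multiplier * torch.exp(lagrange_mult_param * C.detach()), 1e-10, 1e10)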
1.8 | # coding=utf-8
import math
import torch
import numpy as np
from torch.nn import init
from itertools import repeat
from torch.nn import functional as F
import collections.abc as container_abcs
from typing import Optional
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
class DOConv2d(Module):
"""
DOConv2d can be used as an alternative for torch.nn.Conv2d.
The interface is similar to that of Conv2d, with one exception:
1. D_mul: the depth multiplier for the over-parameterization.
    Note that the groups parameter switches between DO-Conv (groups=1),
DO-DConv (groups=in_channels), DO-GConv (otherwise).
"""
__constants__ = ['stride', 'padding', 'dilation', 'groups',
'padding_mode', 'output_padding', 'in_channels',
'out_channels', 'kernel_size', 'D_mul']
__annotations__ = {'bias': Optional[torch.Tensor]}
def __init__(self, in_channels, out_channels, kernel_size, D_mul=None, stride=1,
padding=0, dilation=1, groups=1, bias=True, padding_mode='zeros'):
super(DOConv2d, self).__init__()
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
valid_padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
if padding_mode not in valid_padding_modes:
raise ValueError("padding_mode must be one of {}, but got padding_mode='{}'".format(
valid_padding_modes, padding_mode))
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.padding_mode = padding_mode
self._padding_repeated_twice = tuple(x for x in self.padding for _ in range(2))
        #################################### Initialization of D & W ###################################
M = self.kernel_size[0]
N = self.kernel_size[1]
self.D_mul = M * N if D_mul is None or M * N <= 1 else D_mul
self.W = Parameter(torch.Tensor(out_channels, in_channels // groups, self.D_mul))
init.kaiming_uniform_(self.W, a=math.sqrt(5))
if M * N > 1:
self.D = Parameter(torch.Tensor(in_channels, M * N, self.D_mul))
init_zero = np.zeros([in_channels, M * N, self.D_mul], dtype=np.float32)
self.D.data = torch.from_numpy(init_zero)
eye = torch.reshape(torch.eye(M * N, dtype=torch.float32), (1, M * N, M * N))
D_diag = eye.repeat((in_channels, 1, self.D_mul // (M * N)))
if self.D_mul % (M * N) != 0: # the cases when D_mul > M * N
zeros = torch.zeros([in_channels, M * N, self.D_mul % (M * N)])
self.D_diag = Parameter(torch.cat([D_diag, zeros], dim=2), requires_grad=False)
else: # the case when D_mul = M * N
self.D_diag = Parameter(D_diag, requires_grad=False)
##################################################################################################
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
fan_in, _ = init._calculate_fan_in_and_fan_out(self.W)
bound = 1 / math.sqrt(fan_in)
init.uniform_(self.bias, -bound, bound)
else:
self.register_parameter('bias', None)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
if self.padding_mode != 'zeros':
s += ', padding_mode={padding_mode}'
return s.format(**self.__dict__)
def __setstate__(self, state):
super(DOConv2d, self).__setstate__(state)
if not hasattr(self, 'padding_mode'):
self.padding_mode = 'zeros'
def _conv_forward(self, input, weight):
if self.padding_mode != 'zeros':
return F.conv2d(F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight, self.bias, self.stride,
_pair(0), self.dilation, self.groups)
return F.conv2d(input, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
def forward(self, input):
M = self.kernel_size[0]
N = self.kernel_size[1]
DoW_shape = (self.out_channels, self.in_channels // self.groups, M, N)
if M * N > 1:
######################### Compute DoW #################
            # D has shape (in_channels, M * N, D_mul)
D = self.D + self.D_diag
W = torch.reshape(self.W, (self.out_channels // self.groups, self.in_channels, self.D_mul))
# einsum outputs (out_channels // groups, in_channels, M * N),
# which is reshaped to
# (out_channels, in_channels // groups, M, N)
DoW = torch.reshape(torch.einsum('ims,ois->oim', D, W), DoW_shape)
#######################################################
else:
# in this case D_mul == M * N
# reshape from
# (out_channels, in_channels // groups, D_mul)
# to
# (out_channels, in_channels // groups, M, N)
DoW = torch.reshape(self.W, DoW_shape)
return self._conv_forward(input, DoW)
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
_pair = _ntuple(2) | [
"torch.zeros",
"torch.cat",
"torch.nn.parameter.Parameter",
"torch.einsum",
"torch.from_numpy",
"torch.nn.init._calculate_fan_in_and_fan_out",
"torch.eye",
"torch.nn.init.uniform_",
"torch.nn.functional.pad",
"torch.Tensor",
"torch.nn.functional.conv2d",
"torch.reshape"
] | 1.8.0 | khawar512/OPVT | 690e540e7f54e43751d28a046009993e3e325291 |
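A quick usage sketch, assuming the DOConv2d class above is in scope; it is intended as a drop-in for torch.nn.Conv2d, and the sizes below are arbitrary.

import torch

conv = DOConv2d(in_channels=3, out_channels=16, kernel_size=3, padding=1)   # DO-Conv (groups=1)
x = torch.randn(2, 3, 32, 32)
print(conv(x).shape)    # torch.Size([2, 16, 32, 32])

dconv = DOConv2d(in_channels=3, out_channels=3, kernel_size=3, padding=1, groups=3)  # DO-DConv
print(dconv(x).shape)   # torch.Size([2, 3, 32, 32])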
1.4 | import torch
import torch.nn as nn
import torch.nn.functional as F
class MolDQN(nn.Module):
def __init__(self, input_length, output_length):
super(MolDQN, self).__init__()
self.linear_1 = nn.Linear(input_length, 1024)
self.linear_2 = nn.Linear(1024, 512)
self.linear_3 = nn.Linear(512, 128)
self.linear_4 = nn.Linear(128, 32)
self.linear_5 = nn.Linear(32, output_length)
self.activation = nn.ReLU()
def forward(self, x):
x = self.activation(self.linear_1(x))
x = self.activation(self.linear_2(x))
x = self.activation(self.linear_3(x))
x = self.activation(self.linear_4(x))
x = self.linear_5(x)
return x
| [
"torch.nn.Linear",
"torch.nn.ReLU"
] | 1.4.0 | iamchosenlee/MolDQN-pytorch | bda8a74eb9e5d2f3232a6a27b6a32928a3797f6d |
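A short usage sketch with hypothetical sizes: the input length would typically be the length of a molecular fingerprint plus any extra state features, and the output length the number of available actions.

import torch

model = MolDQN(input_length=2049, output_length=10)   # assumed: 2048-bit fingerprint + step counter
batch = torch.randn(4, 2049)
q_values = model(batch)
print(q_values.shape)   # torch.Size([4, 10])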
0.6 | import numpy as np
import torch
from sklearn.metrics import roc_auc_score
from tqdm import tqdm
import config as c
from model import get_cs_flow_model, save_model, FeatureExtractor, nf_forward
from utils import *
def train(train_loader, test_loader):
model = get_cs_flow_model()
optimizer = torch.optim.Adam(model.parameters(), lr=c.lr_init, eps=1e-04, weight_decay=1e-5)
model.to(c.device)
if not c.pre_extracted:
fe = FeatureExtractor()
fe.eval()
fe.to(c.device)
for param in fe.parameters():
param.requires_grad = False
z_obs = Score_Observer('AUROC')
for epoch in range(c.meta_epochs):
# train some epochs
model.train()
if c.verbose:
print(F'\nTrain epoch {epoch}')
for sub_epoch in range(c.sub_epochs):
train_loss = list()
for i, data in enumerate(tqdm(train_loader, disable=c.hide_tqdm_bar)):
optimizer.zero_grad()
inputs, labels = preprocess_batch(data) # move to device and reshape
if not c.pre_extracted:
inputs = fe(inputs)
z, jac = nf_forward(model, inputs)
loss = get_loss(z, jac)
train_loss.append(t2np(loss))
loss.backward()
norm = torch.nn.utils.clip_grad_norm_(model.parameters(), c.max_grad_norm)
optimizer.step()
mean_train_loss = np.mean(train_loss)
if c.verbose and epoch == 0 and sub_epoch % 4 == 0:
print('Epoch: {:d}.{:d} \t train loss: {:.4f}'.format(epoch, sub_epoch, mean_train_loss))
# evaluate
model.eval()
if c.verbose:
print('\nCompute loss and scores on test set:')
test_loss = list()
test_z = list()
test_labels = list()
with torch.no_grad():
for i, data in enumerate(tqdm(test_loader, disable=c.hide_tqdm_bar)):
inputs, labels = preprocess_batch(data)
if not c.pre_extracted:
inputs = fe(inputs)
z, jac = nf_forward(model, inputs)
loss = get_loss(z, jac)
z_concat = t2np(concat_maps(z))
score = np.mean(z_concat ** 2, axis=(1, 2))
test_z.append(score)
test_loss.append(t2np(loss))
test_labels.append(t2np(labels))
test_loss = np.mean(np.array(test_loss))
if c.verbose:
print('Epoch: {:d} \t test_loss: {:.4f}'.format(epoch, test_loss))
test_labels = np.concatenate(test_labels)
is_anomaly = np.array([0 if l == 0 else 1 for l in test_labels])
anomaly_score = np.concatenate(test_z, axis=0)
z_obs.update(roc_auc_score(is_anomaly, anomaly_score), epoch,
print_score=c.verbose or epoch == c.meta_epochs - 1)
if c.save_model:
model.to('cpu')
save_model(model, c.modelname)
return z_obs.max_score, z_obs.last, z_obs.min_loss_score
| [
"torch.no_grad"
] | 0.6.3 | MuhammadSYahyaS/cs-flow | bef320ae7b2063f1dce41fb2f2225228cd43a589 |
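A small synthetic sketch (random data, not the repository's utilities) of how the evaluation loop above turns latent maps into an image-level anomaly score and an AUROC: mean squared latent value per sample, then roc_auc_score over the whole test set.

import numpy as np
from sklearn.metrics import roc_auc_score

z_concat = np.random.randn(16, 304, 64)               # assumed: 16 samples, concatenated latent maps
anomaly_score = np.mean(z_concat ** 2, axis=(1, 2))   # one scalar score per sample

is_anomaly = np.array([0] * 8 + [1] * 8)               # synthetic ground-truth labels
print(roc_auc_score(is_anomaly, anomaly_score))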
0.4 | import os
import json
import torch
import numpy as np
from torch.utils import data
from PIL import Image
from ptsemseg.utils import recursive_glob
from ptsemseg.augmentations import Compose, RandomHorizontallyFlip, RandomRotate
class mapillaryVistasLoader(data.Dataset):
def __init__(
self,
root,
split="training",
img_size=(1025, 2049),
is_transform=True,
augmentations=None,
test_mode=False,
):
self.root = root
self.split = split
self.is_transform = is_transform
self.augmentations = augmentations
self.n_classes = 9
self.img_size = img_size if isinstance(img_size, tuple) else (img_size, img_size)
self.mean = np.array([80.5423, 91.3162, 81.4312])
self.files = {}
if not test_mode:
self.images_base = os.path.join(self.root, self.split, "images")
self.annotations_base = os.path.join(self.root, self.split, "labels")
self.files[split] = recursive_glob(rootdir=self.images_base, suffix=".jpg")
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
self.class_names, self.class_ids, self.class_colors, self.class_major_ids = self.parse_config()
self.ignore_id = 250
def parse_config(self):
with open(os.path.join(self.root, "config.json")) as config_file:
config = json.load(config_file)
labels = config["labels"]
class_names = []
class_ids = []
class_colors = []
class_major_ids = []
for label_id, label in enumerate(labels):
class_names.append(label["readable"])
class_ids.append(label_id)
class_colors.append(label["color"])
class_major_ids.append(label['majorclass'])
        print("There are {} major classes in the config file".format(len(set(class_major_ids))))
return class_names, class_ids, class_colors, class_major_ids
def __len__(self):
"""__len__"""
return len(self.files[self.split])
def __getitem__(self, index):
"""__getitem__
:param index:
"""
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(
self.annotations_base, os.path.basename(img_path).replace(".jpg", ".png")
)
img = Image.open(img_path)
lbl = Image.open(lbl_path)
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.is_transform:
img, lbl = self.transform(img, lbl)
return img, lbl
def transform(self, img, lbl):
if self.img_size == ("same", "same"):
pass
else:
img = img.resize(
(self.img_size[1], self.img_size[0]), resample=Image.LANCZOS
) # uint8 with RGB mode
lbl = lbl.resize((self.img_size[1], self.img_size[0]))
img = np.array(img).astype(np.float64) / 255.0
img = torch.from_numpy(img.transpose(2, 0, 1)).float() # From HWC to CHW
#
# lbl = torch.from_numpy(np.array(lbl)).long()
# lbl[lbl == 65] = self.ignore_id
#
lbl = torch.from_numpy(np.array(lbl)).long()
lbl[lbl == self.ignore_id] = 65
lbl = self.encode_segmap(lbl)
lbl[lbl == 0] = self.ignore_id
return img, lbl
def decode_segmap(self, temp):
class_major_colors = [[0, 0, 0],
[70, 70, 70],
[180, 165, 180],
[128, 64, 64],
[220, 20, 60],
[255, 255, 255],
[70, 130, 180],
[250, 170, 30],
[0, 0, 142]]
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, len(class_major_colors)):
r[temp == l] = class_major_colors[l][0]
g[temp == l] = class_major_colors[l][1]
b[temp == l] = class_major_colors[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
# rgb[:, :, 0] = r / 255.0
# rgb[:, :, 1] = g / 255.0
# rgb[:, :, 2] = b / 255.0
rgb[:, :, 0] = r
rgb[:, :, 1] = g
rgb[:, :, 2] = b
return rgb
def encode_segmap(self, mask):
# Put all void classes to zero
for id in self.class_ids:
mask[mask == id] = self.class_major_ids[id]+100
mask = mask - 100
return mask
if __name__ == "__main__":
augment = Compose([RandomHorizontallyFlip(0.5), RandomRotate(6)])
local_path = "/home/lin/Documents/dataset/mapillary"
dst = mapillaryVistasLoader(
local_path, split='validation', img_size=(512, 1024), is_transform=True, augmentations=None
)
bs = 1
trainloader = data.DataLoader(dst, batch_size=bs, num_workers=4, shuffle=True)
for i, data_samples in enumerate(trainloader):
x = dst.decode_segmap(data_samples[1][0].numpy())
x = Image.fromarray(np.uint8(x))
x.show()
| [
"torch.utils.data.DataLoader"
] | 0.4.1 | EEEGUI/Mapillary-vistas-semseg | d07a107fd08a7536f09f25e426a6f15033cbb609 |
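A toy, self-contained sketch of the decode_segmap idea used by the loader above: mapping integer class ids to RGB colors. The palette and label map here are made up; the loader itself uses its nine class_major_colors.

import numpy as np

palette = [[0, 0, 0], [70, 70, 70], [220, 20, 60]]   # toy 3-class palette
temp = np.array([[0, 1],
                 [2, 1]])                            # toy label map

rgb = np.zeros((temp.shape[0], temp.shape[1], 3), dtype=np.uint8)
for l, color in enumerate(palette):
    rgb[temp == l] = color

print(rgb[0, 1])   # [70 70 70]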
1.0 | # coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import shutil
import tempfile
import unittest
from typing import List
import numpy as np
import pandas as pd
from transformers import AddedToken
from transformers.models.tapas.tokenization_tapas import (
VOCAB_FILES_NAMES,
BasicTokenizer,
TapasTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_pandas,
require_scatter,
require_tensorflow_probability,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english, merge_model_tokenizer_mappings
@require_tokenizers
@require_pandas
class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = TapasTokenizer
test_rust_tokenizer = False
space_between_special_tokens = True
from_pretrained_filter = filter_non_english
test_seq2seq = False
def get_table(
self,
tokenizer: TapasTokenizer,
length=5,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
if length == 0:
data = {}
else:
data = {toks[0]: [toks[tok] for tok in range(1, length)]}
table = pd.DataFrame.from_dict(data)
return table
def get_table_and_query(
self,
tokenizer: TapasTokenizer,
length=5,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
table = self.get_table(tokenizer, length=length - 3)
query = " ".join(toks[:3])
return table, query
def get_clean_sequence(
self,
tokenizer: TapasTokenizer,
with_prefix_space=False,
max_length=20,
min_length=5,
empty_table: bool = False,
add_special_tokens: bool = True,
return_table_and_query: bool = False,
):
toks = [tokenizer.decode([i], clean_up_tokenization_spaces=False) for i in range(len(tokenizer))]
if empty_table:
table = pd.DataFrame.from_dict({})
query = " ".join(toks[:min_length])
else:
data = {toks[0]: [toks[tok] for tok in range(1, min_length - 3)]}
table = pd.DataFrame.from_dict(data)
query = " ".join(toks[:3])
output_ids = tokenizer.encode(table, query, add_special_tokens=add_special_tokens)
output_txt = tokenizer.decode(output_ids)
assert len(output_ids) >= min_length, "Update the code to generate the sequences so that they are larger"
assert len(output_ids) <= max_length, "Update the code to generate the sequences so that they are smaller"
if return_table_and_query:
return output_txt, output_ids, table, query
return output_txt, output_ids
def setUp(self):
super().setUp()
vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def get_input_output_texts(self, tokenizer):
input_text = "UNwant\u00E9d,running"
output_text = "unwanted, running"
return input_text, output_text
@require_tensorflow_probability
def test_tf_encode_plus_sent_to_model(self):
super().test_tf_encode_plus_sent_to_model()
def test_rust_and_python_full_tokenizers(self):
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
# With lower casing
tokenizer = self.get_tokenizer(do_lower_case=True)
rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)
sequence = "UNwant\u00E9d,running"
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
def test_chinese(self):
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def test_basic_tokenizer_lower(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def test_basic_tokenizer_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_lower_strip_accents_default(self):
tokenizer = BasicTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
)
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def test_basic_tokenizer_no_lower(self):
tokenizer = BasicTokenizer(do_lower_case=False)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_false(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_no_lower_strip_accents_true(self):
tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
)
def test_basic_tokenizer_respects_never_split_tokens(self):
tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
)
def test_wordpiece_tokenizer(self):
vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
vocab = {}
for i, token in enumerate(vocab_tokens):
vocab[token] = i
tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def test_is_whitespace(self):
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def test_is_control(self):
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def test_is_punctuation(self):
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def test_clean_text(self):
tokenizer = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual(
[tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], ["[EMPTY]"], ["[UNK]"]]
)
@slow
def test_sequence_builders(self):
tokenizer = self.tokenizer_class.from_pretrained("google/tapas-base-finetuned-wtq")
empty_table = self.get_table(tokenizer, length=0)
table = self.get_table(tokenizer, length=10)
text = tokenizer.encode(table, add_special_tokens=False)
text_2 = tokenizer.encode(empty_table, "multi-sequence build", add_special_tokens=False)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_pair == [101] + text + [102] + text_2
def test_offsets_with_special_characters(self):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
tokens = tokenizer_r.encode_plus(
sentence,
return_attention_mask=False,
return_token_type_ids=False,
return_offsets_mapping=True,
add_special_tokens=True,
)
do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
)
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def test_add_special_tokens(self):
tokenizers: List[TapasTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
input_table = self.get_table(tokenizer, length=0)
special_token = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode(input_table, special_token, add_special_tokens=False)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_add_tokens_tokenizer(self):
tokenizers: List[TapasTokenizer] = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
vocab_size = tokenizer.vocab_size
all_size = len(tokenizer)
self.assertNotEqual(vocab_size, 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
added_toks = tokenizer.add_tokens(new_toks)
vocab_size_2 = tokenizer.vocab_size
all_size_2 = len(tokenizer)
self.assertNotEqual(vocab_size_2, 0)
self.assertEqual(vocab_size, vocab_size_2)
self.assertEqual(added_toks, len(new_toks))
self.assertEqual(all_size_2, all_size + len(new_toks))
tokens = tokenizer.encode(table, "aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)
self.assertGreaterEqual(len(tokens), 4)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
vocab_size_3 = tokenizer.vocab_size
all_size_3 = len(tokenizer)
self.assertNotEqual(vocab_size_3, 0)
self.assertEqual(vocab_size, vocab_size_3)
self.assertEqual(added_toks_2, len(new_toks_2))
self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))
tokens = tokenizer.encode(
table,
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l",
add_special_tokens=False,
)
self.assertGreaterEqual(len(tokens), 6)
self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
self.assertGreater(tokens[0], tokens[1])
self.assertGreater(tokens[-2], tokenizer.vocab_size - 1)
self.assertGreater(tokens[-2], tokens[-3])
self.assertEqual(tokens[0], tokenizer.eos_token_id)
self.assertEqual(tokens[-2], tokenizer.pad_token_id)
@require_tokenizers
def test_encode_decode_with_spaces(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)]
tokenizer.add_tokens(new_toks)
input = "[ABC][DEF][ABC][DEF]"
if self.space_between_special_tokens:
output = "[ABC] [DEF] [ABC] [DEF]"
else:
output = input
encoded = tokenizer.encode(table, input, add_special_tokens=False)
decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
self.assertIn(decoded, [output, output.lower()])
def test_encode_plus_with_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence = "Sequence"
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequence)
padding_size = 10
padding_idx = tokenizer.pad_token_id
token_type_padding_idx = tokenizer.pad_token_type_id
encoded_sequence = tokenizer.encode_plus(table, sequence, return_special_tokens_mask=True)
input_ids = encoded_sequence["input_ids"]
special_tokens_mask = encoded_sequence["special_tokens_mask"]
sequence_length = len(input_ids)
# Test 'longest' and 'no_padding' don't do anything
tokenizer.padding_side = "right"
not_padded_sequence = tokenizer.encode_plus(
table,
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
assert sequence_length == not_padded_sequence_length
assert input_ids == not_padded_input_ids
assert special_tokens_mask == not_padded_special_tokens_mask
not_padded_sequence = tokenizer.encode_plus(
table,
sequence,
padding=False,
return_special_tokens_mask=True,
)
not_padded_input_ids = not_padded_sequence["input_ids"]
not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"]
not_padded_sequence_length = len(not_padded_input_ids)
assert sequence_length == not_padded_sequence_length
assert input_ids == not_padded_input_ids
assert special_tokens_mask == not_padded_special_tokens_mask
# Test right padding
tokenizer.padding_side = "right"
right_padded_sequence = tokenizer.encode_plus(
table,
sequence,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
right_padded_input_ids = right_padded_sequence["input_ids"]
right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"]
right_padded_sequence_length = len(right_padded_input_ids)
assert sequence_length + padding_size == right_padded_sequence_length
assert input_ids + [padding_idx] * padding_size == right_padded_input_ids
assert special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask
# Test left padding
tokenizer.padding_side = "left"
left_padded_sequence = tokenizer.encode_plus(
table,
sequence,
max_length=sequence_length + padding_size,
padding="max_length",
return_special_tokens_mask=True,
)
left_padded_input_ids = left_padded_sequence["input_ids"]
left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"]
left_padded_sequence_length = len(left_padded_input_ids)
assert sequence_length + padding_size == left_padded_sequence_length
assert [padding_idx] * padding_size + input_ids == left_padded_input_ids
assert [1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask
if "token_type_ids" in tokenizer.model_input_names:
token_type_ids = encoded_sequence["token_type_ids"]
left_padded_token_type_ids = left_padded_sequence["token_type_ids"]
right_padded_token_type_ids = right_padded_sequence["token_type_ids"]
assert (
token_type_ids + [[token_type_padding_idx] * 7] * padding_size == right_padded_token_type_ids
)
assert [[token_type_padding_idx] * 7] * padding_size + token_type_ids == left_padded_token_type_ids
if "attention_mask" in tokenizer.model_input_names:
attention_mask = encoded_sequence["attention_mask"]
right_padded_attention_mask = right_padded_sequence["attention_mask"]
left_padded_attention_mask = left_padded_sequence["attention_mask"]
assert attention_mask + [0] * padding_size == right_padded_attention_mask
assert [0] * padding_size + attention_mask == left_padded_attention_mask
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
input_text, output_text = self.get_input_output_texts(tokenizer)
tokens = tokenizer.tokenize(input_text)
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(table, input_text, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
self.assertEqual(text_2, output_text)
def test_mask_output(self):
tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table, query = self.get_table_and_query(tokenizer)
if (
tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer"
and "token_type_ids" in tokenizer.model_input_names
):
information = tokenizer.encode_plus(table, query, add_special_tokens=True)
sequences, mask = information["input_ids"], information["token_type_ids"]
self.assertEqual(len(sequences), len(mask))
@unittest.skip("TAPAS tokenizer only handles two sequences.")
def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip("TAPAS tokenizer only handles two sequences.")
def test_maximum_encoding_length_single_input(self):
pass
def test_number_of_added_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table, query = self.get_table_and_query(tokenizer)
sequences = tokenizer.encode(table, query, add_special_tokens=False)
attached_sequences = tokenizer.encode(table, query, add_special_tokens=True)
# Method is implemented (e.g. not GPT-2)
if len(attached_sequences) != 2:
self.assertEqual(
tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences)
)
def test_padding_to_max_length(self):
"""We keep this test for backward compatibility but it should be removed when `pad_to_max_length` will be deprecated"""
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer)
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
# FIXME: the next line should be padding(max_length) to avoid warning
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding=True
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# Check that nothing is done when a maximum length is not specified
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence, pad_to_max_length=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
def test_call(self):
# Tests that all call wrap to encode_plus and batch_encode_plus
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
# Test not batched
table = self.get_table(tokenizer, length=0)
encoded_sequences_1 = tokenizer.encode_plus(table, sequences[0])
encoded_sequences_2 = tokenizer(table, sequences[0])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test not batched pairs
table = self.get_table(tokenizer, length=10)
encoded_sequences_1 = tokenizer.encode_plus(table, sequences[1])
encoded_sequences_2 = tokenizer(table, sequences[1])
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
# Test batched
table = self.get_table(tokenizer, length=0)
encoded_sequences_1 = tokenizer.batch_encode_plus(table, sequences)
encoded_sequences_2 = tokenizer(table, sequences)
self.assertEqual(encoded_sequences_1, encoded_sequences_2)
def test_batch_encode_plus_batch_sequence_length(self):
# Tests that all encoded values have the correct size
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
encoded_sequences = [tokenizer.encode_plus(table, sequence) for sequence in sequences]
encoded_sequences_batch = tokenizer.batch_encode_plus(table, sequences, padding=False)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
maximum_length = len(
max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len)
)
# check correct behaviour if no pad_token_id exists and add it eventually
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences_padded = [
tokenizer.encode_plus(table, sequence, max_length=maximum_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch_padded = tokenizer.batch_encode_plus(table, sequences, padding=True)
self.assertListEqual(
encoded_sequences_padded,
self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded),
)
# check that 'longest' padding is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(table, sequences, padding=True)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
table, sequences, max_length=maximum_length + 10, padding="longest"
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
# check that 'no_padding' is insensitive to a max length
encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(table, sequences, padding=False)
encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(
table, sequences, max_length=maximum_length + 10, padding=False
)
for key in encoded_sequences_batch_padded_1.keys():
self.assertListEqual(
encoded_sequences_batch_padded_1[key],
encoded_sequences_batch_padded_2[key],
)
@unittest.skip("batch_encode_plus does not handle overflowing tokens.")
def test_batch_encode_plus_overflowing_tokens(self):
pass
def test_batch_encode_plus_padding(self):
# Test that padded sequences are equivalent between batch_encode_plus and encode_plus
# Right padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(table, sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
table, sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
# Left padding tests
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
tokenizer.padding_side = "left"
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
max_length = 100
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequences)
encoded_sequences = [
tokenizer.encode_plus(table, sequence, max_length=max_length, padding="max_length")
for sequence in sequences
]
encoded_sequences_batch = tokenizer.batch_encode_plus(
table, sequences, max_length=max_length, padding="max_length"
)
self.assertListEqual(
encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch)
)
def test_padding_to_multiple_of(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
if tokenizer.pad_token is None:
self.skipTest("No padding token.")
else:
empty_tokens = tokenizer(table, padding=True, pad_to_multiple_of=8)
normal_tokens = tokenizer(table, "This is a sample input", padding=True, pad_to_multiple_of=8)
for key, value in empty_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
normal_tokens = tokenizer(table, "This", pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
# Should also work with truncation
normal_tokens = tokenizer(table, "This", padding=True, truncation=True, pad_to_multiple_of=8)
for key, value in normal_tokens.items():
self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8")
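# Editor's sketch (not part of the original suite): a minimal, hedged illustration of
# `pad_to_multiple_of`, which rounds padded lengths up to the next multiple of the given value
# (useful for hardware-friendly shapes). The table contents are illustrative assumptions; the
# checkpoint name is the one already used elsewhere in this file.
def _example_pad_to_multiple_of(self):
example_tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
example_table = pd.DataFrame.from_dict({"City": ["Paris", "Tokyo"], "Population": ["2M", "14M"]})
encoding = example_tokenizer(example_table, "Which city is larger?", padding=True, pad_to_multiple_of=8)
# Every encoded sequence length is now rounded up to a multiple of 8.
self.assertEqual(len(encoding["input_ids"]) % 8, 0)
return encoding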
@unittest.skip("TAPAS cannot handle `prepare_for_model` without passing by `encode_plus` or `batch_encode_plus`")
def test_prepare_for_model(self):
pass
def test_tokenizer_slow_store_full_signature(self):
signature = inspect.signature(self.tokenizer_class.__init__)
tokenizer = self.get_tokenizer()
for parameter_name, parameter in signature.parameters.items():
if parameter.default != inspect.Parameter.empty:
self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_special_tokens_mask_input_pairs(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequence_0 = "Encode this."
empty_table = self.get_table(tokenizer, length=0)
table = self.get_table(tokenizer, length=10)
encoded_sequence = tokenizer.encode(empty_table, sequence_0, add_special_tokens=False)
encoded_sequence += tokenizer.encode(table, "", add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
table,
sequence_0,
add_special_tokens=True,
return_special_tokens_mask=True,
# add_prefix_space=False,
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
]
filtered_sequence = [x for x in filtered_sequence if x is not None]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_special_tokens_mask(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence_0 = "Encode this."
# Testing single inputs
encoded_sequence = tokenizer.encode(table, sequence_0, add_special_tokens=False)
encoded_sequence_dict = tokenizer.encode_plus(
table, sequence_0, add_special_tokens=True, return_special_tokens_mask=True
)
encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))
filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]]
self.assertEqual(encoded_sequence, filtered_sequence)
def test_save_and_load_tokenizer(self):
# safety check on the model_max_length default value so we are sure the test works
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
self.assertNotEqual(tokenizer.model_max_length, 42)
# Now let's start the test
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
# Isolate this from the other tests because we save additional tokens/etc
table = self.get_table(tokenizer, length=0)
tmpdirname = tempfile.mkdtemp()
sample_text = " He is very happy, UNwant\u00E9d,running"
before_tokens = tokenizer.encode(table, sample_text, add_special_tokens=False)
before_vocab = tokenizer.get_vocab()
tokenizer.save_pretrained(tmpdirname)
after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
after_tokens = after_tokenizer.encode(table, sample_text, add_special_tokens=False)
after_vocab = after_tokenizer.get_vocab()
self.assertListEqual(before_tokens, after_tokens)
self.assertDictEqual(before_vocab, after_vocab)
shutil.rmtree(tmpdirname)
@unittest.skip("Not implemented")
def test_right_and_left_truncation(self):
pass
def test_right_and_left_padding(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
table = self.get_table(tokenizer, length=0)
sequence = "Sequence"
padding_size = 10
# check correct behaviour if no pad_token_id exists, and add one if needed
self._check_no_pad_token_padding(tokenizer, sequence)
padding_idx = tokenizer.pad_token_id
# RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "right"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert encoded_sequence + [padding_idx] * padding_size == padded_sequence
# LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True
tokenizer.padding_side = "left"
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
padded_sequence = tokenizer.encode(
table, sequence, max_length=sequence_length + padding_size, padding="max_length"
)
padded_sequence_length = len(padded_sequence)
assert sequence_length + padding_size == padded_sequence_length
assert [padding_idx] * padding_size + encoded_sequence == padded_sequence
# RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding'
encoded_sequence = tokenizer.encode(table, sequence)
sequence_length = len(encoded_sequence)
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence, padding=True)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(table, sequence, padding="longest")
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
tokenizer.padding_side = "right"
padded_sequence_right = tokenizer.encode(table, sequence)
padded_sequence_right_length = len(padded_sequence_right)
assert sequence_length == padded_sequence_right_length
assert encoded_sequence == padded_sequence_right
tokenizer.padding_side = "left"
padded_sequence_left = tokenizer.encode(table, sequence, padding=False)
padded_sequence_left_length = len(padded_sequence_left)
assert sequence_length == padded_sequence_left_length
assert encoded_sequence == padded_sequence_left
def test_token_type_ids(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
empty_table = self.get_table(tokenizer, length=0)
seq_0 = "Test this method."
# We want sequence 0 and sequence 1 to be tagged with
# token type ids 0 and 1 respectively
# (regardless of whether the model uses token type ids).
# We rely on this assumption in the QA pipeline among other places.
output = tokenizer(empty_table, seq_0, return_token_type_ids=True)
# Assert that the token type IDs have the same length as the input IDs
self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"]))
# Assert that each token type ID has 7 values
self.assertTrue(all(len(token_type_ids) == 7 for token_type_ids in output["token_type_ids"]))
# Do the same test as modeling common.
self.assertIn(0, output["token_type_ids"][0])
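# Editor's sketch (not part of the original suite): TAPAS produces seven token type ids per
# token. The names below follow the TAPAS documentation; treat the exact ordering as an
# assumption kept only for illustration.
def _example_describe_token_types(self, token_type_ids_entry):
tapas_token_type_names = [
"segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations",
]
# Map one 7-value entry (as produced in test_token_type_ids above) to named fields for debugging.
return dict(zip(tapas_token_type_names, token_type_ids_entry))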
@require_torch
@slow
@require_scatter
def test_torch_encode_plus_sent_to_model(self):
import torch
from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING:
return
config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
config = config_class()
if config.is_encoder_decoder or config.pad_token_id is None:
return
model = model_class(config)
# Make sure the model contains at least the full vocabulary size in its embedding matrix
is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight")
assert (
(model.get_input_embeddings().weight.shape[0] >= len(tokenizer))
if is_using_common_embeddings
else True
)
# Build sequence
first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
sequence = " ".join(first_ten_tokens)
table = self.get_table(tokenizer, length=0)
encoded_sequence = tokenizer.encode_plus(table, sequence, return_tensors="pt")
batch_encoded_sequence = tokenizer.batch_encode_plus(table, [sequence, sequence], return_tensors="pt")
# This should not fail
with torch.no_grad(): # saves some time
model(**encoded_sequence)
model(**batch_encoded_sequence)
@unittest.skip("TAPAS doesn't handle pre-tokenized inputs.")
def test_pretokenized_inputs(self):
pass
@slow
def test_tapas_truncation_integration_test(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = [
"When was Brad Pitt born?",
"Which actor appeared in the least number of movies?",
"What is the average number of movies?",
]
table = pd.DataFrame.from_dict(data)
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", model_max_length=512)
for i in range(12):
# Even the table headers do not fit within this max length, so an error is raised
with self.assertRaises(ValueError):
tokenizer.encode(table=table, query=queries[0], max_length=i, truncation="drop_rows_to_fit")
for i in range(12, 512):
new_encoded_inputs = tokenizer.encode(
table=table, query=queries[0], max_length=i, truncation="drop_rows_to_fit"
)
# Ensure that the number of input IDs does not exceed the specified max length.
self.assertLessEqual(len(new_encoded_inputs), i)
tokenizer.model_max_length = 20
new_encoded_inputs = tokenizer.encode(table=table, query=queries[0], truncation=True)
dropped_encoded_inputs = tokenizer.encode(table=table, query=queries[0], truncation="drop_rows_to_fit")
# Ensure that the input IDs are still truncated when no max_length is specified
self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
self.assertLessEqual(len(new_encoded_inputs), 20)
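# Editor's sketch (not part of the original suite): a minimal, hedged illustration of the
# `truncation="drop_rows_to_fit"` strategy exercised above: table rows are dropped until the
# encoding fits within `max_length`. Table contents are illustrative assumptions.
def _example_drop_rows_to_fit(self):
example_table = pd.DataFrame.from_dict({"City": ["Paris", "Tokyo", "Lagos"], "Population": ["2M", "14M", "15M"]})
example_tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
encoded = example_tokenizer.encode(table=example_table, query="Which city is largest?", max_length=64, truncation="drop_rows_to_fit")
# The encoding never exceeds the requested budget, at the cost of dropping table rows.
self.assertLessEqual(len(encoded), 64)
return encoded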
@slow
def test_min_max_question_length(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = "When was Brad Pitt born?"
table = pd.DataFrame.from_dict(data)
# test max_question_length
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", max_question_length=2)
encoding = tokenizer(table=table, queries=queries)
# query should not be tokenized as it's longer than the specified max_question_length
expected_results = [101, 102]
self.assertListEqual(encoding.input_ids[:2], expected_results)
# test min_question_length
tokenizer = TapasTokenizer.from_pretrained("lysandre/tapas-temporary-repo", min_question_length=30)
encoding = tokenizer(table=table, queries=queries)
# query should not be tokenized as it's shorter than the specified min_question_length
expected_results = [101, 102]
self.assertListEqual(encoding.input_ids[:2], expected_results)
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
sequences = [
"Testing batch encode plus",
"Testing batch encode plus with different sequence lengths",
"Testing batch encode plus with different sequence lengths correctly pads",
]
table = self.get_table(tokenizer, length=0)
# A tensor cannot be built from sequences that are not all the same size
self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors="pt")
self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors="tf")
if tokenizer.pad_token_id is None:
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
table,
sequences,
padding=True,
return_tensors="pt",
)
self.assertRaises(
ValueError,
tokenizer.batch_encode_plus,
table,
sequences,
padding="longest",
return_tensors="tf",
)
else:
pytorch_tensor = tokenizer.batch_encode_plus(table, sequences, padding=True, return_tensors="pt")
tensorflow_tensor = tokenizer.batch_encode_plus(
table, sequences, padding="longest", return_tensors="tf"
)
encoded_sequences = tokenizer.batch_encode_plus(table, sequences, padding=True)
for key in encoded_sequences.keys():
pytorch_value = pytorch_tensor[key].tolist()
tensorflow_value = tensorflow_tensor[key].numpy().tolist()
encoded_value = encoded_sequences[key]
self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
@slow
def test_tapas_integration_test(self):
data = {
"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
"Age": ["56", "45", "59"],
"Number of movies": ["87", "53", "69"],
"Date of birth": ["18 december 1963", "11 november 1974", "6 may 1961"],
}
queries = [
"When was Brad Pitt born?",
"Which actor appeared in the least number of movies?",
"What is the average number of movies?",
]
table = pd.DataFrame.from_dict(data)
tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq", model_max_length=512)
# fmt: off
expected_results = {'input_ids':[101,2043,2001,8226,15091,2141,1029,102,5889,2287,2193,1997,5691,3058,1997,4182,8226,15091,5179,6584,2324,2285,3699,14720,4487,6178,9488,3429,5187,2340,2281,3326,2577,18856,7828,3240,5354,6353,1020,2089,3777],'attention_mask':[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1],'token_type_ids':[[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[0,0,0,0,0,0,0],[1,1,0,0,0,0,0],[1,2,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,3,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,4,0,0,0,0,0],[1,1,1,0,0,0,0],[1,1,1,0,0,0,0],[1,2,1,0,2,2,0],[1,3,1,0,3,1,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,4,1,0,2,2,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,1,2,0,0,0,0],[1,2,2,0,1,3,0],[1,3,2,0,1,3,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,4,2,0,3,1,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,1,3,0,0,0,0],[1,2,3,0,3,1,0],[1,3,3,0,2,2,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0],[1,4,3,0,1,3,0]]} # noqa: E231
# fmt: on
new_encoded_inputs = tokenizer.encode_plus(table=table, query=queries[0])
self.assertDictEqual(dict(new_encoded_inputs), expected_results)
@slow
def test_full_tokenizer(self):
data = [
["Pos", "No", "Driver", "Team", "Laps", "Time/Retired", "Grid", "Points"],
["1", "32", "Patrick Carpentier", "Team Player's", "87", "1:48:11.023", "1", "22"],
["2", "1", "Bruno Junqueira", "Newman/Haas Racing", "87", "+0.8 secs", "2", "17"],
["3", "3", "Paul Tracy", "Team Player's", "87", "+28.6 secs", "3", "14"],
["4", "9", "Michel Jourdain, Jr.", "Team Rahal", "87", "+40.8 secs", "13", "12"],
["5", "34", "Mario Haberfeld", "Mi-Jack Conquest Racing", "87", "+42.1 secs", "6", "10"],
["6", "20", "Oriol Servia", "Patrick Racing", "87", "+1:00.2", "10", "8"],
["7", "51", "Adrian Fernandez", "Fernandez Racing", "87", "+1:01.4", "5", "6"],
["8", "12", "Jimmy Vasser", "American Spirit Team Johansson", "87", "+1:01.8", "8", "5"],
["9", "7", "Tiago Monteiro", "Fittipaldi-Dingman Racing", "86", "+ 1 Lap", "15", "4"],
["10", "55", "Mario Dominguez", "Herdez Competition", "86", "+ 1 Lap", "11", "3"],
["11", "27", "Bryan Herta", "PK Racing", "86", "+ 1 Lap", "12", "2"],
["12", "31", "Ryan Hunter-Reay", "American Spirit Team Johansson", "86", "+ 1 Lap", "17", "1"],
["13", "19", "Joel Camathias", "Dale Coyne Racing", "85", "+ 2 Laps", "18", "0"],
["14", "33", "Alex Tagliani", "Rocketsports Racing", "85", "+ 2 Laps", "14", "0"],
["15", "4", "Roberto Moreno", "Herdez Competition", "85", "+ 2 Laps", "9", "0"],
["16", "11", "Geoff Boss", "Dale Coyne Racing", "83", "Mechanical", "19", "0"],
["17", "2", "Sebastien Bourdais", "Newman/Haas Racing", "77", "Mechanical", "4", "0"],
["18", "15", "Darren Manning", "Walker Racing", "12", "Mechanical", "7", "0"],
["19", "5", "Rodolfo Lavin", "Walker Racing", "10", "Mechanical", "16", "0"],
]
query = "what were the drivers names?"
table = pd.DataFrame.from_records(data[1:], columns=data[0])
tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq", model_max_length=512)
model_inputs = tokenizer(table, query, padding="max_length")
input_ids = model_inputs["input_ids"]
token_type_ids = np.array(model_inputs["token_type_ids"])
segment_ids = token_type_ids[:, 0]
column_ids = token_type_ids[:, 1]
row_ids = token_type_ids[:, 2]
# fmt: off
expected_results = {'input_ids':[101,2054,2020,1996,6853,3415,1029,102,13433,2015,2053,4062,2136,10876,2051,1013,3394,8370,2685,1015,3590,4754,29267,4765,3771,2136,2447,1005,1055,6584,1015,1024,4466,1024,2340,1012,6185,2509,1015,2570,1016,1015,10391,12022,4226,7895,10625,1013,22996,3868,6584,1009,1014,1012,1022,10819,2015,1016,2459,1017,1017,2703,10555,2136,2447,1005,1055,6584,1009,2654,1012,1020,10819,2015,1017,2403,1018,1023,8709,8183,3126,21351,2078,1010,3781,1012,2136,10958,8865,6584,1009,2871,1012,1022,10819,2015,2410,2260,1019,4090,7986,5292,5677,8151,2771,1011,2990,9187,3868,6584,1009,4413,1012,1015,10819,2015,1020,2184,1020,2322,2030,20282,14262,9035,4754,3868,6584,1009,1015,1024,4002,1012,1016,2184,1022,1021,4868,7918,12023,12023,3868,6584,1009,1015,1024,5890,1012,1018,1019,1020,1022,2260,5261,12436,18116,2137,4382,2136,26447,6584,1009,1015,1024,5890,1012,1022,1022,1019,1023,1021,27339,3995,10125,9711,4906,25101,24657,1011,22033,2386,3868,6564,1009,1015,5001,2321,1018,2184,4583,7986,14383,2075,29488,14906,9351,2971,6564,1009,1015,5001,2340,1017,2340,2676,8527,2014,2696,1052,2243,3868,6564,1009,1015,5001,2260,1016,2260,2861,4575,4477,1011,2128,4710,2137,4382,2136,26447,6564,1009,1015,5001,2459,1015,2410,2539,8963,11503,25457,3022,8512,2522,9654,3868,5594,1009,1016,10876,2324,1014,2403,3943,4074,6415,15204,2072,12496,25378,3868,5594,1009,1016,10876,2403,1014,2321,1018,10704,17921,14906,9351,2971,5594,1009,1016,10876,1023,1014,2385,2340,14915,5795,8512,2522,9654,3868,6640,6228,2539,1014,2459,1016,28328,8945,3126,21351,2015,10625,1013,22996,3868,6255,6228,1018,1014,2324,2321,12270,11956,5232,3868,2260,6228,1021,1014,2539,1019,8473,28027,2080,2474,6371,5232,3868,2184,6228,2385,1014,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'column_ids':[0,0,0,0,0,0,0,0,1,1,2,3,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,3,3,3,3,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,4,4,4,4,5,6,6,6,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,4,5,6,6,6,7,8,1,2,3,3,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,5,6,6,6,7,8,1,2,3,3,4,4,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,4,4,5,6,7,8,1,2,3,3,4,4,5,6,7,8,1,2,3,3,3,3,3,4,4,5,6,7,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'row_ids':[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,
11,11,11,11,11,11,11,11,11,11,11,11,11,11,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,15,15,15,15,15,15,15,15,15,15,15,15,15,16,16,16,16,16,16,16,16,16,16,16,16,17,17,17,17,17,17,17,17,17,17,17,17,17,17,17,18,18,18,18,18,18,18,18,18,18,19,19,19,19,19,19,19,19,19,19,19,19,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],'segment_ids':[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]} # noqa: E231
# fmt: on
self.assertListEqual(input_ids, expected_results["input_ids"])
self.assertListEqual(segment_ids.tolist(), expected_results["segment_ids"])
self.assertListEqual(column_ids.tolist(), expected_results["column_ids"])
self.assertListEqual(row_ids.tolist(), expected_results["row_ids"])
@unittest.skip("Skip this test while all models are still to be uploaded.")
def test_pretrained_model_lists(self):
pass
@unittest.skip("Doesn't support another framework than PyTorch")
def test_np_encode_plus_sent_to_model(self):
pass
| ["torch.no_grad"] | 1.0 | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 |
1.0 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch BigBird model. """
import unittest
from transformers import BigBirdConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.models.big_bird.tokenization_big_bird import BigBirdTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
BigBirdForCausalLM,
BigBirdForMaskedLM,
BigBirdForMultipleChoice,
BigBirdForPreTraining,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
BigBirdModel,
)
from transformers.models.big_bird.modeling_big_bird import BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST
class BigBirdModelTester:
def __init__(
self,
parent,
batch_size=7,
seq_length=128,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu_new",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=256,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
attention_type="block_sparse",
use_bias=True,
rescale_embeddings=False,
block_size=8,
num_rand_blocks=3,
position_embedding_type="absolute",
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.attention_type = attention_type
self.use_bias = use_bias
self.rescale_embeddings = rescale_embeddings
self.block_size = block_size
self.num_rand_blocks = num_rand_blocks
self.position_embedding_type = position_embedding_type
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return BigBirdConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_encoder_decoder=False,
initializer_range=self.initializer_range,
attention_type=self.attention_type,
use_bias=self.use_bias,
rescale_embeddings=self.rescale_embeddings,
block_size=self.block_size,
num_random_blocks=self.num_rand_blocks,
position_embedding_type=self.position_embedding_type,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_pretraining(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForPreTraining(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
labels=token_labels,
next_sentence_label=sequence_labels,
)
self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, config.num_labels))
def create_and_check_model_as_decoder(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.add_cross_attention = True
model = BigBirdModel(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
encoder_hidden_states=encoder_hidden_states,
)
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_causal_lm(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
model = BigBirdForCausalLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
):
config.is_decoder = True
config.add_cross_attention = True
model = BigBirdForCausalLM(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(
input_ids,
attention_mask=input_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=True,
)
past_key_values = outputs.past_key_values
# create hypothetical next tokens and extend them to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append the new tokens to next_input_ids and next_attention_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(
next_input_ids,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_hidden_states=True,
)["hidden_states"][0]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_hidden_states=True,
)["hidden_states"][0]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
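# Editor's sketch (not part of the original suite): a minimal, hedged illustration of the
# caching pattern verified above. A first pass with `use_cache=True` returns `past_key_values`;
# later passes feed only the new tokens plus that cache and should reproduce the hidden states
# of a full re-run. `model` is assumed to be a causal decoder such as BigBirdForCausalLM.
def _example_incremental_decoding(self, model, input_ids, next_tokens, attention_mask):
# Full-context pass with caching enabled.
first_pass = model(input_ids, attention_mask=attention_mask, use_cache=True)
cache = first_pass.past_key_values
# Incremental pass: feed only the new tokens together with the cached key/value states.
extended_attention_mask = torch.cat([attention_mask, torch.ones_like(next_tokens)], dim=-1)
return model(next_tokens, attention_mask=extended_attention_mask, past_key_values=cache)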
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = BigBirdForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BigBirdForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = BigBirdForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = BigBirdForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def create_and_check_for_auto_padding(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = BigBirdModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_change_to_full_attn(
self,
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
):
model = BigBirdModel(config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
# the config should not be changed, even though the model internally falls back to full attention for short sequences
self.parent.assertTrue(model.config.attention_type == "block_sparse")
@require_torch
class BigBirdModelTest(ModelTesterMixin, unittest.TestCase):
# head masking & pruning are currently not supported for BigBird
test_head_masking = False
test_pruning = False
# TorchScript export should be possible, but takes prohibitively long to test.
# Also, TorchScript is not an important feature to have at this stage.
test_torchscript = False
all_model_classes = (
(
BigBirdModel,
BigBirdForPreTraining,
BigBirdForMaskedLM,
BigBirdForCausalLM,
BigBirdForMultipleChoice,
BigBirdForQuestionAnswering,
BigBirdForSequenceClassification,
BigBirdForTokenClassification,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (BigBirdForCausalLM,) if is_torch_available() else ()
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["next_sentence_label"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = BigBirdModelTester(self)
self.config_tester = ConfigTester(self, config_class=BigBirdConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
# This regression test was failing with PyTorch < 1.3
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def test_retain_grad_hidden_states_attentions(self):
# bigbird cannot keep gradients in attentions when `attention_type=block_sparse`
if self.model_tester.attention_type == "original_full":
super().test_retain_grad_hidden_states_attentions()
@slow
def test_model_from_pretrained(self):
for model_name in BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BigBirdForPreTraining.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_model_various_attn_type(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
for type in ["original_full", "block_sparse"]:
config_and_inputs[0].attention_type = type
self.model_tester.create_and_check_model(*config_and_inputs)
def test_fast_integration(self):
# fmt: off
input_ids = torch.tensor(
[[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73],[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 12, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 28, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 18, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
input_ids = input_ids % self.model_tester.vocab_size
input_ids[1] = input_ids[1] - 1
attention_mask = torch.ones((input_ids.shape), device=torch_device)
attention_mask[:, :-10] = 0
config, _, _, _, _, _, _ = self.model_tester.prepare_config_and_inputs()
torch.manual_seed(0)
model = BigBirdModel(config).eval().to(torch_device)
with torch.no_grad():
hidden_states = model(input_ids, attention_mask=attention_mask).last_hidden_state
self.assertTrue(
torch.allclose(
hidden_states[0, 0, :5],
torch.tensor([1.4825, 0.0774, 0.8226, -0.2962, -0.9593], device=torch_device),
atol=1e-3,
)
)
def test_auto_padding(self):
self.model_tester.seq_length = 241
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_auto_padding(*config_and_inputs)
def test_for_change_to_full_attn(self):
self.model_tester.seq_length = 9
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_change_to_full_attn(*config_and_inputs)
# overwrite from common in order to skip the check on `attentions`
# also use `5e-5` to avoid flaky test failure
def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch version,
# an effort was made to return `attention_probs` (yet to be verified).
if name.startswith("outputs.attentions"):
return
else:
super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
@require_torch
@slow
class BigBirdModelIntegrationTest(unittest.TestCase):
# this can be set to True once block_sparse attention_probs are computed accurately
test_attention_probs = False
def _get_dummy_input_ids(self):
# fmt: off
ids = torch.tensor(
[[6, 117, 33, 36, 70, 22, 63, 31, 71, 72, 88, 58, 109, 49, 48, 116, 92, 6, 19, 95, 118, 100, 80, 111, 93, 2, 31, 84, 26, 5, 6, 82, 46, 96, 109, 4, 39, 19, 109, 13, 92, 31, 36, 90, 111, 18, 75, 6, 56, 74, 16, 42, 56, 92, 69, 108, 127, 81, 82, 41, 106, 19, 44, 24, 82, 121, 120, 65, 36, 26, 72, 13, 36, 98, 43, 64, 8, 53, 100, 92, 51, 122, 66, 17, 61, 50, 104, 127, 26, 35, 94, 23, 110, 71, 80, 67, 109, 111, 44, 19, 51, 41, 86, 71, 76, 44, 18, 68, 44, 77, 107, 81, 98, 126, 100, 2, 49, 98, 84, 39, 23, 98, 52, 46, 10, 82, 121, 73]], # noqa: E231
dtype=torch.long,
device=torch_device,
)
# fmt: on
return ids
def test_inference_block_sparse_pretraining(self):
model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="block_sparse")
model.to(torch_device)
input_ids = torch.tensor([[20920, 232, 328, 1437] * 1024], dtype=torch.long, device=torch_device)
outputs = model(input_ids)
prediction_logits = outputs.prediction_logits
seq_relationship_logits = outputs.seq_relationship_logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 4096, 50358)))
self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))
expected_prediction_logits_slice = torch.tensor(
[
[-0.2420, -0.6048, -0.0614, 7.8422],
[-0.0596, -0.0104, -1.8408, 9.3352],
[1.0588, 0.7999, 5.0770, 8.7555],
[-0.1385, -1.7199, -1.7613, 6.1094],
],
device=torch_device,
)
self.assertTrue(
torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)
)
expected_seq_relationship_logits = torch.tensor([[58.8196, 56.3629]], device=torch_device)
self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))
def test_inference_full_pretraining(self):
model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base", attention_type="original_full")
model.to(torch_device)
input_ids = torch.tensor([[20920, 232, 328, 1437] * 512], dtype=torch.long, device=torch_device)
outputs = model(input_ids)
prediction_logits = outputs.prediction_logits
seq_relationship_logits = outputs.seq_relationship_logits
self.assertEqual(prediction_logits.shape, torch.Size((1, 512 * 4, 50358)))
self.assertEqual(seq_relationship_logits.shape, torch.Size((1, 2)))
expected_prediction_logits_slice = torch.tensor(
[
[0.1499, -1.1217, 0.1990, 8.4499],
[-2.7757, -3.0687, -4.8577, 7.5156],
[1.5446, 0.1982, 4.3016, 10.4281],
[-1.3705, -4.0130, -3.9629, 5.1526],
],
device=torch_device,
)
self.assertTrue(
torch.allclose(prediction_logits[0, 128:132, 128:132], expected_prediction_logits_slice, atol=1e-4)
)
expected_seq_relationship_logits = torch.tensor([[41.4503, 41.2406]], device=torch_device)
self.assertTrue(torch.allclose(seq_relationship_logits, expected_seq_relationship_logits, atol=1e-4))
def test_block_sparse_attention_probs(self):
"""
Checks that the attention probabilities returned by block-sparse attention are consistent with the
returned context layer (attention_probs @ value_layer should reproduce context_layer).
"""
if not self.test_attention_probs:
return
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
config = model.config
input_ids = self._get_dummy_input_ids()
hidden_states = model.embeddings(input_ids)
batch_size, seqlen, _ = hidden_states.size()
attn_mask = torch.ones(batch_size, seqlen, device=torch_device, dtype=torch.float)
to_seq_length = from_seq_length = seqlen
from_block_size = to_block_size = config.block_size
blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(
attn_mask, config.block_size
)
from_blocked_mask = to_blocked_mask = blocked_mask
for i in range(config.num_hidden_layers):
pointer = model.encoder.layer[i].attention.self
query_layer = pointer.transpose_for_scores(pointer.query(hidden_states))
key_layer = pointer.transpose_for_scores(pointer.key(hidden_states))
value_layer = pointer.transpose_for_scores(pointer.value(hidden_states))
context_layer, attention_probs = pointer.bigbird_block_sparse_attention(
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
pointer.num_attention_heads,
pointer.num_random_blocks,
pointer.attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_length,
to_seq_length,
seed=pointer.seed,
plan_from_length=None,
plan_num_rand_blocks=None,
output_attentions=True,
)
context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
cl = torch.einsum("bhqk,bhkd->bhqd", attention_probs, value_layer)
cl = cl.view(context_layer.size())
self.assertTrue(torch.allclose(context_layer, cl, atol=0.001))
def test_block_sparse_context_layer(self):
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
config = model.config
input_ids = self._get_dummy_input_ids()
dummy_hidden_states = model.embeddings(input_ids)
attn_mask = torch.ones_like(input_ids, device=torch_device)
blocked_mask, band_mask, from_mask, to_mask = model.create_masks_for_block_sparse_attn(
attn_mask, config.block_size
)
targeted_cl = torch.tensor(
[
[0.1874, 1.5260, 0.2335, -0.0473, -0.0961, 1.8384, -0.0141, 0.1250, 0.0085, -0.0048],
[-0.0554, 0.0728, 0.1683, -0.1332, 0.1741, 0.1337, -0.2380, -0.1849, -0.0390, -0.0259],
[-0.0419, 0.0767, 0.1591, -0.1399, 0.1789, 0.1257, -0.2406, -0.1772, -0.0261, -0.0079],
[0.1860, 1.5172, 0.2326, -0.0473, -0.0953, 1.8291, -0.0147, 0.1245, 0.0082, -0.0046],
[0.1879, 1.5296, 0.2335, -0.0471, -0.0975, 1.8433, -0.0136, 0.1260, 0.0086, -0.0054],
[0.1854, 1.5147, 0.2334, -0.0480, -0.0956, 1.8250, -0.0149, 0.1222, 0.0082, -0.0060],
[0.1859, 1.5184, 0.2334, -0.0474, -0.0955, 1.8297, -0.0143, 0.1234, 0.0079, -0.0054],
[0.1885, 1.5336, 0.2335, -0.0467, -0.0979, 1.8481, -0.0130, 0.1269, 0.0085, -0.0049],
[0.1881, 1.5305, 0.2335, -0.0471, -0.0976, 1.8445, -0.0135, 0.1262, 0.0086, -0.0053],
[0.1852, 1.5148, 0.2333, -0.0480, -0.0949, 1.8254, -0.0151, 0.1225, 0.0079, -0.0055],
[0.1877, 1.5292, 0.2335, -0.0470, -0.0972, 1.8431, -0.0135, 0.1259, 0.0084, -0.0052],
[0.1874, 1.5261, 0.2334, -0.0472, -0.0968, 1.8393, -0.0140, 0.1251, 0.0084, -0.0052],
[0.1853, 1.5151, 0.2331, -0.0478, -0.0948, 1.8256, -0.0154, 0.1228, 0.0086, -0.0052],
[0.1867, 1.5233, 0.2334, -0.0475, -0.0965, 1.8361, -0.0139, 0.1247, 0.0084, -0.0054],
],
device=torch_device,
)
context_layer = model.encoder.layer[0].attention.self(
dummy_hidden_states,
band_mask=band_mask,
from_mask=from_mask,
to_mask=to_mask,
from_blocked_mask=blocked_mask,
to_blocked_mask=blocked_mask,
)
context_layer = context_layer[0]
self.assertEqual(context_layer.shape, torch.Size((1, 128, 768)))
self.assertTrue(torch.allclose(context_layer[0, 64:78, 300:310], targeted_cl, atol=0.0001))
def test_tokenizer_inference(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
text = [
"Transformer-based models are unable to process long sequences due to their self-attention operation,"
" which scales quadratically with the sequence length. To address this limitation, we introduce the"
" Longformer with an attention mechanism that scales linearly with sequence length, making it easy to"
" process documents of thousands of tokens or longer. Longformer’s attention mechanism is a drop-in"
" replacement for the standard self-attention and combines a local windowed attention with a task"
" motivated global attention. Following prior work on long-sequence transformers, we evaluate Longformer"
" on character-level language modeling and achieve state-of-the-art results on text8 and enwik8. In"
" contrast to most prior work, we also pretrain Longformer and finetune it on a variety of downstream"
" tasks. Our pretrained Longformer consistently outperforms RoBERTa on long document tasks and sets new"
" state-of-the-art results on WikiHop and TriviaQA."
]
inputs = tokenizer(text)
for k in inputs:
inputs[k] = torch.tensor(inputs[k], device=torch_device, dtype=torch.long)
prediction = model(**inputs)
prediction = prediction[0]
self.assertEqual(prediction.shape, torch.Size((1, 199, 768)))
expected_prediction = torch.tensor(
[
[-0.0213, -0.2213, -0.0061, 0.0687],
[0.0977, 0.1858, 0.2374, 0.0483],
[0.2112, -0.2524, 0.5793, 0.0967],
[0.2473, -0.5070, -0.0630, 0.2174],
[0.2885, 0.1139, 0.6071, 0.2991],
[0.2328, -0.2373, 0.3648, 0.1058],
[0.2517, -0.0689, 0.0555, 0.0880],
[0.1021, -0.1495, -0.0635, 0.1891],
[0.0591, -0.0722, 0.2243, 0.2432],
[-0.2059, -0.2679, 0.3225, 0.6183],
[0.2280, -0.2618, 0.1693, 0.0103],
[0.0183, -0.1375, 0.2284, -0.1707],
],
device=torch_device,
)
self.assertTrue(torch.allclose(prediction[0, 52:64, 320:324], expected_prediction, atol=1e-4))
def test_inference_question_answering(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-base-trivia-itc")
model = BigBirdForQuestionAnswering.from_pretrained(
"google/bigbird-base-trivia-itc", attention_type="block_sparse", block_size=16, num_random_blocks=3
)
model.to(torch_device)
context = (
"The BigBird model was proposed in Big Bird: Transformers for Longer Sequences by Zaheer, Manzil and"
" Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago"
" and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird, is a"
" sparse-attention based transformer which extends Transformer based models, such as BERT to much longer"
" sequences. In addition to sparse attention, BigBird also applies global attention as well as random"
" attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and"
" random attention approximates full attention, while being computationally much more efficient for longer"
" sequences. As a consequence of the capability to handle longer context, BigBird has shown improved"
" performance on various long document NLP tasks, such as question answering and summarization, compared"
" to BERT or RoBERTa."
)
question = [
"Which is better for longer sequences- BigBird or BERT?",
"What is the benefit of using BigBird over BERT?",
]
inputs = tokenizer(
question,
[context, context],
padding=True,
return_tensors="pt",
add_special_tokens=True,
max_length=256,
truncation=True,
)
inputs = {k: v.to(torch_device) for k, v in inputs.items()}
start_logits, end_logits = model(**inputs).to_tuple()
# fmt: off
target_start_logits = torch.tensor(
[[-8.9304, -10.3849, -14.4997, -9.6497, -13.9469, -7.8134, -8.9687, -13.3585, -9.7987, -13.8869, -9.2632, -8.9294, -13.6721, -7.3198, -9.5434, -11.2641, -14.3245, -9.5705, -12.7367, -8.6168, -11.083, -13.7573, -8.1151, -14.5329, -7.6876, -15.706, -12.8558, -9.1135, 8.0909, -3.1925, -11.5812, -9.4822], [-11.5595, -14.5591, -10.2978, -14.8445, -10.2092, -11.1899, -13.8356, -10.5644, -14.7706, -9.9841, -11.0052, -14.1862, -8.8173, -11.1098, -12.4686, -15.0531, -11.0196, -13.6614, -10.0236, -11.8151, -14.8744, -9.5123, -15.1605, -8.6472, -15.4184, -8.898, -9.6328, -7.0258, -11.3365, -14.4065, -10.2587, -8.9103]], # noqa: E231
device=torch_device,
)
target_end_logits = torch.tensor(
[[-12.4131, -8.5959, -15.7163, -11.1524, -15.9913, -12.2038, -7.8902, -16.0296, -12.164, -16.5017, -13.3332, -6.9488, -15.7756, -13.8506, -11.0779, -9.2893, -15.0426, -10.1963, -17.3292, -12.2945, -11.5337, -16.4514, -9.1564, -17.5001, -9.1562, -16.2971, -13.3199, -7.5724, -5.1175, 7.2168, -10.3804, -11.9873], [-10.8654, -14.9967, -11.4144, -16.9189, -14.2673, -9.7068, -15.0182, -12.8846, -16.8716, -13.665, -10.3113, -15.1436, -14.9069, -13.3364, -11.2339, -16.0118, -11.8331, -17.0613, -13.8852, -12.4163, -16.8978, -10.7772, -17.2324, -10.6979, -16.9811, -10.3427, -9.497, -13.7104, -11.1107, -13.2936, -13.855, -14.1264]], # noqa: E231
device=torch_device,
)
# fmt: on
self.assertTrue(torch.allclose(start_logits[:, 64:96], target_start_logits, atol=1e-4))
self.assertTrue(torch.allclose(end_logits[:, 64:96], target_end_logits, atol=1e-4))
input_ids = inputs["input_ids"].tolist()
answer = [
input_ids[i][torch.argmax(start_logits, dim=-1)[i] : torch.argmax(end_logits, dim=-1)[i] + 1]
for i in range(len(input_ids))
]
answer = tokenizer.batch_decode(answer)
self.assertTrue(answer == ["BigBird", "global attention"])
def test_fill_mask(self):
tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base")
model.to(torch_device)
input_ids = tokenizer("The goal of life is [MASK] .", return_tensors="pt").input_ids.to(torch_device)
logits = model(input_ids).logits
# [MASK] is token at 6th position
pred_token = tokenizer.decode(torch.argmax(logits[0, 6:7], axis=-1))
self.assertEqual(pred_token, "happiness")
def test_auto_padding(self):
model = BigBirdModel.from_pretrained(
"google/bigbird-roberta-base", attention_type="block_sparse", num_random_blocks=3, block_size=16
)
model.to(torch_device)
model.eval()
input_ids = torch.tensor([200 * [10] + 40 * [2] + [1]], device=torch_device, dtype=torch.long)
output = model(input_ids).to_tuple()[0]
# fmt: off
target = torch.tensor(
[[-0.045136, -0.068013, 0.12246, -0.01356, 0.018386, 0.025333, -0.0044439, -0.0030996, -0.064031, 0.0006439], [-0.045018, -0.067638, 0.12317, -0.013998, 0.019216, 0.025695, -0.0043705, -0.0031895, -0.063153, 0.00088899], [-0.045042, -0.067305, 0.1234, -0.014512, 0.020057, 0.026084, -0.004615, -0.0031728, -0.062442, 0.0010263], [-0.044589, -0.067655, 0.12416, -0.014287, 0.019416, 0.026065, -0.0050958, -0.002702, -0.063158, 0.0004827], [-0.044627, -0.067535, 0.1239, -0.014319, 0.019491, 0.026213, -0.0059482, -0.0025906, -0.063116, 0.00014669], [-0.044899, -0.067704, 0.12337, -0.014231, 0.019256, 0.026345, -0.0065565, -0.0022938, -0.063433, -0.00011409], [-0.045599, -0.067764, 0.12235, -0.014151, 0.019206, 0.026417, -0.0068965, -0.0024494, -0.063313, -4.4499e-06], [-0.045557, -0.068372, 0.12199, -0.013747, 0.017962, 0.026103, -0.0070607, -0.0023552, -0.06447, -0.00048756], [-0.045334, -0.068913, 0.1217, -0.013566, 0.01693, 0.025745, -0.006311, -0.0024903, -0.065575, -0.0006719], [-0.045171, -0.068726, 0.12164, -0.013688, 0.017139, 0.025629, -0.005213, -0.0029412, -0.065237, -0.00020669], [-0.044411, -0.069267, 0.12206, -0.013645, 0.016212, 0.025589, -0.0044121, -0.002972, -0.066277, -0.00067963], [-0.043487, -0.069792, 0.1232, -0.013663, 0.015303, 0.02613, -0.0036294, -0.0030616, -0.067483, -0.0012642], [-0.042622, -0.069287, 0.12469, -0.013936, 0.016204, 0.026474, -0.0040534, -0.0027365, -0.066994, -0.0014148], [-0.041879, -0.070031, 0.12593, -0.014047, 0.015082, 0.027751, -0.0040683, -0.0027189, -0.068985, -0.0027146]], # noqa: E231
device=torch_device,
)
# fmt: on
self.assertEqual(output.shape, torch.Size((1, 241, 768)))
self.assertTrue(torch.allclose(output[0, 64:78, 300:310], target, atol=0.0001))
| [
"torch.Size",
"torch.zeros",
"torch.cat",
"torch.einsum",
"torch.no_grad",
"torch.ones",
"torch.manual_seed",
"torch.tensor",
"torch.ones_like",
"torch.allclose",
"torch.argmax"
] | 1.0 | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 |
1.0 | # coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Longformer model."""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN, gelu
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_longformer import LongformerConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096"
_CONFIG_FOR_DOC = "LongformerConfig"
_TOKENIZER_FOR_DOC = "LongformerTokenizer"
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"allenai/longformer-base-4096",
"allenai/longformer-large-4096",
"allenai/longformer-large-4096-finetuned-triviaqa",
"allenai/longformer-base-4096-extra.pos.embd.only",
"allenai/longformer-large-4096-extra.pos.embd.only",
# See all Longformer models at https://huggingface.co/models?filter=longformer
]
@dataclass
class LongformerBaseModelOutput(ModelOutput):
"""
Base class for Longformer's outputs, with potential hidden states, local and global attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
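# Illustrative sketch (not part of the original module): index arithmetic for the local
# `attentions` layout documented above. Pure integer math, no model required; the helper
# name below is made up for demonstration.
def _example_self_attention_index(num_global_tokens, attention_window):  # hypothetical helper
    # the attention weight of a token to itself sits at index x + attention_window / 2,
    # where x is the number of tokens with global attention
    return num_global_tokens + attention_window // 2
# e.g. _example_self_attention_index(1, 512) == 257 for one global token and the 512-token
# attention window used by `allenai/longformer-base-4096`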
@dataclass
class LongformerBaseModelOutputWithPooling(ModelOutput):
"""
Base class for Longformer's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
last_hidden_state: torch.FloatTensor
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerMaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering Longformer models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerMultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice Longformer models.
Args:
loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):
*num_choices* is the second dimension of the input tensors. (see *input_ids* above).
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LongformerTokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
global_attentions: Optional[Tuple[torch.FloatTensor]] = None
def _get_question_end_index(input_ids, sep_token_id):
"""
Computes the index of the first occurrence of `sep_token_id`.
"""
sep_token_indices = (input_ids == sep_token_id).nonzero()
batch_size = input_ids.shape[0]
assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions"
assert sep_token_indices.shape[0] == 3 * batch_size, (
f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You"
" might also consider to set `global_attention_mask` manually in the forward function to avoid this error."
)
return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1]
def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
"""
Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is
True` else after `sep_token_id`.
"""
question_end_index = _get_question_end_index(input_ids, sep_token_id)
question_end_index = question_end_index.unsqueeze(dim=1) # size: batch_size x 1
# bool attention mask with True in locations of global attention
attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device)
if before_sep_token is True:
attention_mask = (attention_mask.expand_as(input_ids) < question_end_index).to(torch.uint8)
else:
        # the last token is a separator and should not be counted, and there are two separator tokens in the middle
attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.uint8) * (
attention_mask.expand_as(input_ids) < input_ids.shape[-1]
).to(torch.uint8)
return attention_mask
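# Illustrative sketch (not part of the original module): how the two helpers above turn a
# question-answering input into a global attention mask. The token ids below are made up for
# demonstration only; `2` plays the role of `sep_token_id`.
def _example_compute_global_attention_mask():  # hypothetical helper, safe to remove
    # one sample with exactly three separator tokens, as `_get_question_end_index` requires
    input_ids = torch.tensor([[101, 7, 8, 2, 2, 11, 12, 13, 2, 102]])
    mask = _compute_global_attention_mask(input_ids, sep_token_id=2)
    # every token before the first separator (the question) gets global attention:
    # tensor([[1, 1, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=torch.uint8)
    return mask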
def create_position_ids_from_input_ids(input_ids, padding_idx):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
        input_ids: torch.Tensor
        padding_idx: int
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
return incremental_indices.long() + padding_idx
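# Illustrative sketch (not part of the original module): what the helper above produces for a
# padded batch. The ids are made up; `1` plays the role of `padding_idx`.
def _example_create_position_ids():  # hypothetical helper, safe to remove
    input_ids = torch.tensor([[5, 6, 7, 1, 1]])
    position_ids = create_position_ids_from_input_ids(input_ids, padding_idx=1)
    # non-padding tokens count up from padding_idx + 1, padding positions stay at padding_idx:
    # tensor([[2, 3, 4, 1, 1]])
    return position_ids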
class LongformerEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
            inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
class LongformerSelfAttention(nn.Module):
def __init__(self, config, layer_id):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_heads = config.num_attention_heads
self.head_dim = int(config.hidden_size / config.num_attention_heads)
self.embed_dim = config.hidden_size
self.query = nn.Linear(config.hidden_size, self.embed_dim)
self.key = nn.Linear(config.hidden_size, self.embed_dim)
self.value = nn.Linear(config.hidden_size, self.embed_dim)
# separate projection layers for tokens with global attention
self.query_global = nn.Linear(config.hidden_size, self.embed_dim)
self.key_global = nn.Linear(config.hidden_size, self.embed_dim)
self.value_global = nn.Linear(config.hidden_size, self.embed_dim)
self.dropout = config.attention_probs_dropout_prob
self.layer_id = layer_id
attention_window = config.attention_window[self.layer_id]
assert (
attention_window % 2 == 0
), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
assert (
attention_window > 0
), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
self.one_sided_attn_window_size = attention_window // 2
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
):
"""
        [`LongformerSelfAttention`] expects *len(hidden_states)* to be a multiple of *attention_window*. Padding to
*attention_window* happens in [`LongformerModel.forward`] to avoid redoing the padding on each layer.
The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to:
- -10000: no attention
- 0: local attention
- +10000: global attention
"""
hidden_states = hidden_states.transpose(0, 1)
# project hidden states
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
seq_len, batch_size, embed_dim = hidden_states.size()
assert (
embed_dim == self.embed_dim
), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"
# normalize query
query_vectors /= math.sqrt(self.head_dim)
query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
attn_scores = self._sliding_chunks_query_key_matmul(
query_vectors, key_vectors, self.one_sided_attn_window_size
)
# values to pad for attention probs
remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]
# cast to fp32/fp16 then replace 1's with -inf
float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
remove_from_windowed_attention_mask, torch.finfo(query_vectors.dtype).min
)
        # diagonal mask with zeros everywhere and -inf in place of padding
diagonal_mask = self._sliding_chunks_query_key_matmul(
float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size
)
# pad local attention probs
attn_scores += diagonal_mask
assert list(attn_scores.size()) == [
batch_size,
seq_len,
self.num_heads,
self.one_sided_attn_window_size * 2 + 1,
], (
f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads},"
f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"
)
        # compute local attention probs from global attention keys and concat over window dim
if is_global_attn:
            # compute global attn indices required throughout the forward fn
(
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
) = self._get_global_attn_indices(is_index_global_attn)
# calculate global attn probs from global key
global_key_attn_scores = self._concat_with_global_key_attn_probs(
query_vectors=query_vectors,
key_vectors=key_vectors,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
)
# concat to local_attn_probs
# (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1)
# free memory
del global_key_attn_scores
attn_probs = nn.functional.softmax(
attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs
# softmax sometimes inserts NaN if all positions are masked, replace them with 0
attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)
attn_probs = attn_probs.type_as(attn_scores)
# free memory
del attn_scores
# apply dropout
attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# compute local attention output with global attention value and add
if is_global_attn:
# compute sum of global and local attn
attn_output = self._compute_attn_output_with_global_indices(
value_vectors=value_vectors,
attn_probs=attn_probs,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
)
else:
# compute local attn only
attn_output = self._sliding_chunks_matmul_attn_probs_value(
attn_probs, value_vectors, self.one_sided_attn_window_size
)
assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size"
attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()
# compute value for global attention and overwrite to attention output
# TODO: remove the redundant computation
if is_global_attn:
global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden(
hidden_states=hidden_states,
max_num_global_attn_indices=max_num_global_attn_indices,
layer_head_mask=layer_head_mask,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
is_index_masked=is_index_masked,
)
# get only non zero global attn output
nonzero_global_attn_output = global_attn_output[
is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
]
# overwrite values with global attention
attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
len(is_local_index_global_attn_nonzero[0]), -1
)
# The attention weights for tokens with global attention are
# just filler values, they were never used to compute the output.
# Fill with 0 now, the correct values are in 'global_attn_probs'.
attn_probs[is_index_global_attn_nonzero] = 0
outputs = (attn_output.transpose(0, 1),)
if output_attentions:
outputs += (attn_probs,)
return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs
@staticmethod
def _pad_and_transpose_last_two_dims(hidden_states_padded, padding):
"""pads rows and then flips rows and columns"""
hidden_states_padded = nn.functional.pad(
hidden_states_padded, padding
) # padding value is not important because it will be overwritten
hidden_states_padded = hidden_states_padded.view(
*hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2)
)
return hidden_states_padded
@staticmethod
def _pad_and_diagonalize(chunked_hidden_states):
"""
shift every row 1 step right, converting columns into diagonals.
Example:
```python
chunked_hidden_states: [
0.4983,
2.6918,
-0.0071,
1.0492,
-1.8348,
0.7672,
0.2986,
0.0285,
-0.7584,
0.4206,
-0.0405,
0.1599,
2.0514,
-1.1600,
0.5372,
0.2629,
]
window_overlap = num_rows = 4
```
        (pad & diagonalize) =>
        [ 0.4983,  2.6918, -0.0071,  1.0492,  0.0000,  0.0000,  0.0000
          0.0000, -1.8348,  0.7672,  0.2986,  0.0285,  0.0000,  0.0000
          0.0000,  0.0000, -0.7584,  0.4206, -0.0405,  0.1599,  0.0000
          0.0000,  0.0000,  0.0000,  2.0514, -1.1600,  0.5372,  0.2629 ]
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
chunked_hidden_states = nn.functional.pad(
chunked_hidden_states, (0, window_overlap + 1)
) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, -1
) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap
chunked_hidden_states = chunked_hidden_states[
:, :, :-window_overlap
] # total_num_heads x num_chunks x window_overlap*window_overlap
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
return chunked_hidden_states
@staticmethod
def _chunk(hidden_states, window_overlap):
"""convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
# non-overlapping chunks of size = 2w
hidden_states = hidden_states.view(
hidden_states.size(0),
hidden_states.size(1) // (window_overlap * 2),
window_overlap * 2,
hidden_states.size(2),
)
# use `as_strided` to make the chunks overlap with an overlap size = window_overlap
chunk_size = list(hidden_states.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = list(hidden_states.stride())
chunk_stride[1] = chunk_stride[1] // 2
return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)
@staticmethod
def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor:
beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0])
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8
ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :]
ending_mask = ending_mask.expand(ending_input.size())
ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8
def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
"""
        Matrix multiplication of query and key tensors using a sliding window attention pattern. This
implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an
overlap of size window_overlap
"""
batch_size, seq_len, num_heads, head_dim = query.size()
assert (
seq_len % (window_overlap * 2) == 0
), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
assert query.size() == key.size()
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
query = self._chunk(query, window_overlap)
key = self._chunk(key, window_overlap)
# matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
diagonal_chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (query, key)) # multiply
# convert diagonals into columns
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
diagonal_chunked_attention_scores, padding=(0, 0, 0, 1)
)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
# window_overlap previous words). The following column is attention score from each word to itself, then
# followed by window_overlap columns for the upper triangle.
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
(batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
)
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, :, :window_overlap, : window_overlap + 1
]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, -1, window_overlap:, : window_overlap + 1
]
# - copying the lower triangle
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
:, :, -(window_overlap + 1) : -1, window_overlap + 1 :
]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
:, 0, : window_overlap - 1, 1 - window_overlap :
]
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
def _sliding_chunks_matmul_attn_probs_value(
self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
):
"""
Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
same shape as `attn_probs`
"""
batch_size, seq_len, num_heads, head_dim = value.size()
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size()[:3] == value.size()[:3]
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
)
# group batch_size and num_heads dimensions into one
value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
        # pad seq_len with window_overlap values at the beginning and at the end of the sequence
padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1)
# chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = padded_value.stride()
chunked_value_stride = (
chunked_value_stride[0],
window_overlap * chunked_value_stride[1],
chunked_value_stride[1],
chunked_value_stride[2],
)
chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
@staticmethod
def _get_global_attn_indices(is_index_global_attn):
"""compute global attn indices required throughout forward pass"""
# helper variable
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# max number of global attn indices in batch
max_num_global_attn_indices = num_global_attn_indices.max()
# indices of global attn
is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True)
# helper variable
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# location of the non-padding values within global attention indices
is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True)
# location of the padding values within global attention indices
is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
def _concat_with_global_key_attn_probs(
self,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
):
batch_size = key_vectors.shape[0]
# create only global key vectors
key_vectors_only_global = key_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
attn_probs_from_global_key[
is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1]
] = torch.finfo(attn_probs_from_global_key.dtype).min
return attn_probs_from_global_key
def _compute_attn_output_with_global_indices(
self,
value_vectors,
attn_probs,
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
):
batch_size = attn_probs.shape[0]
# cut local attn probs to global only
attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
# get value vectors for global only
value_vectors_only_global = value_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
# use `matmul` because `einsum` crashes sometimes with fp16
# attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
# compute attn output only global
attn_output_only_global = torch.matmul(
attn_probs_only_global.transpose(1, 2).clone(), value_vectors_only_global.transpose(1, 2).clone()
).transpose(1, 2)
# reshape attn probs
attn_probs_without_global = attn_probs.narrow(
-1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
).contiguous()
# compute attn output with global
attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output_from_hidden(
self,
hidden_states,
max_num_global_attn_indices,
layer_head_mask,
is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
is_index_masked,
):
seq_len, batch_size = hidden_states.shape[:2]
# prepare global hidden states
global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
is_index_global_attn_nonzero[::-1]
]
# global key, query, value
global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
global_key_vectors = self.key_global(hidden_states)
global_value_vectors = self.value_global(hidden_states)
# normalize
global_query_vectors_only_global /= math.sqrt(self.head_dim)
# reshape
global_query_vectors_only_global = (
global_query_vectors_only_global.contiguous()
.view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
global_key_vectors = (
global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
        )  # (batch_size * self.num_heads, seq_len, head_dim)
global_value_vectors = (
global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
        )  # (batch_size * self.num_heads, seq_len, head_dim)
# compute attn scores
global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))
assert list(global_attn_scores.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
seq_len,
], (
"global_attn_scores have the wrong size. Size should be"
f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is"
f" {global_attn_scores.size()}."
)
global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_scores[
is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :
] = torch.finfo(global_attn_scores.dtype).min
global_attn_scores = global_attn_scores.masked_fill(
is_index_masked[:, None, None, :],
torch.finfo(global_attn_scores.dtype).min,
)
global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
# compute global attn probs
global_attn_probs_float = nn.functional.softmax(
global_attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
# apply layer head masking
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view(
batch_size, self.num_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs_float = global_attn_probs_float.view(
batch_size * self.num_heads, max_num_global_attn_indices, seq_len
)
global_attn_probs = nn.functional.dropout(
global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
)
# global attn output
global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
assert list(global_attn_output.size()) == [
batch_size * self.num_heads,
max_num_global_attn_indices,
self.head_dim,
], (
"global_attn_output tensor has the wrong size. Size should be"
f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is"
f" {global_attn_output.size()}."
)
global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_output = global_attn_output.view(
batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
)
return global_attn_output, global_attn_probs
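# Illustrative sketch (not part of the original module): the overlapping-chunk trick that
# `LongformerSelfAttention._chunk` builds with `as_strided` for the sliding-window attention
# above. The tiny tensor is made up; any (batch, seq_len, hidden) input whose seq_len is a
# multiple of 2 * window_overlap works the same way.
def _example_overlapping_chunks():  # hypothetical helper, safe to remove
    window_overlap = 2
    hidden_states = torch.arange(8.0).view(1, 8, 1)  # (batch=1, seq_len=8, hidden=1)
    chunks = LongformerSelfAttention._chunk(hidden_states, window_overlap)
    # chunks has shape (1, 3, 4, 1): windows start at positions 0, 2 and 4, each of length
    # 2 * window_overlap, so consecutive chunks share exactly window_overlap positions
    return chunks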
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LongformerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LongformerAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.self = LongformerSelfAttention(config, layer_id)
self.output = LongformerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
attn_output = self.output(self_outputs[0], hidden_states)
outputs = (attn_output,) + self_outputs[1:]
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LongformerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class LongformerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LongformerLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.attention = LongformerAttention(config, layer_id)
self.intermediate = LongformerIntermediate(config)
self.output = LongformerOutput(config)
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
def forward(
self,
hidden_states,
attention_mask=None,
layer_head_mask=None,
is_index_masked=None,
is_index_global_attn=None,
is_global_attn=None,
output_attentions=False,
):
self_attn_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
attn_output = self_attn_outputs[0]
outputs = self_attn_outputs[1:]
layer_output = apply_chunking_to_forward(
self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output
)
outputs = (layer_output,) + outputs
return outputs
def ff_chunk(self, attn_output):
intermediate_output = self.intermediate(attn_output)
layer_output = self.output(intermediate_output, attn_output)
return layer_output
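# Hypothetical helper, not part of the original module: a minimal sketch of what
# `apply_chunking_to_forward` (used in `LongformerLayer.forward` above) does. The sequence
# dimension is processed in chunks to save memory, and the result matches the unchunked
# feed-forward because the projection is applied position-wise.
def _sketch_feed_forward_chunking():
    import torch

    linear = torch.nn.Linear(8, 8)
    hidden = torch.randn(2, 12, 8)  # (batch, seq_len, hidden); 12 is divisible by the chunk size

    def ff(x):
        return linear(x)

    full = ff(hidden)
    chunked = apply_chunking_to_forward(ff, 4, 1, hidden)  # chunk_size=4 along dim 1
    assert torch.allclose(full, chunked)
    return chunked.shape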
class LongformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
padding_len=0,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None # All local attentions.
all_global_attentions = () if (output_attentions and is_global_attn) else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layer)
), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}."
for idx, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, is_global_attn, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
head_mask[idx] if head_mask is not None else None,
is_index_masked,
is_index_global_attn,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask=attention_mask,
layer_head_mask=head_mask[idx] if head_mask is not None else None,
is_index_masked=is_index_masked,
is_index_global_attn=is_index_global_attn,
is_global_attn=is_global_attn,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
# bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),)
if is_global_attn:
# bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn
all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# undo padding
if padding_len > 0:
# unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1)
hidden_states = hidden_states[:, :-padding_len]
if output_hidden_states:
all_hidden_states = tuple([state[:, :-padding_len] for state in all_hidden_states])
if output_attentions:
all_attentions = tuple([state[:, :, :-padding_len, :] for state in all_attentions])
if not return_dict:
return tuple(
v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
)
return LongformerBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
global_attentions=all_global_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class LongformerPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer
class LongformerLMHead(nn.Module):
"""Longformer Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
class LongformerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LongformerConfig
base_model_prefix = "longformer"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LongformerEncoder):
module.gradient_checkpointing = value
LONGFORMER_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
and behavior.
Parameters:
config ([`LongformerConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
LONGFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`LongformerTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
global_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to decide the attention given to each token, local attention or global attention. Tokens with global
            attention attend to all other tokens, and all other tokens attend to them. This is important for
            task-specific finetuning because it makes the model more flexible at representing the task. For example,
            for classification, the <s> token should be given global attention. For QA, all question tokens should also
            have global attention. Please refer to the [Longformer paper](https://arxiv.org/abs/2004.05150) for more
            details. Mask values selected in `[0, 1]`:
            - 0 for local attention (a sliding window attention),
            - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
            A minimal sketch of constructing these masks follows this docstring.
        head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Longformer Model outputting raw hidden-states without any specific head on top.",
LONGFORMER_START_DOCSTRING,
)
class LongformerModel(LongformerPreTrainedModel):
"""
    This class copies code from [`RobertaModel`] and overrides standard self-attention with Longformer self-attention
    to provide the ability to process long sequences following the self-attention approach described in [Longformer:
    the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan.
    Longformer self-attention combines local (sliding window) and global attention to extend to long documents
    without the O(n^2) increase in memory and compute.
The self-attention module `LongformerSelfAttention` implemented here supports the combination of local and global
attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated
    attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. A future
    release will add support for autoregressive attention, but support for dilated attention requires a custom CUDA
    kernel to be memory- and compute-efficient.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
if isinstance(config.attention_window, int):
assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
assert config.attention_window > 0, "`config.attention_window` has to be positive"
config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer
else:
assert len(config.attention_window) == config.num_hidden_layers, (
"`len(config.attention_window)` should equal `config.num_hidden_layers`. "
f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
)
self.embeddings = LongformerEmbeddings(config)
self.encoder = LongformerEncoder(config)
self.pooler = LongformerPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def _pad_to_window_size(
self,
input_ids: torch.Tensor,
attention_mask: torch.Tensor,
token_type_ids: torch.Tensor,
position_ids: torch.Tensor,
inputs_embeds: torch.Tensor,
pad_token_id: int,
):
"""A helper function to pad tokens and mask to work with implementation of Longformer self-attention."""
# padding
attention_window = (
self.config.attention_window
if isinstance(self.config.attention_window, int)
else max(self.config.attention_window)
)
assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
batch_size, seq_len = input_shape[:2]
padding_len = (attention_window - seq_len % attention_window) % attention_window
if padding_len > 0:
logger.info(
f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
f"`config.attention_window`: {attention_window}"
)
if input_ids is not None:
input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id)
if position_ids is not None:
# pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id)
if inputs_embeds is not None:
input_ids_padding = inputs_embeds.new_full(
(batch_size, padding_len),
self.config.pad_token_id,
dtype=torch.long,
)
inputs_embeds_padding = self.embeddings(input_ids_padding)
inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
attention_mask = nn.functional.pad(
attention_mask, (0, padding_len), value=False
) # no attention on the padding tokens
token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0
return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor):
# longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
# (global_attention_mask + 1) => 1 for local attention, 2 for global attention
        # => final attention_mask => 0 for no attention, 1 for local attention, 2 for global attention
if attention_mask is not None:
attention_mask = attention_mask * (global_attention_mask + 1)
else:
# simply use `global_attention_mask` as `attention_mask`
# if no `attention_mask` is given
attention_mask = global_attention_mask + 1
return attention_mask
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, LongformerBaseModelOutputWithPooling]:
r"""
Returns:
Examples:
```python
>>> import torch
>>> from transformers import LongformerModel, LongformerTokenizer
>>> model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
>>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
>>> SAMPLE_TEXT = " ".join(["Hello world! "] * 1000) # long input document
>>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1
>>> attention_mask = torch.ones(
... input_ids.shape, dtype=torch.long, device=input_ids.device
... ) # initialize to local attention
>>> global_attention_mask = torch.zeros(
... input_ids.shape, dtype=torch.long, device=input_ids.device
        ... )  # initialize global attention to be deactivated for all tokens
>>> global_attention_mask[
... :,
... [
... 1,
... 4,
... 21,
... ],
... ] = 1 # Set global attention to random tokens for the sake of this example
>>> # Usually, set global attention based on the task. For example,
>>> # classification: the <s> token
>>> # QA: question tokens
>>> # LM: potentially on the beginning of sentences and paragraphs
>>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)
>>> sequence_output = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# merge `global_attention_mask` and `attention_mask`
if global_attention_mask is not None:
attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask)
padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
pad_token_id=self.config.pad_token_id,
)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[
:, 0, 0, :
]
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
padding_len=padding_len,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return LongformerBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
global_attentions=encoder_outputs.global_attentions,
)
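# Hypothetical helper, not part of the original module: a minimal sketch of the two mask
# transformations `LongformerModel.forward` applies above. `_merge_to_attention_mask` folds the
# user-facing masks into one tensor with values 0 (padding), 1 (local) and 2 (global), and
# `_pad_to_window_size` rounds the sequence length up to a multiple of the attention window.
def _sketch_mask_merge_and_padding(attention_window: int = 8):
    import torch

    attention_mask = torch.tensor([[1, 1, 1, 1, 1, 0]])         # last token is padding
    global_attention_mask = torch.tensor([[1, 0, 0, 0, 0, 0]])  # global attention on token 0

    merged = attention_mask * (global_attention_mask + 1)       # -> [[2, 1, 1, 1, 1, 0]]

    seq_len = merged.size(1)
    padding_len = (attention_window - seq_len % attention_window) % attention_window  # here: 2
    return merged, padding_len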
@add_start_docstrings("""Longformer Model with a `language modeling` head on top.""", LONGFORMER_START_DOCSTRING)
class LongformerForMaskedLM(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.lm_head = LongformerLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, LongformerMaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Used to hide legacy arguments that have been deprecated.
Returns:
Mask filling example:
```python
>>> from transformers import LongformerTokenizer, LongformerForMaskedLM
>>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
>>> model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096")
```
Let's try a very long input.
```python
>>> TXT = (
... "My friends are <mask> but they eat too many carbs."
... + " That's why I decide not to eat with them." * 300
... )
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
['healthy', 'skinny', 'thin', 'good', 'vegetarian']
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return LongformerMaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
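# Hypothetical helper, not part of the original module: a minimal sketch of the `-100` labels
# convention described in the masked-LM docstring above. `CrossEntropyLoss` ignores positions
# labelled -100, so only the masked tokens contribute to the loss; all sizes are made up.
def _sketch_masked_lm_loss(vocab_size: int = 10):
    import torch

    prediction_scores = torch.randn(1, 4, vocab_size)  # (batch, seq_len, vocab)
    labels = torch.tensor([[-100, 3, -100, 7]])        # only positions 1 and 3 are scored

    loss_fct = torch.nn.CrossEntropyLoss()             # ignore_index defaults to -100
    return loss_fct(prediction_scores.view(-1, vocab_size), labels.view(-1))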
@add_start_docstrings(
"""
Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForSequenceClassification(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.classifier = LongformerClassificationHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint="jpelhaw/longformer-base-plagiarism-detection",
output_type=LongformerSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
expected_output="'ORIGINAL'",
expected_loss=5.44,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, LongformerSequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if global_attention_mask is None:
logger.info("Initializing global attention on CLS token...")
global_attention_mask = torch.zeros_like(input_ids)
# global attention on cls token
global_attention_mask[:, 0] = 1
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
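# Hypothetical helper, not part of the original module: a minimal sketch of how the
# `problem_type` branch above picks a loss when the config leaves it unset, e.g.
# _sketch_infer_problem_type(3, torch.tensor([0, 2])) -> "single_label_classification".
def _sketch_infer_problem_type(num_labels, labels):
    import torch

    if num_labels == 1:
        return "regression"                    # MSELoss on a single output
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"   # CrossEntropyLoss over num_labels classes
    return "multi_label_classification"        # BCEWithLogitsLoss on float targets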
class LongformerClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, hidden_states, **kwargs):
hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
output = self.out_proj(hidden_states)
return output
@add_start_docstrings(
"""
Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD /
TriviaQA (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForQuestionAnswering(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, LongformerQuestionAnsweringModelOutput]:
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
Returns:
Examples:
```python
>>> from transformers import LongformerTokenizer, LongformerForQuestionAnswering
>>> import torch
>>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
>>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> encoding = tokenizer(question, text, return_tensors="pt")
>>> input_ids = encoding["input_ids"]
>>> # default is local attention everywhere
>>> # the forward method will automatically set global attention on question tokens
>>> attention_mask = encoding["attention_mask"]
>>> outputs = model(input_ids, attention_mask=attention_mask)
>>> start_logits = outputs.start_logits
>>> end_logits = outputs.end_logits
>>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist())
>>> answer_tokens = all_tokens[torch.argmax(start_logits) : torch.argmax(end_logits) + 1]
>>> answer = tokenizer.decode(
... tokenizer.convert_tokens_to_ids(answer_tokens)
... ) # remove space prepending space token
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if global_attention_mask is None:
if input_ids is None:
logger.warning(
"It is not possible to automatically generate the `global_attention_mask` because input_ids is"
" None. Please make sure that it is correctly set."
)
else:
# set global attention on question tokens automatically
global_attention_mask = _compute_global_attention_mask(input_ids, self.config.sep_token_id)
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the split adds a dimension; remove it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return LongformerQuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
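# Hypothetical helper, not part of the original module: a minimal sketch of the position
# clamping used in the QA loss above. Gold positions that fall outside the model input are
# clamped to `ignored_index` (== sequence length) and dropped via `ignore_index`.
def _sketch_qa_position_clamping(seq_len: int = 8):
    import torch

    start_logits = torch.randn(2, seq_len)
    start_positions = torch.tensor([3, 500])   # the second gold start lies outside the input

    ignored_index = start_logits.size(1)
    start_positions = start_positions.clamp(0, ignored_index)  # -> [3, 8]

    loss_fct = torch.nn.CrossEntropyLoss(ignore_index=ignored_index)
    return loss_fct(start_logits, start_positions)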
@add_start_docstrings(
"""
Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForTokenClassification(LongformerPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.longformer = LongformerModel(config, add_pooling_layer=False)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint="brad1141/Longformer-finetuned-norm",
output_type=LongformerTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
expected_output=(
"['Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence', 'Evidence',"
" 'Evidence', 'Evidence', 'Evidence', 'Evidence']"
),
expected_loss=0.63,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, LongformerTokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.longformer(
input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
head_mask=head_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
@add_start_docstrings(
"""
Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class LongformerForMultipleChoice(LongformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.longformer = LongformerModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(
LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=LongformerMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
global_attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, LongformerMultipleChoiceModelOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# set global attention on question tokens
if global_attention_mask is None and input_ids is not None:
logger.info("Initializing global attention on multiple choice...")
# put global attention on all tokens after `config.sep_token_id`
global_attention_mask = torch.stack(
[
_compute_global_attention_mask(input_ids[:, i], self.config.sep_token_id, before_sep_token=False)
for i in range(num_choices)
],
dim=1,
)
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_global_attention_mask = (
global_attention_mask.view(-1, global_attention_mask.size(-1))
if global_attention_mask is not None
else None
)
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.longformer(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
global_attention_mask=flat_global_attention_mask,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return LongformerMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
| [
"torch.nn.Linear",
"torch.cat",
"torch.einsum",
"torch.finfo",
"torch.bmm",
"torch.ones",
"torch.masked_fill",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.pad",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.zeros_like",
"torch.zeros",
"torch.nn.Tanh",
"torch.nn.functional.dropout",
"torch.nn.functional.softmax",
"torch.cumsum",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.tanh",
"torch.nn.Embedding"
] | 1.0 | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 |
1.0 | # coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTorch XLM model.
"""
import itertools
import math
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import numpy as np
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import gelu
from ...modeling_outputs import (
BaseModelOutput,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary, SQuADHead
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_xlm import XLMConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "xlm-mlm-en-2048"
_CONFIG_FOR_DOC = "XLMConfig"
_TOKENIZER_FOR_DOC = "XLMTokenizer"
XLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
"xlm-mlm-en-2048",
"xlm-mlm-ende-1024",
"xlm-mlm-enfr-1024",
"xlm-mlm-enro-1024",
"xlm-mlm-tlm-xnli15-1024",
"xlm-mlm-xnli15-1024",
"xlm-clm-enfr-1024",
"xlm-clm-ende-1024",
"xlm-mlm-17-1280",
"xlm-mlm-100-1280",
# See all XLM models at https://huggingface.co/models?filter=xlm
]
def create_sinusoidal_embeddings(n_pos, dim, out):
position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)])
out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
out.detach_()
out.requires_grad = False
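# Hypothetical helper, not part of the original module: a minimal sketch of filling an
# `nn.Embedding` weight with the sinusoidal table built by `create_sinusoidal_embeddings`
# above. The `torch.no_grad()` guard is this sketch's own precaution for the in-place writes.
def _sketch_sinusoidal_position_embeddings(n_pos: int = 32, dim: int = 16):
    import torch
    from torch import nn

    position_embeddings = nn.Embedding(n_pos, dim)
    with torch.no_grad():
        create_sinusoidal_embeddings(n_pos, dim, out=position_embeddings.weight)
    # even feature indices hold sin(pos / 10000^(2i/dim)), odd indices hold the matching cos
    return position_embeddings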
def get_masks(slen, lengths, causal, padding_mask=None):
"""
Generate hidden states mask, and optionally an attention mask.
"""
alen = torch.arange(slen, dtype=torch.long, device=lengths.device)
if padding_mask is not None:
mask = padding_mask
else:
assert lengths.max().item() <= slen
mask = alen < lengths[:, None]
    # attention mask is the same as mask, or a lower-triangular (causal) attention mask
bs = lengths.size(0)
if causal:
attn_mask = alen[None, None, :].repeat(bs, slen, 1) <= alen[None, :, None]
else:
attn_mask = mask
# sanity check
assert mask.size() == (bs, slen)
assert causal is False or attn_mask.size() == (bs, slen, slen)
return mask, attn_mask
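# Hypothetical helper, not part of the original module: a minimal sketch of what `get_masks`
# above returns for a padded batch, with and without causal masking; the lengths are made up.
def _sketch_get_masks(slen: int = 5):
    import torch

    lengths = torch.tensor([5, 3])                        # true lengths of the two sequences
    mask, attn_mask = get_masks(slen, lengths, causal=False)
    # mask: (2, 5) bool, second row is [True, True, True, False, False]; attn_mask equals mask
    _, causal_attn = get_masks(slen, lengths, causal=True)
    # causal_attn: (2, 5, 5) lower-triangular attention pattern for causal decoding
    return mask.shape, causal_attn.shape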
class MultiHeadAttention(nn.Module):
NEW_ID = itertools.count()
def __init__(self, n_heads, dim, config):
super().__init__()
self.layer_id = next(MultiHeadAttention.NEW_ID)
self.dim = dim
self.n_heads = n_heads
self.dropout = config.attention_dropout
assert self.dim % self.n_heads == 0
self.q_lin = nn.Linear(dim, dim)
self.k_lin = nn.Linear(dim, dim)
self.v_lin = nn.Linear(dim, dim)
self.out_lin = nn.Linear(dim, dim)
self.pruned_heads = set()
def prune_heads(self, heads):
attention_head_size = self.dim // self.n_heads
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, attention_head_size, self.pruned_heads)
# Prune linear layers
self.q_lin = prune_linear_layer(self.q_lin, index)
self.k_lin = prune_linear_layer(self.k_lin, index)
self.v_lin = prune_linear_layer(self.v_lin, index)
self.out_lin = prune_linear_layer(self.out_lin, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.dim = attention_head_size * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
bs, qlen, dim = input.size()
if kv is None:
klen = qlen if cache is None else cache["slen"] + qlen
else:
klen = kv.size(1)
# assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'
n_heads = self.n_heads
dim_per_head = self.dim // n_heads
mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)
def shape(x):
"""projection"""
return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2)
def unshape(x):
"""compute context"""
return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head)
q = shape(self.q_lin(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k_lin(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(input)) # (bs, n_heads, qlen, dim_per_head)
elif cache is None or self.layer_id not in cache:
k = v = kv
k = shape(self.k_lin(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v_lin(v)) # (bs, n_heads, qlen, dim_per_head)
if cache is not None:
if self.layer_id in cache:
if kv is None:
k_, v_ = cache[self.layer_id]
k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = cache[self.layer_id]
cache[self.layer_id] = (k, v)
q = q / math.sqrt(dim_per_head) # (bs, n_heads, qlen, dim_per_head)
scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, qlen, klen)
mask = (mask == 0).view(mask_reshape).expand_as(scores) # (bs, n_heads, qlen, klen)
scores.masked_fill_(mask, torch.finfo(scores.dtype).min) # (bs, n_heads, qlen, klen)
weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
weights = nn.functional.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
outputs = (self.out_lin(context),)
if output_attentions:
outputs = outputs + (weights,)
return outputs
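# Hypothetical helper, not part of the original module: a minimal sketch of driving
# `MultiHeadAttention` above with a `cache` dict, as done for incremental decoding. The config
# stub and all sizes are made up; each call appends this layer's keys/values to the cache.
def _sketch_multi_head_attention_cache():
    import torch
    from types import SimpleNamespace

    config = SimpleNamespace(attention_dropout=0.0)  # only attribute the module reads
    attn = MultiHeadAttention(n_heads=2, dim=8, config=config)

    cache = {"slen": 0}
    out1 = attn(torch.randn(1, 1, 8), mask=torch.ones(1, 1), cache=cache)  # first token
    cache["slen"] += 1                                                     # caller advances slen
    out2 = attn(torch.randn(1, 1, 8), mask=torch.ones(1, 2), cache=cache)  # attends over both
    return out1[0].shape, out2[0].shape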
class TransformerFFN(nn.Module):
def __init__(self, in_dim, dim_hidden, out_dim, config):
super().__init__()
self.dropout = config.dropout
self.lin1 = nn.Linear(in_dim, dim_hidden)
self.lin2 = nn.Linear(dim_hidden, out_dim)
self.act = gelu if config.gelu_activation else nn.functional.relu
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
def forward(self, input):
return apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, input)
def ff_chunk(self, input):
x = self.lin1(input)
x = self.act(x)
x = self.lin2(x)
x = nn.functional.dropout(x, p=self.dropout, training=self.training)
return x
class XLMPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = XLMConfig
load_tf_weights = None
base_model_prefix = "transformer"
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
@property
def dummy_inputs(self):
inputs_list = torch.tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
attns_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
if self.config.use_lang_emb and self.config.n_langs > 1:
langs_list = torch.tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
else:
langs_list = None
return {"input_ids": inputs_list, "attention_mask": attns_list, "langs": langs_list}
def _init_weights(self, module):
"""Initialize the weights."""
if isinstance(module, nn.Embedding):
if self.config is not None and self.config.embed_init_std is not None:
nn.init.normal_(module.weight, mean=0, std=self.config.embed_init_std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, nn.Linear):
if self.config is not None and self.config.init_std is not None:
nn.init.normal_(module.weight, mean=0, std=self.config.init_std)
if module.bias is not None:
nn.init.constant_(module.bias, 0.0)
if isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class XLMForQuestionAnsweringOutput(ModelOutput):
"""
    Base class for outputs of question answering models using a `SQuADHead`.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification
losses.
start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
(beam-search).
end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
Log probabilities for the `is_impossible` label of the answers.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
start_top_log_probs: Optional[torch.FloatTensor] = None
start_top_index: Optional[torch.LongTensor] = None
end_top_log_probs: Optional[torch.FloatTensor] = None
end_top_index: Optional[torch.LongTensor] = None
cls_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
XLM_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
and behavior.
Parameters:
config ([`XLMConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
XLM_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`XLMTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
langs (`torch.LongTensor` of shape `({0})`, *optional*):
A parallel sequence of tokens to be used to indicate the language of each token in the input. Indices are
            language ids which can be obtained from the language names by using two conversion mappings provided in
the configuration of the model (only provided for multilingual models). More precisely, the *language name
to language id* mapping is in `model.config.lang2id` (which is a dictionary string to int) and the
*language id to language name* mapping is in `model.config.id2lang` (dictionary int to string).
See usage examples detailed in the [multilingual documentation](../multilingual).
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
lengths (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Length of each sentence that can be used to avoid performing attention on padding token indices. You can
also use *attention_mask* for the same result (see above), kept here for compatibility. Indices selected in
`[0, ..., input_ids.size(-1)]`.
cache (`Dict[str, torch.FloatTensor]`, *optional*):
Dictionary string to `torch.FloatTensor` that contains precomputed hidden states (key and values in the
attention blocks) as computed by the model (see `cache` output below). Can be used to speed up sequential
decoding.
The dictionary object will be modified in-place during the forward pass to add newly computed
hidden-states.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare XLM Model transformer outputting raw hidden-states without any specific head on top.",
XLM_START_DOCSTRING,
)
class XLMModel(XLMPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config):
super().__init__(config)
# encoder / decoder, output layer
self.is_encoder = config.is_encoder
self.is_decoder = not config.is_encoder
if self.is_decoder:
raise NotImplementedError("Currently XLM can only be used as an encoder")
# self.with_output = with_output
self.causal = config.causal
# dictionary / languages
self.n_langs = config.n_langs
self.use_lang_emb = config.use_lang_emb
self.n_words = config.n_words
self.eos_index = config.eos_index
self.pad_index = config.pad_index
# self.dico = dico
# self.id2lang = config.id2lang
# self.lang2id = config.lang2id
# assert len(self.dico) == self.n_words
# assert len(self.id2lang) == len(self.lang2id) == self.n_langs
# model parameters
self.dim = config.emb_dim # 512 by default
self.hidden_dim = self.dim * 4 # 2048 by default
self.n_heads = config.n_heads # 8 by default
self.n_layers = config.n_layers
self.dropout = config.dropout
self.attention_dropout = config.attention_dropout
assert self.dim % self.n_heads == 0, "transformer dim must be a multiple of n_heads"
# embeddings
self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.dim)
if config.sinusoidal_embeddings:
create_sinusoidal_embeddings(config.max_position_embeddings, self.dim, out=self.position_embeddings.weight)
if config.n_langs > 1 and config.use_lang_emb:
self.lang_embeddings = nn.Embedding(self.n_langs, self.dim)
self.embeddings = nn.Embedding(self.n_words, self.dim, padding_idx=self.pad_index)
self.layer_norm_emb = nn.LayerNorm(self.dim, eps=config.layer_norm_eps)
# transformer layers
self.attentions = nn.ModuleList()
self.layer_norm1 = nn.ModuleList()
self.ffns = nn.ModuleList()
self.layer_norm2 = nn.ModuleList()
# if self.is_decoder:
# self.layer_norm15 = nn.ModuleList()
# self.encoder_attn = nn.ModuleList()
for _ in range(self.n_layers):
self.attentions.append(MultiHeadAttention(self.n_heads, self.dim, config=config))
self.layer_norm1.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
# if self.is_decoder:
# self.layer_norm15.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
# self.encoder_attn.append(MultiHeadAttention(self.n_heads, self.dim, dropout=self.attention_dropout))
self.ffns.append(TransformerFFN(self.dim, self.hidden_dim, self.dim, config=config))
self.layer_norm2.append(nn.LayerNorm(self.dim, eps=config.layer_norm_eps))
if hasattr(config, "pruned_heads"):
pruned_heads = config.pruned_heads.copy().items()
config.pruned_heads = {}
for layer, heads in pruned_heads:
if self.attentions[int(layer)].n_heads == config.n_heads:
self.prune_heads({int(layer): list(map(int, heads))})
# Initialize weights and apply final processing
self.post_init()
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings = new_embeddings
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.attentions[layer].prune_heads(heads)
@add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[Dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None:
bs, slen = input_ids.size()
else:
bs, slen = inputs_embeds.size()[:-1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if lengths is None:
if input_ids is not None:
lengths = (input_ids != self.pad_index).sum(dim=1).long()
else:
lengths = torch.tensor([slen] * bs, device=device)
# mask = input_ids != self.pad_index
# check inputs
assert lengths.size(0) == bs
assert lengths.max().item() <= slen
# input_ids = input_ids.transpose(0, 1) # batch size as dimension 0
# assert (src_enc is None) == (src_len is None)
# if src_enc is not None:
# assert self.is_decoder
# assert src_enc.size(0) == bs
# generate masks
mask, attn_mask = get_masks(slen, lengths, self.causal, padding_mask=attention_mask)
# if self.is_decoder and src_enc is not None:
# src_mask = torch.arange(src_len.max(), dtype=torch.long, device=lengths.device) < src_len[:, None]
# position_ids
if position_ids is None:
position_ids = self.position_ids[:, :slen]
else:
assert position_ids.size() == (bs, slen) # (slen, bs)
# position_ids = position_ids.transpose(0, 1)
# langs
if langs is not None:
assert langs.size() == (bs, slen) # (slen, bs)
# langs = langs.transpose(0, 1)
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layers)
# do not recompute cached elements
if cache is not None and input_ids is not None:
_slen = slen - cache["slen"]
input_ids = input_ids[:, -_slen:]
position_ids = position_ids[:, -_slen:]
if langs is not None:
langs = langs[:, -_slen:]
mask = mask[:, -_slen:]
attn_mask = attn_mask[:, -_slen:]
# embeddings
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
tensor = inputs_embeds + self.position_embeddings(position_ids).expand_as(inputs_embeds)
if langs is not None and self.use_lang_emb and self.n_langs > 1:
tensor = tensor + self.lang_embeddings(langs)
if token_type_ids is not None:
tensor = tensor + self.embeddings(token_type_ids)
tensor = self.layer_norm_emb(tensor)
tensor = nn.functional.dropout(tensor, p=self.dropout, training=self.training)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# transformer layers
hidden_states = () if output_hidden_states else None
attentions = () if output_attentions else None
for i in range(self.n_layers):
if output_hidden_states:
hidden_states = hidden_states + (tensor,)
# self attention
attn_outputs = self.attentions[i](
tensor,
attn_mask,
cache=cache,
head_mask=head_mask[i],
output_attentions=output_attentions,
)
attn = attn_outputs[0]
if output_attentions:
attentions = attentions + (attn_outputs[1],)
attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
tensor = tensor + attn
tensor = self.layer_norm1[i](tensor)
# encoder attention (for decoder only)
# if self.is_decoder and src_enc is not None:
# attn = self.encoder_attn[i](tensor, src_mask, kv=src_enc, cache=cache)
# attn = nn.functional.dropout(attn, p=self.dropout, training=self.training)
# tensor = tensor + attn
# tensor = self.layer_norm15[i](tensor)
# FFN
tensor = tensor + self.ffns[i](tensor)
tensor = self.layer_norm2[i](tensor)
tensor *= mask.unsqueeze(-1).to(tensor.dtype)
# Add last hidden state
if output_hidden_states:
hidden_states = hidden_states + (tensor,)
# update cache length
if cache is not None:
cache["slen"] += tensor.size(1)
# move back sequence length to dimension 0
# tensor = tensor.transpose(0, 1)
if not return_dict:
return tuple(v for v in [tensor, hidden_states, attentions] if v is not None)
return BaseModelOutput(last_hidden_state=tensor, hidden_states=hidden_states, attentions=attentions)
class XLMPredLayer(nn.Module):
"""
Prediction layer (cross_entropy or adaptive_softmax).
"""
def __init__(self, config):
super().__init__()
self.asm = config.asm
self.n_words = config.n_words
self.pad_index = config.pad_index
dim = config.emb_dim
if config.asm is False:
self.proj = nn.Linear(dim, config.n_words, bias=True)
else:
self.proj = nn.AdaptiveLogSoftmaxWithLoss(
in_features=dim,
n_classes=config.n_words,
cutoffs=config.asm_cutoffs,
div_value=config.asm_div_value,
head_bias=True, # default is False
)
def forward(self, x, y=None):
"""Compute the loss, and optionally the scores."""
outputs = ()
if self.asm is False:
scores = self.proj(x)
outputs = (scores,) + outputs
if y is not None:
loss = nn.functional.cross_entropy(scores.view(-1, self.n_words), y.view(-1), reduction="mean")
outputs = (loss,) + outputs
else:
scores = self.proj.log_prob(x)
outputs = (scores,) + outputs
if y is not None:
_, loss = self.proj(x, y)
outputs = (loss,) + outputs
return outputs
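# Hedged illustration (not part of the original file): the adaptive-softmax branch above
# returns `(output, loss)` when called with targets and exposes `log_prob()` for scoring,
# which is how XLMPredLayer.forward uses it. Sizes and cutoffs below are illustrative.
def _adaptive_softmax_demo():
    proj = nn.AdaptiveLogSoftmaxWithLoss(in_features=16, n_classes=100, cutoffs=[10, 50])
    x = torch.randn(4, 16)
    y = torch.randint(0, 100, (4,))
    _, loss = proj(x, y)       # training path: (output, loss)
    scores = proj.log_prob(x)  # scoring path: (4, 100) log-probabilities
    return loss, scores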
@add_start_docstrings(
"""
The XLM Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
""",
XLM_START_DOCSTRING,
)
class XLMWithLMHeadModel(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLMModel(config)
self.pred_layer = XLMPredLayer(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.pred_layer.proj
def set_output_embeddings(self, new_embeddings):
self.pred_layer.proj = new_embeddings
def prepare_inputs_for_generation(self, input_ids, **kwargs):
mask_token_id = self.config.mask_token_id
lang_id = self.config.lang_id
effective_batch_size = input_ids.shape[0]
mask_token = torch.full((effective_batch_size, 1), mask_token_id, dtype=torch.long, device=input_ids.device)
input_ids = torch.cat([input_ids, mask_token], dim=1)
if lang_id is not None:
langs = torch.full_like(input_ids, lang_id)
else:
langs = None
return {"input_ids": input_ids, "langs": langs}
@add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
mask="<special1>",
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[Dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, MaskedLMOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
        outputs = self.pred_layer(output, labels)  # (loss, logits) or (logits,) depending on whether labels are provided.
if not return_dict:
return outputs + transformer_outputs[1:]
return MaskedLMOutput(
loss=outputs[0] if labels is not None else None,
logits=outputs[0] if labels is None else outputs[1],
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
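# Hedged illustration (not part of the original file): XLM generates by appending the mask
# token and, when a language id is configured, a parallel `langs` row, mirroring
# `prepare_inputs_for_generation` above. The token and language ids below are illustrative.
def _prepare_generation_inputs_demo():
    input_ids = torch.tensor([[5, 42, 7]])
    mask_token_id, lang_id = 0, 1
    mask_token = torch.full((input_ids.shape[0], 1), mask_token_id, dtype=torch.long)
    input_ids = torch.cat([input_ids, mask_token], dim=1)  # (1, 4), now ends with the mask token
    langs = torch.full_like(input_ids, lang_id)            # same shape as input_ids
    return {"input_ids": input_ids, "langs": langs}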
@add_start_docstrings(
"""
XLM Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g.
for GLUE tasks.
""",
XLM_START_DOCSTRING,
)
class XLMForSequenceClassification(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.transformer = XLMModel(config)
self.sequence_summary = SequenceSummary(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[Dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
logits = self.sequence_summary(output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
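# Hedged illustration (not part of the original file): the `problem_type` dispatch above
# selects the loss from `num_labels` and the label dtype -- MSE for regression, cross-entropy
# for integer class ids, BCE-with-logits for multi-hot label matrices. Values are illustrative.
def _problem_type_demo():
    logits = torch.randn(4, 3)
    single_label = torch.tensor([0, 2, 1, 2])          # integer class ids -> CrossEntropyLoss
    multi_label = torch.randint(0, 2, (4, 3)).float()  # multi-hot matrix  -> BCEWithLogitsLoss
    return (
        nn.CrossEntropyLoss()(logits, single_label),
        nn.BCEWithLogitsLoss()(logits, multi_label),
    )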
@add_start_docstrings(
"""
    XLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
XLM_START_DOCSTRING,
)
class XLMForQuestionAnsweringSimple(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLMModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[Dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, QuestionAnsweringModelOutput]:
r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = transformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + transformer_outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
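# Hedged illustration (not part of the original file): the span loss above clamps
# out-of-range start/end labels to `sequence_length` and tells CrossEntropyLoss to ignore
# that index, so impossible positions contribute no gradient. Shapes below are illustrative.
def _span_loss_demo():
    logits = torch.randn(2, 8)         # (batch, sequence_length)
    positions = torch.tensor([3, 50])  # the second label falls outside the sequence
    ignored_index = logits.size(1)
    positions = positions.clamp(0, ignored_index)
    loss_fct = nn.CrossEntropyLoss(ignore_index=ignored_index)
    return loss_fct(logits, positions)  # only the first example contributes to the loss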
@add_start_docstrings(
"""
    XLM Model with a beam-search span classification head on top for extractive question-answering tasks like SQuAD
    (linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
XLM_START_DOCSTRING,
)
class XLMForQuestionAnswering(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLMModel(config)
self.qa_outputs = SQuADHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=XLMForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[Dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
start_positions: Optional[torch.Tensor] = None,
end_positions: Optional[torch.Tensor] = None,
is_impossible: Optional[torch.Tensor] = None,
cls_index: Optional[torch.Tensor] = None,
p_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, XLMForQuestionAnsweringOutput]:
r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels whether a question has an answer or no answer (SQuAD 2.0).
cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the classification token to use as input for computing plausibility of the
answer.
p_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means the token should be
            masked, 0.0 means the token is not masked.
Returns:
Example:
```python
>>> from transformers import XLMTokenizer, XLMForQuestionAnswering
>>> import torch
>>> tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
>>> model = XLMForQuestionAnswering.from_pretrained("xlm-mlm-en-2048")
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(
... 0
... ) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs.loss
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
outputs = self.qa_outputs(
output,
start_positions=start_positions,
end_positions=end_positions,
cls_index=cls_index,
is_impossible=is_impossible,
p_mask=p_mask,
return_dict=return_dict,
)
if not return_dict:
return outputs + transformer_outputs[1:]
return XLMForQuestionAnsweringOutput(
loss=outputs.loss,
start_top_log_probs=outputs.start_top_log_probs,
start_top_index=outputs.start_top_index,
end_top_log_probs=outputs.end_top_log_probs,
end_top_index=outputs.end_top_index,
cls_logits=outputs.cls_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""
XLM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
XLM_START_DOCSTRING,
)
class XLMForTokenClassification(XLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLMModel(config)
self.dropout = nn.Dropout(config.dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[Dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TokenClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
XLM Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
XLM_START_DOCSTRING,
)
class XLMForMultipleChoice(XLMPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = XLMModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.num_labels, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(XLM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
langs: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
cache: Optional[Dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, MultipleChoiceModelOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
langs = langs.view(-1, langs.size(-1)) if langs is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
if lengths is not None:
logger.warning(
"The `lengths` parameter cannot be used with the XLM multiple choice models. Please use the "
"attention mask instead."
)
lengths = None
transformer_outputs = self.transformer(
input_ids=input_ids,
attention_mask=attention_mask,
langs=langs,
token_type_ids=token_type_ids,
position_ids=position_ids,
lengths=lengths,
cache=cache,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
output = transformer_outputs[0]
logits = self.sequence_summary(output)
logits = self.logits_proj(logits)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.AdaptiveLogSoftmaxWithLoss",
"torch.nn.ModuleList",
"torch.finfo",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss",
"torch.nn.LayerNorm",
"torch.nn.init.constant_",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.functional.dropout",
"torch.full_like",
"torch.full",
"torch.matmul",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.Embedding"
] | 1.0 | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 |
1.7 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .block import Mish, SeparableConv2d, Block
class WideTipXception(nn.Module):
def __init__(self, num_class):
super(WideTipXception, self).__init__()
self.conv1 = nn.Conv2d(1, 192, 3, 2, 1, bias=True)
self.bn1 = nn.BatchNorm2d(192)
self.mish = Mish()
self.conv2 = nn.Conv2d(192, 512, 3, 1, 1, bias=True)
self.bn2 = nn.BatchNorm2d(512)
self.block1 = Block(512,1024,3,1)
self.block2 = Block(1024,1024,3,1)
self.block3 = Block(1024,1024,3,1)
self.block4 = Block(1024,1024,3,1)
self.block5 = Block(1024,1024,3,1)
self.block6 = Block(1024,2048,2,2)
self.block7 = Block(2048,3072,2,2)
self.conv3 = SeparableConv2d(3072,4096,3,stride=1,padding=0,bias=True)
self.fc = nn.Linear(4096, num_class)
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.mish(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.mish(x)
x = self.conv3(x)
x = self.mish(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = x.view(x.size(0), -1)
result = self.fc(x)
return result
def get_classifiernet(num_class):
model = WideTipXception(num_class)
return model
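# Hedged usage sketch (not part of the original file): build the classifier and push a dummy
# single-channel 32x32 batch through it. The batch size, num_class and the expected output
# shape are illustrative and assume the imported Block/SeparableConv2d behave as in this repo.
if __name__ == "__main__":
    model = get_classifiernet(num_class=10)
    dummy = torch.zeros(2, 1, 32, 32)
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 10])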
| [
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.nn.functional.adaptive_avg_pool2d"
] | 1.7.1 | Luciano233/OCR_Japanease | 055bdd0cc8e4d053dfb471cd642b1616ba0938d1 |
1.1 | import torch
import torch.nn as nn
import spconv
from functools import partial
from .spconv_backbone import post_act_block
from ...utils import common_utils
class SparseBasicBlock(spconv.SparseModule):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, indice_key=None, norm_fn=None):
super(SparseBasicBlock, self).__init__()
self.conv1 = spconv.SubMConv3d(
inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False, indice_key=indice_key
)
self.bn1 = norm_fn(planes)
self.relu = nn.ReLU()
self.conv2 = spconv.SubMConv3d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False, indice_key=indice_key
)
self.bn2 = norm_fn(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x.features
assert x.features.dim() == 2, 'x.features.dim()=%d' % x.features.dim()
out = self.conv1(x)
out.features = self.bn1(out.features)
out.features = self.relu(out.features)
out = self.conv2(out)
out.features = self.bn2(out.features)
if self.downsample is not None:
identity = self.downsample(x)
out.features += identity
out.features = self.relu(out.features)
return out
class UNetV2(nn.Module):
"""
Sparse Convolution based UNet for point-wise feature learning.
    Reference Paper: https://arxiv.org/abs/1907.03670 (Shaoshuai Shi et al.)
From Points to Parts: 3D Object Detection from Point Cloud with Part-aware and Part-aggregation Network
"""
def __init__(self, model_cfg, input_channels, grid_size, voxel_size, point_cloud_range, **kwargs):
super().__init__()
self.model_cfg = model_cfg
self.sparse_shape = grid_size[::-1] + [1, 0, 0]
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
norm_fn = partial(nn.BatchNorm1d, eps=1e-3, momentum=0.01)
self.conv_input = spconv.SparseSequential(
spconv.SubMConv3d(input_channels, 16, 3, padding=1, bias=False, indice_key='subm1'),
norm_fn(16),
nn.ReLU(),
)
block = post_act_block
self.conv1 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1'),
)
self.conv2 = spconv.SparseSequential(
# [1600, 1408, 41] <- [800, 704, 21]
block(16, 32, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv2', conv_type='spconv'),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
block(32, 32, 3, norm_fn=norm_fn, padding=1, indice_key='subm2'),
)
self.conv3 = spconv.SparseSequential(
# [800, 704, 21] <- [400, 352, 11]
block(32, 64, 3, norm_fn=norm_fn, stride=2, padding=1, indice_key='spconv3', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3'),
)
self.conv4 = spconv.SparseSequential(
# [400, 352, 11] <- [200, 176, 5]
block(64, 64, 3, norm_fn=norm_fn, stride=2, padding=(0, 1, 1), indice_key='spconv4', conv_type='spconv'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
block(64, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4'),
)
last_pad = 0
last_pad = self.model_cfg.get('last_pad', last_pad)
self.conv_out = spconv.SparseSequential(
# [200, 150, 5] -> [200, 150, 2]
spconv.SparseConv3d(64, 128, (3, 1, 1), stride=(2, 1, 1), padding=last_pad,
bias=False, indice_key='spconv_down2'),
norm_fn(128),
nn.ReLU(),
)
# decoder
# [400, 352, 11] <- [200, 176, 5]
self.conv_up_t4 = SparseBasicBlock(64, 64, indice_key='subm4', norm_fn=norm_fn)
self.conv_up_m4 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm4')
self.inv_conv4 = block(64, 64, 3, norm_fn=norm_fn, indice_key='spconv4', conv_type='inverseconv')
# [800, 704, 21] <- [400, 352, 11]
self.conv_up_t3 = SparseBasicBlock(64, 64, indice_key='subm3', norm_fn=norm_fn)
self.conv_up_m3 = block(128, 64, 3, norm_fn=norm_fn, padding=1, indice_key='subm3')
self.inv_conv3 = block(64, 32, 3, norm_fn=norm_fn, indice_key='spconv3', conv_type='inverseconv')
# [1600, 1408, 41] <- [800, 704, 21]
self.conv_up_t2 = SparseBasicBlock(32, 32, indice_key='subm2', norm_fn=norm_fn)
self.conv_up_m2 = block(64, 32, 3, norm_fn=norm_fn, indice_key='subm2')
self.inv_conv2 = block(32, 16, 3, norm_fn=norm_fn, indice_key='spconv2', conv_type='inverseconv')
# [1600, 1408, 41] <- [1600, 1408, 41]
self.conv_up_t1 = SparseBasicBlock(16, 16, indice_key='subm1', norm_fn=norm_fn)
self.conv_up_m1 = block(32, 16, 3, norm_fn=norm_fn, indice_key='subm1')
self.conv5 = spconv.SparseSequential(
block(16, 16, 3, norm_fn=norm_fn, padding=1, indice_key='subm1')
)
self.num_point_features = 16
def UR_block_forward(self, x_lateral, x_bottom, conv_t, conv_m, conv_inv):
x_trans = conv_t(x_lateral)
x = x_trans
x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
x_m = conv_m(x)
x = self.channel_reduction(x, x_m.features.shape[1])
x.features = x_m.features + x.features
x = conv_inv(x)
return x
@staticmethod
def channel_reduction(x, out_channels):
"""
Args:
x: x.features (N, C1)
out_channels: C2
        Returns:
            x: the same sparse tensor with features reduced to (N, C2) by summing groups of C1 // C2 channels
        """
features = x.features
n, in_channels = features.shape
assert (in_channels % out_channels == 0) and (in_channels >= out_channels)
x.features = features.view(n, out_channels, -1).sum(dim=2)
return x
def forward(self, batch_dict):
"""
Args:
batch_dict:
batch_size: int
vfe_features: (num_voxels, C)
voxel_coords: (num_voxels, 4), [batch_idx, z_idx, y_idx, x_idx]
Returns:
batch_dict:
encoded_spconv_tensor: sparse tensor
point_features: (N, C)
"""
voxel_features, voxel_coords = batch_dict['voxel_features'], batch_dict['voxel_coords']
batch_size = batch_dict['batch_size']
input_sp_tensor = spconv.SparseConvTensor(
features=voxel_features,
indices=voxel_coords.int(),
spatial_shape=self.sparse_shape,
batch_size=batch_size
)
x = self.conv_input(input_sp_tensor)
x_conv1 = self.conv1(x)
x_conv2 = self.conv2(x_conv1)
x_conv3 = self.conv3(x_conv2)
x_conv4 = self.conv4(x_conv3)
# for detection head
# [200, 176, 5] -> [200, 176, 2]
out = self.conv_out(x_conv4)
# for segmentation head
# [400, 352, 11] <- [200, 176, 5]
x_up4 = self.UR_block_forward(x_conv4, x_conv4, self.conv_up_t4, self.conv_up_m4, self.inv_conv4)
# [800, 704, 21] <- [400, 352, 11]
x_up3 = self.UR_block_forward(x_conv3, x_up4, self.conv_up_t3, self.conv_up_m3, self.inv_conv3)
# [1600, 1408, 41] <- [800, 704, 21]
x_up2 = self.UR_block_forward(x_conv2, x_up3, self.conv_up_t2, self.conv_up_m2, self.inv_conv2)
# [1600, 1408, 41] <- [1600, 1408, 41]
x_up1 = self.UR_block_forward(x_conv1, x_up2, self.conv_up_t1, self.conv_up_m1, self.conv5)
batch_dict['point_features'] = x_up1.features
point_coords = common_utils.get_voxel_centers(
x_up1.indices[:, 1:], downsample_times=1, voxel_size=self.voxel_size,
point_cloud_range=self.point_cloud_range
)
batch_dict['point_coords'] = torch.cat((x_up1.indices[:, 0:1].float(), point_coords), dim=1)
batch_dict['encoded_spconv_tensor'] = out
batch_dict['encoded_spconv_tensor_stride'] = 8
return batch_dict
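# Hedged illustration (not part of the original module): `channel_reduction` above folds C1
# channels into C2 = C1 // k channels by summing consecutive groups of k. Shown here on a
# plain dense tensor with illustrative numbers instead of a sparse tensor's `.features`.
def _channel_reduction_demo():
    feats = torch.arange(12, dtype=torch.float32).view(2, 6)  # (N=2, C1=6)
    reduced = feats.view(2, 3, -1).sum(dim=2)                 # (N=2, C2=3), pairs of channels summed
    return reduced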
| [
"torch.nn.ReLU",
"torch.cat"
] | 1.1 | StarGazer1995/OpenPCDet | 4af33e8badb0c8e68c7c94c71b0ec5667aad2348 |
1.9 | """
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from copy import deepcopy
from functools import partial
import pytest
import torch
from nncf.common.utils.logger import logger as nncf_logger
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import SEHBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_depth import EDBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_kernel import EKBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_width import EWBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_dim import ElasticityDim
from nncf.torch.model_creation import create_nncf_network
from tests.torch.helpers import BasicConvTestModel
from tests.torch.helpers import get_empty_config
from tests.torch.nas.creators import build_elastic_model_from_handler
from tests.torch.nas.descriptors import ElasticityDesc
from tests.torch.nas.helpers import do_conv2d
from tests.torch.nas.helpers import move_model_to_cuda_if_available
from tests.torch.nas.test_elastic_depth import BASIC_ELASTIC_DEPTH_PARAMS
from tests.torch.nas.test_elastic_depth import BasicTestSuperNet
from tests.torch.nas.test_elastic_depth import DepthBasicConvTestModel
from tests.torch.nas.test_elastic_kernel import BASIC_ELASTIC_KERNEL_PARAMS
from tests.torch.nas.test_elastic_width import BASIC_ELASTIC_WIDTH_PARAMS
from tests.torch.nas.test_elastic_width import TwoConvAddConvTestModel
from tests.torch.nas.test_elastic_width import TwoSequentialConvBNTestModel
@pytest.yield_fixture()
def _nncf_caplog(caplog):
nncf_logger.propagate = True
yield caplog
nncf_logger.propagate = False
def ref_width_output_fn(model, x):
return model.get_minimal_subnet_output_without_reorg(x)
COMMON_WIDTH_STATE_DESCS = [
ElasticityDesc(
ElasticityDim.WIDTH,
model_cls=TwoConvAddConvTestModel,
params=BASIC_ELASTIC_WIDTH_PARAMS,
ref_state={
'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
'grouped_node_names_to_prune': [
['TwoConvAddConvTestModel/NNCFConv2d[conv1]/conv2d_0',
'TwoConvAddConvTestModel/NNCFConv2d[conv2]/conv2d_0']
]
},
ref_output_fn=ref_width_output_fn
),
ElasticityDesc(
ElasticityDim.WIDTH,
model_cls=TwoSequentialConvBNTestModel,
params=BASIC_ELASTIC_WIDTH_PARAMS,
ref_state={
'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
'grouped_node_names_to_prune': [
['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[0]/conv2d_0'],
['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[3]/conv2d_0']
]
},
ref_output_fn=ref_width_output_fn
),
]
def ref_kernel_output_fn(model, x):
conv = model.conv
ref_padding = 1
ref_weights = conv.weight[:, :, 1:4, 1:4]
return do_conv2d(conv, x, weight=ref_weights, padding=ref_padding)
COMMON_KERNEL_DESC = ElasticityDesc(
ElasticityDim.KERNEL,
model_cls=partial(BasicConvTestModel, 1, out_channels=1, kernel_size=5),
params=BASIC_ELASTIC_KERNEL_PARAMS,
ref_output_fn=ref_kernel_output_fn,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: ['BasicConvTestModel/NNCFConv2d[conv]/conv2d_0']
},
input_size=[1, 1, 5, 5]
)
COMMON_DEPTH_SUPERNET_DESC = ElasticityDesc(
ElasticityDim.DEPTH,
model_cls=BasicTestSuperNet,
params={
'mode': 'auto',
'min_block_size': 2
},
ref_state={
'elasticity_params': {
'allow_linear_combination': False,
'allow_nested_blocks': False,
'max_block_size': 50,
'min_block_size': 2,
'skipped_blocks': None
},
EDBuilderStateNames.SKIPPED_BLOCKS: [
{
'start_node_name': 'BasicTestSuperNet/NNCFConv2d[conv1]/conv2d_0',
'end_node_name': 'BasicTestSuperNet/__add___0'
}
],
EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: {0: [0]},
EDBuilderStateNames.OrdinalIds: [[1, 3]],
},
ref_search_space=[[0], []]
)
def ref_depth_output_fn(model, x):
model.set_skipped_layers(['conv1'])
return model(x)
COMMON_DEPTH_BASIC_DESC = ElasticityDesc(
ElasticityDim.DEPTH,
model_cls=DepthBasicConvTestModel,
params=BASIC_ELASTIC_DEPTH_PARAMS,
ref_output_fn=ref_depth_output_fn,
ref_search_space=[[0], []],
ref_state={
'elasticity_params': {
'allow_linear_combination': False,
'allow_nested_blocks': False,
'max_block_size': 50,
'min_block_size': 6,
'skipped_blocks': [['DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv0]/conv2d_0',
'DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv1]/conv2d_0']]
},
EDBuilderStateNames.SKIPPED_BLOCKS: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_state'],
EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_dependencies'],
EDBuilderStateNames.OrdinalIds: None,
}
)
LIST_STATE_AFTER_BUILD_DESCS = [
*COMMON_WIDTH_STATE_DESCS,
COMMON_DEPTH_SUPERNET_DESC,
COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_AFTER_BUILD_DESCS, ids=map(str, LIST_STATE_AFTER_BUILD_DESCS))
def test_can_get_builder_state_after_build(desc):
_, builder = desc.build_handler()
actual_state = builder.get_state()
assert actual_state == desc.ref_state
ELASTIC_WIDTH_PARAMS_BB = {'filter_importance': 'L2', **BASIC_ELASTIC_WIDTH_PARAMS}
LIST_STATE_BEFORE_BUILD_DESCS = [
ElasticityDesc(
ElasticityDim.WIDTH,
params=ELASTIC_WIDTH_PARAMS_BB,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: ELASTIC_WIDTH_PARAMS_BB,
EWBuilderStateNames.GROUPED_NODE_NAMES_TO_PRUNE: []
}
),
ElasticityDesc(
ElasticityDim.KERNEL,
params=BASIC_ELASTIC_KERNEL_PARAMS,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: []
}
),
COMMON_DEPTH_BASIC_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_BEFORE_BUILD_DESCS, ids=map(str, LIST_STATE_BEFORE_BUILD_DESCS))
class TestBeforeBuild:
def test_can_get_builder_state_before_build(self, desc: ElasticityDesc):
builder = desc.create_builder()
actual_state = builder.get_state()
assert actual_state == desc.ref_state
def test_output_warning_when_state_overrides_params(self, desc: ElasticityDesc, _nncf_caplog):
old_builder = desc.create_builder_with_config({})
old_state = old_builder.get_state()
new_params = desc.params
new_builder = desc.create_builder_with_config(new_params)
new_builder.load_state(old_state)
record = next(iter(_nncf_caplog.records))
assert record.levelno == logging.WARNING
def test_no_warning_when_state_and_params_are_the_same(self, desc: ElasticityDesc, _nncf_caplog):
old_builder = desc.create_builder()
old_state = old_builder.get_state()
new_params = desc.params.copy()
new_builder = desc.create_builder_with_config(new_params)
new_builder.load_state(old_state)
assert not _nncf_caplog.records
LIST_LOAD_STATE_DESCS = [
COMMON_DEPTH_BASIC_DESC,
*COMMON_WIDTH_STATE_DESCS,
COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_LOAD_STATE_DESCS, ids=map(str, LIST_LOAD_STATE_DESCS))
def test_can_load_handler_state(desc: ElasticityDesc):
model = desc.model_cls()
move_model_to_cuda_if_available(model)
model_copy = deepcopy(model)
device = next(iter(model.parameters())).device
dummy_input = torch.ones(model.INPUT_SIZE).to(device)
input_size = desc.input_size
if not input_size:
input_size = model.INPUT_SIZE
config = get_empty_config(input_sample_sizes=input_size)
old_nncf_network = create_nncf_network(model, config)
old_builder = desc.create_builder()
old_handler = old_builder.build(old_nncf_network)
elastic_model = build_elastic_model_from_handler(old_nncf_network, old_handler)
old_handler.activate_minimum_subnet()
old_output = elastic_model(dummy_input)
ref_output = desc.ref_output_fn(model, dummy_input)
assert torch.allclose(old_output, ref_output)
new_nncf_network = create_nncf_network(model_copy, config)
builder_state = old_builder.get_state()
# no need in config to restore builder state
new_builder = desc.create_builder_with_config({})
new_builder.load_state(builder_state)
new_handler = new_builder.build(new_nncf_network)
elastic_model = build_elastic_model_from_handler(new_nncf_network, new_handler)
new_handler.activate_minimum_subnet()
new_output = elastic_model(dummy_input)
assert torch.allclose(old_output, new_output)
| [
"torch.allclose",
"torch.ones"
] | 1.9.1 | openvinotoolkit/nncf_pytorch | 13a483eac6ed891720ba90d7902142c4b3bfa599 |
1.7 | import torch.nn as nn
class Generator(nn.Module):
def __init__(self, img_size=32):
super(Generator, self).__init__()
# TODO: update to proper image size
self.init_size = img_size // 4
self.l1 = nn.Sequential(nn.Linear(10, 128 * self.init_size ** 2))
self.conv_blocks = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.BatchNorm2d(128, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, 1, 3, stride=1, padding=1), #3
nn.Tanh(),
)
def forward(self, z):
out = self.l1(z)
out = out.view(out.shape[0], 128, self.init_size, self.init_size)
img = self.conv_blocks(out)
return img
class Discriminator(nn.Module):
def __init__(self, img_size=32):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
if bn:
block.append(nn.BatchNorm2d(out_filters, 0.8))
return block
self.model = nn.Sequential(
*discriminator_block(1, 16, bn=False), #3
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
# The height and width of downsampled image
# TODO: update to proper image size
ds_size = img_size // 2 ** 4
self.adv_layer = nn.Linear(128 * ds_size ** 2, 1)
def forward(self, img):
out = self.model(img)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out)
return validity
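# Hedged shape check (not part of the original file): with the default img_size of 32 the
# generator maps a 10-dim latent vector to a 1x32x32 image and the discriminator maps that
# image to a single validity logit. Batch size and latent values are illustrative.
if __name__ == "__main__":
    import torch
    generator, discriminator = Generator(img_size=32), Discriminator(img_size=32)
    z = torch.randn(4, 10)
    fake = generator(z)              # (4, 1, 32, 32)
    validity = discriminator(fake)   # (4, 1)
    print(fake.shape, validity.shape)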
| [
"torch.nn.Linear",
"torch.nn.Tanh",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.Upsample",
"torch.nn.Conv2d",
"torch.nn.Dropout2d"
] | 1.7.1 | nata1y/fltk-testbed-group-3 | e23b59fa2a5e638d3804a39fe5012983e2988ca6 |
0.4 | from typing import Type
import torch
from torch import nn
from torch.jit import ScriptModule
from catalyst.dl.core import Experiment, Runner
class _ForwardOverrideModel(nn.Module):
"""
Model that calls specified method instead of forward
(Workaround, single method tracing is not supported)
"""
def __init__(self, model, method_name):
super().__init__()
self.model = model
self.method = method_name
def forward(self, *args, **kwargs):
return getattr(self.model, self.method)(*args, **kwargs)
class _TracingModelWrapper(nn.Module):
"""
Wrapper that traces model with batch instead of calling it
(Workaround, to use native model batch handler)
"""
def __init__(self, model, method_name):
super().__init__()
self.method_name = method_name
self.model = model
self.tracing_result: ScriptModule
def __call__(self, *args, **kwargs):
method_model = _ForwardOverrideModel(
self.model, self.method_name
)
self.tracing_result = \
torch.jit.trace(
method_model,
*args, **kwargs
)
def _get_native_batch(
experiment: Experiment, stage: str
):
"""Returns dataset from first loader provided by experiment"""
loaders = experiment.get_loaders(stage)
assert loaders, \
"Experiment must have at least one loader to support tracing"
# Take first loader
loader = next(iter(loaders.values()))
dataset = loader.dataset
collate_fn = loader.collate_fn
sample = collate_fn([dataset[0]])
return sample
def trace_model(
model: nn.Module,
experiment: Experiment,
runner_type: Type[Runner],
method_name: str = "forward"
) -> ScriptModule:
"""
    Traces the model using its native experiment and runner.
Args:
model: Model to trace
NOTICE: will be switched to eval and
requires_grad=False will be set on all params
experiment: Native experiment that was used to train model
runner_type: Model's native runner that was used to train model
method_name: Model's method name that will be
used as entrypoint during tracing
Returns:
Traced model ScriptModule
"""
stage = list(experiment.stages)[0]
model.eval()
for p in model.parameters():
p.requires_grad_(False)
tracer = _TracingModelWrapper(model, method_name)
runner: Runner = runner_type(tracer.cpu(), torch.device("cpu"))
batch = _get_native_batch(experiment, stage)
batch = runner._batch2device(batch, device=runner.device)
runner.predict_batch(batch)
return tracer.tracing_result
__all__ = ["trace_model"]
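# Hedged usage sketch (illustrative; `SomeExperiment` and `SomeRunner` stand in for concrete
# user-defined Experiment/Runner subclasses and are not defined in this module):
#
#     experiment = SomeExperiment(...)
#     traced: ScriptModule = trace_model(model, experiment, SomeRunner, method_name="forward")
#     traced.save("traced_model.pth")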
| [
"torch.device",
"torch.jit.trace"
] | 0.4.1 | 162/catalyst | b4ba36be52c51160e0fabecdcb084a8d5cd96cb7 |
1.1 | from functools import partial
import torch
import random
import numpy as np
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import common_utils, box_utils
from . import augmentor_utils, database_sampler
class DataAugmentor(object):
def __init__(self, root_path, augmentor_configs, class_names, logger=None):
self.root_path = root_path
self.class_names = class_names
self.logger = logger
self.data_augmentor_queue = []
aug_config_list = augmentor_configs if isinstance(augmentor_configs, list) \
else augmentor_configs.AUG_CONFIG_LIST
for cur_cfg in aug_config_list:
if not isinstance(augmentor_configs, list):
if cur_cfg.NAME in augmentor_configs.DISABLE_AUG_LIST:
continue
cur_augmentor = getattr(self, cur_cfg.NAME)(config=cur_cfg)
self.data_augmentor_queue.append(cur_augmentor)
def gt_sampling(self, config=None):
db_sampler = database_sampler.DataBaseSampler(
root_path=self.root_path,
sampler_cfg=config,
class_names=self.class_names,
logger=self.logger
)
return db_sampler
def __getstate__(self):
d = dict(self.__dict__)
del d['logger']
return d
def __setstate__(self, d):
self.__dict__.update(d)
def object_size_normalization(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.object_size_normalization, config=config)
gt_boxes, points = data_dict['gt_boxes'], data_dict['points']
if gt_boxes.shape[1] > 7:
gt_boxes = gt_boxes[:,:7]
offset = np.array(config['OFFSET'])
# get masks of points inside boxes
point_masks = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)).numpy()
num_obj = gt_boxes.shape[0]
obj_points_list = []
gt_boxes_size = gt_boxes[:, 3:6]
new_gt_boxes_size = gt_boxes_size + offset
scale_factor = new_gt_boxes_size / gt_boxes_size
# scale the objects
for i in range(num_obj):
point_mask = point_masks[i]
obj_points = points[point_mask > 0] # get object points within the gt box
obj_points[:, :3] -= gt_boxes[i, :3] # relative to box center
obj_points[:, :3] *= scale_factor[i] # scale
obj_points[:, :3] += gt_boxes[i, :3] # back to global coordinate
obj_points_list.append(obj_points)
        # remove background points that fall inside the original boxes
        points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
        # scale the boxes to the new size
        gt_boxes[:, 3:6] *= scale_factor
        # remove any remaining points that now fall inside the enlarged boxes
        points = box_utils.remove_points_in_boxes3d(points, gt_boxes)
        # merge the scaled object points back with the background points
obj_points = np.concatenate(obj_points_list, axis=0)
points = np.concatenate([points, obj_points], axis=0)
data_dict['points'] = points
data_dict['gt_boxes'][:,:7] = gt_boxes
return data_dict
def random_world_flip(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_flip, config=config)
gt_boxes = data_dict['gt_boxes'] if 'gt_boxes' in data_dict else None
points = data_dict['points']
for cur_axis in config['ALONG_AXIS_LIST']:
assert cur_axis in ['x', 'y']
if 'gt_boxes' in data_dict:
gt_boxes, points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s' % cur_axis)(
gt_boxes, points, return_enable=True
)
else:
points, world_flip_enabled = getattr(augmentor_utils, 'random_flip_along_%s_points' % cur_axis)(
points, return_enable=True
)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
data_dict['world_flip_enabled'] = world_flip_enabled
return data_dict
def random_world_rotation(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_rotation, config=config)
rot_range = config['WORLD_ROT_ANGLE']
if not isinstance(rot_range, list):
rot_range = [-rot_range, rot_range]
if 'gt_boxes' in data_dict:
gt_boxes, points, world_rotation = augmentor_utils.global_rotation(
data_dict['gt_boxes'], data_dict['points'], rot_range=rot_range, return_rotation=True
)
else:
points, world_rotation = augmentor_utils.global_rotation_points(
data_dict['points'], rot_range=rot_range, return_rotation=True
)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
data_dict['world_rotation'] = world_rotation
return data_dict
def random_world_scaling(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling, config=config)
if 'gt_boxes' in data_dict:
gt_boxes, points, scale_ratio = augmentor_utils.global_scaling(
data_dict['gt_boxes'], data_dict['points'], config['WORLD_SCALE_RANGE']
)
else:
points, scale_ratio = augmentor_utils.global_scaling_points(data_dict['points'], config['WORLD_SCALE_RANGE'])
data_dict['world_scaling'] = scale_ratio
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'] = gt_boxes
data_dict['points'] = points
return data_dict
def random_world_scaling_xyz(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_scaling_xyz, config=config)
gt_boxes = data_dict['gt_boxes']
points = data_dict['points']
scale_range = config['SCALE_RANGE']
noise_scale = np.random.uniform(scale_range[0], scale_range[1], 3)
points[:, :3] *= noise_scale
gt_boxes[:, :3] *= noise_scale
gt_boxes[:, 3:6] *= noise_scale
data_dict['points'] = points
data_dict['gt_boxes'] = gt_boxes
data_dict['world_scaling_xyz'] = noise_scale
return data_dict
def jitter_point_cloud(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.jitter_point_cloud, config=config)
sigma = config['SIGMA']
clip = config['CLIP']
assert(clip > 0)
points = data_dict['points']
jittered_data = np.clip(sigma * np.random.randn(points.shape[0], points.shape[1]), -1*clip, clip)
points += jittered_data
data_dict['points'] = points
data_dict['jittered'] = True
data_dict['jitter_values'] = jittered_data
return data_dict
def random_world_shift(self, data_dict=None, config=None):
if data_dict is None:
return partial(self.random_world_shift, config=config)
shift_range = config['RANGE']
shifts = np.random.uniform(-shift_range, shift_range, 3)
data_dict['points'] += shifts
data_dict['world_shifts'] = shifts
return data_dict
def forward(self, data_dict, augment=True):
"""
Args:
data_dict:
points: (N, 3 + C_in)
gt_boxes: optional, (N, 7) [x, y, z, dx, dy, dz, heading]
gt_names: optional, (N), string
...
Returns:
"""
if augment:
for cur_augmentor in self.data_augmentor_queue:
data_dict = cur_augmentor(data_dict=data_dict)
if 'gt_boxes' in data_dict:
data_dict['gt_boxes'][:, 6] = common_utils.limit_period(
data_dict['gt_boxes'][:, 6], offset=0.5, period=2 * np.pi
)
if 'road_plane' in data_dict:
data_dict.pop('road_plane')
if 'gt_boxes' in data_dict and 'gt_boxes_mask' in data_dict:
gt_boxes_mask = data_dict['gt_boxes_mask']
data_dict['gt_boxes'] = data_dict['gt_boxes'][gt_boxes_mask]
data_dict['gt_names'] = data_dict['gt_names'][gt_boxes_mask]
data_dict.pop('gt_boxes_mask')
return data_dict
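# A minimal numpy-only sketch (not part of this module) of two pieces of math used
# above: the per-object scaling in `object_size_normalization` and the heading
# wrapping applied in `forward`. `_limit_period` is an assumed standard
# wrap-to-period formula and may differ from the exact `common_utils.limit_period`.
if __name__ == "__main__":
    import numpy as np

    def _limit_period(val, offset=0.5, period=2 * np.pi):
        # with offset=0.5 and period=2*pi this wraps angles into [-pi, pi)
        return val - np.floor(val / period + offset) * period

    print(_limit_period(np.array([3.5 * np.pi, -3.0 * np.pi])))

    # Scaling object points about the box center: shift into the box frame,
    # scale each axis by (size + offset) / size, then shift back.
    box_center = np.array([10.0, 2.0, 0.0])
    box_size = np.array([4.0, 2.0, 1.5])
    size_offset = np.array([0.5, 0.5, 0.0])
    scale = (box_size + size_offset) / box_size
    pts = np.array([[11.0, 2.5, 0.5], [9.0, 1.5, -0.5]])
    print((pts - box_center) * scale + box_center)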
| [
"torch.from_numpy"
] | 1.1 | Jasonkks/mlcnet | 8f89c860c709733c8baa663607004fc48d76291d |
1.4 | from typing import Any, Dict, List, Optional, Tuple, Type, Union
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common import logger
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import DQNPolicy
class DQN(OffPolicyAlgorithm):
"""
Deep Q-Network (DQN)
Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
Default hyperparameters are taken from the nature paper,
except for the optimizer and learning rate that were taken from Stable Baselines defaults.
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: The learning rate; it can be a function
        of the current progress remaining (from 1 to 0)
:param buffer_size: size of the replay buffer
:param learning_starts: how many steps of the model to collect transitions for before learning starts
:param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1); default 1 for a hard update
:param gamma: the discount factor
:param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.
:param gradient_steps: How many gradient steps to do after each rollout
(see ``train_freq`` and ``n_episodes_rollout``)
        Setting this to ``-1`` means doing as many gradient steps as steps taken in the environment
during the rollout.
:param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.
:param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
at a cost of more complexity.
See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
:param target_update_interval: update the target network every ``target_update_interval``
environment steps.
    :param exploration_fraction: fraction of the entire training period over which the exploration rate is reduced
:param exploration_initial_eps: initial value of random action probability
:param exploration_final_eps: final value of random action probability
:param max_grad_norm: The maximum value for the gradient clipping
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
def __init__(
self,
policy: Union[str, Type[DQNPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 1e-4,
buffer_size: int = 1000000,
learning_starts: int = 50000,
batch_size: Optional[int] = 32,
tau: float = 1.0,
gamma: float = 0.99,
train_freq: int = 4,
gradient_steps: int = 1,
n_episodes_rollout: int = -1,
optimize_memory_usage: bool = False,
target_update_interval: int = 10000,
exploration_fraction: float = 0.1,
exploration_initial_eps: float = 1.0,
exploration_final_eps: float = 0.05,
max_grad_norm: float = 10,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super(DQN, self).__init__(
policy,
env,
DQNPolicy,
learning_rate,
buffer_size,
learning_starts,
batch_size,
tau,
gamma,
train_freq,
gradient_steps,
n_episodes_rollout,
action_noise=None, # No action noise
policy_kwargs=policy_kwargs,
tensorboard_log=tensorboard_log,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
sde_support=False,
optimize_memory_usage=optimize_memory_usage,
)
self.exploration_initial_eps = exploration_initial_eps
self.exploration_final_eps = exploration_final_eps
self.exploration_fraction = exploration_fraction
self.target_update_interval = target_update_interval
self.max_grad_norm = max_grad_norm
# "epsilon" for the epsilon-greedy exploration
self.exploration_rate = 0.0
# Linear schedule will be defined in `_setup_model()`
self.exploration_schedule = None
self.q_net, self.q_net_target = None, None
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super(DQN, self)._setup_model()
self._create_aliases()
self.exploration_schedule = get_linear_fn(
self.exploration_initial_eps, self.exploration_final_eps, self.exploration_fraction
)
def _create_aliases(self) -> None:
self.q_net = self.policy.q_net
self.q_net_target = self.policy.q_net_target
def _on_step(self) -> None:
"""
Update the exploration rate and target network if needed.
This method is called in ``collect_rollouts()`` after each step in the environment.
"""
if self.num_timesteps % self.target_update_interval == 0:
polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
logger.record("rollout/exploration rate", self.exploration_rate)
def train(self, gradient_steps: int, batch_size: int = 100) -> None:
# Update learning rate according to schedule
self._update_learning_rate(self.policy.optimizer)
losses = []
for gradient_step in range(gradient_steps):
# Sample replay buffer
replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
with th.no_grad():
# Compute the target Q values
target_q = self.q_net_target(replay_data.next_observations)
# Follow greedy policy: use the one with the highest value
target_q, _ = target_q.max(dim=1)
# Avoid potential broadcast issue
target_q = target_q.reshape(-1, 1)
# 1-step TD target
target_q = replay_data.rewards + (1 - replay_data.dones) * self.gamma * target_q
# Get current Q estimates
current_q = self.q_net(replay_data.observations)
# Retrieve the q-values for the actions from the replay buffer
current_q = th.gather(current_q, dim=1, index=replay_data.actions.long())
# Compute Huber loss (less sensitive to outliers)
loss = F.smooth_l1_loss(current_q, target_q)
losses.append(loss.item())
# Optimize the policy
self.policy.optimizer.zero_grad()
loss.backward()
# Clip gradient norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
# Increase update counter
self._n_updates += gradient_steps
logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
logger.record("train/loss", np.mean(losses))
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Overrides the base_class predict function to include epsilon-greedy exploration.
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
if not deterministic and np.random.rand() < self.exploration_rate:
if is_vectorized_observation(observation, self.observation_space):
n_batch = observation.shape[0]
action = np.array([self.action_space.sample() for _ in range(n_batch)])
else:
action = np.array(self.action_space.sample())
else:
action, state = self.policy.predict(observation, state, mask, deterministic)
return action, state
def learn(
self,
total_timesteps: int,
callback: MaybeCallback = None,
log_interval: int = 4,
eval_env: Optional[GymEnv] = None,
eval_freq: int = -1,
n_eval_episodes: int = 5,
tb_log_name: str = "DQN",
eval_log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
) -> OffPolicyAlgorithm:
return super(DQN, self).learn(
total_timesteps=total_timesteps,
callback=callback,
log_interval=log_interval,
eval_env=eval_env,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
tb_log_name=tb_log_name,
eval_log_path=eval_log_path,
reset_num_timesteps=reset_num_timesteps,
)
def _excluded_save_params(self) -> List[str]:
return super(DQN, self)._excluded_save_params() + ["q_net", "q_net_target"]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
state_dicts = ["policy", "policy.optimizer"]
return state_dicts, []
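# A minimal usage sketch (not part of this module). It assumes gym's "CartPole-v1"
# environment is available; the hyperparameters are illustrative, not tuned.
if __name__ == "__main__":
    model = DQN("MlpPolicy", "CartPole-v1", learning_rate=1e-4, buffer_size=10000, verbose=1)
    model.learn(total_timesteps=5000)
    obs = model.get_env().reset()
    # predict() applies epsilon-greedy exploration unless deterministic=True is passed
    action, _state = model.predict(obs, deterministic=True)
    print(action)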
| [
"torch.no_grad",
"torch.nn.functional.smooth_l1_loss"
] | 1.4.0 | haorang/285 | 3b7369b8eb4433952c9cdf27d4feaa015a6c40e4 |
1.0 | # coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import unittest
from transformers import BloomConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_generation_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomTokenizerFast,
)
@require_torch
class BloomModelTester:
def __init__(
self,
parent,
batch_size=14,
seq_length=7,
is_training=True,
use_token_type_ids=False,
use_input_mask=True,
use_labels=True,
use_mc_token_ids=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_token_type_ids = use_token_type_ids
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.use_mc_token_ids = use_mc_token_ids
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = None
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
def get_large_model_config(self):
return BloomConfig.from_pretrained("bigscience/bloom")
def prepare_config_and_inputs(self, gradient_checkpointing=False):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
sequence_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config(gradient_checkpointing=gradient_checkpointing)
return (config, input_ids, input_mask, sequence_labels)
def get_config(self, gradient_checkpointing=False, slow_but_exact=True):
return BloomConfig(
vocab_size=self.vocab_size,
seq_length=self.seq_length,
hidden_size=self.hidden_size,
n_layer=self.num_hidden_layers,
n_head=self.num_attention_heads,
resid_pdrop=self.hidden_dropout_prob,
attn_pdrop=self.attention_probs_dropout_prob,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
num_labels=self.num_labels,
gradient_checkpointing=gradient_checkpointing,
slow_but_exact=slow_but_exact,
dtype="float32",
)
def create_and_check_bloom_model(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values), config.n_layer)
def create_and_check_bloom_model_past(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=torch.ones_like(input_ids), use_cache=True)
outputs_use_cache_conf = model(input_ids, attention_mask=torch.ones_like(input_ids))
outputs_no_past = model(input_ids, use_cache=False, attention_mask=torch.ones_like(input_ids))
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past = outputs["past_key_values"]
        # create a hypothetical next token and extend it to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_bloom_model_attention_mask_past(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = self.seq_length // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create a hypothetical next token and extend it to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_bloom_model_past_large_inputs(self, config, input_ids, input_mask, *args):
model = BloomModel(config=config)
model.to(torch_device)
model.eval()
# first forward pass
outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
output, past = outputs.to_tuple()
        # create a hypothetical next token and extend it to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and token_type_ids
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past)[
"last_hidden_state"
]
self.parent.assertTrue(output_from_past.shape[1] == next_tokens.shape[1])
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_lm_head_model(self, config, input_ids, input_mask, *args):
model = BloomForCausalLM(config)
model.to(torch_device)
model.eval()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_sequence_classification_model(self, config, input_ids, input_mask, *args):
config.num_labels = self.num_labels
model = BloomForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_token_classification_model(self, config, input_ids, input_mask, *args):
model = BloomForTokenClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_forward_and_backwards(
self, config, input_ids, input_mask, *args, gradient_checkpointing=False
):
model = BloomForCausalLM(config)
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
result = model(input_ids, labels=input_ids)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def create_and_check_bloom_weight_initialization(self, config, *args):
model = BloomModel(config)
model_std = model.config.initializer_range / math.sqrt(2 * model.config.n_layer)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask, sequence_labels = config_and_inputs
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
@require_torch
class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (
(
BloomModel,
BloomForCausalLM,
BloomForSequenceClassification,
BloomForTokenClassification,
)
if is_torch_available()
else ()
)
all_generative_model_classes = (BloomForCausalLM,) if is_torch_available() else ()
fx_compatible = False
test_missing_keys = False
test_pruning = False
    test_torchscript = True  # torch.autograd functions seem to be unsupported
def setUp(self):
self.model_tester = BloomModelTester(self)
self.config_tester = ConfigTester(self, config_class=BloomConfig, n_embd=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_bloom_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model(*config_and_inputs)
def test_bloom_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_past(*config_and_inputs)
def test_bloom_model_att_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_attention_mask_past(*config_and_inputs)
def test_bloom_model_past_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_model_past_large_inputs(*config_and_inputs)
def test_bloom_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
def test_bloom_sequence_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_sequence_classification_model(*config_and_inputs)
def test_bloom_token_classification_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_token_classification_model(*config_and_inputs)
def test_bloom_gradient_checkpointing(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)
def test_bloom_weight_initialization(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_bloom_weight_initialization(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = BloomModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@slow
@require_torch_gpu
def test_simple_generation(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m)
input_sentence = "I enjoy walking with my cute dog"
EXPECTED_OUTPUT = (
"I enjoy walking with my cute dog, and I love to watch the kids play. I am a very active person, and I am"
" a very good listener. I am a very good person, and I am a very good person. I am a"
)
input_ids = tokenizer.encode(input_sentence, return_tensors="pt")
greedy_output = model.generate(input_ids.cuda(), max_length=50)
self.assertEqual(tokenizer.decode(greedy_output[0], skip_special_tokens=True), EXPECTED_OUTPUT)
@slow
@require_torch_gpu
def test_batch_generation(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")
input_sentence = ["I enjoy walking with my cute dog", "I enjoy walking with my cute dog"]
input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True)
greedy_output = model.generate(
input_ids["input_ids"].cuda(), attention_mask=input_ids["attention_mask"], max_length=50, do_sample=False
)
self.assertEqual(
tokenizer.decode(greedy_output[0], skip_special_tokens=True),
tokenizer.decode(greedy_output[1], skip_special_tokens=True),
)
@slow
@require_torch_gpu
def test_batch_generation_padd(self):
path_350m = "bigscience/bloom-350m"
model = BloomForCausalLM.from_pretrained(path_350m, torch_dtype="auto", use_cache=True).cuda()
model = model.eval()
tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left")
input_sentence = ["I enjoy walking with my cute dog", "Hello my name is"]
input_sentence_without_pad = "Hello my name is"
input_ids = tokenizer.batch_encode_plus(input_sentence, return_tensors="pt", padding=True)
input_ids_without_pad = tokenizer.encode(input_sentence_without_pad, return_tensors="pt")
greedy_output = model.generate(
input_ids["input_ids"].cuda(), attention_mask=input_ids["attention_mask"], max_length=50, do_sample=False
)
greedy_output_without_pad = model.generate(input_ids_without_pad.cuda(), max_length=50, do_sample=False)
# test token values
self.assertEqual(greedy_output[-1, 3:].tolist(), greedy_output_without_pad[0, :-3].tolist())
# test reconstructions
self.assertEqual(
tokenizer.decode(greedy_output[-1, 3:], skip_special_tokens=True),
tokenizer.decode(greedy_output_without_pad[0, :-3], skip_special_tokens=True),
)
@require_torch
class BloomEmbeddingTest(unittest.TestCase):
"""
The goal here is to compare the embeddings generated by the model trained
using Megatron-LM with the one from the transformers library, with a small GPT2-like model
to ensure that the conversion from Megatron-LM to transformers has been done successfully.
The script compares the logits of the embedding layer and the transformer layers.
WARNING: It is expected that these logits will not have exactly the same statistics when running
the code on CPU or GPU. For more info, please visit:
- https://github.com/pytorch/pytorch/issues/76052#issuecomment-1103193548
- https://discuss.pytorch.org/t/reproducibility-issue-between-intel-and-amd-cpus/144779/9
You need to install tokenizers following this readme:
- https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
Tokenizer used during training:
- https://huggingface.co/bigscience-catalogue-data-dev/byte-level-bpe-tokenizer-no-norm-250k-whitespace-and-eos-regex-alpha-v3-dedup-lines-articles
# TODO change the script (or just add skip) when building the env with tokenizers 0.12.0
"""
def setUp(self):
super().setUp()
self.path_bigscience_model = "bigscience/bigscience-small-testing"
@require_torch
def test_embeddings(self):
model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype="auto") # load in fp32
model.eval()
EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = {
3478: 0.0002307891845703125,
368: -0.000568389892578125,
109586: -0.0003910064697265625,
35433: -0.000194549560546875,
2: 0.0004138946533203125,
77: 0.000659942626953125,
132619: -0.00031280517578125,
2175: 0.000457763671875,
23714: 0.000263214111328125,
73173: -0.000286102294921875,
144252: 0.00052642822265625,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM = {"value": 0.08203125}
EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN = {
132619: -0.00031256675720214844,
3478: 0.00023090839385986328,
368: -0.0005702972412109375,
109586: -0.00039124488830566406,
35433: -0.000194549560546875,
2: 0.0004146099090576172,
2175: 0.0004572868347167969,
23714: 0.00026416778564453125,
73173: -0.0002865791320800781,
144252: 0.0005254745483398438,
77: 0.0006618499755859375,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_F_16_SUM = {"value": 0.0821533203125}
EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN = {
132619: -0.00031267106533050537,
3478: 0.00023087859153747559,
368: -0.0005701072514057159,
109586: -0.0003911703824996948,
35433: -0.0001944899559020996,
2: 0.0004146844148635864,
2175: 0.00045740045607089996,
23714: 0.0002641640603542328,
73173: -0.0002864748239517212,
144252: 0.0005256589502096176,
77: 0.0006617321632802486,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_MIN = {
3478: -0.00921630859375,
368: -0.010009765625,
109586: -0.01031494140625,
35433: -0.01177978515625,
2: -0.0074462890625,
77: -0.00848388671875,
132619: -0.009521484375,
2175: -0.0074462890625,
23714: -0.0145263671875,
73173: -0.007415771484375,
144252: -0.01007080078125,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_MAX = {
3478: 0.0128173828125,
368: 0.01214599609375,
109586: 0.0111083984375,
35433: 0.01019287109375,
2: 0.0157470703125,
77: 0.0174560546875,
132619: 0.0078125,
2175: 0.0113525390625,
23714: 0.0146484375,
73173: 0.01116943359375,
144252: 0.01141357421875,
}
EMBEDDINGS_DS_BEFORE_LN_F_32_SUM = {"value": 0.08217757940292358}
TEST_EMBEDDINGS = {
"torch.bfloat16": {
"mean": EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_BF_16_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_BF_16_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_BF_16_SUM,
},
"torch.float32": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
},
"torch.float": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_32_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_32_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_32_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_32_SUM,
},
"torch.float16": {
"mean": EMBEDDINGS_DS_BEFORE_LN_F_16_MEAN,
"max": EMBEDDINGS_DS_BEFORE_LN_F_16_MAX,
"min": EMBEDDINGS_DS_BEFORE_LN_F_16_MIN,
"sum": EMBEDDINGS_DS_BEFORE_LN_F_16_SUM,
},
}
# fmt: off
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
# fmt: on
EMBEDDINGS_DS_AFTER_LN_MEAN = {
3478: -6.580352783203125e-05,
368: 0.0001316070556640625,
109586: -0.00030517578125,
35433: 4.00543212890625e-05,
2: -7.2479248046875e-05,
77: -8.96453857421875e-05,
132619: 0.0001583099365234375,
2175: 2.1219253540039062e-05,
23714: -0.000247955322265625,
73173: -0.00021839141845703125,
144252: -0.0001430511474609375,
}
EMBEDDINGS_DS_AFTER_LN_MIN = {
3478: -1.6953125,
368: -1.6875,
109586: -1.6875,
35433: -2.125,
2: -1.390625,
77: -1.5390625,
132619: -1.875,
2175: -1.4609375,
23714: -2.296875,
73173: -1.3515625,
144252: -1.78125,
}
EMBEDDINGS_DS_AFTER_LN_MAX = {
3478: 2.265625,
368: 2.28125,
109586: 1.953125,
35433: 1.90625,
2: 2.703125,
77: 2.828125,
132619: 1.65625,
2175: 2.015625,
23714: 2.234375,
73173: 2.171875,
144252: 1.828125,
}
EMBEDDINGS_DS_AFTER_LN = {
"mean": EMBEDDINGS_DS_AFTER_LN_MEAN,
"min": EMBEDDINGS_DS_AFTER_LN_MIN,
"max": EMBEDDINGS_DS_AFTER_LN_MAX,
}
tensor_ids = torch.LongTensor([EXAMPLE_IDS])
with torch.no_grad():
embeddings = model.transformer.word_embeddings(tensor_ids)
            embeddings_ln = model.transformer.word_embeddings_layernorm(embeddings)
# first check the embeddings before LN
output_dict = {"min": {}, "max": {}, "mean": {}, "sum": {"value": embeddings.sum().item()}}
for i, idx in enumerate(EXAMPLE_IDS):
output_dict["min"][idx] = embeddings.min(dim=-1).values[0][i].item()
output_dict["max"][idx] = embeddings.max(dim=-1).values[0][i].item()
output_dict["mean"][idx] = embeddings.mean(dim=-1)[0][i].item()
for key in TEST_EMBEDDINGS[str(model.dtype)].keys():
self.assertDictEqual(TEST_EMBEDDINGS[str(model.dtype)][key], output_dict[key])
output_dict_norm = {"min": {}, "max": {}, "mean": {}}
for i, idx in enumerate(EXAMPLE_IDS):
output_dict_norm["min"][idx] = embeddings_ln.min(dim=-1).values[0][i].item()
output_dict_norm["max"][idx] = embeddings_ln.max(dim=-1).values[0][i].item()
output_dict_norm["mean"][idx] = embeddings_ln.mean(dim=-1)[0][i].item()
# This test does not pass when places = 2
for i, key in enumerate(output_dict_norm.keys()):
for j, idx in enumerate(output_dict[key].keys()):
self.assertAlmostEqual(EMBEDDINGS_DS_AFTER_LN[key][idx], output_dict_norm[key][idx], places=1)
@require_torch
def test_hidden_states_transformers(self):
cuda_available = torch.cuda.is_available()
model = BloomModel.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
torch_device
)
model.eval()
# fmt: off
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
# fmt: on
MEAN_VALUE_LAST_LM = -4.3392181396484375e-05
MIN_MAX_DICT = {"min": -2.0625, "max": 2.75}
tensor_ids = torch.LongTensor([EXAMPLE_IDS])
with torch.no_grad():
logits = model(tensor_ids.to(torch_device))
output_dict = {
"min": logits.last_hidden_state.min(dim=-1).values[0][0].item(),
"max": logits.last_hidden_state.max(dim=-1).values[0][0].item(),
}
if cuda_available:
self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=4)
else:
self.assertAlmostEqual(MEAN_VALUE_LAST_LM, logits.last_hidden_state.mean().item(), places=3)
self.assertDictEqual(MIN_MAX_DICT, output_dict)
@require_torch
def test_logits(self):
cuda_available = torch.cuda.is_available()
model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, use_cache=False, torch_dtype="auto").to(
torch_device
) # load in bf16
model.eval()
# fmt: off
EXAMPLE_IDS = [3478, 368, 109586, 35433, 2, 77, 132619, 3478, 368, 109586, 35433, 2, 2175, 23714, 73173, 144252, 2, 77, 132619, 3478]
# fmt: on
MEAN_LOGITS_GPU_1 = -1.823902130126953e-05
MEAN_LOGITS_GPU_2 = 1.9431114196777344e-05
tensor_ids = torch.LongTensor([EXAMPLE_IDS]).to(torch_device)
with torch.no_grad():
output = model(tensor_ids).logits
output_gpu_1, output_gpu_2 = output.split(125440, dim=-1)
if cuda_available:
self.assertEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1)
self.assertEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2)
else:
self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6) # 1e-06 precision!!
self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6)
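# A standalone sketch (not part of the test suite) of the per-token statistics
# pattern used in `test_embeddings` above, applied to a random tensor so it runs
# without downloading a checkpoint. Shapes and ids here are hypothetical.
if __name__ == "__main__":
    import torch

    example_ids = [3, 7, 11]
    embeddings = torch.randn(1, len(example_ids), 16)  # (batch, seq_len, hidden)
    stats = {"min": {}, "max": {}, "mean": {}}
    for i, idx in enumerate(example_ids):
        stats["min"][idx] = embeddings.min(dim=-1).values[0][i].item()
        stats["max"][idx] = embeddings.max(dim=-1).values[0][i].item()
        stats["mean"][idx] = embeddings.mean(dim=-1)[0][i].item()
    print(stats)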
| [
"torch.cat",
"torch.no_grad",
"torch.ones",
"torch.cuda.is_available",
"torch.LongTensor",
"torch.ones_like",
"torch.allclose"
] | 1.0 | JingyaHuang/transformers | 6589e510fa4e6c442059de2fab84752535de9b23 |
1.0 | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTorch Transformer XL model. Adapted from https://github.com/kimiyoung/transformer-xl. In particular
https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/mem_transformer.py
"""
import warnings
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_utils import PreTrainedModel
from ...utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
)
from .configuration_transfo_xl import TransfoXLConfig
from .modeling_transfo_xl_utilities import ProjectedAdaptiveLogSoftmax
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "transfo-xl-wt103"
_CONFIG_FOR_DOC = "TransfoXLConfig"
_TOKENIZER_FOR_DOC = "TransfoXLTokenizer"
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [
"transfo-xl-wt103",
# See all Transformer XL models at https://huggingface.co/models?filter=transfo-xl
]
def build_tf_to_pytorch_map(model, config):
"""
    A map of modules from TF to PyTorch. Using a map keeps the PyTorch model as close
    to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
        # We are loading a TransfoXLLMHeadModel => we will also load the Adaptive Softmax
tf_to_pt_map.update(
{
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
}
)
for i, (out_l, proj_l, tie_proj) in enumerate(
zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
):
layer_str = f"transformer/adaptive_softmax/cutoff_{i}/"
if config.tie_word_embeddings:
tf_to_pt_map.update({layer_str + "b": out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
if not tie_proj:
tf_to_pt_map.update({layer_str + "proj": proj_l})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = f"transformer/adaptive_embed/cutoff_{i}/"
tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = f"transformer/layer_{i}/"
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
return tf_to_pt_map
def load_tf_weights_in_transfo_xl(model, config, tf_path):
"""Load tf checkpoints in a pytorch model"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if "kernel" in name or "proj" in name:
array = np.transpose(array)
if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
# Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
logger.info(f"Initialize PyTorch weight {name} for layer {i}")
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + "/Adam", None)
tf_weights.pop(name + "/Adam_1", None)
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
return model
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super().__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer("inv_freq", inv_freq)
def forward(self, pos_seq, bsz=None):
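        # pos_seq is a 1-D tensor of positions. The output has shape
        # (len(pos_seq), 1, demb), expanded to (len(pos_seq), bsz, demb) when bsz
        # is given; the first demb/2 channels are sines, the remaining ones cosines.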
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].expand(-1, bsz, -1)
else:
return pos_emb[:, None, :]
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False, layer_norm_epsilon=1e-5):
super().__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = self.CoreNet(inp)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class RelPartialLearnableMultiHeadAttn(nn.Module):
def __init__(
self,
n_head,
d_model,
d_head,
dropout,
dropatt=0,
pre_lnorm=False,
r_r_bias=None,
r_w_bias=None,
layer_norm_epsilon=1e-5,
):
super().__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
self.scale = 1 / (d_head**0.5)
self.pre_lnorm = pre_lnorm
if r_r_bias is None or r_w_bias is None: # Biases are not shared
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
else:
self.r_r_bias = r_r_bias
self.r_w_bias = r_w_bias
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def _rel_shift(self, x):
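        # Relative-shift trick from Transformer-XL: pad a block of zeros along the
        # key dimension, reshape so rows are shifted by one position, then drop the
        # first row. This aligns the (query, key) score matrix with relative
        # distances without an explicit gather.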
zero_pad_shape = (x.size(0), 1) + x.size()[2:]
zero_pad = torch.zeros(zero_pad_shape, device=x.device, dtype=x.dtype)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded_shape = (x.size(1) + 1, x.size(0)) + x.size()[2:]
x_padded = x_padded.view(*x_padded_shape)
x = x_padded[1:].view_as(x)
return x
def forward(self, w, r, attn_mask=None, mems=None, head_mask=None, output_attentions=False):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)  # klen x bsz x n_head x d_head
        r_head_k = r_head_k.view(rlen, self.n_head, self.d_head)  # rlen x n_head x d_head
# compute attention score
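        # Following the Transformer-XL paper, the score splits into a content term
        # AC = (q + r_w_bias) . k and a position term BD = (q + r_r_bias) . r,
        # where r is the relative positional embedding; BD is aligned via _rel_shift.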
rw_head_q = w_head_q + self.r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum("ibnd,jbnd->ijbn", (rw_head_q, w_head_k)) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + self.r_r_bias
BD = torch.einsum("ibnd,jnd->ijbn", (rr_head_q, r_head_k)) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
# compute attention probability
if attn_mask is not None and torch.sum(attn_mask).item():
attn_mask = attn_mask == 1 # Switch to bool
if attn_mask.dim() == 2:
if next(self.parameters()).dtype == torch.float16:
attn_score = (
attn_score.float().masked_fill(attn_mask[None, :, :, None], -65000).type_as(attn_score)
)
else:
attn_score = attn_score.float().masked_fill(attn_mask[None, :, :, None], -1e30).type_as(attn_score)
elif attn_mask.dim() == 3:
if next(self.parameters()).dtype == torch.float16:
attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -65000).type_as(attn_score)
else:
attn_score = attn_score.float().masked_fill(attn_mask[:, :, :, None], -1e30).type_as(attn_score)
# [qlen x klen x bsz x n_head]
attn_prob = nn.functional.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * head_mask
# compute attention vector
attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
outputs = [w + attn_out]
else:
# residual connection + layer normalization
outputs = [self.layer_norm(w + attn_out)]
if output_attentions:
outputs.append(attn_prob)
return outputs
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, layer_norm_epsilon=1e-5, **kwargs):
super().__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(
n_head, d_model, d_head, dropout, layer_norm_epsilon=layer_norm_epsilon, **kwargs
)
self.pos_ff = PositionwiseFF(
d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm"), layer_norm_epsilon=layer_norm_epsilon
)
def forward(self, dec_inp, r, dec_attn_mask=None, mems=None, head_mask=None, output_attentions=False):
attn_outputs = self.dec_attn(
dec_inp,
r,
attn_mask=dec_attn_mask,
mems=mems,
head_mask=head_mask,
output_attentions=output_attentions,
)
ff_output = self.pos_ff(attn_outputs[0])
outputs = [ff_output] + attn_outputs[1:]
return outputs
class AdaptiveEmbedding(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj**0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val**i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = nn.functional.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = nn.functional.linear(emb_i, self.emb_projs[i])
emb_flat.index_copy_(0, indices_i, emb_i)
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
return embed
class TransfoXLPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = TransfoXLConfig
load_tf_weights = load_tf_weights_in_transfo_xl
base_model_prefix = "transformer"
def _init_weight(self, weight):
if self.config.init == "uniform":
nn.init.uniform_(weight, -self.config.init_range, self.config.init_range)
elif self.config.init == "normal":
nn.init.normal_(weight, 0.0, self.config.init_std)
def _init_bias(self, bias):
nn.init.constant_(bias, 0.0)
def _init_weights(self, m):
"""Initialize the weights."""
classname = m.__class__.__name__
if classname.find("Linear") != -1:
if hasattr(m, "weight") and m.weight is not None:
self._init_weight(m.weight)
if hasattr(m, "bias") and m.bias is not None:
self._init_bias(m.bias)
elif classname.find("AdaptiveEmbedding") != -1:
if hasattr(m, "emb_projs"):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, self.config.proj_init_std)
elif classname.find("Embedding") != -1:
if hasattr(m, "weight"):
self._init_weight(m.weight)
elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
self._init_weight(m.cluster_weight)
if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
self._init_bias(m.cluster_bias)
if hasattr(m, "out_projs"):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, self.config.proj_init_std)
elif classname.find("LayerNorm") != -1:
if hasattr(m, "weight"):
nn.init.normal_(m.weight, 1.0, self.config.init_std)
if hasattr(m, "bias") and m.bias is not None:
self._init_bias(m.bias)
else:
if hasattr(m, "r_emb"):
self._init_weight(m.r_emb)
if hasattr(m, "r_w_bias"):
self._init_weight(m.r_w_bias)
if hasattr(m, "r_r_bias"):
self._init_weight(m.r_r_bias)
if hasattr(m, "r_bias"):
self._init_bias(m.r_bias)
def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: Optional[int] = -1):
"""
Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size. Take care of tying
weights embeddings afterwards if the model class has a *tie_weights()* method.
Arguments:
new_num_tokens: (*optional*) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at
the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and
just returns a pointer to the input tokens `torch.nn.Embeddings` Module of the model.
layer: (*optional*) int:
Layer of the *AdaptiveEmbedding* where the resizing should be done. Per default the last layer will be
resized. Be aware that when resizing other than the last layer, you have to ensure that the new
token(s) in the tokenizer are at the corresponding position.
Return: `torch.nn.Embeddings` Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
if new_num_tokens is None:
return self.get_input_embeddings()
new_num_tokens_layer, layer = self._get_new_num_tokens_layer(new_num_tokens, layer)
assert new_num_tokens_layer > 0, "The size of the new embedding layer cannot be 0 or less"
model_embeds = base_model._resize_token_embeddings(new_num_tokens_layer, layer)
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
base_model.n_token = new_num_tokens
new_embedding_shapes = self._get_embedding_shapes()
self._resize_cutoffs(new_num_tokens, new_num_tokens_layer, new_embedding_shapes, layer)
# Tie weights again if needed
self.tie_weights()
return model_embeds
def _get_new_num_tokens_layer(self, new_num_tokens, layer):
embeddings = self.get_input_embeddings()
if layer == -1:
layer = len(embeddings.emb_layers) - 1
assert 0 <= layer <= len(embeddings.emb_layers) - 1
new_num_tokens_layer = (
new_num_tokens
- sum([emb.weight.shape[0] for emb in embeddings.emb_layers[:layer]])
- sum([emb.weight.shape[0] for emb in embeddings.emb_layers[layer + 1 :]])
)
return new_num_tokens_layer, layer
def _get_embedding_shapes(self):
embeddings = self.get_input_embeddings()
return [emb.weight.shape[0] for emb in embeddings.emb_layers]
def _resize_token_embeddings(self, new_num_tokens, layer=-1):
embeddings = self.get_input_embeddings()
if new_num_tokens is None:
return embeddings
new_embeddings_layer = self._get_resized_embeddings(embeddings.emb_layers[layer], new_num_tokens)
embeddings.emb_layers[layer] = new_embeddings_layer
self.set_input_embeddings(embeddings)
return self.get_input_embeddings()
def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
embeddings = self.get_input_embeddings()
for i in range(layer, len(embeddings.cutoffs)):
embeddings.cutoffs[i] = sum(new_embedding_shapes[: i + 1])
embeddings.cutoff_ends = [0] + embeddings.cutoffs
embeddings.n_token = new_num_tokens
self.config.cutoffs = embeddings.cutoffs[:-1]
return embeddings.cutoffs
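# Hedged usage sketch (not part of the original file): resizing only the last adaptive
# embedding cluster after new tokens have been added to the tokenizer. The helper name
# and the way `num_added_tokens` is obtained are illustrative assumptions.
def _example_resize_last_cluster(model: "TransfoXLPreTrainedModel", num_added_tokens: int):
    # layer=-1 targets the last (rarest-word) cluster, which is where newly added
    # tokens must live according to the docstring of `resize_token_embeddings` above.
    new_size = model.config.vocab_size + num_added_tokens
    return model.resize_token_embeddings(new_num_tokens=new_size, layer=-1)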
@dataclass
class TransfoXLModelOutput(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
mems (`List[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
input) to speed up sequential decoding. The token ids which have their past given to this model should not
be passed as input ids as they have already been computed.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor
mems: List[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class TransfoXLSequenceClassifierOutputWithPast(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
mems (`List[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
input) to speed up sequential decoding. The token ids which have their past given to this model should not
be passed as input ids as they have already been computed.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
mems: List[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class TransfoXLLMHeadModelOutput(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
losses (`torch.FloatTensor` of shape *(batch_size, sequence_length-1)*, *optional*, returned when `labels` is provided):
Language modeling losses (not reduced).
prediction_scores (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token after SoftMax).
mems (`List[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks). Can be used (see `mems`
input) to speed up sequential decoding. The token ids which have their past given to this model should not
be passed as input ids as they have already been computed.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
        loss (`torch.FloatTensor` of shape `()`, *optional*, returned when `labels` is provided):
Reduced language modeling loss.
"""
losses: Optional[torch.FloatTensor] = None
prediction_scores: torch.FloatTensor = None
mems: List[torch.FloatTensor] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
loss: Optional[torch.FloatTensor] = None
@property
def logits(self):
# prediction scores are the output of the adaptive softmax, see
# the file `modeling_transfo_xl_utilities`. Since the adaptive
# softmax returns the log softmax value, `self.prediction_scores`
# are strictly speaking not exactly `logits`, but behave the same
# way logits do.
return self.prediction_scores
TRANSFO_XL_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`TransfoXLConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
TRANSFO_XL_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`TransfoXLTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
mems (`List[torch.FloatTensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
`mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as `input_ids` as they have already been computed.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLModel(TransfoXLPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.n_token = config.vocab_size
self.d_embed = config.d_embed
self.d_model = config.d_model
self.n_head = config.n_head
self.d_head = config.d_head
self.word_emb = AdaptiveEmbedding(
config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
)
self.drop = nn.Dropout(config.dropout)
self.n_layer = config.n_layer
self.mem_len = config.mem_len
self.attn_type = config.attn_type
if not config.untie_r:
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.layers = nn.ModuleList()
if config.attn_type == 0: # the default attention
for i in range(config.n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
config.n_head,
config.d_model,
config.d_head,
config.d_inner,
config.dropout,
dropatt=config.dropatt,
pre_lnorm=config.pre_lnorm,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias,
layer_norm_epsilon=config.layer_norm_epsilon,
)
)
else: # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
raise NotImplementedError # Removed them to avoid maintaining dead code
self.same_length = config.same_length
self.clamp_len = config.clamp_len
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
else: # learnable embeddings and absolute embeddings
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.word_emb
def set_input_embeddings(self, new_embeddings):
self.word_emb = new_embeddings
def backward_compatible(self):
self.sample_softmax = -1
def reset_memory_length(self, mem_len):
self.mem_len = mem_len
def _prune_heads(self, heads):
logger.info("Head pruning is not implemented for Transformer-XL model")
pass
def init_mems(self, bsz):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer):
empty = torch.zeros(self.mem_len, bsz, self.config.d_model, dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, mlen, qlen):
# does not deal with None
if mems is None:
return None
# mems is not None
assert len(hids) == len(mems), "len(hids) != len(mems)"
# There are `mlen + qlen` steps that can be cached into mems
with torch.no_grad():
new_mems = []
end_idx = mlen + max(0, qlen)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
@add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TransfoXLModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
mems: Optional[List[torch.FloatTensor]] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TransfoXLModelOutput]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library
# so we transpose here from shape [bsz, len] to shape [len, bsz]
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
qlen, bsz = input_ids.size()
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if mems is None:
mems = self.init_mems(bsz)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
) # switch to float if need + fp16 compatibility
else:
head_mask = [None] * self.n_layer
if inputs_embeds is not None:
word_emb = inputs_embeds
else:
word_emb = self.word_emb(input_ids)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones((qlen, klen), dtype=torch.uint8)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len))[:, :, None] # -1
else:
dec_attn_mask = torch.triu(word_emb.new_ones((qlen, klen), dtype=torch.uint8), diagonal=1 + mlen)[
:, :, None
]
hids = []
attentions = [] if output_attentions else None
if self.attn_type == 0: # default
pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
for i, layer in enumerate(self.layers):
hids.append(core_out)
mems_i = None if mems is None else mems[i]
layer_outputs = layer(
core_out,
pos_emb,
dec_attn_mask=dec_attn_mask,
mems=mems_i,
head_mask=head_mask[i],
output_attentions=output_attentions,
)
core_out = layer_outputs[0]
if output_attentions:
attentions.append(layer_outputs[1])
else: # learnable embeddings and absolute embeddings
raise NotImplementedError # Removed these to avoid maintaining dead code - They are not used in our pretrained checkpoint
core_out = self.drop(core_out)
new_mems = self._update_mems(hids, mems, mlen, qlen)
if output_hidden_states:
# Add last layer and transpose to library standard shape [bsz, len, hidden_dim]
hids.append(core_out)
hids = tuple(t.transpose(0, 1).contiguous() for t in hids)
else:
hids = None
if output_attentions:
# Transpose to library standard shape [bsz, n_heads, query_seq_len, key_seq_len]
attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
# We transpose back here to shape [bsz, len, hidden_dim]
core_out = core_out.transpose(0, 1).contiguous()
if not return_dict:
return tuple(v for v in [core_out, new_mems, hids, attentions] if v is not None)
return TransfoXLModelOutput(
last_hidden_state=core_out,
mems=new_mems,
hidden_states=hids,
attentions=attentions,
)
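# Hedged illustration (not part of the original file) of the segment-level recurrence that
# `mems` enables: the cache returned by one forward pass is fed into the next one. The
# function name and the `segments` iterable are illustrative assumptions.
def _example_segment_recurrence(model: "TransfoXLModel", segments):
    mems = None
    hidden = []
    for input_ids in segments:  # each element: LongTensor of shape (batch_size, seg_len)
        out = model(input_ids, mems=mems, return_dict=True)
        mems = out.mems  # detached inside `_update_mems`, so it is safe to carry over
        hidden.append(out.last_hidden_state)
    return torch.cat(hidden, dim=1)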
@add_start_docstrings(
"""
The Transformer-XL Model with a language modeling head on top (adaptive softmax with weights tied to the adaptive
input embeddings)
""",
TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLLMHeadModel(TransfoXLPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = TransfoXLModel(config)
self.sample_softmax = config.sample_softmax
self.trainer_compatible = getattr(config, "trainer_compatible", False)
if not self.trainer_compatible:
warnings.warn(
"The output of TransfoXL will be updated in v5 to support a single loss as first argument. In order"
"to use that updated output, please specify `trainer_compatible=True` as your configuration"
" attribute.",
DeprecationWarning,
)
assert self.sample_softmax <= 0, (
"Sampling from the softmax is not implemented yet. Please look at issue: #3310:"
" https://github.com/huggingface/transformers/issues/3310"
)
self.crit = ProjectedAdaptiveLogSoftmax(
config.vocab_size, config.d_embed, config.d_model, config.cutoffs, div_val=config.div_val
)
# Initialize weights and apply final processing
self.post_init()
def tie_weights(self):
"""
Run this to be sure output and input (adaptive) softmax weights are tied
"""
if self.config.tie_word_embeddings:
for i in range(len(self.crit.out_layers)):
self._tie_or_clone_weights(self.crit.out_layers[i], self.transformer.word_emb.emb_layers[i])
if self.config.tie_projs:
for i, tie_proj in enumerate(self.config.tie_projs):
if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
if self.config.torchscript:
self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[0].clone())
else:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
elif tie_proj and self.config.div_val != 1:
if self.config.torchscript:
self.crit.out_projs[i] = nn.Parameter(self.transformer.word_emb.emb_projs[i].clone())
else:
self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
def reset_memory_length(self, mem_len):
self.transformer.reset_memory_length(mem_len)
def init_mems(self, bsz):
return self.transformer.init_mems(bsz)
@add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TransfoXLLMHeadModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
mems: Optional[List[torch.FloatTensor]] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TransfoXLLMHeadModelOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None:
bsz, tgt_len = input_ids.size(0), input_ids.size(1)
elif inputs_embeds is not None:
bsz, tgt_len = inputs_embeds.size(0), inputs_embeds.size(1)
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
transformer_outputs = self.transformer(
input_ids,
mems=mems,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden = transformer_outputs[0]
pred_hid = last_hidden[:, -tgt_len:]
if labels is not None:
            # Prevents all labels from being -100 and throwing an error
            # when backpropagating the loss
miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100
if miss_valid_label:
# Sets an <EOS> token, just to prevent loss from being NaN
labels[0, 1] = self.config.eos_token_id
softmax_output = self.crit(pred_hid, labels)
prediction_scores = softmax_output.view(bsz, tgt_len, -1) if labels is None else ()
if labels is not None:
losses = softmax_output.view(bsz, tgt_len - 1)
            # Avoid incorporating padding (-100) tokens into the loss value
loss = losses[losses != 0].mean()
else:
losses, loss = None, None
if not return_dict:
if self.trainer_compatible:
output = (prediction_scores, losses) if losses is not None else (prediction_scores,)
output += transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
else:
output = (prediction_scores, *transformer_outputs[1:])
output = ((losses,) + output) if losses is not None else output
return (output + (loss,)) if loss is not None else output
return TransfoXLLMHeadModelOutput(
loss=loss,
prediction_scores=prediction_scores,
losses=losses,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
def get_output_embeddings(self):
"""Double-check if you are using adaptive softmax."""
if self.sample_softmax > 0:
return self.out_layer
else:
return self.crit.out_layers[-1]
def prepare_inputs_for_generation(self, input_ids, past=None, **model_kwargs):
inputs = {}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = past
inputs["input_ids"] = input_ids[:, -1].unsqueeze(-1)
else:
inputs["input_ids"] = input_ids
return inputs
def _resize_cutoffs(self, new_num_tokens, new_emb_size, new_embedding_shapes, layer):
new_cutoffs = super()._resize_cutoffs(new_num_tokens, new_emb_size, new_embedding_shapes, layer)
self.crit.cutoffs = new_cutoffs
self.crit.cutoff_ends = [0] + new_cutoffs
self.crit.n_token = new_num_tokens
@staticmethod
def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]:
"""
This function is used to re-order the `mems` cache if [`~PreTrainedModel.beam_search`] or
[`~PreTrainedModel.beam_sample`] is called. This is required to match `mems` with the correct beam_idx at every
generation step.
"""
return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems]
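# Hedged sketch (not part of the original file): token-level perplexity from the LM head.
# In the output above, `losses` holds the unreduced per-token negative log-likelihoods and
# `loss` their mean over non-padding positions, so perplexity is just exp(loss). Names are
# illustrative.
def _example_perplexity(lm_model: "TransfoXLLMHeadModel", input_ids: torch.LongTensor) -> torch.Tensor:
    out = lm_model(input_ids, labels=input_ids, return_dict=True)
    return torch.exp(out.loss)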
@add_start_docstrings(
"""
The Transformer-XL Model transformer with a sequence classification head on top (linear layer).
[`TransfoXLForSequenceClassification`] uses the last token in order to do the classification, as other causal
models (e.g. GPT-1) do.
    Since it does classification on the last token, it needs to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
""",
TRANSFO_XL_START_DOCSTRING,
)
class TransfoXLForSequenceClassification(TransfoXLPreTrainedModel):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head.weight"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = TransfoXLModel(config)
self.score = nn.Linear(config.d_embed, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(TRANSFO_XL_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TransfoXLSequenceClassifierOutputWithPast,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
mems: Optional[List[torch.FloatTensor]] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, TransfoXLSequenceClassifierOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
mems=mems,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size, sequence_length = input_ids.shape[:2]
else:
batch_size, sequence_length = inputs_embeds.shape[:2]
assert (
self.config.pad_token_id is not None or batch_size == 1
), "Cannot handle batch sizes > 1 if no padding token is defined."
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[range(batch_size), sequence_lengths]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return TransfoXLSequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
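if __name__ == "__main__":
    # Hedged smoke test (not part of the original file): builds a tiny, randomly
    # initialised model to exercise the memory mechanism. The TransfoXLConfig keyword
    # names below mirror the attributes read in this file but are otherwise assumptions.
    config = TransfoXLConfig(
        vocab_size=128, cutoffs=[32, 64], d_model=32, d_embed=32,
        n_head=2, d_head=16, d_inner=64, n_layer=2, mem_len=8, div_val=1,
    )
    model = TransfoXLModel(config)
    ids = torch.randint(0, config.vocab_size, (1, 10))
    first = model(ids, return_dict=True)
    second = model(ids, mems=first.mems, return_dict=True)  # reuse the cached states
    print(first.last_hidden_state.shape, second.mems[0].shape)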
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ParameterList",
"torch.einsum",
"torch.nn.ModuleList",
"torch.ne",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss",
"torch.chunk",
"torch.ger",
"torch.sum",
"torch.nn.LayerNorm",
"torch.nn.init.constant_",
"torch.FloatTensor",
"torch.tril",
"torch.nn.init.normal_",
"torch.zeros",
"torch.nn.ReLU",
"torch.nn.functional.linear",
"torch.nn.functional.softmax",
"torch.nn.init.uniform_",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.no_grad",
"torch.from_numpy",
"torch.triu",
"torch.nn.Embedding"
] | 1.0 | JingyaHuang/transformers | 6589e510fa4e6c442059de2fab84752535de9b23 |
1.6 | from typing import Union
import torch
import torch.nn as nn
import torch.utils.data
from torch.optim.lr_scheduler import _LRScheduler, ReduceLROnPlateau
from torch.optim.optimizer import Optimizer
from mighty.loss import LossPenalty
from mighty.models import AutoencoderLinear
from mighty.monitor.monitor import MonitorAutoencoder
from mighty.utils.var_online import MeanOnline
from mighty.utils.signal import peak_to_signal_noise_ratio
from mighty.utils.common import input_from_batch, batch_to_cuda
from mighty.utils.data import DataLoader
from .embedding import TrainerEmbedding
__all__ = [
"TrainerAutoencoder"
]
class TrainerAutoencoder(TrainerEmbedding):
"""
    An unsupervised AutoEncoder trainer that not only transforms inputs into
    meaningful embeddings but also aims to reconstruct the input signal from them.
Parameters
----------
model : nn.Module
A neural network to train.
criterion : nn.Module
A loss function.
data_loader : DataLoader
A data loader.
optimizer : Optimizer
An optimizer (Adam, SGD, etc.).
scheduler : _LRScheduler or ReduceLROnPlateau, or None
A learning rate scheduler.
Default: None
accuracy_measure : AccuracyEmbedding, optional
Calculates the accuracy of embedding vectors.
Default: ``AccuracyEmbedding()``
**kwargs
Passed to the base class.
"""
watch_modules = TrainerEmbedding.watch_modules + (AutoencoderLinear,)
def __init__(self,
model: nn.Module,
criterion: nn.Module,
data_loader: DataLoader,
optimizer: Optimizer,
scheduler: Union[_LRScheduler, ReduceLROnPlateau] = None,
**kwargs):
super().__init__(model, criterion=criterion, data_loader=data_loader,
optimizer=optimizer, scheduler=scheduler, **kwargs)
def _init_monitor(self, mutual_info) -> MonitorAutoencoder:
monitor = MonitorAutoencoder(
mutual_info=mutual_info,
normalize_inverse=self.data_loader.normalize_inverse
)
return monitor
def _init_online_measures(self):
online = super()._init_online_measures()
# peak signal-to-noise ratio
online['psnr-train'] = MeanOnline()
online['psnr-test'] = MeanOnline()
return online
def _get_loss(self, batch, output):
input = input_from_batch(batch)
latent, reconstructed = output
if isinstance(self.criterion, LossPenalty):
loss = self.criterion(reconstructed, input, latent)
else:
loss = self.criterion(reconstructed, input)
return loss
def _on_forward_pass_batch(self, batch, output, train):
input = input_from_batch(batch)
latent, reconstructed = output
if isinstance(self.criterion, nn.BCEWithLogitsLoss):
reconstructed = reconstructed.sigmoid()
psnr = peak_to_signal_noise_ratio(input, reconstructed)
fold = 'train' if train else 'test'
if torch.isfinite(psnr):
self.online[f'psnr-{fold}'].update(psnr.cpu())
super()._on_forward_pass_batch(batch, latent, train)
def _epoch_finished(self, loss):
self.plot_autoencoder()
for fold in ('train', 'test'):
self.monitor.plot_psnr(self.online[f'psnr-{fold}'].get_mean(),
mode=fold)
super()._epoch_finished(loss)
def plot_autoencoder(self):
"""
Plots AutoEncoder reconstruction.
"""
batch = self.data_loader.sample()
batch = batch_to_cuda(batch)
mode_saved = self.model.training
self.model.train(False)
with torch.no_grad():
latent, reconstructed = self._forward(batch)
if isinstance(self.criterion, nn.BCEWithLogitsLoss):
reconstructed = reconstructed.sigmoid()
self._plot_autoencoder(batch, reconstructed)
self.model.train(mode_saved)
def _plot_autoencoder(self, batch, reconstructed, mode='train'):
input = input_from_batch(batch)
self.monitor.plot_autoencoder(input, reconstructed, mode=mode)
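# Hedged illustration (not part of the original file): the PSNR bookkeeping from
# `_on_forward_pass_batch` in isolation, so it can be sanity-checked without a full
# trainer. It only uses objects already imported above; the function name is illustrative.
def _example_psnr_for_batch(criterion: nn.Module, input: torch.Tensor,
                            reconstructed: torch.Tensor) -> torch.Tensor:
    if isinstance(criterion, nn.BCEWithLogitsLoss):
        # the decoder outputs raw logits in this case, so squash them to [0, 1] first
        reconstructed = reconstructed.sigmoid()
    return peak_to_signal_noise_ratio(input, reconstructed)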
| [
"torch.no_grad",
"torch.isfinite"
] | 1.6.0 | dizcza/pytorch-mighty | 942c53b529377c9100bffc2f7f20ec740763e6ae |
0.3 | import torch
from torch.autograd import Variable
import onmt.translate.Beam
import onmt.io
class Translator(object):
"""
Uses a model to translate a batch of sentences.
Args:
model (:obj:`onmt.modules.NMTModel`):
NMT model to use for translation
fields (dict of Fields): data fields
beam_size (int): size of beam to use
n_best (int): number of translations produced
max_length (int): maximum length output to produce
global_scores (:obj:`GlobalScorer`):
object to rescore final translations
copy_attn (bool): use copy attention during translation
cuda (bool): use cuda
beam_trace (bool): trace beam search for debugging
"""
def __init__(self, model, fields,
beam_size, n_best=1,
max_length=100,
global_scorer=None,
copy_attn=False,
cuda=False,
beam_trace=False,
min_length=0,
stepwise_penalty=False):
self.model = model
self.fields = fields
self.n_best = n_best
self.max_length = max_length
self.global_scorer = global_scorer
self.copy_attn = copy_attn
self.beam_size = beam_size
self.cuda = cuda
self.min_length = min_length
self.stepwise_penalty = stepwise_penalty
# for debugging
self.beam_accum = None
if beam_trace:
self.beam_accum = {
"predicted_ids": [],
"beam_parent_ids": [],
"scores": [],
"log_probs": []}
def translate_batch(self, batch, data):
"""
Translate a batch of sentences.
Mostly a wrapper around :obj:`Beam`.
Args:
batch (:obj:`Batch`): a batch from a dataset object
data (:obj:`Dataset`): the dataset object
Todo:
Shouldn't need the original dataset.
"""
# (0) Prep each of the components of the search.
# And helper method for reducing verbosity.
beam_size = self.beam_size
batch_size = batch.batch_size
data_type = data.data_type
vocab = self.fields["tgt"].vocab
beam = [onmt.translate.Beam(beam_size, n_best=self.n_best,
cuda=self.cuda,
global_scorer=self.global_scorer,
pad=vocab.stoi[onmt.io.PAD_WORD],
eos=vocab.stoi[onmt.io.EOS_WORD],
bos=vocab.stoi[onmt.io.BOS_WORD],
min_length=self.min_length,
stepwise_penalty=self.stepwise_penalty)
for __ in range(batch_size)]
# Help functions for working with beams and batches
def var(a): return Variable(a, volatile=True)
def rvar(a): return var(a.repeat(1, beam_size, 1))
def bottle(m):
return m.view(batch_size * beam_size, -1)
def unbottle(m):
return m.view(beam_size, batch_size, -1)
# (1) Run the encoder on the src.
src = onmt.io.make_features(batch, 'src', data_type)
src_lengths = None
if data_type == 'text':
_, src_lengths = batch.src
enc_states, memory_bank = self.model.encoder(src, src_lengths)
dec_states = self.model.decoder.init_decoder_state(
src, memory_bank, enc_states)
if src_lengths is None:
src_lengths = torch.Tensor(batch_size).type_as(memory_bank.data)\
.long()\
.fill_(memory_bank.size(0))
# (2) Repeat src objects `beam_size` times.
src_map = rvar(batch.src_map.data) \
if data_type == 'text' and self.copy_attn else None
memory_bank = rvar(memory_bank.data)
memory_lengths = src_lengths.repeat(beam_size)
dec_states.repeat_beam_size_times(beam_size)
# (3) run the decoder to generate sentences, using beam search.
for i in range(self.max_length):
if all((b.done() for b in beam)):
break
# Construct batch x beam_size nxt words.
# Get all the pending current beam words and arrange for forward.
inp = var(torch.stack([b.get_current_state() for b in beam])
.t().contiguous().view(1, -1))
# Turn any copied words to UNKs
# 0 is unk
if self.copy_attn:
inp = inp.masked_fill(
inp.gt(len(self.fields["tgt"].vocab) - 1), 0)
# Temporary kludge solution to handle changed dim expectation
# in the decoder
inp = inp.unsqueeze(2)
# Run one step.
dec_out, dec_states, attn = self.model.decoder(
inp, memory_bank, dec_states, memory_lengths=memory_lengths)
dec_out = dec_out.squeeze(0)
# dec_out: beam x rnn_size
# (b) Compute a vector of batch x beam word scores.
if not self.copy_attn:
out = self.model.generator.forward(dec_out).data
out = unbottle(out)
# beam x tgt_vocab
beam_attn = unbottle(attn["std"])
else:
out = self.model.generator.forward(dec_out,
attn["copy"].squeeze(0),
src_map)
# beam x (tgt_vocab + extra_vocab)
out = data.collapse_copy_scores(
unbottle(out.data),
batch, self.fields["tgt"].vocab, data.src_vocabs)
# beam x tgt_vocab
out = out.log()
beam_attn = unbottle(attn["copy"])
# (c) Advance each beam.
for j, b in enumerate(beam):
b.advance(out[:, j],
beam_attn.data[:, j, :memory_lengths[j]])
dec_states.beam_update(j, b.get_current_origin(), beam_size)
# (4) Extract sentences from beam.
ret = self._from_beam(beam)
ret["gold_score"] = [0] * batch_size
if "tgt" in batch.__dict__:
ret["gold_score"] = self._run_target(batch, data)
ret["batch"] = batch
return ret
def _from_beam(self, beam):
ret = {"predictions": [],
"scores": [],
"attention": []}
for b in beam:
n_best = self.n_best
scores, ks = b.sort_finished(minimum=n_best)
hyps, attn = [], []
for i, (times, k) in enumerate(ks[:n_best]):
hyp, att = b.get_hyp(times, k)
hyps.append(hyp)
attn.append(att)
ret["predictions"].append(hyps)
ret["scores"].append(scores)
ret["attention"].append(attn)
return ret
def _run_target(self, batch, data):
data_type = data.data_type
if data_type == 'text':
_, src_lengths = batch.src
else:
src_lengths = None
src = onmt.io.make_features(batch, 'src', data_type)
tgt_in = onmt.io.make_features(batch, 'tgt')[:-1]
# (1) run the encoder on the src
enc_states, memory_bank = self.model.encoder(src, src_lengths)
dec_states = \
self.model.decoder.init_decoder_state(src, memory_bank, enc_states)
# (2) if a target is specified, compute the 'goldScore'
# (i.e. log likelihood) of the target under the model
tt = torch.cuda if self.cuda else torch
gold_scores = tt.FloatTensor(batch.batch_size).fill_(0)
dec_out, dec_states, attn = self.model.decoder(
tgt_in, memory_bank, dec_states, memory_lengths=src_lengths)
tgt_pad = self.fields["tgt"].vocab.stoi[onmt.io.PAD_WORD]
for dec, tgt in zip(dec_out, batch.tgt[1:].data):
# Log prob of each word.
out = self.model.generator.forward(dec)
tgt = tgt.unsqueeze(1)
scores = out.data.gather(1, tgt)
scores.masked_fill_(tgt.eq(tgt_pad), 0)
gold_scores += scores
return gold_scores
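# Hedged usage sketch (not part of the original file): turning the dictionary returned by
# `translate_batch` back into token strings. The vocabulary lookup mirrors the one used
# inside `translate_batch`; variable names are illustrative.
def _example_extract_best_hypotheses(translator, batch, data):
    ret = translator.translate_batch(batch, data)
    vocab = translator.fields["tgt"].vocab
    sentences = []
    for preds in ret["predictions"]:   # one list of n_best hypotheses per source sentence
        best = preds[0]                # hypotheses are sorted, so index 0 is the best one
        tokens = [vocab.itos[int(tok)] for tok in best]
        sentences.append(" ".join(tokens))
    return sentences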
| [
"torch.autograd.Variable",
"torch.Tensor"
] | 0.3.1 | Priyansh2/csnli | de31f3f5ae0a956496b76a4643fa9ce7f3736d29 |
1.5 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with added eps before rsqrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans.
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
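# Hedged illustration (not part of the original file): a FrozenBatchNorm2d can be loaded
# directly from a regular BatchNorm2d state dict, because `_load_from_state_dict` above
# drops the extra `num_batches_tracked` buffer. The helper name is illustrative.
def _example_freeze_batchnorm(bn: nn.BatchNorm2d) -> "FrozenBatchNorm2d":
    frozen = FrozenBatchNorm2d(bn.num_features)
    frozen.load_state_dict(bn.state_dict())
    return frozen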
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
super().__init__()
for name, parameter in backbone.named_parameters():
if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:
parameter.requires_grad_(False)
if return_interm_layers:
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
else:
return_layers = {'layer4': "0"}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool):
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d)
num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
super().__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = False # args.masks
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, False)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
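if __name__ == "__main__":
    # Hedged smoke test (not part of the original file). `Namespace` stands in for the
    # real DETR argument parser; `hidden_dim` and `position_embedding` are assumptions
    # about what `build_position_encoding` reads. Note that building the backbone
    # downloads the torchvision ResNet weights on the main process.
    from argparse import Namespace
    args = Namespace(backbone="resnet18", lr_backbone=1e-5,
                     hidden_dim=64, position_embedding="sine")
    model = build_backbone(args)
    images = torch.rand(2, 3, 128, 160)
    mask = torch.zeros(2, 128, 160, dtype=torch.bool)  # False = real pixel, True = padding
    features, pos = model(NestedTensor(images, mask))
    print(features[0].tensors.shape, pos[0].shape)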
| [
"torch.zeros",
"torch.ones"
] | 1.5.0 | playerkk/HoiTransformer | b710216d6b338863ebe9d40a96765ab52780cefa |
0.4 | import logging
from typing import Any, Dict, List, Tuple
import difflib
import sqlparse
from overrides import overrides
import torch
from allennlp.common.util import pad_sequence_to_length
from allennlp.data import Vocabulary
from allennlp.data.fields.production_rule_field import ProductionRuleArray
from allennlp.semparse.executors import SqlExecutor
from allennlp.models.model import Model
from allennlp.modules import Attention, Seq2SeqEncoder, TextFieldEmbedder, \
Embedding
from allennlp.nn import util
from allennlp.semparse.worlds import AtisWorld
from allennlp.semparse.contexts.sql_context_utils import action_sequence_to_sql
from allennlp.state_machines.states import GrammarBasedState
from allennlp.state_machines.transition_functions.linking_transition_function import LinkingTransitionFunction
from allennlp.state_machines import BeamSearch
from allennlp.state_machines.trainers import MaximumMarginalLikelihood
from allennlp.state_machines.states import GrammarStatelet, RnnStatelet
from allennlp.training.metrics import Average
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("atis_parser")
class AtisSemanticParser(Model):
"""
Parameters
----------
vocab : ``Vocabulary``
utterance_embedder : ``TextFieldEmbedder``
Embedder for utterances.
action_embedding_dim : ``int``
Dimension to use for action embeddings.
encoder : ``Seq2SeqEncoder``
The encoder to use for the input utterance.
decoder_beam_search : ``BeamSearch``
Beam search used to retrieve best sequences after training.
max_decoding_steps : ``int``
When we're decoding with a beam search, what's the maximum number of steps we should take?
This only applies at evaluation time, not during training.
input_attention: ``Attention``
We compute an attention over the input utterance at each step of the decoder, using the
decoder hidden state as the query. Passed to the transition function.
add_action_bias : ``bool``, optional (default=True)
If ``True``, we will learn a bias weight for each action that gets used when predicting
that action, in addition to its embedding.
dropout : ``float``, optional (default=0)
If greater than 0, we will apply dropout with this probability after all encoders (pytorch
LSTMs do not apply dropout to their last layer).
rule_namespace : ``str``, optional (default=rule_labels)
The vocabulary namespace to use for production rules. The default corresponds to the
default used in the dataset reader, so you likely don't need to modify this.
database_file: ``str``, optional (default=/atis/atis.db)
The path of the SQLite database when evaluating SQL queries. SQLite is disk based, so we need
the file location to connect to it.
"""
def __init__(self,
vocab: Vocabulary,
utterance_embedder: TextFieldEmbedder,
action_embedding_dim: int,
encoder: Seq2SeqEncoder,
decoder_beam_search: BeamSearch,
max_decoding_steps: int,
input_attention: Attention,
add_action_bias: bool = True,
training_beam_size: int = None,
dropout: float = 0.0,
rule_namespace: str = 'rule_labels',
database_file='/atis/atis.db') -> None:
# Atis semantic parser init
super().__init__(vocab)
self._utterance_embedder = utterance_embedder
self._encoder = encoder
self._max_decoding_steps = max_decoding_steps
self._add_action_bias = add_action_bias
if dropout > 0:
self._dropout = torch.nn.Dropout(p=dropout)
else:
self._dropout = lambda x: x
self._rule_namespace = rule_namespace
self._exact_match = Average()
self._valid_sql_query = Average()
self._action_similarity = Average()
self._denotation_accuracy = Average()
self._executor = SqlExecutor(database_file)
self._action_padding_index = -1 # the padding value used by IndexField
num_actions = vocab.get_vocab_size(self._rule_namespace)
if self._add_action_bias:
input_action_dim = action_embedding_dim + 1
else:
input_action_dim = action_embedding_dim
self._action_embedder = Embedding(num_embeddings=num_actions, embedding_dim=input_action_dim)
self._output_action_embedder = Embedding(num_embeddings=num_actions, embedding_dim=action_embedding_dim)
# This is what we pass as input in the first step of decoding, when we don't have a
# previous action, or a previous utterance attention.
self._first_action_embedding = torch.nn.Parameter(torch.FloatTensor(action_embedding_dim))
self._first_attended_utterance = torch.nn.Parameter(torch.FloatTensor(encoder.get_output_dim()))
torch.nn.init.normal_(self._first_action_embedding)
torch.nn.init.normal_(self._first_attended_utterance)
self._num_entity_types = 2 # TODO(kevin): get this in a more principled way somehow?
self._entity_type_decoder_embedding = Embedding(self._num_entity_types, action_embedding_dim)
self._beam_search = decoder_beam_search
self._decoder_trainer = MaximumMarginalLikelihood(training_beam_size)
self._transition_function = LinkingTransitionFunction(encoder_output_dim=self._encoder.get_output_dim(),
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
predict_start_type_separately=False,
add_action_bias=self._add_action_bias,
dropout=dropout)
@overrides
def forward(self, # type: ignore
utterance: Dict[str, torch.LongTensor],
world: List[AtisWorld],
actions: List[List[ProductionRuleArray]],
linking_scores: torch.Tensor,
target_action_sequence: torch.LongTensor = None,
sql_queries: List[List[str]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
We set up the initial state for the decoder, and pass that state off to either a DecoderTrainer,
if we're training, or a BeamSearch for inference, if we're not.
Parameters
----------
utterance : Dict[str, torch.LongTensor]
The output of ``TextField.as_array()`` applied on the utterance ``TextField``. This will
be passed through a ``TextFieldEmbedder`` and then through an encoder.
world : ``List[AtisWorld]``
We use a ``MetadataField`` to get the ``World`` for each input instance. Because of
how ``MetadataField`` works, this gets passed to us as a ``List[AtisWorld]``,
actions : ``List[List[ProductionRuleArray]]``
A list of all possible actions for each ``World`` in the batch, indexed into a
``ProductionRuleArray`` using a ``ProductionRuleField``. We will embed all of these
and use the embeddings to determine which action to take at each timestep in the
decoder.
linking_scores: ``torch.Tensor``
A matrix of the linking the utterance tokens and the entities. This is a binary matrix that
is deterministically generated where each entry indicates whether a token generated an entity.
This tensor has shape ``(batch_size, num_entities, num_utterance_tokens)``.
target_action_sequence : torch.Tensor, optional (default=None)
The action sequence for the correct action sequence, where each action is an index into the list
of possible actions. This tensor has shape ``(batch_size, sequence_length, 1)``. We remove the
trailing dimension.
sql_queries : List[List[str]], optional (default=None)
A list of the SQL queries that are given during training or validation.
"""
initial_state = self._get_initial_state(utterance, world, actions, linking_scores)
batch_size = linking_scores.shape[0]
if target_action_sequence is not None:
# Remove the trailing dimension (from ListField[ListField[IndexField]]).
target_action_sequence = target_action_sequence.squeeze(-1)
target_mask = target_action_sequence != self._action_padding_index
else:
target_mask = None
if self.training:
# target_action_sequence is of shape (batch_size, 1, sequence_length) here after we unsqueeze it for
# the MML trainer.
return self._decoder_trainer.decode(initial_state,
self._transition_function,
(target_action_sequence.unsqueeze(1), target_mask.unsqueeze(1)))
else:
# TODO(kevin) Move some of this functionality to a separate method for computing validation outputs.
action_mapping = {}
for batch_index, batch_actions in enumerate(actions):
for action_index, action in enumerate(batch_actions):
action_mapping[(batch_index, action_index)] = action[0]
outputs: Dict[str, Any] = {'action_mapping': action_mapping}
outputs['linking_scores'] = linking_scores
if target_action_sequence is not None:
outputs['loss'] = self._decoder_trainer.decode(initial_state,
self._transition_function,
(target_action_sequence.unsqueeze(1),
target_mask.unsqueeze(1)))['loss']
num_steps = self._max_decoding_steps
# This tells the state to start keeping track of debug info, which we'll pass along in
# our output dictionary.
initial_state.debug_info = [[] for _ in range(batch_size)]
best_final_states = self._beam_search.search(num_steps,
initial_state,
self._transition_function,
keep_final_unfinished_states=False)
outputs['best_action_sequence'] = []
outputs['debug_info'] = []
outputs['entities'] = []
outputs['predicted_sql_query'] = []
outputs['sql_queries'] = []
outputs['utterance'] = []
outputs['tokenized_utterance'] = []
for i in range(batch_size):
# Decoding may not have terminated with any completed valid SQL queries, if `num_steps`
# isn't long enough (or if the model is not trained enough and gets into an
# infinite action loop).
if i not in best_final_states:
self._exact_match(0)
self._denotation_accuracy(0)
self._valid_sql_query(0)
self._action_similarity(0)
outputs['predicted_sql_query'].append('')
continue
best_action_indices = best_final_states[i][0].action_history[0]
action_strings = [action_mapping[(i, action_index)]
for action_index in best_action_indices]
predicted_sql_query = action_sequence_to_sql(action_strings)
if target_action_sequence is not None:
# Use a Tensor, not a Variable, to avoid a memory leak.
targets = target_action_sequence[i].data
sequence_in_targets = 0
sequence_in_targets = self._action_history_match(best_action_indices, targets)
self._exact_match(sequence_in_targets)
similarity = difflib.SequenceMatcher(None, best_action_indices, targets)
self._action_similarity(similarity.ratio())
if sql_queries and sql_queries[i]:
denotation_correct = self._executor.evaluate_sql_query(predicted_sql_query, sql_queries[i])
self._denotation_accuracy(denotation_correct)
outputs['sql_queries'].append(sql_queries[i])
outputs['utterance'].append(world[i].utterances[-1])
outputs['tokenized_utterance'].append([token.text
for token in world[i].tokenized_utterances[-1]])
outputs['entities'].append(world[i].entities)
outputs['best_action_sequence'].append(action_strings)
outputs['predicted_sql_query'].append(sqlparse.format(predicted_sql_query, reindent=True))
outputs['debug_info'].append(best_final_states[i][0].debug_info[0]) # type: ignore
return outputs
def _get_initial_state(self,
utterance: Dict[str, torch.LongTensor],
worlds: List[AtisWorld],
actions: List[List[ProductionRuleArray]],
linking_scores: torch.Tensor) -> GrammarBasedState:
embedded_utterance = self._utterance_embedder(utterance)
utterance_mask = util.get_text_field_mask(utterance).float()
batch_size = embedded_utterance.size(0)
num_entities = max([len(world.entities) for world in worlds])
# entity_types: tensor with shape (batch_size, num_entities)
entity_types, _ = self._get_type_vector(worlds, num_entities, embedded_utterance)
# (batch_size, num_utterance_tokens, embedding_dim)
encoder_input = embedded_utterance
# (batch_size, utterance_length, encoder_output_dim)
encoder_outputs = self._dropout(self._encoder(encoder_input, utterance_mask))
# This will be our initial hidden state and memory cell for the decoder LSTM.
final_encoder_output = util.get_final_encoder_states(encoder_outputs,
utterance_mask,
self._encoder.is_bidirectional())
memory_cell = encoder_outputs.new_zeros(batch_size, self._encoder.get_output_dim())
initial_score = embedded_utterance.data.new_zeros(batch_size)
# To make grouping states together in the decoder easier, we convert the batch dimension in
# all of our tensors into an outer list. For instance, the encoder outputs have shape
# `(batch_size, utterance_length, encoder_output_dim)`. We need to convert this into a list
# of `batch_size` tensors, each of shape `(utterance_length, encoder_output_dim)`. Then we
# won't have to do any index selects, or anything, we'll just do some `torch.cat()`s.
initial_score_list = [initial_score[i] for i in range(batch_size)]
encoder_output_list = [encoder_outputs[i] for i in range(batch_size)]
utterance_mask_list = [utterance_mask[i] for i in range(batch_size)]
initial_rnn_state = []
for i in range(batch_size):
initial_rnn_state.append(RnnStatelet(final_encoder_output[i],
memory_cell[i],
self._first_action_embedding,
self._first_attended_utterance,
encoder_output_list,
utterance_mask_list))
initial_grammar_state = [self._create_grammar_state(worlds[i],
actions[i],
linking_scores[i],
entity_types[i])
for i in range(batch_size)]
initial_state = GrammarBasedState(batch_indices=list(range(batch_size)),
action_history=[[] for _ in range(batch_size)],
score=initial_score_list,
rnn_state=initial_rnn_state,
grammar_state=initial_grammar_state,
possible_actions=actions,
debug_info=None)
return initial_state
@staticmethod
def _get_type_vector(worlds: List[AtisWorld],
num_entities: int,
tensor: torch.Tensor = None) -> Tuple[torch.LongTensor, Dict[int, int]]:
"""
Produces the encoding for each entity's type. In addition, a map from a flattened entity
index to type is returned to combine entity type operations into one method.
Parameters
----------
worlds : ``List[AtisWorld]``
num_entities : ``int``
tensor : ``torch.Tensor``
Used for copying the constructed list onto the right device.
Returns
-------
A ``torch.LongTensor`` with shape ``(batch_size, num_entities, num_types)``.
entity_types : ``Dict[int, int]``
This is a mapping from ((batch_index * num_entities) + entity_index) to entity type id.
"""
entity_types = {}
batch_types = []
for batch_index, world in enumerate(worlds):
types = []
entities = [('number', entity)
                        if 'number' in entity or 'time_range' in entity
else ('string', entity)
for entity in world.entities]
for entity_index, entity in enumerate(entities):
# We need numbers to be first, then strings, since our entities are going to be
# sorted. We do a split by type and then a merge later, and it relies on this sorting.
if entity[0] == 'number':
entity_type = 1
else:
entity_type = 0
types.append(entity_type)
# For easier lookups later, we're actually using a _flattened_ version
# of (batch_index, entity_index) for the key, because this is how the
# linking scores are stored.
flattened_entity_index = batch_index * num_entities + entity_index
entity_types[flattened_entity_index] = entity_type
padded = pad_sequence_to_length(types, num_entities, lambda: 0)
batch_types.append(padded)
return tensor.new_tensor(batch_types, dtype=torch.long), entity_types
@staticmethod
def _action_history_match(predicted: List[int], targets: torch.LongTensor) -> int:
# TODO(mattg): this could probably be moved into a FullSequenceMatch metric, or something.
# Check if target is big enough to cover prediction (including start/end symbols)
if len(predicted) > targets.size(0):
return 0
predicted_tensor = targets.new_tensor(predicted)
targets_trimmed = targets[:len(predicted)]
        # Return 1 if the predicted sequence matches a prefix of the target sequence.
return predicted_tensor.equal(targets_trimmed)
@staticmethod
def is_nonterminal(token: str):
if token[0] == '"' and token[-1] == '"':
return False
return True
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
"""
We track four metrics here:
1. exact_match, which is the percentage of the time that our best output action sequence
matches the SQL query exactly.
2. denotation_acc, which is the percentage of examples where we get the correct
denotation. This is the typical "accuracy" metric, and it is what you should usually
report in an experimental result. You need to be careful, though, that you're
           computing this on the full data, and not just the subset that can be parsed. (Make sure
           you pass "keep_if_unparseable=True" to the dataset reader, which we do for validation
           data but not for training data.)
3. valid_sql_query, which is the percentage of time that decoding actually produces a
valid SQL query. We might not produce a valid SQL query if the decoder gets
into a repetitive loop, or we're trying to produce a super long SQL query and run
out of time steps, or something.
4. action_similarity, which is how similar the action sequence predicted is to the actual
action sequence. This is basically a soft measure of exact_match.
"""
return {
'exact_match': self._exact_match.get_metric(reset),
'denotation_acc': self._denotation_accuracy.get_metric(reset),
'valid_sql_query': self._valid_sql_query.get_metric(reset),
'action_similarity': self._action_similarity.get_metric(reset)
}
def _create_grammar_state(self,
world: AtisWorld,
possible_actions: List[ProductionRuleArray],
linking_scores: torch.Tensor,
entity_types: torch.Tensor) -> GrammarStatelet:
"""
This method creates the GrammarStatelet object that's used for decoding. Part of creating
that is creating the `valid_actions` dictionary, which contains embedded representations of
all of the valid actions. So, we create that here as well.
The inputs to this method are for a `single instance in the batch`; none of the tensors we
create here are batched. We grab the global action ids from the input
``ProductionRuleArrays``, and we use those to embed the valid actions for every
non-terminal type. We use the input ``linking_scores`` for non-global actions.
Parameters
----------
world : ``AtisWorld``
From the input to ``forward`` for a single batch instance.
possible_actions : ``List[ProductionRuleArray]``
From the input to ``forward`` for a single batch instance.
linking_scores : ``torch.Tensor``
Assumed to have shape ``(num_entities, num_utterance_tokens)`` (i.e., there is no batch
dimension).
entity_types : ``torch.Tensor``
Assumed to have shape ``(num_entities,)`` (i.e., there is no batch dimension).
"""
action_map = {}
for action_index, action in enumerate(possible_actions):
action_string = action[0]
action_map[action_string] = action_index
valid_actions = world.valid_actions
entity_map = {}
entities = world.entities
for entity_index, entity in enumerate(entities):
entity_map[entity] = entity_index
translated_valid_actions: Dict[str, Dict[str, Tuple[torch.Tensor, torch.Tensor, List[int]]]] = {}
for key, action_strings in valid_actions.items():
translated_valid_actions[key] = {}
# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
# productions of that non-terminal. We'll first split those productions by global vs.
# linked action.
action_indices = [action_map[action_string] for action_string in action_strings]
production_rule_arrays = [(possible_actions[index], index) for index in action_indices]
global_actions = []
linked_actions = []
for production_rule_array, action_index in production_rule_arrays:
if production_rule_array[1]:
global_actions.append((production_rule_array[2], action_index))
else:
linked_actions.append((production_rule_array[0], action_index))
if global_actions:
global_action_tensors, global_action_ids = zip(*global_actions)
global_action_tensor = entity_types.new_tensor(torch.cat(global_action_tensors, dim=0),
dtype=torch.long)
global_input_embeddings = self._action_embedder(global_action_tensor)
global_output_embeddings = self._output_action_embedder(global_action_tensor)
translated_valid_actions[key]['global'] = (global_input_embeddings,
global_output_embeddings,
list(global_action_ids))
if linked_actions:
linked_rules, linked_action_ids = zip(*linked_actions)
entities = linked_rules
entity_ids = [entity_map[entity] for entity in entities]
entity_linking_scores = linking_scores[entity_ids]
entity_type_tensor = entity_types[entity_ids]
entity_type_embeddings = self._entity_type_decoder_embedding(entity_type_tensor)
entity_type_embeddings = entity_types.new_tensor(entity_type_embeddings, dtype=torch.float)
translated_valid_actions[key]['linked'] = (entity_linking_scores,
entity_type_embeddings,
list(linked_action_ids))
return GrammarStatelet(['statement'],
{},
translated_valid_actions,
{},
self.is_nonterminal,
reverse_productions=False)
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
This method overrides ``Model.decode``, which gets called after ``Model.forward``, at test
time, to finalize predictions. This is (confusingly) a separate notion from the "decoder"
in "encoder/decoder", where that decoder logic lives in ``TransitionFunction``.
This method trims the output predictions to the first end symbol, replaces indices with
corresponding tokens, and adds a field called ``predicted_actions`` to the ``output_dict``.
"""
action_mapping = output_dict['action_mapping']
best_actions = output_dict["best_action_sequence"]
debug_infos = output_dict['debug_info']
batch_action_info = []
for batch_index, (predicted_actions, debug_info) in enumerate(zip(best_actions, debug_infos)):
instance_action_info = []
for predicted_action, action_debug_info in zip(predicted_actions, debug_info):
action_info = {}
action_info['predicted_action'] = predicted_action
considered_actions = action_debug_info['considered_actions']
probabilities = action_debug_info['probabilities']
actions = []
for action, probability in zip(considered_actions, probabilities):
if action != -1:
actions.append((action_mapping[(batch_index, action)], probability))
actions.sort()
considered_actions, probabilities = zip(*actions)
action_info['considered_actions'] = considered_actions
action_info['action_probabilities'] = probabilities
action_info['utterance_attention'] = action_debug_info.get('question_attention', [])
instance_action_info.append(action_info)
batch_action_info.append(instance_action_info)
output_dict["predicted_actions"] = batch_action_info
return output_dict
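if __name__ == "__main__":
    # Editor's sketch (not part of the original model): a self-contained illustration of the
    # prefix-matching logic used by `_action_history_match` above, reimplemented inline so it
    # does not depend on the enclosing model class or the AllenNLP environment.
    targets = torch.tensor([3, 7, 2, 9])
    def _prefix_match(predicted, target_sequence):
        if len(predicted) > target_sequence.size(0):
            return 0
        return int(target_sequence.new_tensor(predicted).equal(target_sequence[:len(predicted)]))
    print(_prefix_match([3, 7], targets))            # 1: predicted actions are a prefix of the target
    print(_prefix_match([3, 8], targets))            # 0: the second action differs
    print(_prefix_match([3, 7, 2, 9, 5], targets))   # 0: prediction is longer than the target sequence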
| [
"torch.nn.init.normal_",
"torch.cat",
"torch.FloatTensor",
"torch.nn.Dropout"
] | 0.4.1 | ljch2018/allennlp | 63ba3fb28897578d4798039d1713e2b7995eb753 |
1.8 | import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(wide_basic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1,
stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class Wide_ResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, num_classes):
super(Wide_ResNet, self).__init__()
self.in_planes = 16
assert ((depth-4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
        n = (depth - 4) // 6  # number of wide_basic blocks per group
k = widen_factor
print('Wide-Resnet %dx%d' % (depth, k))
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3, nStages[0])
self.layer1 = self._wide_layer(
wide_basic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(
wide_basic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(
wide_basic, nStages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1]*(int(num_blocks)-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
# print(f'Shape before avg pooling: {out.shape}')
out = F.avg_pool2d(out, int(out.shape[3]))
# print(f'Shape after avg pooling: {out.shape}')
out = out.view(out.size(0), -1)
penultimate = out
out = self.linear(out)
return out, penultimate
# feature extraction for Mahalanobis
def feature_list(self, x):
out_list = []
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
# print shape
# print(f'Shape: {out.shape}')
# out2 = F.max_pool3d(out, (4,4,4))
out2 = F.max_pool2d(out, (8,8))
out_list.append(out2)
print(f'Shape: {out2.shape}')
out = F.avg_pool2d(out, int(out.shape[3]))
out = out.view(out.size(0), -1)
return self.linear(out), out_list
def intermediate_forward(self, x, layer_index):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
        return F.max_pool2d(out, (8, 8))  # alternatively: F.max_pool3d(out, (4, 4, 4))
# function to extract the penultimate features
def penultimate_forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
penultimate = F.relu(self.bn1(out))
penultimate = F.max_pool2d(penultimate, (8,8))
# penultimate = F.max_pool3d(penultimate, (4,4,4))
out = F.avg_pool2d(penultimate, int(out.shape[3]))
out = out.view(out.size(0), -1)
return self.linear(out), penultimate
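if __name__ == "__main__":
    # Editor's sketch (not part of the original module): sanity-check the WRN-28-10 forward pass
    # on a CIFAR-sized dummy batch. With depth=28 each group uses n=4 blocks, the two stride-2
    # groups reduce 32x32 inputs to 8x8 feature maps, and the penultimate feature has
    # 64 * widen_factor = 640 channels.
    net = Wide_ResNet(depth=28, widen_factor=10, dropout_rate=0.3, num_classes=10)
    net.apply(conv_init)
    x = torch.randn(2, 3, 32, 32)
    logits, penultimate = net(x)
    print(logits.shape)       # torch.Size([2, 10])
    print(penultimate.shape)  # torch.Size([2, 640])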
| [
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.nn.functional.max_pool2d"
] | 1.8.1 | christophbrgr/ood_detection_framework | c3b7e3064ed8ee4aeb112cd2ab946ee41636f79f |
1.8 | import math
import warnings
import torch
from typing import List, Union
from torch import Tensor, nn
from ..common import EncoderModule, _take
__all__ = ["GenericTimmEncoder", "make_n_channel_input_std_conv"]
class GenericTimmEncoder(EncoderModule):
def __init__(self, timm_encoder: Union[nn.Module, str], layers: List[int] = None):
strides = []
channels = []
default_layers = []
if isinstance(timm_encoder, str):
import timm.models.factory
timm_encoder = timm.models.factory.create_model(timm_encoder, pretrained=True)
for i, oi in enumerate(timm_encoder.feature_info.out_indices):
fi = timm_encoder.feature_info.info[i]
strides.append(fi["reduction"])
channels.append(fi["num_chs"])
default_layers.append(i)
if layers is None:
layers = default_layers
super().__init__(channels, strides, layers)
self.encoder = timm_encoder
def forward(self, x: Tensor) -> List[Tensor]:
return _take(self.encoder(x), self._layers)
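# Editor's note (sketch, not part of the original module): `forward` above indexes the encoder
# output as a list of per-stage feature maps, so the wrapped timm model is assumed to be a
# feature-extraction model (one exposing `feature_info`, e.g. created with `features_only=True`).
# Typical use, for illustration only:
#     encoder = GenericTimmEncoder(timm.create_model("resnet34", features_only=True, pretrained=True))
#     feature_maps = encoder(torch.randn(1, 3, 256, 256))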
def make_n_channel_input_std_conv(conv: nn.Module, in_channels: int, mode="auto", **kwargs) -> nn.Module:
"""
    Create a copy of a std-conv style convolution layer with the desired number of input channels,
    reusing the original weights where possible.
    Args:
        conv: Input convolution module (an ``nn.Conv2d`` subclass exposing an ``eps`` attribute)
            to copy settings/weights from
        in_channels: Desired number of input channels
        mode: Currently unused; kept for signature compatibility
        **kwargs: Optional overrides for the Conv2d constructor parameters
    Returns:
        A new convolution module of the same class with ``in_channels`` input channels
"""
conv_cls = conv.__class__
if conv.in_channels == in_channels:
warnings.warn("make_n_channel_input call is spurious")
return conv
new_conv = conv_cls(
in_channels,
out_channels=conv.out_channels,
kernel_size=kwargs.get("kernel_size", conv.kernel_size),
stride=kwargs.get("stride", conv.stride),
padding=kwargs.get("padding", conv.padding),
dilation=kwargs.get("dilation", conv.dilation),
groups=kwargs.get("groups", conv.groups),
bias=kwargs.get("bias", conv.bias is not None),
eps=kwargs.get("eps", conv.eps),
)
w = conv.weight
if in_channels > conv.in_channels:
n = math.ceil(in_channels / float(conv.in_channels))
w = torch.cat([w] * n, dim=1)
w = w[:, :in_channels, ...]
new_conv.weight = nn.Parameter(w, requires_grad=True)
else:
w = w[:, 0:in_channels, ...]
new_conv.weight = nn.Parameter(w, requires_grad=True)
return new_conv
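def _editor_demo_make_n_channel_input_std_conv() -> None:
    """Editor's sketch (not part of the original module).
    `make_n_channel_input_std_conv` forwards an ``eps`` argument to the constructor, so it expects
    a std-conv style layer; a hypothetical stand-in subclass keeps this demo self-contained
    instead of importing a concrete std-conv implementation.
    """
    class _DemoStdConv2d(nn.Conv2d):
        def __init__(self, *args, eps: float = 1e-6, **kwargs):
            super().__init__(*args, **kwargs)
            self.eps = eps
    conv_rgb = _DemoStdConv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
    conv_6ch = make_n_channel_input_std_conv(conv_rgb, in_channels=6)
    # The original RGB filters are tiled along the input-channel dimension and trimmed to fit.
    assert conv_6ch.weight.shape == (64, 6, 7, 7)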
| [
"torch.cat",
"torch.nn.Parameter"
] | 1.8.1 | George-Jiao/pytorch-toolbelt | 920e03876805351ed5645e439a64074cb4f37589 |
1.9 | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
import torch.distributions as torchdist
from torch.distributions import constraints
import pyro
import pyro.distributions as dist
from pyro.contrib.gp.models.model import GPModel
from pyro.contrib.gp.util import conditional
from pyro.nn.module import PyroParam, pyro_method
from pyro.util import warn_if_nan
class GPRegression(GPModel):
r"""
Gaussian Process Regression model.
The core of a Gaussian Process is a covariance function :math:`k` which governs
the similarity between input points. Given :math:`k`, we can establish a
    distribution over functions :math:`f` by a multivariate normal distribution
.. math:: p(f(X)) = \mathcal{N}(0, k(X, X)),
where :math:`X` is any set of input points and :math:`k(X, X)` is a covariance
matrix whose entries are outputs :math:`k(x, z)` of :math:`k` over input pairs
:math:`(x, z)`. This distribution is usually denoted by
.. math:: f \sim \mathcal{GP}(0, k).
.. note:: Generally, beside a covariance matrix :math:`k`, a Gaussian Process can
also be specified by a mean function :math:`m` (which is a zero-value function
by default). In that case, its distribution will be
.. math:: p(f(X)) = \mathcal{N}(m(X), k(X, X)).
Given inputs :math:`X` and their noisy observations :math:`y`, the Gaussian Process
Regression model takes the form
.. math::
f &\sim \mathcal{GP}(0, k(X, X)),\\
y & \sim f + \epsilon,
where :math:`\epsilon` is Gaussian noise.
.. note:: This model has :math:`\mathcal{O}(N^3)` complexity for training,
:math:`\mathcal{O}(N^3)` complexity for testing. Here, :math:`N` is the number
of train inputs.
Reference:
[1] `Gaussian Processes for Machine Learning`,
Carl E. Rasmussen, Christopher K. I. Williams
    :param torch.Tensor X: An input tensor for training. Its first dimension is the number
        of data points.
    :param torch.Tensor y: An output tensor for training. Its last dimension is the
        number of data points.
:param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which
is the covariance function :math:`k`.
:param torch.Tensor noise: Variance of Gaussian noise of this model.
:param callable mean_function: An optional mean function :math:`m` of this Gaussian
process. By default, we use zero mean.
:param float jitter: A small positive term which is added into the diagonal part of
        a covariance matrix to help stabilize its Cholesky decomposition.
"""
def __init__(self, X, y, kernel, noise=None, mean_function=None, jitter=1e-6):
assert isinstance(
X, torch.Tensor
), "X needs to be a torch Tensor instead of a {}".format(type(X))
if y is not None:
assert isinstance(
y, torch.Tensor
), "y needs to be a torch Tensor instead of a {}".format(type(y))
super().__init__(X, y, kernel, mean_function, jitter)
noise = self.X.new_tensor(1.0) if noise is None else noise
self.noise = PyroParam(noise, constraints.positive)
@pyro_method
def model(self):
self.set_mode("model")
N = self.X.size(0)
Kff = self.kernel(self.X)
Kff.view(-1)[:: N + 1] += self.jitter + self.noise # add noise to diagonal
Lff = torch.linalg.cholesky(Kff)
zero_loc = self.X.new_zeros(self.X.size(0))
f_loc = zero_loc + self.mean_function(self.X)
if self.y is None:
f_var = Lff.pow(2).sum(dim=-1)
return f_loc, f_var
else:
return pyro.sample(
self._pyro_get_fullname("y"),
dist.MultivariateNormal(f_loc, scale_tril=Lff)
.expand_by(self.y.shape[:-1])
.to_event(self.y.dim() - 1),
obs=self.y,
)
@pyro_method
def guide(self):
self.set_mode("guide")
self._load_pyro_samples()
def forward(self, Xnew, full_cov=False, noiseless=True):
r"""
Computes the mean and covariance matrix (or variance) of Gaussian Process
posterior on a test input data :math:`X_{new}`:
.. math:: p(f^* \mid X_{new}, X, y, k, \epsilon) = \mathcal{N}(loc, cov).
.. note:: The noise parameter ``noise`` (:math:`\epsilon`) together with
kernel's parameters have been learned from a training procedure (MCMC or
SVI).
        :param torch.Tensor Xnew: An input tensor for testing. Note that
``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.
:param bool full_cov: A flag to decide if we want to predict full covariance
matrix or just variance.
:param bool noiseless: A flag to decide if we want to include noise in the
prediction output or not.
:returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`
:rtype: tuple(torch.Tensor, torch.Tensor)
"""
self._check_Xnew_shape(Xnew)
self.set_mode("guide")
N = self.X.size(0)
Kff = self.kernel(self.X).contiguous()
Kff.view(-1)[:: N + 1] += self.jitter + self.noise # add noise to the diagonal
Lff = torch.linalg.cholesky(Kff)
y_residual = self.y - self.mean_function(self.X)
loc, cov = conditional(
Xnew,
self.X,
self.kernel,
y_residual,
None,
Lff,
full_cov,
jitter=self.jitter,
)
if full_cov and not noiseless:
M = Xnew.size(0)
cov = cov.contiguous()
cov.view(-1, M * M)[:, :: M + 1] += self.noise # add noise to the diagonal
if not full_cov and not noiseless:
cov = cov + self.noise
return loc + self.mean_function(Xnew), cov
def iter_sample(self, noiseless=True):
r"""
Iteratively constructs a sample from the Gaussian Process posterior.
Recall that at test input points :math:`X_{new}`, the posterior is
multivariate Gaussian distributed with mean and covariance matrix
given by :func:`forward`.
This method samples lazily from this multivariate Gaussian. The advantage
of this approach is that later query points can depend upon earlier ones.
Particularly useful when the querying is to be done by an optimisation
routine.
.. note:: The noise parameter ``noise`` (:math:`\epsilon`) together with
kernel's parameters have been learned from a training procedure (MCMC or
SVI).
:param bool noiseless: A flag to decide if we want to add sampling noise
to the samples beyond the noise inherent in the GP posterior.
:returns: sampler
:rtype: function
"""
noise = self.noise.detach()
X = self.X.clone().detach()
y = self.y.clone().detach()
N = X.size(0)
Kff = self.kernel(X).contiguous()
Kff.view(-1)[:: N + 1] += noise # add noise to the diagonal
outside_vars = {"X": X, "y": y, "N": N, "Kff": Kff}
def sample_next(xnew, outside_vars):
"""Repeatedly samples from the Gaussian process posterior,
conditioning on previously sampled values.
"""
warn_if_nan(xnew)
# Variables from outer scope
X, y, Kff = outside_vars["X"], outside_vars["y"], outside_vars["Kff"]
# Compute Cholesky decomposition of kernel matrix
Lff = torch.linalg.cholesky(Kff)
y_residual = y - self.mean_function(X)
# Compute conditional mean and variance
loc, cov = conditional(
xnew, X, self.kernel, y_residual, None, Lff, False, jitter=self.jitter
)
if not noiseless:
cov = cov + noise
ynew = torchdist.Normal(
loc + self.mean_function(xnew), cov.sqrt()
).rsample()
# Update kernel matrix
N = outside_vars["N"]
Kffnew = Kff.new_empty(N + 1, N + 1)
Kffnew[:N, :N] = Kff
cross = self.kernel(X, xnew).squeeze()
end = self.kernel(xnew, xnew).squeeze()
Kffnew[N, :N] = cross
Kffnew[:N, N] = cross
# No noise, just jitter for numerical stability
Kffnew[N, N] = end + self.jitter
# Heuristic to avoid adding degenerate points
if Kffnew.logdet() > -15.0:
outside_vars["Kff"] = Kffnew
outside_vars["N"] += 1
outside_vars["X"] = torch.cat((X, xnew))
outside_vars["y"] = torch.cat((y, ynew))
return ynew
return lambda xnew: sample_next(xnew, outside_vars)
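if __name__ == "__main__":
    # Editor's sketch (not part of the original module): exact GP regression on a toy 1-D problem.
    # The RBF kernel and the Adam/ELBO training loop below are illustrative choices only.
    from pyro.contrib.gp.kernels import RBF
    from pyro.infer import Trace_ELBO
    X = torch.linspace(0.0, 5.0, 20)
    y = torch.sin(X) + 0.1 * torch.randn(X.size(0))
    gpr = GPRegression(X, y, kernel=RBF(input_dim=1), noise=torch.tensor(0.1))
    optimizer = torch.optim.Adam(gpr.parameters(), lr=0.01)
    loss_fn = Trace_ELBO().differentiable_loss
    for _ in range(200):
        optimizer.zero_grad()
        loss = loss_fn(gpr.model, gpr.guide)
        loss.backward()
        optimizer.step()
    Xnew = torch.linspace(0.0, 5.0, 50)
    mean, var = gpr(Xnew, full_cov=False, noiseless=False)
    # Lazy sequential sampling from the posterior; each draw conditions on the previous ones.
    sampler = gpr.iter_sample(noiseless=True)
    f1 = sampler(torch.tensor([1.5]))
    f2 = sampler(torch.tensor([2.5]))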
| [
"torch.cat",
"torch.linalg.cholesky"
] | 1.9.0 | GautamV234/pyro | d5474ebc6101b330bf9060a3731830d4b6a585d5 |
1.8 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trainer to automate the training."""
import inspect
import logging
import math
import os
import traceback
import warnings
from argparse import ArgumentParser, Namespace
from copy import deepcopy
from datetime import timedelta
from pathlib import Path
from typing import Any, Callable, cast, Dict, Iterable, List, Optional, Tuple, Type, Union
from weakref import proxy
import torch
from packaging.version import Version
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.accelerators import Accelerator, GPUAccelerator, IPUAccelerator, TPUAccelerator
from pytorch_lightning.callbacks import Callback, EarlyStopping, ModelCheckpoint, ProgressBarBase
from pytorch_lightning.callbacks.prediction_writer import BasePredictionWriter
from pytorch_lightning.core.datamodule import LightningDataModule
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.loggers import LightningLoggerBase
from pytorch_lightning.loggers.base import DummyLogger, LoggerCollection
from pytorch_lightning.loggers.tensorboard import TensorBoardLogger
from pytorch_lightning.loops import PredictionLoop, TrainingEpochLoop
from pytorch_lightning.loops.dataloader.evaluation_loop import EvaluationLoop
from pytorch_lightning.loops.fit_loop import FitLoop
from pytorch_lightning.loops.utilities import _parse_loop_limits, _reset_progress
from pytorch_lightning.plugins import (
ApexMixedPrecisionPlugin,
NativeMixedPrecisionPlugin,
PLUGIN_INPUT,
PrecisionPlugin,
)
from pytorch_lightning.plugins.environments.slurm_environment import SLURMEnvironment
from pytorch_lightning.profiler import (
AdvancedProfiler,
BaseProfiler,
PassThroughProfiler,
PyTorchProfiler,
SimpleProfiler,
XLAProfiler,
)
from pytorch_lightning.strategies import ParallelStrategy, Strategy
from pytorch_lightning.strategies.ddp_spawn import DDPSpawnStrategy
from pytorch_lightning.trainer.callback_hook import TrainerCallbackHookMixin
from pytorch_lightning.trainer.configuration_validator import verify_loop_configurations
from pytorch_lightning.trainer.connectors.accelerator_connector import AcceleratorConnector
from pytorch_lightning.trainer.connectors.callback_connector import CallbackConnector
from pytorch_lightning.trainer.connectors.checkpoint_connector import CheckpointConnector
from pytorch_lightning.trainer.connectors.data_connector import DataConnector
from pytorch_lightning.trainer.connectors.logger_connector import LoggerConnector
from pytorch_lightning.trainer.connectors.logger_connector.result import _ResultCollection
from pytorch_lightning.trainer.connectors.signal_connector import SignalConnector
from pytorch_lightning.trainer.data_loading import TrainerDataLoadingMixin
from pytorch_lightning.trainer.optimizers import TrainerOptimizersMixin
from pytorch_lightning.trainer.states import RunningStage, TrainerFn, TrainerState, TrainerStatus
from pytorch_lightning.trainer.supporters import CombinedLoader
from pytorch_lightning.tuner.lr_finder import _LRFinder
from pytorch_lightning.tuner.tuning import Tuner
from pytorch_lightning.utilities import (
_IPU_AVAILABLE,
_TPU_AVAILABLE,
AMPType,
device_parser,
GradClipAlgorithmType,
parsing,
)
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.argparse import (
_defaults_from_env_vars,
add_argparse_args,
from_argparse_args,
parse_argparser,
parse_env_variables,
)
from pytorch_lightning.utilities.auto_restart import _add_capture_metadata_collate
from pytorch_lightning.utilities.cloud_io import get_filesystem
from pytorch_lightning.utilities.data import _auto_add_worker_init_fn, has_len_all_ranks
from pytorch_lightning.utilities.distributed import distributed_available
from pytorch_lightning.utilities.exceptions import ExitGracefullyException, MisconfigurationException
from pytorch_lightning.utilities.imports import _fault_tolerant_training
from pytorch_lightning.utilities.meta import is_on_meta_device, materialize_module
from pytorch_lightning.utilities.model_helpers import is_overridden
from pytorch_lightning.utilities.rank_zero import rank_zero_deprecation, rank_zero_info, rank_zero_warn
from pytorch_lightning.utilities.seed import isolate_rng
from pytorch_lightning.utilities.signature_utils import is_param_in_hook_signature
from pytorch_lightning.utilities.types import (
_EVALUATE_OUTPUT,
_PATH,
_PREDICT_OUTPUT,
EVAL_DATALOADERS,
LRSchedulerConfig,
STEP_OUTPUT,
TRAIN_DATALOADERS,
)
from pytorch_lightning.utilities.warnings import PossibleUserWarning
log = logging.getLogger(__name__)
# warnings to ignore in trainer
warnings.filterwarnings(
"ignore", message="torch.distributed.reduce_op is deprecated, please use torch.distributed.ReduceOp instead"
)
class Trainer(
TrainerCallbackHookMixin, # TODO: Remove in v1.8
TrainerOptimizersMixin, # TODO: Remove in v1.8
TrainerDataLoadingMixin, # TODO: Remove in v1.8
):
@_defaults_from_env_vars
def __init__(
self,
logger: Union[LightningLoggerBase, Iterable[LightningLoggerBase], bool] = True,
checkpoint_callback: Optional[bool] = None,
enable_checkpointing: bool = True,
callbacks: Optional[Union[List[Callback], Callback]] = None,
default_root_dir: Optional[str] = None,
gradient_clip_val: Optional[Union[int, float]] = None,
gradient_clip_algorithm: Optional[str] = None,
process_position: int = 0,
num_nodes: int = 1,
num_processes: Optional[int] = None,
devices: Optional[Union[List[int], str, int]] = None,
gpus: Optional[Union[List[int], str, int]] = None,
auto_select_gpus: bool = False,
tpu_cores: Optional[Union[List[int], str, int]] = None,
ipus: Optional[int] = None,
log_gpu_memory: Optional[str] = None, # TODO: Remove in 1.7
progress_bar_refresh_rate: Optional[int] = None, # TODO: remove in v1.7
enable_progress_bar: bool = True,
overfit_batches: Union[int, float] = 0.0,
track_grad_norm: Union[int, float, str] = -1,
check_val_every_n_epoch: int = 1,
fast_dev_run: Union[int, bool] = False,
accumulate_grad_batches: Optional[Union[int, Dict[int, int]]] = None,
max_epochs: Optional[int] = None,
min_epochs: Optional[int] = None,
max_steps: int = -1,
min_steps: Optional[int] = None,
max_time: Optional[Union[str, timedelta, Dict[str, int]]] = None,
limit_train_batches: Optional[Union[int, float]] = None,
limit_val_batches: Optional[Union[int, float]] = None,
limit_test_batches: Optional[Union[int, float]] = None,
limit_predict_batches: Optional[Union[int, float]] = None,
val_check_interval: Optional[Union[int, float]] = None,
flush_logs_every_n_steps: Optional[int] = None,
log_every_n_steps: int = 50,
accelerator: Optional[Union[str, Accelerator]] = None,
strategy: Optional[Union[str, Strategy]] = None,
sync_batchnorm: bool = False,
precision: Union[int, str] = 32,
enable_model_summary: bool = True,
weights_summary: Optional[str] = "top",
weights_save_path: Optional[str] = None, # TODO: Remove in 1.8
num_sanity_val_steps: int = 2,
resume_from_checkpoint: Optional[Union[Path, str]] = None,
profiler: Optional[Union[BaseProfiler, str]] = None,
benchmark: Optional[bool] = None,
deterministic: bool = False,
reload_dataloaders_every_n_epochs: int = 0,
auto_lr_find: Union[bool, str] = False,
replace_sampler_ddp: bool = True,
detect_anomaly: bool = False,
auto_scale_batch_size: Union[str, bool] = False,
prepare_data_per_node: Optional[bool] = None,
plugins: Optional[Union[PLUGIN_INPUT, List[PLUGIN_INPUT]]] = None,
amp_backend: str = "native",
amp_level: Optional[str] = None,
move_metrics_to_cpu: bool = False,
multiple_trainloader_mode: str = "max_size_cycle",
stochastic_weight_avg: bool = False,
terminate_on_nan: Optional[bool] = None,
) -> None:
r"""
Customize every aspect of training via flags.
Args:
accelerator: Supports passing different accelerator types ("cpu", "gpu", "tpu", "ipu", "auto")
as well as custom accelerator instances.
.. deprecated:: v1.5
Passing training strategies (e.g., 'ddp') to ``accelerator`` has been deprecated in v1.5.0
and will be removed in v1.7.0. Please use the ``strategy`` argument instead.
accumulate_grad_batches: Accumulates grads every k batches or as set up in the dict.
Default: ``None``.
amp_backend: The mixed precision backend to use ("native" or "apex").
                Default: ``'native'``.
amp_level: The optimization level to use (O1, O2, etc...). By default it will be set to "O2"
if ``amp_backend`` is set to "apex".
auto_lr_find: If set to True, will make trainer.tune() run a learning rate finder,
trying to optimize initial learning for faster convergence. trainer.tune() method will
set the suggested learning rate in self.lr or self.learning_rate in the LightningModule.
To use a different key set a string instead of True with the key name.
Default: ``False``.
auto_scale_batch_size: If set to True, will `initially` run a batch size
finder trying to find the largest batch size that fits into memory.
The result will be stored in self.batch_size in the LightningModule.
Additionally, can be set to either `power` that estimates the batch size through
a power search or `binsearch` that estimates the batch size through a binary search.
Default: ``False``.
auto_select_gpus: If enabled and ``gpus`` is an integer, pick available
gpus automatically. This is especially useful when
GPUs are configured to be in "exclusive mode", such
that only one process at a time can access them.
Default: ``False``.
benchmark: Sets ``torch.backends.cudnn.benchmark``.
Defaults to ``True`` if :paramref:`~pytorch_lightning.trainer.trainer.Trainer.deterministic`
is ``False``. Overwrite to manually set a different value. Default: ``None``.
callbacks: Add a callback or list of callbacks.
Default: ``None``.
checkpoint_callback: If ``True``, enable checkpointing.
Default: ``None``.
.. deprecated:: v1.5
``checkpoint_callback`` has been deprecated in v1.5 and will be removed in v1.7.
Please consider using ``enable_checkpointing`` instead.
enable_checkpointing: If ``True``, enable checkpointing.
It will configure a default ModelCheckpoint callback if there is no user-defined ModelCheckpoint in
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.callbacks`.
Default: ``True``.
check_val_every_n_epoch: Check val every n train epochs.
Default: ``1``.
default_root_dir: Default path for logs and weights when no logger/ckpt_callback passed.
Default: ``os.getcwd()``.
Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'
detect_anomaly: Enable anomaly detection for the autograd engine.
Default: ``False``.
deterministic: If ``True``, sets whether PyTorch operations must use deterministic algorithms.
Default: ``False``.
devices: Will be mapped to either `gpus`, `tpu_cores`, `num_processes` or `ipus`,
based on the accelerator type.
            fast_dev_run: Runs ``n`` batch(es) of train, val and test if set to an int ``n``, or 1 batch
                of each if set to ``True``, to find any bugs (i.e. a sort of unit test).
Default: ``False``.
flush_logs_every_n_steps: How often to flush logs to disk (defaults to every 100 steps).
.. deprecated:: v1.5
``flush_logs_every_n_steps`` has been deprecated in v1.5 and will be removed in v1.7.
Please configure flushing directly in the logger instead.
gpus: Number of GPUs to train on (int) or which GPUs to train on (list or str) applied per node
Default: ``None``.
gradient_clip_val: The value at which to clip gradients. Passing ``gradient_clip_val=None`` disables
gradient clipping. If using Automatic Mixed Precision (AMP), the gradients will be unscaled before.
Default: ``None``.
gradient_clip_algorithm: The gradient clipping algorithm to use. Pass ``gradient_clip_algorithm="value"``
to clip by value, and ``gradient_clip_algorithm="norm"`` to clip by norm. By default it will
be set to ``"norm"``.
limit_train_batches: How much of training dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.
limit_val_batches: How much of validation dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.
limit_test_batches: How much of test dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.
limit_predict_batches: How much of prediction dataset to check (float = fraction, int = num_batches).
Default: ``1.0``.
logger: Logger (or iterable collection of loggers) for experiment tracking. A ``True`` value uses
the default ``TensorBoardLogger``. ``False`` will disable logging. If multiple loggers are
provided and the `save_dir` property of that logger is not set, local files (checkpoints,
profiler traces, etc.) are saved in ``default_root_dir`` rather than in the ``log_dir`` of any
of the individual loggers.
Default: ``True``.
log_gpu_memory: None, 'min_max', 'all'. Might slow performance.
.. deprecated:: v1.5
Deprecated in v1.5.0 and will be removed in v1.7.0
Please use the ``DeviceStatsMonitor`` callback directly instead.
log_every_n_steps: How often to log within steps.
Default: ``50``.
            prepare_data_per_node: If True, each LOCAL_RANK=0 will call ``prepare_data``.
                Otherwise only NODE_RANK=0, LOCAL_RANK=0 will call ``prepare_data``.
.. deprecated:: v1.5
Deprecated in v1.5.0 and will be removed in v1.7.0
Please set ``prepare_data_per_node`` in ``LightningDataModule`` and/or
``LightningModule`` directly instead.
process_position: Orders the progress bar when running multiple models on same machine.
.. deprecated:: v1.5
``process_position`` has been deprecated in v1.5 and will be removed in v1.7.
Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``process_position``
directly to the Trainer's ``callbacks`` argument instead.
progress_bar_refresh_rate: How often to refresh progress bar (in steps). Value ``0`` disables progress bar.
                Ignored when a custom progress bar is passed to :paramref:`~Trainer.callbacks`. Default: None, meaning
a suitable value will be chosen based on the environment (terminal, Google COLAB, etc.).
.. deprecated:: v1.5
``progress_bar_refresh_rate`` has been deprecated in v1.5 and will be removed in v1.7.
Please pass :class:`~pytorch_lightning.callbacks.progress.TQDMProgressBar` with ``refresh_rate``
directly to the Trainer's ``callbacks`` argument instead. To disable the progress bar,
pass ``enable_progress_bar = False`` to the Trainer.
            enable_progress_bar: Whether to enable the progress bar by default.
                Default: ``True``.
profiler: To profile individual steps during training and assist in identifying bottlenecks.
Default: ``None``.
overfit_batches: Overfit a fraction of training data (float) or a set number of batches (int).
Default: ``0.0``.
plugins: Plugins allow modification of core behavior like ddp and amp, and enable custom lightning plugins.
Default: ``None``.
precision: Double precision (64), full precision (32), half precision (16) or bfloat16 precision (bf16).
Can be used on CPU, GPU, TPUs or IPUs.
Default: ``32``.
max_epochs: Stop training once this number of epochs is reached. Disabled by default (None).
If both max_epochs and max_steps are not specified, defaults to ``max_epochs = 1000``.
To enable infinite training, set ``max_epochs = -1``.
            min_epochs: Force training for at least this many epochs. Disabled by default (None).
max_steps: Stop training after this number of steps. Disabled by default (-1). If ``max_steps = -1``
and ``max_epochs = None``, will default to ``max_epochs = 1000``. To enable infinite training, set
``max_epochs`` to ``-1``.
            min_steps: Force training for at least this number of steps. Disabled by default (``None``).
max_time: Stop training after this amount of time has passed. Disabled by default (``None``).
                The time duration can be specified in the format DD:HH:MM:SS (days, hours, minutes, seconds), as a
:class:`datetime.timedelta`, or a dictionary with keys that will be passed to
:class:`datetime.timedelta`.
num_nodes: Number of GPU nodes for distributed training.
Default: ``1``.
num_processes: Number of processes for distributed training with ``accelerator="cpu"``.
Default: ``1``.
num_sanity_val_steps: Sanity check runs n validation batches before starting the training routine.
Set it to `-1` to run all batches in all validation dataloaders.
Default: ``2``.
reload_dataloaders_every_n_epochs: Set to a non-negative integer to reload dataloaders every n epochs.
Default: ``0``.
replace_sampler_ddp: Explicitly enables or disables sampler replacement. If not specified this
                will be toggled automatically when DDP is used. By default it will add ``shuffle=True`` for
train sampler and ``shuffle=False`` for val/test sampler. If you want to customize it,
you can set ``replace_sampler_ddp=False`` and add your own distributed sampler.
resume_from_checkpoint: Path/URL of the checkpoint from which training is resumed. If there is
no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,
training will start from the beginning of the next epoch.
.. deprecated:: v1.5
``resume_from_checkpoint`` is deprecated in v1.5 and will be removed in v2.0.
Please pass the path to ``Trainer.fit(..., ckpt_path=...)`` instead.
strategy: Supports different training strategies with aliases
                as well as custom training type plugins.
Default: ``None``.
sync_batchnorm: Synchronize batch norm layers between process groups/whole world.
Default: ``False``.
terminate_on_nan: If set to True, will terminate training (by raising a `ValueError`) at the
end of each training batch, if any of the parameters or the loss are NaN or +/-inf.
.. deprecated:: v1.5
Trainer argument ``terminate_on_nan`` was deprecated in v1.5 and will be removed in 1.7.
Please use ``detect_anomaly`` instead.
detect_anomaly: Enable anomaly detection for the autograd engine.
Default: ``False``.
tpu_cores: How many TPU cores to train on (1 or 8) / Single TPU to train on (1)
Default: ``None``.
ipus: How many IPUs to train on.
Default: ``None``.
            track_grad_norm: -1 for no tracking. Otherwise tracks that p-norm. May be set to 'inf' for the infinity norm. If using
Automatic Mixed Precision (AMP), the gradients will be unscaled before logging them.
Default: ``-1``.
val_check_interval: How often to check the validation set. Pass a ``float`` in the range [0.0, 1.0] to check
after a fraction of the training epoch. Pass an ``int`` to check after a fixed number of training
batches.
Default: ``1.0``.
enable_model_summary: Whether to enable model summarization by default.
Default: ``True``.
weights_summary: Prints a summary of the weights when training begins.
.. deprecated:: v1.5
``weights_summary`` has been deprecated in v1.5 and will be removed in v1.7.
To disable the summary, pass ``enable_model_summary = False`` to the Trainer.
To customize the summary, pass :class:`~pytorch_lightning.callbacks.model_summary.ModelSummary`
directly to the Trainer's ``callbacks`` argument.
weights_save_path: Where to save weights if specified. Will override default_root_dir
for checkpoints only. Use this if for whatever reason you need the checkpoints
stored in a different place than the logs written in `default_root_dir`.
Can be remote file paths such as `s3://mybucket/path` or 'hdfs://path/'
Defaults to `default_root_dir`.
.. deprecated:: v1.6
``weights_save_path`` has been deprecated in v1.6 and will be removed in v1.8. Please pass
``dirpath`` directly to the :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`
callback.
move_metrics_to_cpu: Whether to force internal logged metrics to be moved to cpu.
                This can save some GPU memory, but can make training slower. Use with caution.
Default: ``False``.
multiple_trainloader_mode: How to loop over the datasets when there are multiple train loaders.
In 'max_size_cycle' mode, the trainer ends one epoch when the largest dataset is traversed,
and smaller datasets reload when running out of their data. In 'min_size' mode, all the datasets
reload when reaching the minimum length of datasets.
Default: ``"max_size_cycle"``.
stochastic_weight_avg: Whether to use `Stochastic Weight Averaging (SWA)
<https://pytorch.org/blog/pytorch-1.6-now-includes-stochastic-weight-averaging/>`_.
Default: ``False``.
.. deprecated:: v1.5
``stochastic_weight_avg`` has been deprecated in v1.5 and will be removed in v1.7.
Please pass :class:`~pytorch_lightning.callbacks.stochastic_weight_avg.StochasticWeightAveraging`
directly to the Trainer's ``callbacks`` argument instead.
"""
super().__init__()
Trainer._log_api_event("init")
log.detail(f"{self.__class__.__name__}: Initializing trainer with parameters: {locals()}")
self.state = TrainerState()
gpu_ids, tpu_cores = self._parse_devices(gpus, auto_select_gpus, tpu_cores)
# init connectors
self._data_connector = DataConnector(self, multiple_trainloader_mode)
self._accelerator_connector = AcceleratorConnector(
num_processes=num_processes,
devices=devices,
tpu_cores=tpu_cores,
ipus=ipus,
accelerator=accelerator,
strategy=strategy,
gpus=gpus,
gpu_ids=gpu_ids,
num_nodes=num_nodes,
sync_batchnorm=sync_batchnorm,
benchmark=benchmark,
replace_sampler_ddp=replace_sampler_ddp,
deterministic=deterministic,
precision=precision,
amp_type=amp_backend,
amp_level=amp_level,
plugins=plugins,
)
self._logger_connector = LoggerConnector(self, log_gpu_memory)
self._callback_connector = CallbackConnector(self)
self._checkpoint_connector = CheckpointConnector(self, resume_from_checkpoint)
self._signal_connector = SignalConnector(self)
self.tuner = Tuner(self)
min_steps, max_steps, min_epochs, max_epochs, max_time = _parse_loop_limits(
min_steps, max_steps, min_epochs, max_epochs, max_time
)
fit_loop = FitLoop(min_epochs=min_epochs, max_epochs=max_epochs)
training_epoch_loop = TrainingEpochLoop(min_steps=min_steps, max_steps=max_steps)
fit_loop.connect(epoch_loop=training_epoch_loop)
# default .fit() loop
self.fit_loop = fit_loop
# default .validate() loop
self.validate_loop = EvaluationLoop()
# default .test() loop
self.test_loop = EvaluationLoop()
# default .predict() loop
self.predict_loop = PredictionLoop()
# set when a checkpoint is loaded via `Trainer.{fit,validate,test,predict}`.
self._ckpt_path: Optional[str] = None
# .validate(), predict() and .test() set these when they load a checkpoint. They will be removed in favor of
# the unified read-only `Trainer.ckpt_path` attribute in v1.8
self._validated_ckpt_path: Optional[str] = None # TODO: remove in v1.8
self._tested_ckpt_path: Optional[str] = None # TODO: remove in v1.8
self._predicted_ckpt_path: Optional[str] = None # TODO: remove in v1.8
# todo: remove in v1.7
self._weights_summary: Optional[str] = None
# init callbacks
# Declare attributes to be set in _callback_connector on_trainer_init
self._callback_connector.on_trainer_init(
callbacks,
checkpoint_callback,
enable_checkpointing,
enable_progress_bar,
progress_bar_refresh_rate,
process_position,
default_root_dir,
weights_save_path,
enable_model_summary,
weights_summary,
stochastic_weight_avg,
max_time,
accumulate_grad_batches,
)
# hook
self._call_callback_hooks("on_init_start")
# init data flags
self.check_val_every_n_epoch: int
self._data_connector.on_trainer_init(
check_val_every_n_epoch,
reload_dataloaders_every_n_epochs,
prepare_data_per_node,
)
if terminate_on_nan is not None:
rank_zero_deprecation(
"Trainer argument `terminate_on_nan` was deprecated in v1.5 and will be removed in 1.7."
" Please use `Trainer(detect_anomaly=True)` instead."
)
if not isinstance(terminate_on_nan, bool):
raise TypeError(f"`terminate_on_nan` should be a bool, got {terminate_on_nan}.")
# gradient clipping
if gradient_clip_val is not None and not isinstance(gradient_clip_val, (int, float)):
raise TypeError(f"`gradient_clip_val` should be an int or a float. Got {gradient_clip_val}.")
if gradient_clip_algorithm is not None and not GradClipAlgorithmType.supported_type(
gradient_clip_algorithm.lower()
):
raise MisconfigurationException(
f"`gradient_clip_algorithm` {gradient_clip_algorithm} is invalid. "
f"Allowed algorithms: {GradClipAlgorithmType.supported_types()}."
)
# gradient norm tracking
if track_grad_norm != -1 and not (
(isinstance(track_grad_norm, (int, float)) or track_grad_norm == "inf") and float(track_grad_norm) > 0
):
raise MisconfigurationException(
f"`track_grad_norm` must be a positive number or 'inf' (infinity norm). Got {track_grad_norm}."
)
self._terminate_on_nan = terminate_on_nan
self.gradient_clip_val: Union[int, float] = gradient_clip_val
self.gradient_clip_algorithm = (
GradClipAlgorithmType(gradient_clip_algorithm.lower())
if gradient_clip_algorithm is not None
else gradient_clip_algorithm
)
self.track_grad_norm: float = float(track_grad_norm)
self._detect_anomaly: bool = detect_anomaly
self._setup_on_init(num_sanity_val_steps)
# configure tuner
self.tuner.on_trainer_init(auto_lr_find, auto_scale_batch_size)
# configure profiler
self.__init_profiler(profiler)
# init logger flags
self._loggers: List[LightningLoggerBase]
self._logger_connector.on_trainer_init(logger, flush_logs_every_n_steps, log_every_n_steps, move_metrics_to_cpu)
# init debugging flags
self.val_check_interval: Union[int, float]
self._init_debugging_flags(
limit_train_batches,
limit_val_batches,
limit_test_batches,
limit_predict_batches,
val_check_interval,
overfit_batches,
fast_dev_run,
)
# Callback system
self._call_callback_hooks("on_init_end")
def _init_debugging_flags(
self,
limit_train_batches: Optional[Union[int, float]],
limit_val_batches: Optional[Union[int, float]],
limit_test_batches: Optional[Union[int, float]],
limit_predict_batches: Optional[Union[int, float]],
val_check_interval: Optional[Union[int, float]],
overfit_batches: Union[int, float],
fast_dev_run: Union[int, bool],
) -> None:
if isinstance(fast_dev_run, int) and (fast_dev_run < 0):
raise MisconfigurationException(
f"fast_dev_run={fast_dev_run} is not a valid configuration. It should be >= 0."
)
self.fast_dev_run = fast_dev_run
# set fast_dev_run=True when it is 1, used while logging
if fast_dev_run == 1:
self.fast_dev_run = True
if fast_dev_run:
num_batches = int(fast_dev_run)
limit_train_batches = num_batches
limit_val_batches = num_batches
limit_test_batches = num_batches
limit_predict_batches = num_batches
self.fit_loop.max_steps = num_batches
self.num_sanity_val_steps = 0
self.fit_loop.max_epochs = 1
val_check_interval = 1.0
self.check_val_every_n_epoch = 1
self.loggers = [DummyLogger()] if self.loggers else []
rank_zero_info(
"Running in fast_dev_run mode: will run a full train,"
f" val, test and prediction loop using {num_batches} batch(es)."
)
self.limit_train_batches = _determine_batch_limits(limit_train_batches, "limit_train_batches")
self.limit_val_batches = _determine_batch_limits(limit_val_batches, "limit_val_batches")
self.limit_test_batches = _determine_batch_limits(limit_test_batches, "limit_test_batches")
self.limit_predict_batches = _determine_batch_limits(limit_predict_batches, "limit_predict_batches")
self.val_check_interval = _determine_batch_limits(val_check_interval, "val_check_interval")
self.overfit_batches = _determine_batch_limits(overfit_batches, "overfit_batches")
self._determine_data_use_amount(self.overfit_batches)
def _determine_data_use_amount(self, overfit_batches: float) -> None:
"""Use less data for debugging purposes."""
if overfit_batches > 0:
self.limit_train_batches = overfit_batches
self.limit_val_batches = 0
def _setup_on_init(self, num_sanity_val_steps: int) -> None:
self._log_device_info()
self.should_stop = False
self.state = TrainerState()
self.num_training_batches = float("inf")
self.train_dataloader = None
if num_sanity_val_steps == -1:
self.num_sanity_val_steps = float("inf")
else:
self.num_sanity_val_steps = num_sanity_val_steps
self.num_sanity_val_batches = []
self.num_test_batches = []
self.num_val_batches = []
self.test_dataloaders = None
self.val_dataloaders = None
self._last_train_dl_reload_epoch = float("-inf")
self._last_val_dl_reload_epoch = float("-inf")
self.num_predict_batches = []
def _call_and_handle_interrupt(self, trainer_fn: Callable, *args: Any, **kwargs: Any) -> Any:
r"""
Error handling, intended to be used only for main trainer function entry points (fit, validate, test, predict)
as all errors should funnel through them
Args:
trainer_fn: one of (fit, validate, test, predict)
*args: positional arguments to be passed to the `trainer_fn`
**kwargs: keyword arguments to be passed to `trainer_fn`
"""
try:
if self.strategy.launcher is not None:
return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)
else:
return trainer_fn(*args, **kwargs)
# TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
except KeyboardInterrupt as exception:
rank_zero_warn("Detected KeyboardInterrupt, attempting graceful shutdown...")
# user could press Ctrl+c many times... only shutdown once
if not self.interrupted:
self.state.status = TrainerStatus.INTERRUPTED
self._call_callback_hooks("on_keyboard_interrupt")
self._call_callback_hooks("on_exception", exception)
except BaseException as exception:
self.state.status = TrainerStatus.INTERRUPTED
if distributed_available() and self.world_size > 1:
# try syncing remaining processes, kill otherwise
self.strategy.reconciliate_processes(traceback.format_exc())
self._call_callback_hooks("on_exception", exception)
self._teardown()
# teardown might access the stage so we reset it after
self.state.stage = None
raise
def fit(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
ckpt_path: Optional[str] = None,
) -> None:
r"""
Runs the full optimization routine.
Args:
model: Model to fit.
train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a
:class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.
In the case of multiple dataloaders, please see this :ref:`section <multiple-dataloaders>`.
val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.
ckpt_path: Path/URL of the checkpoint from which training is resumed. If there is
no checkpoint file at the path, an exception is raised. If resuming from mid-epoch checkpoint,
training will start from the beginning of the next epoch.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
"""
self.strategy.model = model
self._call_and_handle_interrupt(
self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
)
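    # Editor's note (sketch): typical `fit` calls, shown for illustration only:
    #     trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)
    #     trainer.fit(model, datamodule=dm, ckpt_path="path/to/checkpoint.ckpt")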
def _fit_impl(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
ckpt_path: Optional[str] = None,
) -> None:
Trainer._log_api_event("fit")
log.detail(f"{self.__class__.__name__}: trainer fit stage")
self.state.fn = TrainerFn.FITTING
self.state.status = TrainerStatus.RUNNING
self.training = True
self._last_train_dl_reload_epoch = float("-inf")
self._last_val_dl_reload_epoch = float("-inf")
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(train_dataloaders, LightningDataModule):
datamodule = train_dataloaders
train_dataloaders = None
# If you supply a datamodule you can't supply train_dataloader or val_dataloaders
if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:
raise MisconfigurationException(
"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.fit(datamodule=...)`"
)
# links data to the trainer
self._data_connector.attach_data(
model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
)
# TODO: ckpt_path only in v2.0
ckpt_path = ckpt_path or self.resume_from_checkpoint
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=True, model_connected=self.lightning_module is not None
)
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.training = False
return results
def validate(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
r"""
Perform one evaluation epoch over the validation set.
Args:
model: The model to validate.
dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,
or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying validation samples.
ckpt_path: Either ``best`` or path to the checkpoint you wish to validate.
If ``None`` and the model instance was passed, use the current weights.
Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
if a checkpoint callback is configured.
verbose: If True, prints the validation results.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
Returns:
List of dictionaries with metrics logged during the validation phase, e.g., in model- or callback hooks
like :meth:`~pytorch_lightning.core.lightning.LightningModule.validation_step`,
:meth:`~pytorch_lightning.core.lightning.LightningModule.validation_epoch_end`, etc.
The length of the list corresponds to the number of validation dataloaders used.
"""
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(self._validate_impl, model, dataloaders, ckpt_path, verbose, datamodule)
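    # Editor's note (sketch): for illustration, passing ``ckpt_path="best"`` loads the best
    # checkpoint from the previous ``trainer.fit`` call, as described in the docstring above:
    #     trainer.validate(model, dataloaders=val_loader, ckpt_path="best")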
def _validate_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("validate")
log.detail(f"{self.__class__.__name__}: trainer validate stage")
self.state.fn = TrainerFn.VALIDATING
self.state.status = TrainerStatus.RUNNING
self.validating = True
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
# If you supply a datamodule you can't supply val_dataloaders
if dataloaders is not None and datamodule:
raise MisconfigurationException("You cannot pass both `trainer.validate(dataloaders=..., datamodule=...)`")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.validate()` when it hasn't been passed in a previous run"
)
self.validate_loop.verbose = verbose
# links data to the trainer
self._data_connector.attach_data(model, val_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._validated_ckpt_path = self.ckpt_path # TODO: remove in v1.8
# run validate
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.validating = False
return results
def test(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
r"""
Perform one evaluation epoch over the test set.
It's separated from fit to make sure you never run on your test set until you want to.
Args:
model: The model to test.
dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,
or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying test samples.
ckpt_path: Either ``best`` or path to the checkpoint you wish to test.
If ``None`` and the model instance was passed, use the current weights.
Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
if a checkpoint callback is configured.
verbose: If True, prints the test results.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
Returns:
List of dictionaries with metrics logged during the test phase, e.g., in model- or callback hooks
like :meth:`~pytorch_lightning.core.lightning.LightningModule.test_step`,
:meth:`~pytorch_lightning.core.lightning.LightningModule.test_epoch_end`, etc.
The length of the list corresponds to the number of test dataloaders used.
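        Example (a minimal usage sketch; ``LitModel`` and the dataloader names are hypothetical placeholders, not part of this API)::
            trainer = Trainer(max_epochs=3)
            trainer.fit(LitModel(), train_loader, val_loader)
            # evaluate the best checkpoint from the ``fit`` call above on the held-out test set
            trainer.test(ckpt_path="best", dataloaders=test_loader)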
"""
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(self._test_impl, model, dataloaders, ckpt_path, verbose, datamodule)
def _test_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
ckpt_path: Optional[str] = None,
verbose: bool = True,
datamodule: Optional[LightningDataModule] = None,
) -> _EVALUATE_OUTPUT:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("test")
log.detail(f"{self.__class__.__name__}: trainer test stage")
self.state.fn = TrainerFn.TESTING
self.state.status = TrainerStatus.RUNNING
self.testing = True
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
# If you supply a datamodule you can't supply test_dataloaders
if dataloaders is not None and datamodule:
            raise MisconfigurationException("You cannot pass both `dataloaders` and `datamodule` to `trainer.test()`.")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.test()` when it hasn't been passed in a previous run"
)
self.test_loop.verbose = verbose
# links data to the trainer
self._data_connector.attach_data(model, test_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._tested_ckpt_path = self.ckpt_path # TODO: remove in v1.8
# run test
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.testing = False
return results
def predict(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
datamodule: Optional[LightningDataModule] = None,
return_predictions: Optional[bool] = None,
ckpt_path: Optional[str] = None,
) -> Optional[_PREDICT_OUTPUT]:
r"""
Run inference on your data.
This will call the model forward function to compute predictions. Useful to perform distributed
and batched predictions. Logging is disabled in the predict hooks.
Args:
model: The model to predict with.
dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them,
or a :class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying prediction samples.
datamodule: The datamodule with a predict_dataloader method that returns one or more dataloaders.
return_predictions: Whether to return predictions.
``True`` by default except when an accelerator that spawns processes is used (not supported).
ckpt_path: Either ``best`` or path to the checkpoint you wish to predict.
If ``None`` and the model instance was passed, use the current weights.
Otherwise, the best model checkpoint from the previous ``trainer.fit`` call will be loaded
if a checkpoint callback is configured.
Returns:
Returns a list of dictionaries, one for each provided dataloader containing their respective predictions.
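        Example (a minimal usage sketch; ``LitModel``, ``predict_loader`` and the checkpoint path are hypothetical placeholders)::
            model = LitModel.load_from_checkpoint("path/to/checkpoint.ckpt")
            trainer = Trainer()
            predictions = trainer.predict(model, dataloaders=predict_loader)
            # ``predictions`` holds the ``predict_step`` output for every batch of ``predict_loader``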
"""
self.strategy.model = model or self.lightning_module
return self._call_and_handle_interrupt(
self._predict_impl, model, dataloaders, datamodule, return_predictions, ckpt_path
)
def _predict_impl(
self,
model: Optional["pl.LightningModule"] = None,
dataloaders: Optional[Union[EVAL_DATALOADERS, LightningDataModule]] = None,
datamodule: Optional[LightningDataModule] = None,
return_predictions: Optional[bool] = None,
ckpt_path: Optional[str] = None,
) -> Optional[_PREDICT_OUTPUT]:
# --------------------
# SETUP HOOK
# --------------------
Trainer._log_api_event("predict")
log.detail(f"{self.__class__.__name__}: trainer predict stage")
self.state.fn = TrainerFn.PREDICTING
self.state.status = TrainerStatus.RUNNING
self.predicting = True
self.predict_loop.return_predictions = return_predictions
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(dataloaders, LightningDataModule):
datamodule = dataloaders
dataloaders = None
if dataloaders is not None and datamodule:
            raise MisconfigurationException("You cannot pass both `dataloaders` and `datamodule` to `trainer.predict()`.")
model_provided = model is not None
model = model or self.lightning_module
if model is None:
raise MisconfigurationException(
"`model` must be provided to `trainer.predict()` when it hasn't been passed in a previous run"
)
# links data to the trainer
self._data_connector.attach_data(model, predict_dataloaders=dataloaders, datamodule=datamodule)
self._ckpt_path = self.__set_ckpt_path(
ckpt_path, model_provided=model_provided, model_connected=self.lightning_module is not None
)
self._predicted_ckpt_path = self.ckpt_path # TODO: remove in v1.8
results = self._run(model, ckpt_path=self.ckpt_path)
assert self.state.stopped
self.predicting = False
return results
def tune(
self,
model: "pl.LightningModule",
train_dataloaders: Optional[Union[TRAIN_DATALOADERS, LightningDataModule]] = None,
val_dataloaders: Optional[EVAL_DATALOADERS] = None,
datamodule: Optional[LightningDataModule] = None,
scale_batch_size_kwargs: Optional[Dict[str, Any]] = None,
lr_find_kwargs: Optional[Dict[str, Any]] = None,
) -> Dict[str, Optional[Union[int, _LRFinder]]]:
r"""
Runs routines to tune hyperparameters before training.
Args:
model: Model to tune.
train_dataloaders: A collection of :class:`torch.utils.data.DataLoader` or a
:class:`~pytorch_lightning.core.datamodule.LightningDataModule` specifying training samples.
In the case of multiple dataloaders, please see this :ref:`section <multiple-dataloaders>`.
val_dataloaders: A :class:`torch.utils.data.DataLoader` or a sequence of them specifying validation samples.
datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
scale_batch_size_kwargs: Arguments for :func:`~pytorch_lightning.tuner.batch_size_scaling.scale_batch_size`
lr_find_kwargs: Arguments for :func:`~pytorch_lightning.tuner.lr_finder.lr_find`
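        Example (a minimal usage sketch; ``LitModel`` and ``dm`` are hypothetical placeholders)::
            model = LitModel()
            trainer = Trainer(auto_scale_batch_size=True, auto_lr_find=True)
            result = trainer.tune(model, datamodule=dm)
            # depending on the flags above, ``result`` may contain the keys
            # ``"scale_batch_size"`` and ``"lr_find"``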
"""
Trainer._log_api_event("tune")
self.state.fn = TrainerFn.TUNING
self.state.status = TrainerStatus.RUNNING
self.tuning = True
# if a datamodule comes in as the second arg, then fix it for the user
if isinstance(train_dataloaders, LightningDataModule):
datamodule = train_dataloaders
train_dataloaders = None
# If you supply a datamodule you can't supply train_dataloader or val_dataloaders
if (train_dataloaders is not None or val_dataloaders is not None) and datamodule is not None:
raise MisconfigurationException(
"You cannot pass `train_dataloader` or `val_dataloaders` to `trainer.tune(datamodule=...)`"
)
# links data to the trainer
self._data_connector.attach_data(
model, train_dataloaders=train_dataloaders, val_dataloaders=val_dataloaders, datamodule=datamodule
)
with isolate_rng():
result = self.tuner._tune(
model, scale_batch_size_kwargs=scale_batch_size_kwargs, lr_find_kwargs=lr_find_kwargs
)
assert self.state.stopped
self.tuning = False
return result
def _restore_modules_and_callbacks(self, checkpoint_path: Optional[_PATH] = None) -> None:
# restore modules after setup
self._checkpoint_connector.resume_start(checkpoint_path)
self._checkpoint_connector.restore_model()
self._checkpoint_connector.restore_datamodule()
if self.state.fn == TrainerFn.FITTING:
# restore callback states
self._checkpoint_connector.restore_callbacks()
def _run(
self, model: "pl.LightningModule", ckpt_path: Optional[str] = None
) -> Optional[Union[_EVALUATE_OUTPUT, _PREDICT_OUTPUT]]:
# clean hparams
if hasattr(model, "hparams"):
parsing.clean_namespace(model.hparams)
# attach model to the training type plugin
self.strategy.connect(model)
self._callback_connector._attach_model_callbacks()
self._callback_connector._attach_model_logging_functions()
verify_loop_configurations(self)
# hook
log.detail(f"{self.__class__.__name__}: preparing data")
self._data_connector.prepare_data()
# ----------------------------
# SET UP TRAINING
# ----------------------------
self._call_callback_hooks("on_before_accelerator_backend_setup")
log.detail(f"{self.__class__.__name__}: setting up strategy environment")
self.strategy.setup_environment()
self.__setup_profiler()
self._call_setup_hook() # allow user to setup lightning_module in accelerator environment
# check if we should delay restoring checkpoint till later
if not self.strategy.restore_checkpoint_after_setup:
log.detail(f"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}")
self._restore_modules_and_callbacks(ckpt_path)
log.detail(f"{self.__class__.__name__}: configuring sharded model")
self._call_configure_sharded_model() # allow user to setup in model sharded environment
# ----------------------------
# INSPECT THE CORE LOOPS
# ----------------------------
fr"""
Lightning internal flow looks like this:
{Trainer.fit} or {Trainer.test} or {Trainer.predict} ||
| ||
spawn processes ||
{self.strategy.setup_environment} ||
| ||
setup accelerator ||
and strategy || LIGHTNING
| ||
{self._run_stage} || FLOW
| ||
{self._run_train} || DIRECTION
or {self._run_evaluate} ||
or {self._run_predict} ||
| ||
results \/
This is used to guide readers to the core loops: train, test, predict.
{self._run_predict} is the simplest to understand, use `Go to Definition` to read it :)
"""
# ----------------------------
# TRAIN
# ----------------------------
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
# strategy will configure model and move it to the device
self.strategy.setup(self)
# hook
if self.state.fn == TrainerFn.FITTING:
self._call_callback_hooks("on_fit_start")
self._call_lightning_module_hook("on_fit_start")
self._log_hyperparams()
if self.strategy.restore_checkpoint_after_setup:
log.detail(f"{self.__class__.__name__}: restoring module and callbacks from checkpoint path: {ckpt_path}")
self._restore_modules_and_callbacks(ckpt_path)
# restore optimizers, etc.
log.detail(f"{self.__class__.__name__}: restoring training state")
self._checkpoint_connector.restore_training_state()
self._checkpoint_connector.resume_end()
results = self._run_stage()
log.detail(f"{self.__class__.__name__}: trainer tearing down")
self._teardown()
# ----------------------------
# POST-Training CLEAN UP
# ----------------------------
# hook
if self.state.fn == TrainerFn.FITTING:
self._call_callback_hooks("on_fit_end")
self._call_lightning_module_hook("on_fit_end")
log.detail(f"{self.__class__.__name__}: calling teardown hooks")
self._call_teardown_hook()
self.state.status = TrainerStatus.FINISHED
self.state.stage = None
return results
def _log_hyperparams(self) -> None:
if not self.loggers:
return
# log hyper-parameters
hparams_initial = None
# save exp to get started (this is where the first experiment logs are written)
datamodule_log_hyperparams = self.datamodule._log_hyperparams if self.datamodule is not None else False
if self.lightning_module._log_hyperparams and datamodule_log_hyperparams:
datamodule_hparams = self.datamodule.hparams_initial
lightning_hparams = self.lightning_module.hparams_initial
inconsistent_keys = []
for key in lightning_hparams.keys() & datamodule_hparams.keys():
lm_val, dm_val = lightning_hparams[key], datamodule_hparams[key]
if type(lm_val) != type(dm_val):
inconsistent_keys.append(key)
elif isinstance(lm_val, torch.Tensor) and id(lm_val) != id(dm_val):
inconsistent_keys.append(key)
elif lm_val != dm_val:
inconsistent_keys.append(key)
if inconsistent_keys:
raise MisconfigurationException(
f"Error while merging hparams: the keys {inconsistent_keys} are present "
"in both the LightningModule's and LightningDataModule's hparams "
"but have different values."
)
hparams_initial = {**lightning_hparams, **datamodule_hparams}
elif self.lightning_module._log_hyperparams:
hparams_initial = self.lightning_module.hparams_initial
elif datamodule_log_hyperparams:
hparams_initial = self.datamodule.hparams_initial
for logger in self.loggers:
if hparams_initial is not None:
logger.log_hyperparams(hparams_initial)
logger.log_graph(self.lightning_module)
logger.save()
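    # A hedged illustration of the merge rule above (hypothetical values): if the LightningModule
    # logs ``{"lr": 0.1, "batch_size": 32}`` and the LightningDataModule logs
    # ``{"batch_size": 32, "num_workers": 4}``, the shared key ``batch_size`` agrees, so the merged
    # dict ``{"lr": 0.1, "batch_size": 32, "num_workers": 4}`` is sent to every logger; if the
    # shared values differed, a ``MisconfigurationException`` would be raised instead.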
def _teardown(self):
"""This is the Trainer's internal teardown, unrelated to the `teardown` hooks in LightningModule and
Callback; those are handled by :meth:`_call_teardown_hook`."""
self.strategy.post_dispatch(self)
self.strategy.teardown()
loop = self._active_loop
        # loop should never be `None` here, but it can be because we don't know the trainer stage with `ddp_spawn`
if loop is not None:
loop.teardown()
self._logger_connector.teardown()
self._signal_connector.teardown()
def run_stage(self) -> None:
rank_zero_deprecation(
"`Trainer.run_stage` is deprecated in v1.6 and will be removed in v1.8. Use"
" `Trainer.{fit,validate,test,predict}` instead."
)
return self._run_stage()
def _run_stage(self):
self.strategy.barrier("run-stage")
self.strategy.dispatch(self)
if self.evaluating:
return self._run_evaluate()
if self.predicting:
return self._run_predict()
return self._run_train()
def _pre_training_routine(self):
# wait for all to join if on distributed
self.strategy.barrier("setup_training")
# register signals
self._signal_connector.register_signal_handlers()
# --------------------------
# Pre-train
# --------------------------
self._call_callback_hooks("on_pretrain_routine_start")
self._call_lightning_module_hook("on_pretrain_routine_start")
self._call_callback_hooks("on_pretrain_routine_end")
self._call_lightning_module_hook("on_pretrain_routine_end")
def _run_train(self) -> None:
self._pre_training_routine()
with isolate_rng():
self._run_sanity_check()
# enable train mode
self.model.train()
torch.set_grad_enabled(True)
self.fit_loop.trainer = self
with torch.autograd.set_detect_anomaly(self._detect_anomaly):
self.fit_loop.run()
def _run_evaluate(self) -> _EVALUATE_OUTPUT:
assert self.evaluating
# reload dataloaders
self._evaluation_loop._reload_evaluation_dataloaders()
# reset trainer on this loop and all child loops in case user connected a custom loop
self._evaluation_loop.trainer = self
with self.profiler.profile(f"run_{self.state.stage}_evaluation"), torch.no_grad():
eval_loop_results = self._evaluation_loop.run()
# remove the tensors from the eval results
for result in eval_loop_results:
if isinstance(result, dict):
for k, v in result.items():
if isinstance(v, torch.Tensor):
result[k] = v.cpu().item()
return eval_loop_results
def _run_predict(self) -> Optional[_PREDICT_OUTPUT]:
self.reset_predict_dataloader(self.lightning_module)
# reset trainer on this loop and all child loops in case user connected a custom loop
self.predict_loop.trainer = self
with torch.no_grad():
return self.predict_loop.run()
def _run_sanity_check(self) -> None:
val_loop = self.fit_loop.epoch_loop.val_loop
should_sanity_check = (
self.enable_validation
and self.num_sanity_val_steps > 0
# do not sanity check if restarting because it would mess up the loaded state
and not val_loop.restarting
)
# run tiny validation (if validation defined)
# to make sure program won't crash during val
if should_sanity_check:
stage = self.state.stage
self.sanity_checking = True
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
self._call_callback_hooks("on_sanity_check_start")
# reload dataloaders
val_loop._reload_evaluation_dataloaders()
self.num_sanity_val_batches = [
min(self.num_sanity_val_steps, val_batches) for val_batches in self.num_val_batches
]
# run eval step
with torch.no_grad():
val_loop.run()
self._call_callback_hooks("on_sanity_check_end")
# reset logger connector
self._logger_connector.reset_results()
self._logger_connector.reset_metrics()
# reset the progress tracking state after sanity checking. we don't need to set the state before
# because sanity check only runs when we are not restarting
_reset_progress(val_loop)
            # restore the previous stage when the sanity check is finished
self.state.stage = stage
def __set_ckpt_path(self, ckpt_path: Optional[str], model_provided: bool, model_connected: bool) -> Optional[str]:
# fault-tolerance takes precedence
from pytorch_lightning.callbacks.fault_tolerance import _FaultToleranceCheckpoint
ft_checkpoints = [cb for cb in self.callbacks if isinstance(cb, _FaultToleranceCheckpoint)]
if ft_checkpoints:
ft_ckpt_path = ft_checkpoints[0].ckpt_path
fs = get_filesystem(ft_ckpt_path)
if fs.exists(ft_ckpt_path):
return ft_ckpt_path
if model_provided and ckpt_path is None:
            # use the model passed to this function without loading any checkpoint weights
return
fn = self.state.fn.value
if model_connected and ckpt_path is None:
rank_zero_warn(
f"`.{fn}(ckpt_path=None)` was called without a model."
" The best model of the previous `fit` call will be used."
f" You can pass `{fn}(ckpt_path='best')` to use and best model"
" checkpoint and avoid this warning or"
" `ckpt_path=trainer.checkpoint_callback.last_model_path` to use the last model."
)
ckpt_path = "best"
if ckpt_path == "best":
if len(self.checkpoint_callbacks) > 1:
rank_zero_warn(
f'`.{fn}(ckpt_path="best")` is called with Trainer configured with multiple `ModelCheckpoint`'
" callbacks. It will use the best checkpoint path from first checkpoint callback."
)
if not self.checkpoint_callback:
raise MisconfigurationException(
f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured.'
)
if not self.checkpoint_callback.best_model_path:
if self.fast_dev_run:
raise MisconfigurationException(
f'You cannot execute `.{fn}(ckpt_path="best")` with `fast_dev_run=True`.'
f" Please pass an exact checkpoint path to `.{fn}(ckpt_path=...)`"
)
raise MisconfigurationException(
f'`.{fn}(ckpt_path="best")` is set but `ModelCheckpoint` is not configured to save the best model.'
)
# load best weights
ckpt_path = self.checkpoint_callback.best_model_path
if not ckpt_path:
raise MisconfigurationException(
f"`.{fn}()` found no path for the best weights: {ckpt_path!r}. Please"
f" specify a path for a checkpoint `.{fn}(ckpt_path=PATH)`"
)
return ckpt_path
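    # A hedged summary of the resolution order implemented above: a fault-tolerance checkpoint found
    # on disk always wins; next, an explicitly passed ``ckpt_path`` is used as-is; with a model
    # instance but no path, the current in-memory weights are used; with neither, the path falls back
    # to ``"best"``, i.e. ``trainer.checkpoint_callback.best_model_path``. For example (hypothetical
    # call), ``trainer.test(ckpt_path="best")`` after a ``fit`` run with a ``ModelCheckpoint``
    # callback loads that callback's best checkpoint.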
def _call_setup_hook(self) -> None:
fn = self.state.fn._setup_fn
self.strategy.barrier("pre_setup")
if self.datamodule is not None:
self.datamodule.setup(stage=fn)
self._call_callback_hooks("setup", stage=fn)
self._call_lightning_module_hook("setup", stage=fn)
self.strategy.barrier("post_setup")
def _call_configure_sharded_model(self) -> None:
with self.strategy.model_sharded_context():
self._handle_meta_model()
self._call_lightning_module_hook("configure_sharded_model")
self._call_callback_hooks("on_configure_sharded_model")
def _handle_meta_model(self) -> None:
if not is_on_meta_device(self.lightning_module):
return
if isinstance(self.strategy, DDPSpawnStrategy):
raise MisconfigurationException("LightningModule on meta device isn't supported with spawn.")
materialize_module(self.lightning_module)
# the trainer reference is lost during materialization
self.lightning_module.trainer = proxy(self)
def _call_teardown_hook(self) -> None:
fn = self.state.fn._setup_fn
if self.datamodule is not None:
self.datamodule.teardown(stage=fn)
self._call_callback_hooks("teardown", stage=fn)
self._call_lightning_module_hook("teardown", stage=fn)
self.lightning_module._current_fx_name = None
# these could have become stale if metrics are defined in `setup`
self.lightning_module._metric_attributes = None
        # todo: TPU with 8 cores hangs in flush with TensorBoard. This might apply to all loggers.
        # It might be related to xla tensors being blocked when moving to the cpu, which kills the loggers.
for logger in self.loggers:
logger.finalize("success")
# summarize profile results
self.profiler.describe()
def call_hook(
self, hook_name: str, *args: Any, pl_module: Optional["pl.LightningModule"] = None, **kwargs: Any
) -> Any:
r"""
.. deprecated:: v1.6
The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.
"""
rank_zero_deprecation("The Trainer's `call_hook` method was deprecated in v1.6 and will be removed in v1.8.")
pl_module = self.lightning_module or pl_module
if pl_module:
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
# always profile hooks
with self.profiler.profile(hook_name):
# first call trainer hook
callback_fx = getattr(self, hook_name, None)
if callable(callback_fx):
callback_fx(*args, **kwargs)
# next call hook in lightningModule
output = None
model_fx = getattr(pl_module, hook_name, None)
if callable(model_fx):
output = model_fx(*args, **kwargs)
# call the strategy hook
if hook_name not in ("setup", "teardown", "on_train_start") and hasattr(self.strategy, hook_name):
strategy_hook = getattr(self.strategy, hook_name)
strategy_output = strategy_hook(*args, **kwargs)
output = strategy_output if output is None else output
if pl_module:
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name
return output
def _call_lightning_module_hook(
self,
hook_name: str,
*args: Any,
pl_module: Optional["pl.LightningModule"] = None,
**kwargs: Any,
) -> Any:
pl_module = pl_module or self.lightning_module
if pl_module is None:
raise TypeError("No Lightning Module is available to call hooks on")
fn = getattr(pl_module, hook_name)
if not callable(fn):
return
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
with self.profiler.profile(f"[LightningModule]{pl_module.__class__.__name__}.{hook_name}"):
output = fn(*args, **kwargs)
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name
return output
def _call_callback_hooks(
self,
hook_name: str,
*args: Any,
**kwargs: Any,
) -> None:
log.detail(f"{self.__class__.__name__}: calling callback hook: {hook_name}")
# TODO: remove if block in v1.8
if hook_name in ("on_init_start", "on_init_end"):
# these `Callback` hooks are the only ones that do not take a lightning module.
            # we also don't profile because the profiler hasn't been set up yet
for callback in self.callbacks:
fn = getattr(callback, hook_name)
if callable(fn):
fn(self, *args, **kwargs)
return
pl_module = self.lightning_module
if pl_module:
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
# TODO: remove if block in v1.7
if hook_name == "on_train_batch_start":
with self.profiler.profile(hook_name):
self._on_train_batch_start(*args, **kwargs)
elif hook_name == "on_train_batch_end":
with self.profiler.profile(hook_name):
self._on_train_batch_end(*args, **kwargs)
else:
for callback in self.callbacks:
fn = getattr(callback, hook_name)
if callable(fn):
with self.profiler.profile(f"[Callback]{callback.state_key}.{hook_name}"):
fn(self, self.lightning_module, *args, **kwargs)
if pl_module:
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name
# TODO: Delete this in v1.7 (deprecations: #9816 and #11148)
def _on_train_batch_start(self, batch, batch_idx, dataloader_idx=0):
r"""Called when the training batch begins. This function is needed because of two different deprecations affecting
the original function in TrainerCallbackHookMixin: #9816 and #11148.
"""
for callback in self.callbacks:
if is_param_in_hook_signature(callback.on_train_batch_start, "dataloader_idx", explicit=True):
callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx, 0)
else:
callback.on_train_batch_start(self, self.lightning_module, batch, batch_idx)
# TODO: Delete this in v1.7 (deprecations: #9816 and #11148)
def _on_train_batch_end(self, outputs: STEP_OUTPUT, batch, batch_idx, dataloader_idx=0):
r"""Called when the training batch ends. This function is needed because of two different deprecations affecting
the original function in TrainerCallbackHookMixin: #9816 and #11148.
"""
for callback in self.callbacks:
if is_param_in_hook_signature(callback.on_train_batch_end, "dataloader_idx", explicit=True):
callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx, 0)
else:
callback.on_train_batch_end(self, self.lightning_module, outputs, batch, batch_idx)
def _call_callbacks_state_dict(self) -> Dict[str, dict]:
"""Called when saving a model checkpoint, calls and returns every callback's `state_dict`, keyed by
`Callback.state_key`."""
callback_state_dicts = {}
for callback in self.callbacks:
state_dict = callback.state_dict()
if state_dict:
callback_state_dicts[callback.state_key] = state_dict
return callback_state_dicts
def _call_callbacks_on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Called when saving a model checkpoint, calls every callback's `on_save_checkpoint` hook.
Will be removed in v1.8: If state is returned, we insert the callback state into
``checkpoint["callbacks"][Callback.state_key]``. It overrides ``state_dict`` if already present.
"""
for callback in self.callbacks:
# TODO: Add profiling for on_save_checkpoint hook
state = callback.on_save_checkpoint(self, self.lightning_module, checkpoint)
if state:
# TODO: Add deprecation warning if state is returned (see reference PR #11887)
checkpoint["callbacks"][callback.state_key] = state
def _call_callbacks_on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
"""Called when loading a model checkpoint.
Calls every callback's `on_load_checkpoint` hook. We have a dedicated function for this rather than using
`_call_callback_hooks` because we have special logic for getting callback_states.
"""
callback_states: Dict[Union[Type, str], Dict] = checkpoint.get("callbacks")
if callback_states is None:
return
is_legacy_ckpt = Version(checkpoint["pytorch-lightning_version"]) < Version("1.5.0dev")
current_callbacks_keys = {cb._legacy_state_key if is_legacy_ckpt else cb.state_key for cb in self.callbacks}
difference = callback_states.keys() - current_callbacks_keys
if difference:
rank_zero_warn(
"Be aware that when using `ckpt_path`,"
" callbacks used to create the checkpoint need to be provided during `Trainer` instantiation."
f" Please add the following callbacks: {list(difference)}.",
)
for callback in self.callbacks:
state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))
if state:
state = deepcopy(state)
# TODO: Add profiling for on_load_checkpoint hook
callback.on_load_checkpoint(self, self.lightning_module, state)
def _call_callbacks_load_state_dict(self, checkpoint: Dict[str, Any]) -> None:
"""Called when loading a model checkpoint, calls every callback's `load_state_dict`."""
callback_states: Dict[Union[Type, str], Dict] = checkpoint.get("callbacks")
if callback_states is None:
return
for callback in self.callbacks:
state = callback_states.get(callback.state_key, callback_states.get(callback._legacy_state_key))
if state:
state = deepcopy(state)
callback.load_state_dict(state)
def _call_strategy_hook(
self,
hook_name: str,
*args: Any,
**kwargs: Any,
) -> Any:
pl_module = self.lightning_module
prev_fx_name = pl_module._current_fx_name
pl_module._current_fx_name = hook_name
fn = getattr(self.strategy, hook_name)
if not callable(fn):
return
with self.profiler.profile(f"[Strategy]{self.strategy.__class__.__name__}.{hook_name}"):
output = fn(*args, **kwargs)
# restore current_fx when nested context
pl_module._current_fx_name = prev_fx_name
return output
@staticmethod
def _parse_devices(
gpus: Optional[Union[List[int], str, int]],
auto_select_gpus: bool,
tpu_cores: Optional[Union[List[int], str, int]],
) -> Tuple[Optional[List[int]], Optional[Union[List[int], int]]]:
return device_parser._parse_devices(gpus, auto_select_gpus, tpu_cores)
@staticmethod
def _log_api_event(event: str) -> None:
torch._C._log_api_usage_once("lightning.trainer." + event)
def __init_profiler(self, profiler: Optional[Union[BaseProfiler, str]]) -> None:
if isinstance(profiler, str):
PROFILERS = {
"simple": SimpleProfiler,
"advanced": AdvancedProfiler,
"pytorch": PyTorchProfiler,
"xla": XLAProfiler,
}
profiler = profiler.lower()
if profiler not in PROFILERS:
raise MisconfigurationException(
"When passing string value for the `profiler` parameter of `Trainer`,"
f" it can only be one of {list(PROFILERS.keys())}"
)
profiler_class = PROFILERS[profiler]
profiler = profiler_class()
self.profiler: BaseProfiler = profiler or PassThroughProfiler()
def __setup_profiler(self) -> None:
local_rank = self.local_rank if self.world_size > 1 else None
self.profiler._lightning_module = proxy(self.lightning_module)
self.profiler.setup(stage=self.state.fn._setup_fn, local_rank=local_rank, log_dir=self.log_dir)
def _log_device_info(self) -> None:
rank_zero_info(
f"GPU available: {torch.cuda.is_available()}, used: {isinstance(self.accelerator, GPUAccelerator)}"
)
num_tpu_cores = (
self.tpu_cores if self.tpu_cores is not None and isinstance(self.accelerator, TPUAccelerator) else 0
)
rank_zero_info(f"TPU available: {_TPU_AVAILABLE}, using: {num_tpu_cores} TPU cores")
num_ipus = self.ipus if self.ipus is not None else 0
rank_zero_info(f"IPU available: {_IPU_AVAILABLE}, using: {num_ipus} IPUs")
if torch.cuda.is_available() and not isinstance(self.accelerator, GPUAccelerator):
rank_zero_warn(
"GPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='gpu', devices={GPUAccelerator.auto_device_count()})`.",
category=PossibleUserWarning,
)
if _TPU_AVAILABLE and not isinstance(self.accelerator, TPUAccelerator):
rank_zero_warn(
"TPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='tpu', devices={TPUAccelerator.auto_device_count()})`."
)
if _IPU_AVAILABLE and not isinstance(self.accelerator, IPUAccelerator):
rank_zero_warn(
"IPU available but not used. Set `accelerator` and `devices` using"
f" `Trainer(accelerator='ipu', devices={IPUAccelerator.auto_device_count()})`."
)
"""
Data loading methods
"""
def reset_train_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets the train dataloader and initialises required variables (number of batches, when to validate,
etc.).
Args:
model: The ``LightningModule`` if calling this outside of the trainer scope.
"""
source = self._data_connector._train_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("training_step", pl_module)
enable_training = self.limit_train_batches > 0
if not (source.is_defined() and has_step and enable_training):
return
self.train_dataloader = self._data_connector._request_dataloader(RunningStage.TRAINING, model=model)
if self.overfit_batches > 0:
self.train_dataloader = self._data_connector._resolve_overfit_batches(self.train_dataloader)
# automatically add samplers
self.train_dataloader = apply_to_collection(
self.train_dataloader,
(DataLoader, CombinedLoader),
self._data_connector._prepare_dataloader,
mode=RunningStage.TRAINING,
)
loaders = (
self.train_dataloader.loaders
if isinstance(self.train_dataloader, CombinedLoader)
else self.train_dataloader
)
# check the workers recursively
apply_to_collection(loaders, DataLoader, self._data_connector._worker_check, "train_dataloader")
# add worker_init_fn for correct seeding in worker processes
apply_to_collection(loaders, DataLoader, _auto_add_worker_init_fn, rank=self.global_rank)
# add collate_fn to collect metadata for fault tolerant training
if _fault_tolerant_training():
apply_to_collection(loaders, DataLoader, _add_capture_metadata_collate)
# wrap the sequence of train loaders to a CombinedLoader object for computing the num_training_batches
if not isinstance(self.train_dataloader, CombinedLoader):
self.train_dataloader = CombinedLoader(loaders, self._data_connector.multiple_trainloader_mode)
module = model or self.lightning_module or self.datamodule
self.num_training_batches = (
len(self.train_dataloader)
if has_len_all_ranks(self.train_dataloader, self.strategy, module)
else float("inf")
)
if isinstance(self.limit_train_batches, int):
self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))
elif self.num_training_batches != float("inf"):
self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)
elif self.limit_train_batches != 1.0:
raise MisconfigurationException(
"When using an IterableDataset for `limit_train_batches`,"
" `Trainer(limit_train_batches)` must be `1.0` or an int. An int k specifies"
" `num_training_batches` to use."
)
if isinstance(self.val_check_interval, int):
self.val_check_batch = self.val_check_interval
if self.val_check_batch > self.num_training_batches:
raise ValueError(
f"`val_check_interval` ({self.val_check_interval}) must be less than or equal "
f"to the number of the training batches ({self.num_training_batches}). "
"If you want to disable validation set `limit_val_batches` to 0.0 instead."
)
else:
if not has_len_all_ranks(self.train_dataloader, self.strategy, module):
if self.val_check_interval == 1.0:
self.val_check_batch = float("inf")
else:
raise MisconfigurationException(
"When using an IterableDataset for `train_dataloader`,"
" `Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies"
" checking validation every k training batches."
)
else:
self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
self.val_check_batch = max(1, self.val_check_batch)
if self.loggers and self.num_training_batches < self.log_every_n_steps:
rank_zero_warn(
f"The number of training samples ({self.num_training_batches}) is smaller than the logging interval"
f" Trainer(log_every_n_steps={self.log_every_n_steps}). Set a lower value for log_every_n_steps if"
" you want to see logs for the training epoch.",
category=PossibleUserWarning,
)
# store epoch of dataloader reset for reload_dataloaders_every_n_epochs
self._last_train_dl_reload_epoch = self.current_epoch
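    # A hedged worked example of the ``val_check_interval`` handling above (hypothetical numbers):
    # with 1000 training batches and ``val_check_interval=0.25``, validation runs every
    # ``int(1000 * 0.25) == 250`` training batches; with ``val_check_interval=100`` (an int), it
    # runs every 100 batches; an int larger than 1000 raises a ``ValueError``.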
def reset_val_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets the validation dataloader and determines the number of batches.
Args:
model: The ``LightningModule`` if called outside of the trainer scope.
"""
source = self._data_connector._val_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("validation_step", pl_module)
enable_validation = self.limit_val_batches > 0
if source.is_defined() and has_step and enable_validation:
self.num_val_batches, self.val_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.VALIDATING, model=pl_module
)
# store epoch of dataloader reset for reload_dataloaders_every_n_epochs
self._last_val_dl_reload_epoch = self.current_epoch
def reset_test_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets the test dataloader and determines the number of batches.
Args:
model: The ``LightningModule`` if called outside of the trainer scope.
"""
source = self._data_connector._test_dataloader_source
pl_module = self.lightning_module or model
has_step = is_overridden("test_step", pl_module)
enable_testing = self.limit_test_batches > 0
if source.is_defined() and has_step and enable_testing:
self.num_test_batches, self.test_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.TESTING, model=pl_module
)
def reset_predict_dataloader(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets the predict dataloader and determines the number of batches.
Args:
model: The ``LightningModule`` if called outside of the trainer scope.
"""
source = self._data_connector._predict_dataloader_source
pl_module = self.lightning_module or model
enable_prediction = self.limit_predict_batches > 0
if source.is_defined() and enable_prediction:
self.num_predict_batches, self.predict_dataloaders = self._data_connector._reset_eval_dataloader(
RunningStage.PREDICTING, model=pl_module
)
def reset_train_val_dataloaders(self, model: Optional["pl.LightningModule"] = None) -> None:
"""Resets train and val dataloaders if none are attached to the trainer.
The val dataloader must be initialized before training loop starts, as the training loop
inspects the val dataloader to determine whether to run the evaluation loop.
Args:
model: The ``LightningModule`` if called outside of the trainer scope.
"""
if self.train_dataloader is None:
self.reset_train_dataloader(model=model)
if self.val_dataloaders is None:
self.reset_val_dataloader(model=model)
"""
Accelerator properties
"""
@property
def accelerator(self) -> Accelerator:
return self.strategy.accelerator
@property
def strategy(self) -> Strategy:
return self._accelerator_connector.strategy
@property
def training_type_plugin(self) -> Strategy:
rank_zero_deprecation(
"`Trainer.training_type_plugin` is deprecated in v1.6 and will be removed in v1.8. Use"
" `Trainer.strategy` instead."
)
return self.strategy
@property
def precision_plugin(self) -> PrecisionPlugin:
return self.strategy.precision_plugin
@property
def global_rank(self) -> int:
return self.strategy.global_rank
@property
def local_rank(self) -> int:
# some training types define a local rank
return getattr(self.strategy, "local_rank", 0)
@property
def node_rank(self) -> int:
# some training types define a node rank
return getattr(self.strategy, "node_rank", 0)
@property
def world_size(self) -> int:
# some training types define a world size
return getattr(self.strategy, "world_size", 1)
@property
def should_rank_save_checkpoint(self) -> bool:
rank_zero_deprecation(
"`Trainer.should_rank_save_checkpoint` is deprecated in v1.6 and will be removed in v1.8.", stacklevel=5
)
strategy = self.strategy
        return (
            (isinstance(strategy, pl.strategies.TPUSpawnStrategy) and strategy.local_rank == 0)
            or strategy.is_global_zero
        )
@property
def num_nodes(self) -> int:
return getattr(self.strategy, "num_nodes", 1)
@property
def device_ids(self) -> List[int]:
"""List of device indexes per node."""
devices = getattr(self.strategy, "parallel_devices", [self.strategy.root_device])
device_ids = []
for idx, device in enumerate(devices):
if isinstance(device, torch.device):
device_ids.append(device.index or idx)
elif isinstance(device, int):
device_ids.append(device)
return device_ids
@property
def num_devices(self) -> int:
"""Number of devices the trainer uses per node."""
return len(self.device_ids)
@property
def num_processes(self) -> int:
return self._accelerator_connector.num_processes
@property
def root_gpu(self) -> Optional[int]:
rank_zero_deprecation(
"`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. "
"Please use `Trainer.strategy.root_device.index` instead."
)
return self.strategy.root_device.index if isinstance(self.accelerator, GPUAccelerator) else None
@property
def tpu_cores(self) -> int:
return self._accelerator_connector.tpu_cores
@property
def ipus(self) -> int:
return self._accelerator_connector.num_ipus
@property
def num_gpus(self) -> int:
rank_zero_deprecation(
"`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` instead."
)
return self.num_devices if isinstance(self.accelerator, GPUAccelerator) else 0
@property
def devices(self) -> int:
rank_zero_deprecation(
"`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
)
return self.num_devices
@property
def data_parallel_device_ids(self) -> Optional[List[int]]:
return (
self._accelerator_connector.parallel_device_ids if self._accelerator_connector.parallel_device_ids else None
)
@property
def lightning_module(self) -> "pl.LightningModule":
# TODO: this is actually an optional return
return self.strategy.lightning_module
@property
def optimizers(self) -> List[Optimizer]:
return self.strategy.optimizers
@optimizers.setter
def optimizers(self, new_optims: Optional[List[Optimizer]]) -> None:
self.strategy.optimizers = new_optims
@property
def lightning_optimizers(self) -> Dict[int, LightningOptimizer]:
rank_zero_deprecation(
"`Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8", stacklevel=5
)
return self.strategy._lightning_optimizers
@property
def lr_scheduler_configs(self) -> List[LRSchedulerConfig]:
return self.strategy.lr_scheduler_configs
@property
def lr_schedulers(self) -> List[Dict[str, Any]]:
rank_zero_deprecation(
"`Trainer.lr_schedulers` is deprecated in v1.6 and will be removed in v1.8."
" You can use `trainer.lr_scheduler_configs` instead which contains dataclasses instead of dictionaries.",
stacklevel=5,
)
from dataclasses import asdict
return [asdict(config) for config in self.strategy.lr_scheduler_configs]
@property
def optimizer_frequencies(self) -> List[int]:
return self.strategy.optimizer_frequencies
@optimizer_frequencies.setter
def optimizer_frequencies(self, new_freqs: List[int]) -> None:
self.strategy.optimizer_frequencies = new_freqs
@property
def amp_backend(self) -> Optional[AMPType]:
if isinstance(self.precision_plugin, ApexMixedPrecisionPlugin):
return AMPType.APEX
if isinstance(self.precision_plugin, NativeMixedPrecisionPlugin):
return AMPType.NATIVE
return None
@property
def precision(self) -> Union[str, int]:
return self.strategy.precision_plugin.precision
@property
def scaler(self) -> Optional[Any]:
return getattr(self.precision_plugin, "scaler", None)
@property
def gpus(self) -> Optional[Union[List[int], str, int]]:
return self._accelerator_connector.gpus
@property
def model(self) -> torch.nn.Module:
"""The LightningModule, but possibly wrapped into DataParallel or DistributedDataParallel.
To access the pure LightningModule, use
:meth:`~pytorch_lightning.trainer.trainer.Trainer.lightning_module` instead.
"""
return self.strategy.model
@model.setter
def model(self, model: torch.nn.Module) -> None:
"""Setter for the model, pass-through to accelerator and plugin where the model reference is stored. Used
by the Tuner to reset the state of Trainer and Accelerator.
Args:
model: The LightningModule, possibly wrapped into DataParallel or DistributedDataParallel, depending
on the backend.
"""
self.strategy.model = model
"""
General properties
"""
@property
def log_dir(self) -> Optional[str]:
if len(self.loggers) == 1:
if isinstance(self.logger, TensorBoardLogger):
dirpath = self.logger.log_dir
else:
dirpath = self.logger.save_dir
else:
dirpath = self.default_root_dir
dirpath = self.strategy.broadcast(dirpath)
return dirpath
@property
def use_amp(self) -> bool:
rank_zero_deprecation(
"`Trainer.use_amp` is deprecated in v1.6.0 and will be removed in v1.8.0."
" Please use `Trainer.amp_backend` instead."
)
return self.precision == 16
@property
def is_global_zero(self) -> bool:
return self.strategy.is_global_zero
@property
def slurm_job_id(self) -> Optional[int]:
rank_zero_deprecation("Method `slurm_job_id` is deprecated in v1.6.0 and will be removed in v1.7.0.")
return SLURMEnvironment.job_id()
@property
def distributed_sampler_kwargs(self) -> Optional[dict]:
if isinstance(self.strategy, ParallelStrategy):
return self.strategy.distributed_sampler_kwargs
@property
def data_parallel(self) -> bool:
return isinstance(self.strategy, ParallelStrategy)
@property
def progress_bar_dict(self) -> dict:
"""Read-only for progress bar metrics."""
rank_zero_deprecation(
"`trainer.progress_bar_dict` is deprecated in v1.5 and will be removed in v1.7."
" Use `ProgressBarBase.get_metrics` instead."
)
ref_model = self.lightning_module
ref_model = cast(pl.LightningModule, ref_model)
if self.progress_bar_callback:
return self.progress_bar_callback.get_metrics(self, ref_model)
return self.progress_bar_metrics
@property
def enable_validation(self) -> bool:
"""Check if we should run validation during training."""
return (
self._data_connector._val_dataloader_source.is_defined()
and is_overridden("validation_step", self.lightning_module)
and self.limit_val_batches > 0
)
@property
def default_root_dir(self) -> str:
"""The default location to save artifacts of loggers, checkpoints etc.
It is used as a fallback if logger or checkpoint callback do not define specific save paths.
"""
if get_filesystem(self._default_root_dir).protocol == "file":
return os.path.normpath(self._default_root_dir)
return self._default_root_dir
@property
def weights_save_path(self) -> str:
"""
The default root location to save weights (checkpoints), e.g., when the
:class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` does not define a file path.
.. deprecated:: v1.6
`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.
"""
rank_zero_deprecation("`Trainer.weights_save_path` has been deprecated in v1.6 and will be removed in v1.8.")
return self._weights_save_path_internal
# TODO: Remove _weights_save_path_internal in v1.8
@property
def _weights_save_path_internal(self) -> str:
"""This is an internal implementation of weights_save_path which allows weights_save_path to be used
internally by the framework without emitting a deprecation warning.
To be removed in v1.8.
"""
if get_filesystem(self._weights_save_path).protocol == "file":
return os.path.normpath(self._weights_save_path)
return self._weights_save_path
@property
def early_stopping_callback(self) -> Optional[EarlyStopping]:
"""The first :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` callback in the
Trainer.callbacks list, or ``None`` if it doesn't exist."""
callbacks = self.early_stopping_callbacks
return callbacks[0] if len(callbacks) > 0 else None
@property
def early_stopping_callbacks(self) -> List[EarlyStopping]:
"""A list of all instances of :class:`~pytorch_lightning.callbacks.early_stopping.EarlyStopping` found in
the Trainer.callbacks list."""
return [c for c in self.callbacks if isinstance(c, EarlyStopping)]
@property
def prediction_writer_callbacks(self) -> List[BasePredictionWriter]:
"""A list of all instances of :class:`~pytorch_lightning.callbacks.prediction_writer.BasePredictionWriter`
found in the Trainer.callbacks list."""
return [cb for cb in self.callbacks if isinstance(cb, BasePredictionWriter)]
@property
def checkpoint_callback(self) -> Optional[ModelCheckpoint]:
"""The first :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` callback in the
Trainer.callbacks list, or ``None`` if it doesn't exist."""
callbacks = self.checkpoint_callbacks
return callbacks[0] if len(callbacks) > 0 else None
@property
def checkpoint_callbacks(self) -> List[ModelCheckpoint]:
"""A list of all instances of :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint` found
in the Trainer.callbacks list."""
return [c for c in self.callbacks if isinstance(c, ModelCheckpoint)]
@property
def progress_bar_callback(self) -> Optional[ProgressBarBase]:
"""An instance of :class:`~pytorch_lightning.callbacks.progress.base.ProgressBarBase` found in the
Trainer.callbacks list, or ``None`` if one doesn't exist."""
for c in self.callbacks:
if isinstance(c, ProgressBarBase):
return c
return None
@property
def resume_from_checkpoint(self) -> Optional[Union[str, Path]]:
resume_from_checkpoint = self._checkpoint_connector.resume_from_checkpoint_fit_path
if resume_from_checkpoint is not None:
rank_zero_deprecation(
"`trainer.resume_from_checkpoint` is deprecated in v1.5 and will be removed in v2.0."
" Specify the fit checkpoint path with `trainer.fit(ckpt_path=)` instead.",
stacklevel=5,
)
return resume_from_checkpoint
@property
def ckpt_path(self) -> Optional[str]:
"""Set to the path/URL of a checkpoint loaded via :meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`,
:meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`,
:meth:`~pytorch_lightning.trainer.trainer.Trainer.test`, or
:meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`. ``None`` otherwise."""
return self._ckpt_path
@property
def validated_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._validated_ckpt_path
@validated_ckpt_path.setter
def validated_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.validated_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path`.",
stacklevel=5,
)
self._validated_ckpt_path = ckpt_path
@property
def tested_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._tested_ckpt_path
@tested_ckpt_path.setter
def tested_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.tested_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path` instead.",
stacklevel=5,
)
self._tested_ckpt_path = ckpt_path
@property
def predicted_ckpt_path(self) -> Optional[str]:
rank_zero_deprecation(
"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via"
" `Trainer.ckpt_path` instead.",
stacklevel=5,
)
return self._predicted_ckpt_path
@predicted_ckpt_path.setter
def predicted_ckpt_path(self, ckpt_path: Optional[str]) -> None:
rank_zero_deprecation(
"The `Trainer.predicted_ckpt_path` attribute was deprecated in v1.6 and will be removed in v1.8. The"
" path of a checkpoint loaded via `Trainer.{fit,validate,test,predict}` should be accessed via the"
" read-only `Trainer.ckpt_path` instead.",
stacklevel=5,
)
self._predicted_ckpt_path = ckpt_path
def save_checkpoint(
self, filepath: _PATH, weights_only: bool = False, storage_options: Optional[Any] = None
) -> None:
r"""
Runs routine to create a checkpoint.
Args:
filepath: Path where checkpoint is saved.
weights_only: If ``True``, will only save the model weights.
storage_options: parameter for how to save to storage, passed to ``CheckpointIO`` plugin
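        Example (a minimal usage sketch; ``model`` and the checkpoint paths are hypothetical placeholders)::
            trainer = Trainer()
            trainer.fit(model)
            trainer.save_checkpoint("checkpoints/manual.ckpt")
            # weights only, without optimizer or loop state
            trainer.save_checkpoint("checkpoints/weights.ckpt", weights_only=True)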
"""
self._checkpoint_connector.save_checkpoint(filepath, weights_only=weights_only, storage_options=storage_options)
"""
Parsing properties
"""
@classmethod
def default_attributes(cls) -> dict:
init_signature = inspect.signature(cls)
return {k: v.default for k, v in init_signature.parameters.items()}
@classmethod
def get_deprecated_arg_names(cls) -> List:
"""Returns a list with deprecated Trainer arguments."""
depr_arg_names = []
for name, val in cls.__dict__.items():
if name.startswith("DEPRECATED") and isinstance(val, (tuple, list)):
depr_arg_names.extend(val)
return depr_arg_names
@classmethod
def from_argparse_args(cls: Any, args: Union[Namespace, ArgumentParser], **kwargs) -> Any:
return from_argparse_args(cls, args, **kwargs)
@classmethod
def parse_argparser(cls, arg_parser: Union[ArgumentParser, Namespace]) -> Namespace:
return parse_argparser(cls, arg_parser)
@classmethod
def match_env_arguments(cls) -> Namespace:
return parse_env_variables(cls)
@classmethod
def add_argparse_args(cls, parent_parser: ArgumentParser, **kwargs) -> ArgumentParser:
return add_argparse_args(cls, parent_parser, **kwargs)
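    # A hedged usage sketch of the argparse helpers above (hypothetical script)::
    #
    #     from argparse import ArgumentParser
    #     parser = ArgumentParser()
    #     parser = Trainer.add_argparse_args(parser)    # adds the Trainer __init__ arguments as flags
    #     args = parser.parse_args()                    # e.g. ``--max_epochs 3 --accelerator gpu``
    #     trainer = Trainer.from_argparse_args(args, logger=False)  # extra kwargs override parsed args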
"""
State properties
"""
@property
def interrupted(self) -> bool:
return self.state.status == TrainerStatus.INTERRUPTED
@property
def training(self) -> bool:
return self.state.stage == RunningStage.TRAINING
@training.setter
def training(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TRAINING
elif self.training:
self.state.stage = None
@property
def testing(self) -> bool:
return self.state.stage == RunningStage.TESTING
@testing.setter
def testing(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TESTING
elif self.testing:
self.state.stage = None
@property
def predicting(self) -> bool:
return self.state.stage == RunningStage.PREDICTING
@predicting.setter
def predicting(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.PREDICTING
elif self.predicting:
self.state.stage = None
@property
def tuning(self) -> bool:
return self.state.stage == RunningStage.TUNING
@tuning.setter
def tuning(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.TUNING
elif self.tuning:
self.state.stage = None
@property
def validating(self) -> bool:
return self.state.stage == RunningStage.VALIDATING
@validating.setter
def validating(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.VALIDATING
elif self.validating:
self.state.stage = None
@property
def evaluating(self) -> bool:
return self.state.stage and self.state.stage.evaluating
@property
def sanity_checking(self) -> bool:
return self.state.stage == RunningStage.SANITY_CHECKING
@sanity_checking.setter
def sanity_checking(self, val: bool) -> None:
if val:
self.state.stage = RunningStage.SANITY_CHECKING
elif self.sanity_checking:
self.state.stage = None
"""
Loop properties
"""
@property
def global_step(self) -> int:
"""The number of optimizer steps taken (does not reset each epoch).
This includes multiple optimizers and TBPTT steps (if enabled).
"""
return self.fit_loop.epoch_loop.global_step
@property
def current_epoch(self) -> int:
"""The current epoch, updated after the epoch end hooks are run."""
return self.fit_loop.epoch_progress.current.completed
@property
def max_epochs(self) -> int:
return self.fit_loop.max_epochs
@property
def min_epochs(self) -> int:
return self.fit_loop.min_epochs
@property
def max_steps(self) -> int:
return self.fit_loop.max_steps
@property
def min_steps(self) -> Optional[int]:
return self.fit_loop.min_steps
@property
def is_last_batch(self) -> bool:
return self.fit_loop.epoch_loop.batch_progress.is_last_batch
@property
def fit_loop(self) -> FitLoop:
return self._fit_loop
@fit_loop.setter
def fit_loop(self, loop: FitLoop):
"""Attach a custom fit loop to this Trainer.
It will run with
:meth:`~pytorch_lightning.trainer.trainer.Trainer.fit`.
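        Example (a hedged sketch; ``MyFitLoop`` is a hypothetical ``FitLoop`` subclass, not part of this API)::
            trainer = Trainer()
            trainer.fit_loop = MyFitLoop(min_epochs=1, max_epochs=10)
            trainer.fit(model)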
"""
loop.trainer = self
self._fit_loop = loop
@property
def validate_loop(self) -> EvaluationLoop:
return self._validate_loop
@validate_loop.setter
def validate_loop(self, loop: EvaluationLoop):
"""Attach a custom validation loop to this Trainer.
It will run with
:meth:`~pytorch_lightning.trainer.trainer.Trainer.validate`. Note that this loop is different from the one
running during training inside the :meth:`pytorch_lightning.trainer.trainer.Trainer.fit` call.
"""
loop.trainer = self
self._validate_loop = loop
@property
def test_loop(self) -> EvaluationLoop:
return self._test_loop
@test_loop.setter
def test_loop(self, loop: EvaluationLoop):
"""Attach a custom test loop to this Trainer.
It will run with
:meth:`~pytorch_lightning.trainer.trainer.Trainer.test`.
"""
loop.trainer = self
self._test_loop = loop
@property
def predict_loop(self) -> PredictionLoop:
return self._predict_loop
@predict_loop.setter
def predict_loop(self, loop: PredictionLoop):
"""Attach a custom prediction loop to this Trainer.
It will run with
:meth:`~pytorch_lightning.trainer.trainer.Trainer.predict`.
"""
loop.trainer = self
self._predict_loop = loop
@property
def verbose_evaluate(self) -> bool:
rank_zero_deprecation(
"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. The current value"
" returned is the union of the validate and test loop values. You can choose which one to access with"
" `trainer.{validate,test}_loop.verbose`.",
stacklevel=5,
)
return self.validate_loop.verbose or self.test_loop.verbose
@verbose_evaluate.setter
def verbose_evaluate(self, verbose: bool) -> None:
rank_zero_deprecation(
"The `Trainer.verbose_evaluate` property has been deprecated and will be removed in v1.8. This will set"
" the value for both trainer.{validate,test}_loop.verbose`.",
stacklevel=5,
)
self.validate_loop.verbose = verbose
self.test_loop.verbose = verbose
@property
def _evaluation_loop(self) -> EvaluationLoop:
if self.state.fn in (TrainerFn.FITTING, TrainerFn.TUNING):
return self.fit_loop.epoch_loop.val_loop
if self.state.fn == TrainerFn.VALIDATING:
return self.validate_loop
if self.state.fn == TrainerFn.TESTING:
return self.test_loop
raise RuntimeError("The `Trainer._evaluation_loop` property isn't defined. Accessed outside of scope")
@property
def _active_loop(self) -> Optional[Union[FitLoop, EvaluationLoop, PredictionLoop]]:
if self.training:
return self.fit_loop
if self.sanity_checking or self.evaluating:
return self._evaluation_loop
if self.predicting:
return self.predict_loop
"""
Logging properties
"""
@property
def logger(self) -> Optional[LightningLoggerBase]:
if len(self.loggers) == 0:
return None
if len(self.loggers) == 1:
return self.loggers[0]
else:
rank_zero_warn(
"Using trainer.logger when Trainer is configured to use multiple loggers."
" This behavior will change in v1.8 when LoggerCollection is removed, and"
" trainer.logger will return the first logger in trainer.loggers"
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return LoggerCollection(self.loggers)
@logger.setter
def logger(self, logger: Optional[LightningLoggerBase]) -> None:
if not logger:
self.loggers = []
elif isinstance(logger, LoggerCollection):
self.loggers = list(logger)
else:
self.loggers = [logger]
@property
def loggers(self) -> List[LightningLoggerBase]:
return self._loggers
@loggers.setter
def loggers(self, loggers: Optional[List[LightningLoggerBase]]) -> None:
self._loggers = loggers if loggers else []
@property
def callback_metrics(self) -> dict:
return self._logger_connector.callback_metrics
@property
def logged_metrics(self) -> dict:
return self._logger_connector.logged_metrics
@property
def progress_bar_metrics(self) -> dict:
return self._logger_connector.progress_bar_metrics
@property
def _results(self) -> Optional[_ResultCollection]:
active_loop = self._active_loop
if active_loop is not None:
return active_loop._results
def _exit_gracefully_on_signal(self) -> None:
if not _fault_tolerant_training() or not self._should_terminate_gracefully():
return
raise ExitGracefullyException(0)
def _should_terminate_gracefully(self) -> bool:
value = torch.tensor(int(self._terminate_gracefully), device=self.strategy.root_device)
return self.strategy.reduce(value, reduce_op="sum") > 0
@property
def weights_summary(self) -> Optional[str]:
rank_zero_deprecation("`Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.")
return self._weights_summary
@weights_summary.setter
def weights_summary(self, val: Optional[str]) -> None:
rank_zero_deprecation("Setting `Trainer.weights_summary` is deprecated in v1.5 and will be removed in v1.7.")
self._weights_summary = val
"""
Other
"""
@property
def estimated_stepping_batches(self) -> Union[int, float]:
r"""
Estimated stepping batches for the complete training inferred from DataLoaders, gradient
accumulation factor and distributed setup.
Examples::
def configure_optimizers(self):
optimizer = ...
scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer, max_lr=1e-3, total_steps=self.trainer.estimated_stepping_batches
)
return [optimizer], [scheduler]
"""
accumulation_scheduler = self.accumulation_scheduler
if accumulation_scheduler.epochs != [0]:
raise MisconfigurationException(
"Estimated stepping batches cannot be computed with different"
" `accumulate_grad_batches` at different epochs."
)
# infinite training
if self.max_epochs == -1 and self.max_steps == -1:
return float("inf")
if self.train_dataloader is None:
rank_zero_info("Loading `train_dataloader` to estimate number of stepping batches.")
self.reset_train_dataloader()
total_batches = self.num_training_batches
# iterable dataset
if total_batches == float("inf"):
return self.max_steps
self.accumulate_grad_batches = accumulation_scheduler.get_accumulate_grad_batches(self.current_epoch)
effective_batch_size = self.accumulate_grad_batches
max_estimated_steps = math.ceil(total_batches / effective_batch_size) * max(self.max_epochs, 1)
max_estimated_steps = min(max_estimated_steps, self.max_steps) if self.max_steps != -1 else max_estimated_steps
return max_estimated_steps
@property
def terminate_on_nan(self) -> bool:
rank_zero_deprecation("`Trainer.terminate_on_nan` is deprecated in v1.5 and will be removed in 1.7.")
return self._terminate_on_nan
@terminate_on_nan.setter
def terminate_on_nan(self, val: bool) -> None:
rank_zero_deprecation(
f"Setting `Trainer.terminate_on_nan = {val}` is deprecated in v1.5 and will be removed in 1.7."
f" Please set `Trainer(detect_anomaly={val})` instead."
)
        self._terminate_on_nan = val
def _determine_batch_limits(batches: Optional[Union[int, float]], name: str) -> Union[int, float]:
if batches is None:
        # `batches` is Optional so we can tell whether the user passed a value explicitly
        # and only show the info messages below in that case
return 1.0
# differentiating based on the type can be error-prone for users. show a message describing the chosen behaviour
if isinstance(batches, int) and batches == 1:
if name == "limit_train_batches":
message = "1 batch per epoch will be used."
elif name == "val_check_interval":
message = "validation will run after every batch."
else:
message = "1 batch will be used."
rank_zero_info(f"`Trainer({name}=1)` was configured so {message}")
elif isinstance(batches, float) and batches == 1.0:
if name == "limit_train_batches":
message = "100% of the batches per epoch will be used."
elif name == "val_check_interval":
message = "validation will run at the end of the training epoch."
else:
message = "100% of the batches will be used."
rank_zero_info(f"`Trainer({name}=1.0)` was configured so {message}.")
if 0 <= batches <= 1:
return batches
if batches > 1 and batches % 1.0 == 0:
return int(batches)
raise MisconfigurationException(
f"You have passed invalid value {batches} for {name}, it has to be in [0.0, 1.0] or an int."
)
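# Illustrative sketch (not part of the original module) of how `_determine_batch_limits`
# normalizes user input; the values below follow directly from the branches above:
#   _determine_batch_limits(None, "limit_train_batches")  -> 1.0   (no value passed: use everything)
#   _determine_batch_limits(1,    "limit_train_batches")  -> 1     (exactly one batch per epoch)
#   _determine_batch_limits(0.25, "limit_val_batches")    -> 0.25  (25% of the batches)
#   _determine_batch_limits(5,    "limit_test_batches")   -> 5     (five batches)
#   _determine_batch_limits(2.5,  "limit_val_batches")    -> raises MisconfigurationException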
| [
"torch._C._log_api_usage_once",
"torch.no_grad",
"torch.autograd.set_detect_anomaly",
"torch.cuda.is_available",
"torch.set_grad_enabled"
] | 1.8 | valanm22/pytorch-lightning | 5d190eabd28671a6222741f5dd9ee3f214e519b1 |
1.8 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test deprecated functionality which will be removed in v1.8.0."""
import os
import time
from unittest import mock
from unittest.mock import Mock
import numpy as np
import pytest
import torch
from torch import optim
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import CSVLogger, LightningLoggerBase, LoggerCollection
from pytorch_lightning.plugins.precision.precision_plugin import PrecisionPlugin
from pytorch_lightning.plugins.training_type.ddp import DDPPlugin
from pytorch_lightning.plugins.training_type.ddp2 import DDP2Plugin
from pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin
from pytorch_lightning.plugins.training_type.deepspeed import DeepSpeedPlugin
from pytorch_lightning.plugins.training_type.dp import DataParallelPlugin
from pytorch_lightning.plugins.training_type.fully_sharded import DDPFullyShardedPlugin
from pytorch_lightning.plugins.training_type.ipu import IPUPlugin
from pytorch_lightning.plugins.training_type.sharded import DDPShardedPlugin
from pytorch_lightning.plugins.training_type.sharded_spawn import DDPSpawnShardedPlugin
from pytorch_lightning.plugins.training_type.single_device import SingleDevicePlugin
from pytorch_lightning.plugins.training_type.single_tpu import SingleTPUPlugin
from pytorch_lightning.plugins.training_type.tpu_spawn import TPUSpawnPlugin
from pytorch_lightning.profiler import AbstractProfiler, AdvancedProfiler, SimpleProfiler
from pytorch_lightning.strategies import ParallelStrategy
from pytorch_lightning.trainer.configuration_validator import _check_datamodule_checkpoint_hooks
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.apply_func import move_data_to_device
from pytorch_lightning.utilities.enums import DeviceType, DistributedType
from pytorch_lightning.utilities.imports import _TORCHTEXT_LEGACY
from pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn
from tests.deprecated_api import no_deprecated_call
from tests.helpers.boring_model import BoringDataModule, BoringModel
from tests.helpers.runif import RunIf
from tests.helpers.torchtext_utils import get_dummy_torchtext_data_iterator
def test_v1_8_0_deprecated_distributed_type_enum():
with pytest.deprecated_call(match="has been deprecated in v1.6 and will be removed in v1.8."):
_ = DistributedType.DDP
def test_v1_8_0_deprecated_device_type_enum():
with pytest.deprecated_call(match="has been deprecated in v1.6 and will be removed in v1.8."):
_ = DeviceType.CPU
@pytest.mark.skipif(not _TORCHTEXT_LEGACY, reason="torchtext.legacy is deprecated.")
def test_v1_8_0_deprecated_torchtext_batch():
with pytest.deprecated_call(match="is deprecated and Lightning will remove support for it in v1.8"):
data_iterator, _ = get_dummy_torchtext_data_iterator(num_samples=3, batch_size=3)
batch = next(iter(data_iterator))
_ = move_data_to_device(batch=batch, device=torch.device("cpu"))
def test_v1_8_0_on_init_start_end(tmpdir):
class TestCallback(Callback):
def on_init_start(self, trainer):
print("Starting to init trainer!")
def on_init_end(self, trainer):
print("Trainer is init now")
model = BoringModel()
trainer = Trainer(
callbacks=[TestCallback()],
max_epochs=1,
fast_dev_run=True,
enable_progress_bar=False,
logger=False,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `on_init_start` callback hook was deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
with pytest.deprecated_call(
match="The `on_init_end` callback hook was deprecated in v1.6 and will be removed in v1.8"
):
trainer.validate(model)
def test_v1_8_0_deprecated_call_hook():
trainer = Trainer(
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
enable_progress_bar=False,
logger=False,
)
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8."):
trainer.call_hook("test_hook")
def test_v1_8_0_deprecated_warning_positional_category():
with pytest.deprecated_call(match=r"use `category=FutureWarning."):
rank_zero_warn("foo", FutureWarning)
def test_v1_8_0_deprecated_on_hpc_hooks(tmpdir):
class TestModelSave(BoringModel):
def on_hpc_save(self):
print("on_hpc_save override")
class TestModelLoad(BoringModel):
def on_hpc_load(self):
print("on_hpc_load override")
save_model = TestModelSave()
load_model = TestModelLoad()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, fast_dev_run=True)
with pytest.deprecated_call(
match=r"Method `LightningModule.on_hpc_save` is deprecated in v1.6 and will be removed in v1.8."
):
trainer.fit(save_model)
with pytest.deprecated_call(
match=r"Method `LightningModule.on_hpc_load` is deprecated in v1.6 and will be removed in v1.8."
):
trainer.fit(load_model)
def test_v1_8_0_deprecated_run_stage():
trainer = Trainer()
trainer._run_stage = Mock()
with pytest.deprecated_call(match="`Trainer.run_stage` is deprecated in v1.6 and will be removed in v1.8."):
trainer.run_stage()
def test_v1_8_0_trainer_verbose_evaluate():
trainer = Trainer()
with pytest.deprecated_call(match="verbose_evaluate` property has been deprecated and will be removed in v1.8"):
assert trainer.verbose_evaluate
with pytest.deprecated_call(match="verbose_evaluate` property has been deprecated and will be removed in v1.8"):
trainer.verbose_evaluate = False
@pytest.mark.parametrize("fn_prefix", ["validated", "tested", "predicted"])
def test_v1_8_0_trainer_ckpt_path_attributes(fn_prefix: str):
test_attr = f"{fn_prefix}_ckpt_path"
trainer = Trainer()
with pytest.deprecated_call(match=f"{test_attr}` attribute was deprecated in v1.6 and will be removed in v1.8"):
_ = getattr(trainer, test_attr)
with pytest.deprecated_call(match=f"{test_attr}` attribute was deprecated in v1.6 and will be removed in v1.8"):
setattr(trainer, test_attr, "v")
def test_v1_8_0_deprecated_trainer_should_rank_save_checkpoint(tmpdir):
trainer = Trainer()
with pytest.deprecated_call(
match=r"`Trainer.should_rank_save_checkpoint` is deprecated in v1.6 and will be removed in v1.8."
):
_ = trainer.should_rank_save_checkpoint
def test_v1_8_0_deprecated_lr_scheduler():
trainer = Trainer()
with pytest.deprecated_call(match=r"`Trainer.lr_schedulers` is deprecated in v1.6 and will be removed in v1.8."):
assert trainer.lr_schedulers == []
def test_v1_8_0_trainer_optimizers_mixin():
trainer = Trainer()
model = BoringModel()
trainer.strategy.connect(model)
trainer.lightning_module.trainer = trainer
with pytest.deprecated_call(
match=r"`TrainerOptimizersMixin.init_optimizers` was deprecated in v1.6 and will be removed in v1.8."
):
trainer.init_optimizers(model)
with pytest.deprecated_call(
match=r"`TrainerOptimizersMixin.convert_to_lightning_optimizers` was deprecated in v1.6 and will be removed in "
"v1.8."
):
trainer.convert_to_lightning_optimizers()
def test_v1_8_0_deprecate_trainer_callback_hook_mixin():
methods_with_self = [
"on_before_accelerator_backend_setup",
"on_configure_sharded_model",
"on_init_start",
"on_init_end",
"on_fit_start",
"on_fit_end",
"on_sanity_check_start",
"on_sanity_check_end",
"on_train_epoch_start",
"on_train_epoch_end",
"on_validation_epoch_start",
"on_validation_epoch_end",
"on_test_epoch_start",
"on_test_epoch_end",
"on_predict_epoch_start",
"on_epoch_start",
"on_epoch_end",
"on_train_start",
"on_train_end",
"on_pretrain_routine_start",
"on_pretrain_routine_end",
"on_batch_start",
"on_batch_end",
"on_validation_start",
"on_validation_end",
"on_test_start",
"on_test_end",
"on_predict_start",
"on_predict_end",
"on_after_backward",
]
methods_with_stage = [
"setup",
"teardown",
]
methods_with_batch_batch_idx_dataloader_idx = [
"on_train_batch_start",
"on_validation_batch_start",
"on_test_batch_start",
"on_predict_batch_start",
]
methods_with_outputs_batch_batch_idx_dataloader_idx = [
"on_train_batch_end",
"on_validation_batch_end",
"on_test_batch_end",
"on_predict_batch_end",
]
methods_with_checkpoint = ["on_save_checkpoint", "on_load_checkpoint"]
trainer = Trainer(
max_epochs=1,
limit_val_batches=0.1,
limit_train_batches=0.2,
enable_progress_bar=False,
logger=False,
)
model = BoringModel()
# need to attach model to trainer for testing of `on_pretrain_routine_start`
trainer.strategy.connect(model)
for method_name in methods_with_self:
fn = getattr(trainer, method_name, None)
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
fn()
for method_name in methods_with_stage:
fn = getattr(trainer, method_name)
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
fn(stage="test")
for method_name in methods_with_batch_batch_idx_dataloader_idx:
fn = getattr(trainer, method_name)
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
fn(batch={}, batch_idx=0, dataloader_idx=0)
for method_name in methods_with_outputs_batch_batch_idx_dataloader_idx:
fn = getattr(trainer, method_name)
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
fn(outputs=torch.tensor([[1.0, -1.0], [1.0, -1.0]]), batch={}, batch_idx=0, dataloader_idx=0)
for method_name in methods_with_checkpoint:
fn = getattr(trainer, method_name)
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
fn(checkpoint={})
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
trainer.on_predict_epoch_end(outputs=torch.tensor([[1.0, -1.0], [1.0, -1.0]]))
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
trainer.on_exception(exception=Exception)
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
trainer.on_before_backward(loss=torch.tensor([[1.0, -1.0], [1.0, -1.0]]))
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
trainer.on_before_optimizer_step(
optimizer=optim.SGD(model.parameters(), lr=0.01, momentum=0.9), optimizer_idx=0
)
with pytest.deprecated_call(match="was deprecated in v1.6 and will be removed in v1.8"):
trainer.on_before_zero_grad(optimizer=optim.SGD(model.parameters(), lr=0.01, momentum=0.9))
def test_v1_8_0_deprecated_training_type_plugin_property():
trainer = Trainer()
with pytest.deprecated_call(match="in v1.6 and will be removed in v1.8"):
trainer.training_type_plugin
def test_v1_8_0_deprecate_trainer_data_loading_mixin():
trainer = Trainer(max_epochs=1)
model = BoringModel()
dm = BoringDataModule()
trainer.fit(model, datamodule=dm)
with pytest.deprecated_call(
match=r"`TrainerDataLoadingMixin.prepare_dataloader` was deprecated in v1.6 and will be removed in v1.8.",
):
trainer.prepare_dataloader(dataloader=model.train_dataloader, shuffle=False)
with pytest.deprecated_call(
match=r"`TrainerDataLoadingMixin.request_dataloader` was deprecated in v1.6 and will be removed in v1.8.",
):
trainer.request_dataloader(stage=RunningStage.TRAINING)
def test_v_1_8_0_deprecated_device_stats_monitor_prefix_metric_keys():
from pytorch_lightning.callbacks.device_stats_monitor import prefix_metric_keys
with pytest.deprecated_call(match="in v1.6 and will be removed in v1.8"):
prefix_metric_keys({"foo": 1.0}, "bar")
@pytest.mark.parametrize(
"cls",
[
DDPPlugin,
DDP2Plugin,
DDPSpawnPlugin,
pytest.param(DeepSpeedPlugin, marks=RunIf(deepspeed=True)),
DataParallelPlugin,
DDPFullyShardedPlugin,
pytest.param(IPUPlugin, marks=RunIf(ipu=True)),
DDPShardedPlugin,
DDPSpawnShardedPlugin,
TPUSpawnPlugin,
],
)
def test_v1_8_0_deprecated_training_type_plugin_classes(cls):
old_name = cls.__name__
new_name = old_name.replace("Plugin", "Strategy")
with pytest.deprecated_call(
match=f"{old_name}` is deprecated in v1.6 and will be removed in v1.8. Use .*{new_name}` instead."
):
cls()
def test_v1_8_0_deprecated_single_device_plugin_class():
with pytest.deprecated_call(
match=(
"SingleDevicePlugin` is deprecated in v1.6 and will be removed in v1.8."
" Use `.*SingleDeviceStrategy` instead."
)
):
SingleDevicePlugin("cpu")
@RunIf(tpu=True)
def test_v1_8_0_deprecated_single_tpu_plugin_class():
with pytest.deprecated_call(
match=(
"SingleTPUPlugin` is deprecated in v1.6 and will be removed in v1.8." " Use `.*SingleTPUStrategy` instead."
)
):
SingleTPUPlugin(0)
def test_v1_8_0_deprecated_lightning_optimizers():
trainer = Trainer()
with pytest.deprecated_call(
match="Trainer.lightning_optimizers` is deprecated in v1.6 and will be removed in v1.8"
):
assert trainer.lightning_optimizers == {}
def test_v1_8_0_remove_on_batch_start_end(tmpdir):
class TestCallback(Callback):
def on_batch_start(self, *args, **kwargs):
print("on_batch_start")
model = BoringModel()
trainer = Trainer(
callbacks=[TestCallback()],
fast_dev_run=True,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `Callback.on_batch_start` hook was deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
class TestCallback(Callback):
def on_batch_end(self, *args, **kwargs):
print("on_batch_end")
trainer = Trainer(
callbacks=[TestCallback()],
fast_dev_run=True,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `Callback.on_batch_end` hook was deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
def test_v1_8_0_on_configure_sharded_model(tmpdir):
class TestCallback(Callback):
def on_configure_sharded_model(self, trainer, model):
print("Configuring sharded model")
model = BoringModel()
trainer = Trainer(
callbacks=[TestCallback()],
max_epochs=1,
fast_dev_run=True,
enable_progress_bar=False,
logger=False,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `on_configure_sharded_model` callback hook was deprecated in v1.6 and will be removed in v1.8."
):
trainer.fit(model)
def test_v1_8_0_remove_on_epoch_start_end_lightning_module(tmpdir):
class CustomModel(BoringModel):
def on_epoch_start(self, *args, **kwargs):
print("on_epoch_start")
model = CustomModel()
trainer = Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `LightningModule.on_epoch_start` hook was deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
class CustomModel(BoringModel):
def on_epoch_end(self, *args, **kwargs):
print("on_epoch_end")
trainer = Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
)
model = CustomModel()
with pytest.deprecated_call(
match="The `LightningModule.on_epoch_end` hook was deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
def test_v1_8_0_remove_on_pretrain_routine_start_end_lightning_module(tmpdir):
class CustomModel(BoringModel):
def on_pretrain_routine_start(self, *args, **kwargs):
print("foo")
model = CustomModel()
trainer = Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `LightningModule.on_pretrain_routine_start` hook was deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
class CustomModel(BoringModel):
def on_pretrain_routine_end(self, *args, **kwargs):
print("foo")
trainer = Trainer(
fast_dev_run=True,
default_root_dir=tmpdir,
)
model = CustomModel()
with pytest.deprecated_call(
match="The `LightningModule.on_pretrain_routine_end` hook was deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
def test_v1_8_0_rank_zero_imports():
import warnings
from pytorch_lightning.utilities.distributed import rank_zero_debug, rank_zero_info
from pytorch_lightning.utilities.warnings import LightningDeprecationWarning, rank_zero_deprecation, rank_zero_warn
with pytest.deprecated_call(
match="pytorch_lightning.utilities.distributed.rank_zero_debug has been deprecated in v1.6"
" and will be removed in v1.8."
):
rank_zero_debug("foo")
with pytest.deprecated_call(
match="pytorch_lightning.utilities.distributed.rank_zero_info has been deprecated in v1.6"
" and will be removed in v1.8."
):
rank_zero_info("foo")
with pytest.deprecated_call(
match="pytorch_lightning.utilities.warnings.rank_zero_warn has been deprecated in v1.6"
" and will be removed in v1.8."
):
rank_zero_warn("foo")
with pytest.deprecated_call(
match="pytorch_lightning.utilities.warnings.rank_zero_deprecation has been deprecated in v1.6"
" and will be removed in v1.8."
):
rank_zero_deprecation("foo")
with pytest.deprecated_call(
match="pytorch_lightning.utilities.warnings.LightningDeprecationWarning has been deprecated in v1.6"
" and will be removed in v1.8."
):
warnings.warn("foo", LightningDeprecationWarning, stacklevel=5)
def test_v1_8_0_on_before_accelerator_backend_setup(tmpdir):
class TestCallback(Callback):
def on_before_accelerator_backend_setup(self, *args, **kwargs):
print("on_before_accelerator_backend called.")
model = BoringModel()
trainer = Trainer(
callbacks=[TestCallback()],
max_epochs=1,
fast_dev_run=True,
enable_progress_bar=False,
logger=False,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `on_before_accelerator_backend_setup` callback hook was deprecated in v1.6"
" and will be removed in v1.8"
):
trainer.fit(model)
def test_v1_8_0_logger_agg_parameters():
class CustomLogger(LightningLoggerBase):
@rank_zero_only
def log_hyperparams(self, params):
pass
@rank_zero_only
def log_metrics(self, metrics, step):
pass
@property
def name(self):
pass
@property
def version(self):
pass
with pytest.deprecated_call(
match="The `agg_key_funcs` parameter for `LightningLoggerBase` was deprecated in v1.6"
" and will be removed in v1.8."
):
CustomLogger(agg_key_funcs={"mean", np.mean})
with pytest.deprecated_call(
match="The `agg_default_func` parameter for `LightningLoggerBase` was deprecated in v1.6"
" and will be removed in v1.8."
):
CustomLogger(agg_default_func=np.mean)
# Should have no deprecation warning
logger = CustomLogger()
with pytest.deprecated_call(
match="`LightningLoggerBase.update_agg_funcs` was deprecated in v1.6 and will be removed in v1.8."
):
logger.update_agg_funcs()
def test_v1_8_0_deprecated_agg_and_log_metrics_override(tmpdir):
class AggregationOverrideLogger(CSVLogger):
@rank_zero_only
def agg_and_log_metrics(self, metrics, step):
self.log_metrics(metrics=metrics, step=step)
logger = AggregationOverrideLogger(tmpdir)
logger2 = CSVLogger(tmpdir)
logger3 = CSVLogger(tmpdir)
# Test single loggers
with pytest.deprecated_call(
match="`LightningLoggerBase.agg_and_log_metrics` is deprecated in v1.6 and will be removed"
" in v1.8. `Trainer` will directly call `LightningLoggerBase.log_metrics` so custom"
" loggers should not implement `LightningLoggerBase.agg_and_log_metrics`."
):
Trainer(logger=logger)
# Should have no deprecation warning
Trainer(logger=logger2)
# Test multiple loggers
with pytest.deprecated_call(
match="`LightningLoggerBase.agg_and_log_metrics` is deprecated in v1.6 and will be removed"
" in v1.8. `Trainer` will directly call `LightningLoggerBase.log_metrics` so custom"
" loggers should not implement `LightningLoggerBase.agg_and_log_metrics`."
):
Trainer(logger=[logger, logger3])
# Should have no deprecation warning
Trainer(logger=[logger2, logger3])
def test_v1_8_0_callback_on_pretrain_routine_start_end(tmpdir):
class TestCallback(Callback):
def on_pretrain_routine_start(self, trainer, pl_module):
print("on_pretrain_routine_start called.")
model = BoringModel()
trainer = Trainer(
callbacks=[TestCallback()],
fast_dev_run=True,
enable_progress_bar=False,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `Callback.on_pretrain_routine_start` hook has been deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
class TestCallback(Callback):
def on_pretrain_routine_end(self, trainer, pl_module):
print("on_pretrain_routine_end called.")
model = BoringModel()
trainer = Trainer(
callbacks=[TestCallback()],
fast_dev_run=True,
enable_progress_bar=False,
default_root_dir=tmpdir,
)
with pytest.deprecated_call(
match="The `Callback.on_pretrain_routine_end` hook has been deprecated in v1.6 and will be removed in v1.8"
):
trainer.fit(model)
def test_v1_8_0_weights_save_path(tmpdir):
with pytest.deprecated_call(match=r"Setting `Trainer\(weights_save_path=\)` has been deprecated in v1.6"):
trainer = Trainer(weights_save_path=tmpdir)
with pytest.deprecated_call(match=r"`Trainer.weights_save_path` has been deprecated in v1.6"):
_ = trainer.weights_save_path
def test_deprecated_epoch_outputs_format(tmpdir):
class DeprecationModel(BoringModel):
def __init__(self):
super().__init__()
self.truncated_bptt_steps = 1
def training_step(self, batch, batch_idx, optimizer_idx, hiddens):
output = super().training_step(batch, batch_idx)
output["hiddens"] = hiddens
return output
def tbptt_split_batch(self, batch, split_size):
return [batch, batch]
def training_epoch_end(self, outputs):
...
def on_train_batch_end(self, outputs, batch, batch_idx) -> None:
...
def configure_optimizers(self):
return [torch.optim.Adam(self.parameters()), torch.optim.Adam(self.parameters())]
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = DeprecationModel()
batch_match = r"on_train_batch_end.*will change in version v1.8 to \(tbptt_steps, n_optimizers\)"
with pytest.deprecated_call(match=batch_match):
trainer.fit(model)
class DeprecationModel2(DeprecationModel):
def on_train_batch_end(self, *args, new_format=True):
...
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = DeprecationModel()
epoch_match = r"training_epoch_end.*will change in version v1.8 to \(n_batches, tbptt_steps, n_optimizers\)"
with pytest.deprecated_call(match=epoch_match):
trainer.fit(model)
class NoDeprecationModel(DeprecationModel2):
def training_epoch_end(self, outputs, new_format=True):
...
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
model = NoDeprecationModel()
with no_deprecated_call(match="will change in version v1.8.*new_format=True"):
trainer.fit(model)
@pytest.mark.flaky(reruns=3)
@pytest.mark.parametrize(["action", "expected"], [("a", [3, 1]), ("b", [2]), ("c", [1])])
def test_simple_profiler_iterable_durations(tmpdir, action: str, expected: list):
"""Ensure the reported durations are reasonably accurate."""
def _sleep_generator(durations):
"""the profile_iterable method needs an iterable in which we can ensure that we're properly timing how long
it takes to call __next__"""
for duration in durations:
time.sleep(duration)
yield duration
def _get_python_cprofile_total_duration(profile):
return sum(x.inlinetime for x in profile.getstats())
simple_profiler = SimpleProfiler()
iterable = _sleep_generator(expected)
with pytest.deprecated_call(
match="`BaseProfiler.profile_iterable` is deprecated in v1.6 and will be removed in v1.8."
):
for _ in simple_profiler.profile_iterable(iterable, action):
pass
# we exclude the last item in the recorded durations since that's when StopIteration is raised
np.testing.assert_allclose(simple_profiler.recorded_durations[action][:-1], expected, rtol=0.2)
advanced_profiler = AdvancedProfiler(dirpath=tmpdir, filename="profiler")
iterable = _sleep_generator(expected)
with pytest.deprecated_call(
match="`BaseProfiler.profile_iterable` is deprecated in v1.6 and will be removed in v1.8."
):
for _ in advanced_profiler.profile_iterable(iterable, action):
pass
recorded_total_duration = _get_python_cprofile_total_duration(advanced_profiler.profiled_actions[action])
expected_total_duration = np.sum(expected)
np.testing.assert_allclose(recorded_total_duration, expected_total_duration, rtol=0.2)
def test_v1_8_0_logger_collection(tmpdir):
logger1 = CSVLogger(tmpdir)
logger2 = CSVLogger(tmpdir)
trainer1 = Trainer(logger=logger1)
trainer2 = Trainer(logger=[logger1, logger2])
# Should have no deprecation warning
trainer1.logger
trainer1.loggers
trainer2.loggers
trainer2.logger
with pytest.deprecated_call(match="`LoggerCollection` is deprecated in v1.6"):
LoggerCollection([logger1, logger2])
def test_v1_8_0_precision_plugin_checkpoint_hooks(tmpdir):
class PrecisionPluginSaveHook(PrecisionPlugin):
def on_save_checkpoint(self, checkpoint):
print("override on_save_checkpoint")
class PrecisionPluginLoadHook(PrecisionPlugin):
def on_load_checkpoint(self, checkpoint):
print("override on_load_checkpoint")
model = BoringModel()
precplugin_save = PrecisionPluginSaveHook()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, plugins=[precplugin_save])
with pytest.deprecated_call(
match="`PrecisionPlugin.on_save_checkpoint` was deprecated in"
" v1.6 and will be removed in v1.8. Use `state_dict` instead."
):
trainer.fit(model)
precplugin_load = PrecisionPluginLoadHook()
trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, plugins=[precplugin_load])
with pytest.deprecated_call(
match="`PrecisionPlugin.on_load_checkpoint` was deprecated in"
" v1.6 and will be removed in v1.8. Use `load_state_dict` instead."
):
trainer.fit(model)
def test_v1_8_0_abstract_profiler():
assert "`AbstractProfiler` was deprecated in v1.6" in AbstractProfiler.__doc__
def test_v1_8_0_datamodule_checkpointhooks():
class CustomBoringDataModuleSave(BoringDataModule):
def on_save_checkpoint(self, checkpoint):
print("override on_save_checkpoint")
class CustomBoringDataModuleLoad(BoringDataModule):
def on_load_checkpoint(self, checkpoint):
print("override on_load_checkpoint")
trainer = Mock()
trainer.datamodule = CustomBoringDataModuleSave()
with pytest.deprecated_call(
match="`LightningDataModule.on_save_checkpoint` was deprecated in"
" v1.6 and will be removed in v1.8. Use `state_dict` instead."
):
_check_datamodule_checkpoint_hooks(trainer)
trainer.datamodule = CustomBoringDataModuleLoad()
with pytest.deprecated_call(
match="`LightningDataModule.on_load_checkpoint` was deprecated in"
" v1.6 and will be removed in v1.8. Use `load_state_dict` instead."
):
_check_datamodule_checkpoint_hooks(trainer)
def test_v1_8_0_trainer_use_amp(tmpdir):
trainer = Trainer()
with pytest.deprecated_call(match="`Trainer.use_amp` is deprecated in v1.6.0"):
_ = trainer.use_amp
def test_v1_8_0_lightning_module_use_amp():
model = BoringModel()
with pytest.deprecated_call(match="`LightningModule.use_amp` was deprecated in v1.6"):
_ = model.use_amp
with pytest.deprecated_call(match="`LightningModule.use_amp` was deprecated in v1.6"):
model.use_amp = False
@mock.patch.dict(os.environ, {"PL_TORCH_DISTRIBUTED_BACKEND": "foo"})
def test_v1_8_0_torch_distributed_backend_env():
from pytorch_lightning.utilities.distributed import _get_process_group_backend_from_env
with pytest.deprecated_call(
match="Environment variable `PL_TORCH_DISTRIBUTED_BACKEND`"
" was deprecated in v1.6 and will be removed in v1.8."
):
_get_process_group_backend_from_env()
def test_parallel_strategy_torch_distributed_backend():
class CustomParallel(ParallelStrategy):
@property
def root_device(self) -> torch.device:
return torch.device("cpu")
def model_to_device(self):
pass
@property
def is_global_zero(self):
return True
def broadcast(self, obj):
return obj
def reduce(self, tensor):
return tensor
def barrier(self):
return
def all_gather(self, tensor):
return tensor
strategy = CustomParallel()
with pytest.deprecated_call(
match="ParallelStrategy.torch_distributed_backend was deprecated" " in v1.6 and will be removed in v1.8."
):
strategy.torch_distributed_backend
def test_trainer_config_device_ids():
trainer = Trainer(devices=2)
with pytest.deprecated_call(
match="`Trainer.devices` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` or `Trainer.device_ids` to get device information instead."
):
trainer.devices == 2
@pytest.mark.parametrize(
["gpus", "expected_root_gpu", "strategy"],
[
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="O gpus, expect gpu root device to be None."),
pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
pytest.param("-1", 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0.(backend:ddp)"),
],
)
def test_root_gpu_property(monkeypatch, gpus, expected_root_gpu, strategy):
monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
monkeypatch.setattr(torch.cuda, "device_count", lambda: 16)
with pytest.deprecated_call(
match="`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. "
"Please use `Trainer.strategy.root_device.index` instead."
):
assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
@pytest.mark.parametrize(
["gpus", "expected_root_gpu", "strategy"],
[
pytest.param(None, None, None, id="None is None"),
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="None is None"),
],
)
def test_root_gpu_property_0_passing(monkeypatch, gpus, expected_root_gpu, strategy):
monkeypatch.setattr(torch.cuda, "device_count", lambda: 0)
with pytest.deprecated_call(
match="`Trainer.root_gpu` is deprecated in v1.6 and will be removed in v1.8. "
"Please use `Trainer.strategy.root_device.index` instead."
):
assert Trainer(gpus=gpus, strategy=strategy).root_gpu == expected_root_gpu
@pytest.mark.parametrize(
["gpus", "expected_num_gpus", "strategy"],
[
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(0, 0, None, id="Oth gpu, expect 1 gpu to use."),
pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
pytest.param(-1, 16, "ddp", id="-1 - use all gpus"),
pytest.param("-1", 16, "ddp", id="'-1' - use all gpus"),
pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)"),
],
)
def test_trainer_gpu_parse(monkeypatch, gpus, expected_num_gpus, strategy):
monkeypatch.setattr(torch.cuda, "is_available", lambda: True)
monkeypatch.setattr(torch.cuda, "device_count", lambda: 16)
with pytest.deprecated_call(
match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` instead."
):
assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
@pytest.mark.parametrize(
["gpus", "expected_num_gpus", "strategy"],
[
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
],
)
def test_trainer_num_gpu_0(monkeypatch, gpus, expected_num_gpus, strategy):
monkeypatch.setattr(torch.cuda, "device_count", lambda: 0)
with pytest.deprecated_call(
match="`Trainer.num_gpus` was deprecated in v1.6 and will be removed in v1.8."
" Please use `Trainer.num_devices` instead."
):
assert Trainer(gpus=gpus, strategy=strategy).num_gpus == expected_num_gpus
| [
"torch.device",
"torch.tensor"
] | 1.8 | valanm22/pytorch-lightning | 5d190eabd28671a6222741f5dd9ee3f214e519b1 |
1.11 | import torch
from luffy.models.palm import *
def test_palm_tony():
model = PaLMTony(num_tokens=20000)
tokens = torch.randint(0, 20000, (1, 2048))
feat = model(tokens)
assert feat.shape == (1, 2048, 20000)
| [
"torch.randint"
] | 1.11.0 | Fei-Wang/dl-pytorch | a7672603e2de7824d0ff7e97b69dedad3fd9d476 |
1.7 | from .predict import predict
import argparse
import sys, multiprocessing
import torch
def _parse_args():
parser=argparse.ArgumentParser(description="Run SolTranNet aqueous solubility predictor")
parser.add_argument('input',nargs='?',type=argparse.FileType('r'),default=sys.stdin,help='PATH to the file containing the SMILES you wish to use. Assumes the content is 1 SMILE per line.')
parser.add_argument('output',nargs='?',type=argparse.FileType('w'),default=sys.stdout,help='Name of the output file. Defaults to stdout.')
parser.add_argument('--batchsize',default=32,type=int,help='Batch size for the data loader. Defaults to 32.')
parser.add_argument('--cpus',default=multiprocessing.cpu_count(),type=int,help='Number of CPU cores to use for the data loader. Defaults to use all available cores. Pass 0 to only run on 1 CPU.')
parser.add_argument('--cpu_predict',action='store_true',help='Flag to force the predictions to be made on only the CPU. Default behavior is to use GPU if available.')
args=parser.parse_args()
return args
def _run(args):
smiles=[x.rstrip() for x in args.input]
if args.cpu_predict:
predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus,device=torch.device('cpu'))
else:
predictions=predict(smiles,batch_size=args.batchsize,num_workers=args.cpus)
for pred, smi, warn in predictions:
args.output.write(f'{smi},{pred:.3f},{warn}\n')
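# A minimal sketch of how these helpers could be wired together as a script entry
# point; the installed soltrannet package defines its own console entry point, so
# this block is illustrative only.
def _main():
    args = _parse_args()
    _run(args)
if __name__ == '__main__':
    _main()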
| [
"torch.device"
] | 1.7.0 | hengwei-chan/molecular_attention_transformer | 29193d4155df528e3a6a0c1e0da39111d0b8db93 |
1.3 | """Utility functions used by PyTorch algorithms."""
import torch
import torch.nn.functional as F
class _Default: # pylint: disable=too-few-public-methods
"""A wrapper class to represent default arguments.
Args:
val (object): Argument value.
"""
def __init__(self, val):
self.val = val
def make_optimizer(optimizer_type, module, **kwargs):
"""Create an optimizer for PyTorch algos.
Args:
optimizer_type (Union[type, tuple[type, dict]]): Type of optimizer.
This can be an optimizer type such as 'torch.optim.Adam' or a
tuple of type and dictionary, where dictionary contains arguments
            to initialize the optimizer e.g. (torch.optim.Adam, {'lr': 1e-3})
module (torch.nn.Module): The module whose parameters needs to be
optimized.
kwargs (dict): Other keyword arguments to initialize optimizer. This
is not used when `optimizer_type` is tuple.
Returns:
torch.optim.Optimizer: Constructed optimizer.
Raises:
ValueError: Raises value error when `optimizer_type` is tuple, and
non-default argument is passed in `kwargs`.
"""
if isinstance(optimizer_type, tuple):
opt_type, opt_args = optimizer_type
for name, arg in kwargs.items():
if not isinstance(arg, _Default):
raise ValueError('Should not specify {} and explicit \
optimizer args at the same time'.format(name))
return opt_type(module.parameters(), **opt_args)
opt_args = {}
for name, arg in kwargs.items():
if isinstance(arg, _Default):
opt_args[name] = arg.val
else:
opt_args[name] = arg
return optimizer_type(module.parameters(), **opt_args)
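# Usage sketch (illustrative; `module` is any torch.nn.Module and the learning
# rate below is an arbitrary example value):
#   opt = make_optimizer(torch.optim.Adam, module, lr=_Default(1e-3))
#   opt = make_optimizer((torch.optim.Adam, dict(lr=1e-3)), module)
# Both calls construct torch.optim.Adam(module.parameters(), lr=1e-3); the tuple
# form must not be combined with explicit non-default keyword arguments.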
def compute_advantages(discount, gae_lambda, max_path_length, baselines,
rewards):
"""Calculate advantages.
Advantages are a discounted cumulative sum.
Calculate advantages using a baseline (value function) according to
Generalized Advantage Estimation (GAE)
The discounted cumulative sum can be computed using conv2d with filter.
filter:
[1, (discount * gae_lambda), (discount * gae_lambda) ^ 2, ...]
    where the length is the same as max_path_length.
    baselines and rewards share the same (N, T) layout:
baselines:
[ [b_11, b_12, b_13, ... b_1n],
[b_21, b_22, b_23, ... b_2n],
...
[b_m1, b_m2, b_m3, ... b_mn] ]
rewards:
[ [r_11, r_12, r_13, ... r_1n],
[r_21, r_22, r_23, ... r_2n],
...
[r_m1, r_m2, r_m3, ... r_mn] ]
Args:
discount (float): RL discount factor (i.e. gamma).
gae_lambda (float): Lambda, as used for Generalized Advantage
Estimation (GAE).
max_path_length (int): Maximum length of a single rollout.
baselines (torch.Tensor): A 2D vector of value function estimates with
shape (N, T), where N is the batch dimension (number of episodes)
and T is the maximum path length experienced by the agent. If an
episode terminates in fewer than T time steps, the remaining
elements in that episode should be set to 0.
rewards (torch.Tensor): A 2D vector of per-step rewards with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent. If an episode
terminates in fewer than T time steps, the remaining elements in
that episode should be set to 0.
Returns:
torch.Tensor: A 2D vector of calculated advantage values with shape
(N, T), where N is the batch dimension (number of episodes) and T
is the maximum path length experienced by the agent. If an episode
terminates in fewer than T time steps, the remaining values in that
episode should be set to 0.
"""
adv_filter = torch.full((1, 1, 1, max_path_length - 1),
discount * gae_lambda)
adv_filter = torch.cumprod(F.pad(adv_filter, (1, 0), value=1), dim=-1)
deltas = (rewards + discount * F.pad(baselines, (0, 1))[:, 1:] - baselines)
deltas = F.pad(deltas, (0, max_path_length - 1)).unsqueeze(0).unsqueeze(0)
advantages = F.conv2d(deltas, adv_filter, stride=1).squeeze()
return advantages
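# Worked example (a sketch, not part of the original module): with zero baselines
# the one-step TD errors equal the rewards, so each advantage is just the
# discounted cumulative sum weighted by the filter [1, 0.9405, 0.9405**2]:
#   compute_advantages(0.99, 0.95, 3,
#                      baselines=torch.zeros(1, 3),
#                      rewards=torch.ones(1, 3))
#   # -> approximately tensor([2.8250, 1.9405, 1.0000])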
def pad_to_last(nums, total_length, axis=-1, val=0):
"""Pad val to last in nums in given axis.
length of the result in given axis should be total_length.
Raises:
IndexError: If the input axis value is out of range of the nums array
Args:
nums (numpy.ndarray): The array to pad.
total_length (int): The final width of the Array.
axis (int): Axis along which a sum is performed.
        val (int): The value to pad with.
Returns:
torch.Tensor: Padded array
"""
tensor = torch.Tensor(nums)
axis = (axis + len(tensor.shape)) if axis < 0 else axis
if len(tensor.shape) <= axis:
raise IndexError('axis {} is out of range {}'.format(
axis, tensor.shape))
padding_config = [0, 0] * len(tensor.shape)
padding_idx = abs(axis - len(tensor.shape)) * 2 - 1
    padding_config[padding_idx] = max(total_length - tensor.shape[axis], 0)
    return F.pad(tensor, padding_config, value=val)
def filter_valids(tensor, valids):
"""Filter out tensor using valids (last index of valid tensors).
valids contains last indices of each rows.
Args:
tensor (torch.Tensor): The tensor to filter
        valids (list[int]): Number of valid values in each row
Returns:
torch.Tensor: Filtered Tensor
"""
return [tensor[i][:valids[i]] for i in range(len(valids))]
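# Usage sketches (illustrative):
#   pad_to_last([1, 2, 3], total_length=5)
#   # -> tensor([1., 2., 3., 0., 0.])
#   filter_valids(torch.arange(6).reshape(2, 3), valids=[2, 3])
#   # -> [tensor([0, 1]), tensor([3, 4, 5])]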
| [
"torch.nn.functional.pad",
"torch.full",
"torch.Tensor",
"torch.nn.functional.conv2d"
] | 1.3.0 | adibellathur/garage | 482a26a07d46091f878c41b582f1478588e397ff |
1.0 | # Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import condensa
from condensa import schemes
def test_float16(device):
scheme = schemes.Quantize(condensa.float16)
fc = torch.nn.Linear(100, 10).float().to(device)
scheme.pi(fc)
assert fc.weight.dtype == torch.float16
scheme.delta(fc)
assert fc.weight.dtype == torch.float32
if __name__ == '__main__':
test_float16('cpu')
if torch.cuda.is_available():
        test_float16('cuda')  # exercise the GPU path when CUDA is available
| [
"torch.nn.Linear",
"torch.cuda.is_available"
] | 1.0.0 | stormymcstorm/condensa | c7321e0a362f73eca9349769b341a7dd688ee1b9 |
0.4 | from easydict import EasyDict as edict
# from pathlib import Path
import torch
import os
from torchvision import transforms as trans
from utils.constants import *
list_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',
'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',
'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']
def get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25):
conf = edict()
conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
conf.input_size = [112, 112]
conf.face_limit = 5
conf.min_face_size = 30
conf.mode = mode
conf.net_size = net_size
if mode =='app':
        assert net_size in ['mobi', 'large', None], 'net_size should be mobi or large, please change in config.py'
conf.use_tensor = True
conf.work_path = WORK_PATH
conf.model_path = '%s/models'%WORK_PATH
conf.log_path = '%s/log'%WORK_PATH
conf.save_path = '%s/save'%WORK_PATH
conf.facebank_path = '%s/Face_bank'%WORK_PATH
conf.threshold = threshold
if use_mtcnn:
conf.use_mtcnn = True
else:
conf.use_mtcnn = False
        # at inference time, detect at most conf.face_limit faces per image (a small limit keeps slow machines responsive)
conf.test_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
if net_size == 'large':
conf.use_mobilfacenet = False
if net_mode == 'ir_se':
conf.net_mode = 'ir_se' # or 'ir'
conf.weight_path = '%s/weights/model_ir_se50.pth'%WORK_PATH
conf.url = list_model[1]
else:
conf.net_mode = 'ir' # or 'ir'
conf.weight_path = '%s/weights/model_ir50.pth'%WORK_PATH
conf.url = list_model[2]
if net_size =='mobi':
conf.use_mobilfacenet = True
conf.weight_path = '%s/weights/model_mobilefacenet.pth'%WORK_PATH
conf.url = list_model[0]
conf.video_source = 0
if mode =='training_eval':
conf.lr = 1e-3
conf.milestones = [18,30,42]
conf.momentum = 0.9
conf.pin_memory = True
# conf.num_workers = 4 # when batchsize is 200
conf.num_workers = 3
conf.train_root = "/mnt/01D4A1D481139570/Dataset/Face/casia"
conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt'
conf.batch_size = 4
conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'
conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'
conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'
conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'
conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'
conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'
return conf | [
"torch.cuda.is_available"
] | 0.4.0 | LongKt7/Face_Recognize_Pytorch | baa02e633d379abe1001c8b8acb942617177329c |
0.4 | from network import PNet,ONet
import torch,cv2,itertools
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import time
from matlab_cp2tform import get_similarity_transform_for_cv2
import math
def alignment(src_img,src_pts, crop_size = (112, 112)):
ref_pts = np.array([ [30.2946, 51.6963],
[65.5318, 51.5014],
[48.0252, 71.7366],
[33.5493, 92.3655],
[62.7299, 92.2041] ])
if crop_size[1]==112:
ref_pts[:,0] += 8.0
src_pts = np.array(src_pts).reshape(5,2)
s = np.array(src_pts).astype(np.float32)
r = np.array(ref_pts).astype(np.float32)
tfm = get_similarity_transform_for_cv2(s, r)
face_img = cv2.warpAffine(src_img, tfm, crop_size)
return face_img
def resize_square(img, height=128, color=(0, 0, 0)): # resize a rectangular image to a padded square
shape = img.shape[:2] # shape = [height, width]
ratio = float(height) / max(shape) # ratio = old / new
new_shape = [round(shape[0] * ratio), round(shape[1] * ratio)]
dw = height - new_shape[1] # width padding
dh = height - new_shape[0] # height padding
top, bottom = dh // 2, dh - (dh // 2)
left, right = dw // 2, dw - (dw // 2)
img = cv2.resize(img, (new_shape[1], new_shape[0]), interpolation=cv2.INTER_AREA) # resized, no border
return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color), ratio, dw // 2, dh // 2
def dotproduct(v1, v2):
return sum((a*b) for a, b in zip(v1, v2))
def length(v):
return math.sqrt(dotproduct(v, v))
def angle(v1, v2):
return math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))
def get_anchors(scale=64):
'''
compute anchors
return:
u_boxes:tensor([anchor_num,4]) (cx,cy,w,h): real anchors
        boxes:tensor([anchor_num,4]) (x1,y1,x2,y2): crop boxes for ONet, each of size 64
'''
sizes = [float(s) / scale for s in [32]]
aspect_ratios = [(1.,)]
feature_map_sizes = [int(scale/16)]
num_layers = len(feature_map_sizes)
u_boxes,boxes = [],[]
for i in range(num_layers):
fmsize = feature_map_sizes[i]
for h,w in itertools.product(range(fmsize),repeat=2):
cx = float(w)/feature_map_sizes[i]
cy = float(h)/feature_map_sizes[i]
s = sizes[i]
for j,ar in enumerate(aspect_ratios[i]):
u_boxes.append((cx,cy,float(s)*ar,float(s)*ar))
boxes.append((w*16-32,h*16-32,w*16+32,h*16+32))
return torch.Tensor(u_boxes),torch.Tensor(boxes).long()
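# Worked example (a sketch): get_anchors(scale=64) uses feature_map_sizes = [4],
# so it returns 16 anchors of normalized size 0.5 laid out on a 4x4 grid, plus 16
# corresponding 64x64 pixel crop boxes spaced 16 pixels apart for ONet.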
def nms(bboxes,scores,threshold=0.35):
'''
bboxes(tensor) [N,4]
scores(tensor) [N,]
'''
x1 = bboxes[:,0]
y1 = bboxes[:,1]
x2 = bboxes[:,2]
y2 = bboxes[:,3]
areas = (x2-x1) * (y2-y1)
_,order = scores.sort(0,descending=True)
keep = []
while order.numel() > 0:
if order.numel() == 1:
i = order.item()
else:
i = order[0].item()
keep.append(i)
if order.numel() == 1:
break
xx1 = x1[order[1:]].clamp(min=x1[i])
yy1 = y1[order[1:]].clamp(min=y1[i])
xx2 = x2[order[1:]].clamp(max=x2[i])
yy2 = y2[order[1:]].clamp(max=y2[i])
w = (xx2-xx1).clamp(min=0)
h = (yy2-yy1).clamp(min=0)
inter = w*h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
ids = (ovr<=threshold).nonzero().squeeze()
if ids.numel() == 0:
break
order = order[ids+1]
return torch.LongTensor(keep)
def decode_box(loc, size=64):
variances = [0.1,0.2]
anchor,crop = get_anchors(scale=size)
cxcy = loc[:,:2] * variances[0] * anchor[:,2:] + anchor[:,:2]
wh = torch.exp(loc[:,2:] * variances[1]) * anchor[:,2:]
boxes = torch.cat([cxcy-wh/2,cxcy+wh/2],1)
return boxes,anchor,crop
def decode_ldmk(ldmk,anchor):
variances = [0.1,0.2]
index_x = torch.Tensor([0,2,4,6,8]).long()
index_y = torch.Tensor([1,3,5,7,9]).long()
ldmk[:,index_x] = ldmk[:,index_x] * variances[0] * anchor[:,2].view(-1,1) + anchor[:,0].view(-1,1)
ldmk[:,index_y] = ldmk[:,index_y] * variances[0] * anchor[:,3].view(-1,1) + anchor[:,1].view(-1,1)
return ldmk
import os
# list_per = []
def detect(file, pic = None):
def change(boxes,ldmks, h, w, pad1):
index_x = torch.LongTensor([0,2,4,6,8])
index_y = torch.LongTensor([1,3,5,7,9])
if h <= w:
boxes[:,1] = boxes[:,1]*w-pad1
boxes[:,3] = boxes[:,3]*w-pad1
boxes[:,0] = boxes[:,0]*w
boxes[:,2] = boxes[:,2]*w
ldmks[:,index_x] = ldmks[:,index_x] * w
ldmks[:,index_y] = ldmks[:,index_y] * w - torch.Tensor([pad1])
else:
boxes[:,1] = boxes[:,1]*h
boxes[:,3] = boxes[:,3]*h
boxes[:,0] = boxes[:,0]*h-pad1
boxes[:,2] = boxes[:,2]*h-pad1
ldmks[:,index_x] = ldmks[:,index_x] * h - torch.Tensor([pad1])
ldmks[:,index_y] = ldmks[:,index_y] * h
return boxes,ldmks
if not isinstance(file, np.ndarray):
im = cv2.imread(file)
else:
im = file
if im is None:
print("can not open image:", file)
return
# pad img to square
h, w,_ = im.shape
dim_diff = np.abs(h - w)
pad1, pad2 = dim_diff //2, dim_diff - dim_diff // 2
pad = ((pad1,pad2),(0,0),(0,0)) if h<=w else ((0,0),(pad1, pad2),(0,0))
img = np.pad(im, pad,'constant', constant_values=128)
#get img_pyramid
img_scale, img_size = 0,int((img.shape[0]-1)/32)
while img_size > 0:
img_scale += 1
img_size /= 2
if img_scale == 6:
break
img_size = 64
img_pyramid = []
    t_boxes, t_confs, t_anchors, t_crops, t_which = None, None, None, None, None
for scale in range(4):
# print('scale:{0} img_size:{1}'.format(scale, img_size))
input_img = cv2.resize(img,(img_size, img_size))
img_pyramid.append(input_img.transpose(2,0,1))
im_tensor = torch.from_numpy(input_img.transpose(2,0,1)).float()
if use_gpu:
im_tensor = im_tensor.cuda()
#get conf and loc(box)
if use_gpu:
torch.cuda.synchronize()
loc,conf = pnet(torch.unsqueeze(im_tensor,0))
if use_gpu:
torch.cuda.synchronize()
# print('forward time:{}s'.format(e_t-s_t))
loc, conf = loc.detach().cpu(),conf.detach().cpu()
        loc, conf = loc.data.squeeze(0), F.softmax(conf.squeeze(0), dim=1)
boxes, anchor, crop = decode_box(loc,size=img_size)
which_img = torch.tensor([scale]).long().expand((crop.shape[0],))
#add box into stack
if scale == 0:
t_boxes, t_confs, t_anchors, t_crops, t_which = boxes, conf, anchor, crop, which_img
else:
t_boxes = torch.cat((t_boxes, boxes),0)
t_confs = torch.cat((t_confs, conf),0)
t_anchors = torch.cat((t_anchors, anchor),0)
t_crops = torch.cat((t_crops, crop),0)
t_which = torch.cat((t_which, which_img),0)
img_size *= 2
#get right boxes and nms
t_confs[:,0] = 0.6
max_conf, labels = t_confs.max(1)
    if labels.long().sum().item() == 0:
return None
ids = labels.nonzero().squeeze(1)
t_boxes, t_confs, t_anchors, t_crops, t_which = t_boxes[ids], t_confs[ids], t_anchors[ids], t_crops[ids], t_which[ids]
max_conf = max_conf[ids]
keep = nms(t_boxes, max_conf)
t_boxes, max_conf, t_anchors, t_crops, t_which = t_boxes[keep], max_conf[keep], t_anchors[keep], t_crops[keep], t_which[keep]
t_boxes = t_boxes.detach().numpy()
max_conf = max_conf.detach().numpy()
#get crop and ldmks
crop_imgs = []
for i in range(t_boxes.shape[0]):
img = img_pyramid[t_which[i]]
crop = t_crops[i].numpy()
_,h_,w_ = img.shape
o_x1,o_y1,o_x2,o_y2 = max(crop[0],0),max(crop[1],0),min(crop[2],w_),min(crop[3],h_)
c_x1 = 0 if crop[0] >=0 else -crop[0]
c_y1 = 0 if crop[1] >=0 else -crop[1]
c_x2 = 64 if crop[2] <= w_ else 64 - (crop[2] - w_)
c_y2 = 64 if crop[3] <= h_ else 64 - (crop[3] - h_)
crop_img = np.ones((3,64,64))*128
np.copyto(crop_img[:,c_y1:c_y2,c_x1:c_x2],img[:,o_y1:o_y2,o_x1:o_x2])
crop_imgs.append(crop_img)
crop_imgs = torch.from_numpy(np.array(crop_imgs)).float()
if use_gpu:
crop_imgs = crop_imgs.cuda()
t_ldmks = onet(crop_imgs).detach().cpu()[:,10,:].squeeze(1)
t_ldmks = decode_ldmk(t_ldmks, t_anchors)
t_boxes, t_ldmks = change(t_boxes,t_ldmks, h, w, pad1)
t_faces = []
for i in range(len(t_boxes)):
box, prob, ldmk = t_boxes[i], max_conf[i], t_ldmks[i]
if prob <= 0.7:
continue
ldmk_fn = ldmk.reshape(5,2)
        # clamp the padded box to the image bounds
        x1 = max(int(box[0]) - 5, 0)
        y1 = max(int(box[1]) - 5, 0)
        x2 = min(int(box[2]) + 5, im.shape[1])
        y2 = min(int(box[3]) + 5, im.shape[0])
face = alignment(im, ldmk_fn)
cv2.rectangle(im, (x1,y1),(x2,y2), (255,0,0), 1)
cv2.imwrite('a.png',im)
t_faces.append(face)
return t_boxes, t_faces
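# Usage sketch (illustrative; assumes the module-level `pnet`, `onet` and `use_gpu`
# globals have been initialized, e.g. by constructing Face_Alignt below):
#   result = detect('test.jpg')
#   if result is not None:
#       boxes, faces = result  # boxes: Nx4 array, faces: list of aligned 112x112 crops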
import glob, tqdm
class Face_Alignt():
    def __init__(self, use_gpu = True):
        # detect() above looks up `pnet`, `onet` and `use_gpu` at module level,
        # so publish them as globals once the networks are built.
        global pnet, onet
        globals()['use_gpu'] = use_gpu
        pnet, onet = PNet(), ONet()
        pnet.load_state_dict(torch.load('weight/msos_pnet_rotate.pt', map_location=lambda storage, loc: storage), strict=False)
        onet.load_state_dict(torch.load('weight/msos_onet_rotate.pt', map_location=lambda storage, loc: storage), strict=False)
        onet.float()
        pnet.eval()
        onet.eval()
        if use_gpu:
            torch.cuda.set_device(0)
            pnet.cuda()
            onet.cuda()
        else:
            torch.set_num_threads(1)
    def align_multi(self, img, limit=None, min_face_size=30.0):
return detect(img) | [
"torch.cat",
"torch.cuda.synchronize",
"torch.unsqueeze",
"torch.cuda.set_device",
"torch.LongTensor",
"torch.tensor",
"torch.load",
"torch.Tensor",
"torch.exp",
"torch.set_num_threads"
] | 0.4.0 | LongKt7/Face_Recognize_Pytorch | baa02e633d379abe1001c8b8acb942617177329c |
1.6 | import torch
import numpy as np
import SimpleITK as sitk
from Phys_Seg.data_loading import load_and_preprocess, save_segmentation_nifti, read_file, save_img
from Phys_Seg.predict_case import predict_phys_seg, physics_preprocessing, image_preprocessing
import importlib
from Phys_Seg.utils import postprocess_prediction, get_params_fname, maybe_download_parameters
from network_architecture import nnUNet
import os
import Phys_Seg
def apply_phys_seg(img, out_fname):
img_itk = sitk.ReadImage(img)
img_npy = sitk.GetArrayFromImage(img_itk)
out = sitk.GetImageFromArray(img_npy)
out.CopyInformation(img_itk)
sitk.WriteImage(out, out_fname)
def run_phys_seg(mri_fnames, output_fnames, sequence='MPRAGE', physics_params=None,
# config_file=os.path.join(Phys_Seg.__path__[0], "config.py"),
device=None, overwrite=True):
"""
    :param mri_fnames: str or list/tuple of str
    :param output_fnames: str or list/tuple of str. If list: must have the same length as mri_fnames
    :param sequence: MPRAGE or SPGR (for now)
    :param physics_params: optional string that evaluates to the sequence's acquisition parameters;
    entry 1 (TR) is converted to pTD by subtracting entry 0 before physics preprocessing
    :param device: either int (for device id) or 'cpu'
    :param overwrite: True or False
:return:
"""
physics_input_size = {'MPRAGE': 4,
'SPGR': 6}
# Load in model weights
maybe_download_parameters(sequence=sequence, physics_flag=True if physics_params else False)
params_file = get_params_fname(sequence=sequence, physics_flag=True if physics_params else False)
net = nnUNet(1, 4, physics_flag=True if physics_params else False,
physics_input=physics_input_size[sequence],
physics_output=40)
if device == "cpu":
net = net.cpu()
else:
net.cuda(device)
net = torch.nn.DataParallel(net, device_ids=[device, int(1-device)])
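        # note: device_ids=[device, 1 - device] assumes exactly two GPUs with ids 0 and 1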
net.to(f'cuda:{net.device_ids[0]}')
# net = torch.nn.DataParallel(net)
if not isinstance(mri_fnames, (list, tuple)):
mri_fnames = [mri_fnames]
if not isinstance(output_fnames, (list, tuple)):
output_fnames = [output_fnames]
params = torch.load(params_file, map_location=lambda storage, loc: storage)
for in_fname, out_fname in zip(mri_fnames, output_fnames):
if overwrite or not (os.path.isfile(out_fname)):
print("File:", in_fname)
print("preprocessing...")
try:
data, aff = read_file(in_fname)
except RuntimeError:
print("\nERROR\nCould not read file", in_fname, "\n")
continue
except AssertionError as e:
print(e)
continue
# Process data
if physics_params is not None:
physics_params = eval(physics_params)
# Convert TR to pTD
physics_params[1] = physics_params[1] - physics_params[0]
print(physics_params)
processed_physics = physics_preprocessing(np.array(physics_params), sequence)
else:
processed_physics = None
data = image_preprocessing(patient_data=data)
print("prediction (CNN id)...")
net.load_state_dict(params['model_state_dict'])
net.eval()
seg = predict_phys_seg(net=net,
patient_data=data,
processed_physics=processed_physics,
main_device=device)
print("exporting segmentation...")
save_segmentation_nifti(seg, aff, out_fname)
# apply_phys_seg(in_fname, out_fname)
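# Minimal usage sketch (illustrative only: the file paths below are hypothetical and
# `device` follows the convention documented above, i.e. an int GPU id or 'cpu').
if __name__ == "__main__":
    run_phys_seg(mri_fnames="subject01_T1.nii.gz",
                 output_fnames="subject01_phys_seg.nii.gz",
                 sequence="MPRAGE",
                 physics_params=None,
                 device="cpu")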
| [
"torch.load"
] | 1.6.0 | pedrob37/Phys_Seg | 7adc65d7b228b3a5702acfa9e6d0494d6b4c2dee |
1.8 | import math
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
from torchreid.losses import AngleSimpleLinear
from torchreid.ops import Dropout, EvalModeSetter, rsc
from .common import HSigmoid, HSwish, ModelInterface, make_divisible
import timm
from torchreid.integration.nncf.compression import get_no_nncf_trace_context_manager, nullcontext
__all__ = ['mobilenetv3_large', 'mobilenetv3_large_075', 'mobilenetv3_small', 'mobilenetv3_large_150',
'mobilenetv3_large_125']
pretrained_urls = {
'mobilenetv3_small':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-small-55df8e1f.pth?raw=true',
'mobilenetv3_large':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-1cd25616.pth?raw=true',
'mobilenetv3_large_075':
'https://github.com/d-li14/mobilenetv3.pytorch/blob/master/pretrained/mobilenetv3-large-0.75-9632d2a8.pth?raw=true',
'mobilenetv3_large_21k':
'https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/mobilenetv3_large_100_miil_21k.pth'
}
SHOULD_NNCF_SKIP_SE_LAYERS = False
SHOULD_NNCF_SKIP_HEAD = False
no_nncf_se_layer_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_SE_LAYERS else nullcontext
no_nncf_head_context = get_no_nncf_trace_context_manager() if SHOULD_NNCF_SKIP_HEAD else nullcontext
class SELayer(nn.Module):
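    """Squeeze-and-Excitation block: globally average-pools each channel, passes the result
    through a small bottleneck MLP gated by HSigmoid, and rescales the input channels by the
    resulting attention weights."""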
def __init__(self, channel, reduction=4):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, make_divisible(channel // reduction, 8)),
nn.ReLU(inplace=True),
nn.Linear(make_divisible(channel // reduction, 8), channel),
HSigmoid()
)
def forward(self, x):
with no_nncf_se_layer_context():
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
def conv_3x3_bn(inp, oup, stride, IN_conv1=False):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup) if not IN_conv1 else nn.InstanceNorm2d(oup, affine=True),
HSwish()
)
def conv_1x1_bn(inp, oup, loss='softmax'):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
HSwish() if loss == 'softmax' else nn.PReLU()
)
class InvertedResidual(nn.Module):
def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
self.identity = stride == 1 and inp == oup
if inp == hidden_dim:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
HSwish() if use_hs else nn.ReLU(inplace=True),
# Squeeze-and-Excite
SELayer(hidden_dim) if use_se else nn.Identity(),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
HSwish() if use_hs else nn.ReLU(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
# Squeeze-and-Excite
SELayer(hidden_dim) if use_se else nn.Identity(),
HSwish() if use_hs else nn.ReLU(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.identity:
return x + self.conv(x)
else:
return self.conv(x)
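# Each row of the `cfgs` tables passed to MobileNetV3 below is read as
# (kernel size k, expansion ratio t, output channels c, use_se, use_hs, stride s)
# and is unpacked into one InvertedResidual block in MobileNetV3.__init__.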
class MobileNetV3(ModelInterface):
def __init__(self,
cfgs,
mode,
IN_conv1=False,
num_classes=1000,
width_mult=1.,
in_channels=3,
input_size=(224, 224),
dropout_cls = None,
pooling_type='avg',
IN_first=False,
self_challenging_cfg=False,
**kwargs):
super().__init__(**kwargs)
self.in_size = input_size
self.num_classes = num_classes
self.input_IN = nn.InstanceNorm2d(in_channels, affine=True) if IN_first else None
self.pooling_type = pooling_type
self.self_challenging_cfg = self_challenging_cfg
self.width_mult = width_mult
self.dropout_cls = dropout_cls
# setting of inverted residual blocks
self.cfgs = cfgs
assert mode in ['large', 'small']
# building first layer
input_channel = make_divisible(16 * self.width_mult, 8)
stride = 1 if self.in_size[0] < 100 else 2
layers = [conv_3x3_bn(3, input_channel, stride, IN_conv1)]
# building inverted residual blocks
block = InvertedResidual
flag = True
for k, t, c, use_se, use_hs, s in self.cfgs:
if (self.in_size[0] < 100) and (s == 2) and flag:
s = 1
flag = False
output_channel = make_divisible(c * self.width_mult, 8)
exp_size = make_divisible(input_channel * t, 8)
layers.append(block(input_channel, exp_size, output_channel, k, s, use_se, use_hs))
input_channel = output_channel
self.features = nn.Sequential(*layers)
self.num_features = exp_size
# building last several layers
self.conv = conv_1x1_bn(input_channel, exp_size, self.loss)
output_channel = {'large': 1280, 'small': 1024}
output_channel = make_divisible(output_channel[mode] * self.width_mult, 8) if self.width_mult > 1.0 else output_channel[mode]
if self.loss == 'softmax' or self.loss == 'asl':
self.classifier = nn.Sequential(
nn.Linear(exp_size, output_channel),
nn.BatchNorm1d(output_channel),
HSwish(),
Dropout(**self.dropout_cls),
nn.Linear(output_channel, self.num_classes),
)
else:
assert self.loss in ['am_softmax', 'am_binary']
self.classifier = nn.Sequential(
nn.Linear(exp_size, output_channel),
nn.BatchNorm1d(output_channel),
nn.PReLU(),
Dropout(**self.dropout_cls),
AngleSimpleLinear(output_channel, self.num_classes),
)
self._initialize_weights()
self.forward = autocast(self.mix_precision)(self.forward)
def extract_features(self, x):
y = self.conv(self.features(x))
return y
def infer_head(self, x, skip_pool=False):
if not skip_pool:
glob_features = self._glob_feature_vector(x, self.pooling_type, reduce_dims=False)
else:
glob_features = x
logits = self.classifier(glob_features.view(x.shape[0], -1))
return glob_features, logits
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def forward(self, x, return_featuremaps=False, get_embeddings=False, gt_labels=None):
if self.input_IN is not None:
x = self.input_IN(x)
y = self.extract_features(x)
if return_featuremaps:
return y
with no_nncf_head_context():
glob_features, logits = self.infer_head(y, skip_pool=False)
if self.training and self.self_challenging_cfg.enable and gt_labels is not None:
glob_features = rsc(
features = glob_features,
scores = logits,
labels = gt_labels,
retain_p = 1.0 - self.self_challenging_cfg.drop_p,
retain_batch = 1.0 - self.self_challenging_cfg.drop_batch_p
)
with EvalModeSetter([self.output], m_type=(nn.BatchNorm1d, nn.BatchNorm2d)):
_, logits = self.infer_head(x, skip_pool=True)
if not self.training and self.is_classification():
return [logits]
if get_embeddings:
out_data = [logits, glob_features]
elif self.loss in ['softmax', 'am_softmax', 'asl', 'am_binary']:
out_data = [logits]
elif self.loss in ['triplet']:
out_data = [logits, glob_features]
else:
raise KeyError("Unsupported loss: {}".format(self.loss))
return tuple(out_data)
def init_pretrained_weights(model, key='', **kwargs):
"""Initializes model with pretrained weights.
Layers that don't match with pretrained layers in name or size are kept unchanged.
"""
import os
import errno
import gdown
from torchreid.utils import load_pretrained_weights
def _get_torch_home():
ENV_TORCH_HOME = 'TORCH_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
torch_home = os.path.expanduser(
os.getenv(
ENV_TORCH_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'
)
)
)
return torch_home
torch_home = _get_torch_home()
model_dir = os.path.join(torch_home, 'checkpoints')
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
filename = key + '_imagenet.pth'
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
gdown.download(pretrained_urls[key], cached_file)
model = load_pretrained_weights(model, cached_file, **kwargs)
def mobilenetv3_large_075(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult =.75, **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_large_075')
return net
def mobilenetv3_large(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1., **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_large')
return net
def mobilenetv3_large_150(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1.5, **kwargs)
if pretrained:
raise NotImplementedError("The weights for this configuration are not available")
return net
def mobilenetv3_large_125(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Large model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 0, 0, 1],
[3, 4, 24, 0, 0, 2],
[3, 3, 24, 0, 0, 1],
[5, 3, 40, 1, 0, 2],
[5, 3, 40, 1, 0, 1],
[5, 3, 40, 1, 0, 1],
[3, 6, 80, 0, 1, 2],
[3, 2.5, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 2.3, 80, 0, 1, 1],
[3, 6, 112, 1, 1, 1],
[3, 6, 112, 1, 1, 1],
[5, 6, 160, 1, 1, 2],
[5, 6, 160, 1, 1, 1],
[5, 6, 160, 1, 1, 1]
]
net = MobileNetV3(cfgs, mode='large', width_mult = 1.25, **kwargs)
if pretrained:
raise NotImplementedError("The weights for this configuration are not available")
return net
def mobilenetv3_small(pretrained=False, **kwargs):
"""
Constructs a MobileNetV3-Small model
"""
cfgs = [
# k, t, c, SE, HS, s
[3, 1, 16, 1, 0, 2],
[3, 4.5, 24, 0, 0, 2],
[3, 3.67, 24, 0, 0, 1],
[5, 4, 40, 1, 1, 2],
[5, 6, 40, 1, 1, 1],
[5, 6, 40, 1, 1, 1],
[5, 3, 48, 1, 1, 1],
[5, 3, 48, 1, 1, 1],
[5, 6, 96, 1, 1, 2],
[5, 6, 96, 1, 1, 1],
[5, 6, 96, 1, 1, 1],
]
net = MobileNetV3(cfgs, mode='small', width_mult = 1., **kwargs)
if pretrained:
init_pretrained_weights(net, key='mobilenetv3_small')
return net
| [
"torch.nn.Linear",
"torch.nn.Identity",
"torch.cuda.amp.autocast",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.PReLU",
"torch.nn.BatchNorm1d",
"torch.nn.InstanceNorm2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.8.1 | daniil-lyakhov/deep-object-reid | b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 |
1.8 | from __future__ import absolute_import, division, print_function
from enum import auto
import torch
import torch.nn.functional as F
from torch import nn
from torch.cuda.amp import GradScaler, autocast
from torchreid import metrics
from torchreid.losses import AsymmetricLoss, AMBinaryLoss
from torchreid.metrics.accuracy import accuracy
from torchreid.optim import SAM
from ..engine import Engine
class MultilabelEngine(Engine):
r"""Multilabel classification engine. It supports ASL, BCE and Angular margin loss with binary classification."""
def __init__(self, datamanager, models, optimizers, schedulers, use_gpu, save_all_chkpts,
train_patience, early_stoping, lr_decay_factor, loss_name, label_smooth,
lr_finder, m, s, sym_adjustment, auto_balance, amb_k, amb_t, clip_grad,
should_freeze_aux_models, nncf_metainfo, initial_lr,
target_metric, use_ema_decay, ema_decay, asl_gamma_pos, asl_gamma_neg, asl_p_m,
mix_precision, **kwargs):
super().__init__(datamanager,
models=models,
optimizers=optimizers,
schedulers=schedulers,
use_gpu=use_gpu,
save_all_chkpts=save_all_chkpts,
train_patience=train_patience,
lr_decay_factor=lr_decay_factor,
early_stoping=early_stoping,
should_freeze_aux_models=should_freeze_aux_models,
nncf_metainfo=nncf_metainfo,
initial_lr=initial_lr,
lr_finder=lr_finder,
target_metric=target_metric,
use_ema_decay=use_ema_decay,
ema_decay=ema_decay)
self.main_losses = nn.ModuleList()
self.clip_grad = clip_grad
num_classes = self.datamanager.num_train_pids
if not isinstance(num_classes, (list, tuple)):
num_classes = [num_classes]
self.num_classes = num_classes
for _ in enumerate(self.num_classes):
if loss_name == 'asl':
self.main_losses.append(AsymmetricLoss(
gamma_neg=asl_gamma_neg,
gamma_pos=asl_gamma_pos,
probability_margin=asl_p_m,
label_smooth=label_smooth,
))
elif loss_name == 'bce':
self.main_losses.append(AsymmetricLoss(
gamma_neg=0,
gamma_pos=0,
probability_margin=0,
label_smooth=label_smooth,
))
elif loss_name == 'am_binary':
self.main_losses.append(AMBinaryLoss(
m=m,
k=amb_k,
t=amb_t,
s=s,
sym_adjustment=sym_adjustment,
auto_balance=auto_balance,
gamma_neg=asl_gamma_neg,
gamma_pos=asl_gamma_pos,
label_smooth=label_smooth,
))
num_classes = self.datamanager.num_train_pids
if not isinstance(num_classes, (list, tuple)):
num_classes = [num_classes]
self.num_classes = num_classes
self.num_targets = len(self.num_classes)
self.enable_sam = isinstance(self.optims[self.main_model_name], SAM)
for model_name in self.get_model_names():
assert isinstance(self.optims[model_name], SAM) == self.enable_sam, "SAM must be enabled \
for all models or none of them"
self.scaler = GradScaler(enabled=mix_precision)
self.prev_smooth_top1 = 0.
self.forward_backward = autocast(mix_precision)(self.forward_backward)
def forward_backward(self, data):
n_iter = self.epoch * self.num_batches + self.batch_idx
train_records = self.parse_data_for_train(data, output_dict=True, use_gpu=self.use_gpu)
imgs, obj_ids = train_records['img'], train_records['obj_id']
model_names = self.get_model_names()
num_models = len(model_names)
steps = [1,2] if self.enable_sam and not self.lr_finder else [1]
# forward pass
for step in steps:
            # if SAM is enabled, statistics are computed on each step but only those from
            # the second step are saved; this is done purely for convenience
avg_acc = 0.0
out_logits = [[] for _ in range(self.num_targets)]
total_loss = torch.zeros([], dtype=imgs.dtype, device=imgs.device)
loss_summary = dict()
for model_name in model_names:
self.optims[model_name].zero_grad()
model_loss, model_loss_summary, model_avg_acc, model_logits = self._single_model_losses(
self.models[model_name], train_records, imgs, obj_ids, n_iter, model_name)
avg_acc += model_avg_acc / float(num_models)
total_loss += model_loss / float(num_models)
loss_summary.update(model_loss_summary)
for trg_id in range(self.num_targets):
if model_logits[trg_id] is not None:
out_logits[trg_id].append(model_logits[trg_id])
model_num = len(model_names)
# compute mutual loss
if len(model_names) > 1:
mutual_loss = torch.zeros([], dtype=imgs.dtype, device=imgs.device)
for trg_id in range(self.num_targets):
if len(out_logits[trg_id]) <= 1:
continue
for model_i, logits_i in enumerate(out_logits[trg_id]):
probabilities_i = torch.sigmoid(logits_i)
kl_loss = 0
for model_j, logits_j in enumerate(out_logits[trg_id]):
if model_i != model_j:
probabilities_j = torch.sigmoid(logits_j)
kl_loss += self.kl_div_binary(probabilities_i, probabilities_j)
mutual_loss += kl_loss / (model_num - 1)
loss_summary['mutual_{}/{}'.format(trg_id, model_names[model_i])] = mutual_loss.item()
should_turn_off_mutual_learning = self._should_turn_off_mutual_learning(self.epoch)
coeff_mutual_learning = int(not should_turn_off_mutual_learning)
total_loss += coeff_mutual_learning * mutual_loss
# backward pass
self.scaler.scale(total_loss).backward(retain_graph=False)
for model_name in model_names:
if self.clip_grad != 0 and step == 1:
self.scaler.unscale_(self.optims[model_name])
torch.nn.utils.clip_grad_norm_(self.models[model_name].parameters(), self.clip_grad)
if not self.enable_sam and step == 1:
self.scaler.step(self.optims[model_name])
self.scaler.update()
elif step == 1:
assert self.enable_sam
if self.clip_grad == 0:
# if self.clip_grad == 0 this means that unscale_ wasn't applied
# unscale parameters to perform SAM manipulations with parameters
self.scaler.unscale_(self.optims[model_name])
overflow = self.optims[model_name].first_step(self.scaler)
self.scaler.update() # update scaler after first step
if overflow:
print("Overflow occurred. Skipping step ...")
loss_summary['loss'] = total_loss.item()
# skip second step if overflow occurred
return loss_summary, avg_acc
else:
assert self.enable_sam and step==2
if self.clip_grad == 0:
self.scaler.unscale_(self.optims[model_name])
self.optims[model_name].second_step()
self.scaler.update()
loss_summary['loss'] = total_loss.item()
return loss_summary, avg_acc
def _single_model_losses(self, model, train_records, imgs, obj_ids, n_iter, model_name):
model_output = model(imgs)
all_logits = self._parse_model_output(model_output)
total_loss = torch.zeros([], dtype=imgs.dtype, device=imgs.device)
out_logits = []
loss_summary = dict()
num_trg_losses = 0
avg_acc = 0
for trg_id in range(self.num_targets):
trg_mask = train_records['dataset_id'] == trg_id
trg_obj_ids = obj_ids[trg_mask]
trg_num_samples = trg_obj_ids.numel()
if trg_num_samples == 0:
out_logits.append(None)
continue
trg_logits = all_logits[trg_id][trg_mask]
main_loss = self.main_losses[trg_id](trg_logits, trg_obj_ids)
avg_acc += metrics.accuracy_multilabel(trg_logits, trg_obj_ids).item()
loss_summary['main_{}/{}'.format(trg_id, model_name)] = main_loss.item()
scaled_trg_logits = self.main_losses[trg_id].get_last_scale() * trg_logits
out_logits.append(scaled_trg_logits)
total_loss += main_loss
num_trg_losses += 1
total_loss /= float(num_trg_losses)
avg_acc /= float(num_trg_losses)
return total_loss, loss_summary, avg_acc, out_logits
def kl_div_binary(self, x, y):
        ''' compute the KL divergence between two tensors representing
        independent binary distributions'''
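        # F.kl_div(input, target) expects log-probabilities as `input`, so the value computed
        # per class is KL(q || p) = q*log(q/p) + (1-q)*log((1-q)/(1-p)), with p derived from x
        # (plus 1e-8 before the log) and q from y, averaged over classes and over the batch.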
# get binary distributions for two models with shape = (BxCx2)
p = torch.stack((x, (1-x))).permute(1,2,0)
q = torch.stack((y, (1-y))).permute(1,2,0)
# log probabilities
p_log = torch.log(p.add_(1e-8))
        # compute the exact KL divergence for each sample, then apply the batchmean reduction
return F.kl_div(p_log, q, reduction='none').sum(2).div_(x.size(1)).sum().div_(x.size(0))
def _parse_model_output(self, model_output):
all_logits = model_output[0] if isinstance(model_output, (tuple, list)) else model_output
all_logits = all_logits if isinstance(all_logits, (tuple, list)) else [all_logits]
return all_logits
def exit_on_plateau_and_choose_best(self, top1, smooth_top1):
'''
        The function returns a pair (should_exit, is_candidate_for_best).
        The function marks this checkpoint as a candidate for best if either it is the first
        checkpoint for this LR or it is better than the previous best.
        The function sets should_exit = True if overfitting is observed or the metric
        does not improve for a predetermined number of epochs.
'''
should_exit = False
is_candidate_for_best = False
current_metric = round(top1, 4)
if smooth_top1 <= self.prev_smooth_top1:
self.iter_to_wait += 1
if self.iter_to_wait >= self.train_patience:
print("The training should be stopped due to no improvements for {} epochs".format(self.train_patience))
should_exit = True
else:
self.iter_to_wait = 0
if current_metric >= self.best_metric:
self.best_metric = current_metric
is_candidate_for_best = True
self.prev_smooth_top1 = smooth_top1
return should_exit, is_candidate_for_best
| [
"torch.zeros",
"torch.sigmoid",
"torch.cuda.amp.autocast",
"torch.stack",
"torch.nn.ModuleList",
"torch.nn.functional.kl_div",
"torch.cuda.amp.GradScaler"
] | 1.8.1 | daniil-lyakhov/deep-object-reid | b0f7d6a2d4cff8c417a66d82c09d16788d81ec67 |
1.6 | import torch
import torch.nn as nn
import torch.nn.functional as F
def _create_activation(activation_type):
if activation_type == 'relu':
return torch.relu
elif activation_type == 'swish':
return lambda x: x * torch.sigmoid(x)
raise ValueError('invalid activation_type.')
def create_encoder(observation_shape,
action_size=None,
use_batch_norm=False,
discrete_action=False,
activation_type='relu',
**kwargs):
activation = _create_activation(activation_type)
if len(observation_shape) == 3:
# pixel input
if action_size is not None:
return PixelEncoderWithAction(observation_shape,
action_size,
use_batch_norm=use_batch_norm,
discrete_action=discrete_action,
activation=activation,
**kwargs)
return PixelEncoder(observation_shape,
use_batch_norm=use_batch_norm,
activation=activation,
**kwargs)
elif len(observation_shape) == 1:
# vector input
if action_size is not None:
return VectorEncoderWithAction(observation_shape,
action_size,
use_batch_norm=use_batch_norm,
discrete_action=discrete_action,
activation=activation,
**kwargs)
return VectorEncoder(observation_shape,
use_batch_norm=use_batch_norm,
activation=activation,
**kwargs)
else:
raise ValueError('observation_shape must be 1d or 3d.')
class PixelEncoder(nn.Module):
def __init__(self,
observation_shape,
filters=None,
feature_size=None,
use_batch_norm=False,
activation=torch.relu):
super().__init__()
# default architecture is based on Nature DQN paper.
if filters is None:
filters = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
if feature_size is None:
feature_size = 512
self.observation_shape = observation_shape
self.use_batch_norm = use_batch_norm
self.activation = activation
self.feature_size = feature_size
# convolutional layers
in_channels = [observation_shape[0]] + [f[0] for f in filters[:-1]]
self.convs = nn.ModuleList()
self.conv_bns = nn.ModuleList()
for in_channel, f in zip(in_channels, filters):
out_channel, kernel_size, stride = f
conv = nn.Conv2d(in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride)
self.convs.append(conv)
if use_batch_norm:
self.conv_bns.append(nn.BatchNorm2d(out_channel))
# last dense layer
self.fc = nn.Linear(self._get_linear_input_size(), feature_size)
if use_batch_norm:
self.fc_bn = nn.BatchNorm1d(feature_size)
def _get_linear_input_size(self):
x = torch.rand((1, ) + self.observation_shape)
with torch.no_grad():
return self._conv_encode(x).view(1, -1).shape[1]
def _conv_encode(self, x):
h = x
for i in range(len(self.convs)):
h = self.activation(self.convs[i](h))
if self.use_batch_norm:
h = self.conv_bns[i](h)
return h
def forward(self, x):
h = self._conv_encode(x)
h = self.activation(self.fc(h.view(h.shape[0], -1)))
if self.use_batch_norm:
h = self.fc_bn(h)
return h
class PixelEncoderWithAction(PixelEncoder):
def __init__(self,
observation_shape,
action_size,
filters=None,
feature_size=None,
use_batch_norm=False,
discrete_action=False,
activation=torch.relu):
self.action_size = action_size
self.discrete_action = discrete_action
super().__init__(observation_shape, filters, feature_size,
use_batch_norm, activation)
def _get_linear_input_size(self):
size = super()._get_linear_input_size()
return size + self.action_size
def forward(self, x, action):
h = self._conv_encode(x)
if self.discrete_action:
action = F.one_hot(action.view(-1).long(),
num_classes=self.action_size).float()
        # concatenate the flattened conv features with the action
h = torch.cat([h.view(h.shape[0], -1), action], dim=1)
h = self.activation(self.fc(h))
if self.use_batch_norm:
h = self.fc_bn(h)
return h
class VectorEncoder(nn.Module):
def __init__(self,
observation_shape,
hidden_units=None,
use_batch_norm=False,
activation=torch.relu):
super().__init__()
self.observation_shape = observation_shape
if hidden_units is None:
hidden_units = [256, 256]
self.use_batch_norm = use_batch_norm
self.feature_size = hidden_units[-1]
self.activation = activation
in_units = [observation_shape[0]] + hidden_units[:-1]
self.fcs = nn.ModuleList()
self.bns = nn.ModuleList()
for in_unit, out_unit in zip(in_units, hidden_units):
self.fcs.append(nn.Linear(in_unit, out_unit))
if use_batch_norm:
self.bns.append(nn.BatchNorm1d(out_unit))
def forward(self, x):
h = x
for i in range(len(self.fcs)):
h = self.activation(self.fcs[i](h))
if self.use_batch_norm:
h = self.bns[i](h)
return h
class VectorEncoderWithAction(VectorEncoder):
def __init__(self,
observation_shape,
action_size,
hidden_units=None,
use_batch_norm=False,
discrete_action=False,
activation=torch.relu):
self.action_size = action_size
self.discrete_action = discrete_action
concat_shape = (observation_shape[0] + action_size, )
super().__init__(concat_shape, hidden_units, use_batch_norm,
activation)
self.observation_shape = observation_shape
def forward(self, x, action):
if self.discrete_action:
action = F.one_hot(action.view(-1).long(),
num_classes=self.action_size).float()
x = torch.cat([x, action], dim=1)
return super().forward(x)
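# Minimal usage sketch (illustrative only): build the default Nature-DQN pixel encoder
# for 4-channel 84x84 observations and run a dummy batch through it.
if __name__ == '__main__':
    _encoder = create_encoder((4, 84, 84))
    _x = torch.rand(2, 4, 84, 84)
    print(_encoder(_x).shape)  # torch.Size([2, 512])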
| [
"torch.nn.Linear",
"torch.rand",
"torch.cat",
"torch.sigmoid",
"torch.nn.ModuleList",
"torch.no_grad",
"torch.nn.BatchNorm2d",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d"
] | 1.6.0 | meokz/d3rlpy | 40504e2d8b424547558ab82786c523e8f4626a82 |
0.4 | # coding: UTF-8
import argparse
import logging
import random
import torch
import copy
import numpy as np
from dataset import CDTB
from collections import Counter
from itertools import chain
from structure.vocab import Vocab, Label
from structure.nodes import node_type_filter, EDU, Relation, Sentence, TEXT
from treebuilder.partptr.model import PartitionPtr
from treebuilder.partptr.parser import PartitionPtrParser
import torch.optim as optim
from util.eval import parse_eval, gen_parse_report
from tensorboardX import SummaryWriter
def build_vocab(dataset):
word_freq = Counter()
pos_freq = Counter()
nuc_freq = Counter()
rel_freq = Counter()
for paragraph in chain(*dataset):
for node in paragraph.iterfind(filter=node_type_filter([EDU, Relation])):
if isinstance(node, EDU):
word_freq.update(node.words)
pos_freq.update(node.tags)
elif isinstance(node, Relation):
nuc_freq[node.nuclear] += 1
rel_freq[node.ftype] += 1
word_vocab = Vocab("word", word_freq)
pos_vocab = Vocab("part of speech", pos_freq)
nuc_label = Label("nuclear", nuc_freq)
rel_label = Label("relation", rel_freq)
return word_vocab, pos_vocab, nuc_label, rel_label
def gen_decoder_data(root, edu2ids):
# splits s0 s1 s2 s3 s4 s5 s6
# edus s/ e0 e1 e2 e3 e4 e5 /s
splits = [] # [(0, 3, 6, NS), (0, 2, 3, SN), ...]
child_edus = [] # [edus]
if isinstance(root, EDU):
child_edus.append(root)
elif isinstance(root, Sentence):
for child in root:
_child_edus, _splits = gen_decoder_data(child, edu2ids)
child_edus.extend(_child_edus)
splits.extend(_splits)
elif isinstance(root, Relation):
children = [gen_decoder_data(child, edu2ids) for child in root]
if len(children) < 2:
raise ValueError("relation node should have at least 2 children")
while children:
left_child_edus, left_child_splits = children.pop(0)
if children:
last_child_edus, _ = children[-1]
start = edu2ids[left_child_edus[0]]
split = edu2ids[left_child_edus[-1]] + 1
end = edu2ids[last_child_edus[-1]] + 1
nuc = root.nuclear
rel = root.ftype
splits.append((start, split, end, nuc, rel))
child_edus.extend(left_child_edus)
splits.extend(left_child_splits)
return child_edus, splits
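# Illustrative example: for a relation over EDUs e0..e2 whose first split is after e0 and
# whose right part is itself a relation splitting after e1, gen_decoder_data returns the
# splits [(0, 1, 3, nuc, rel), (1, 2, 3, nuc', rel')] (nuclearity/relation values depend
# on the treebank annotation).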
def numericalize(dataset, word_vocab, pos_vocab, nuc_label, rel_label):
instances = []
for paragraph in filter(lambda d: d.root_relation(), chain(*dataset)):
encoder_inputs = []
decoder_inputs = []
pred_splits = []
pred_nucs = []
pred_rels = []
edus = list(paragraph.edus())
for edu in edus:
edu_word_ids = [word_vocab[word] for word in edu.words]
edu_pos_ids = [pos_vocab[pos] for pos in edu.tags]
encoder_inputs.append((edu_word_ids, edu_pos_ids))
edu2ids = {edu: i for i, edu in enumerate(edus)}
_, splits = gen_decoder_data(paragraph.root_relation(), edu2ids)
for start, split, end, nuc, rel in splits:
decoder_inputs.append((start, end))
pred_splits.append(split)
pred_nucs.append(nuc_label[nuc])
pred_rels.append(rel_label[rel])
instances.append((encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels))
return instances
def gen_batch_iter(instances, batch_size, use_gpu=False):
random_instances = np.random.permutation(instances)
num_instances = len(instances)
offset = 0
while offset < num_instances:
batch = random_instances[offset: min(num_instances, offset+batch_size)]
# find out max seqlen of edus and words of edus
num_batch = batch.shape[0]
max_edu_seqlen = 0
max_word_seqlen = 0
for encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels in batch:
max_edu_seqlen = max_edu_seqlen if max_edu_seqlen >= len(encoder_inputs) else len(encoder_inputs)
for edu_word_ids, edu_pos_ids in encoder_inputs:
max_word_seqlen = max_word_seqlen if max_word_seqlen >= len(edu_word_ids) else len(edu_word_ids)
# batch to numpy
e_input_words = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)
e_input_poses = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.long)
e_masks = np.zeros([num_batch, max_edu_seqlen, max_word_seqlen], dtype=np.uint8)
d_inputs = np.zeros([num_batch, max_edu_seqlen-1, 2], dtype=np.long)
d_outputs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)
d_output_nucs = np.zeros([num_batch, max_edu_seqlen-1], dtype=np.long)
d_output_rels = np.zeros([num_batch, max_edu_seqlen - 1], dtype=np.long)
d_masks = np.zeros([num_batch, max_edu_seqlen-1, max_edu_seqlen+1], dtype=np.uint8)
for batchi, (encoder_inputs, decoder_inputs, pred_splits, pred_nucs, pred_rels) in enumerate(batch):
for edui, (edu_word_ids, edu_pos_ids) in enumerate(encoder_inputs):
word_seqlen = len(edu_word_ids)
e_input_words[batchi][edui][:word_seqlen] = edu_word_ids
e_input_poses[batchi][edui][:word_seqlen] = edu_pos_ids
e_masks[batchi][edui][:word_seqlen] = 1
for di, decoder_input in enumerate(decoder_inputs):
d_inputs[batchi][di] = decoder_input
d_masks[batchi][di][decoder_input[0]+1: decoder_input[1]] = 1
d_outputs[batchi][:len(pred_splits)] = pred_splits
d_output_nucs[batchi][:len(pred_nucs)] = pred_nucs
d_output_rels[batchi][:len(pred_rels)] = pred_rels
# numpy to torch
e_input_words = torch.from_numpy(e_input_words).long()
e_input_poses = torch.from_numpy(e_input_poses).long()
e_masks = torch.from_numpy(e_masks).byte()
d_inputs = torch.from_numpy(d_inputs).long()
d_outputs = torch.from_numpy(d_outputs).long()
d_output_nucs = torch.from_numpy(d_output_nucs).long()
d_output_rels = torch.from_numpy(d_output_rels).long()
d_masks = torch.from_numpy(d_masks).byte()
if use_gpu:
e_input_words = e_input_words.cuda()
e_input_poses = e_input_poses.cuda()
e_masks = e_masks.cuda()
d_inputs = d_inputs.cuda()
d_outputs = d_outputs.cuda()
d_output_nucs = d_output_nucs.cuda()
d_output_rels = d_output_rels.cuda()
d_masks = d_masks.cuda()
yield (e_input_words, e_input_poses, e_masks), (d_inputs, d_masks), (d_outputs, d_output_nucs, d_output_rels)
offset = offset + batch_size
def parse_and_eval(dataset, model):
model.eval()
parser = PartitionPtrParser(model)
golds = list(filter(lambda d: d.root_relation(), chain(*dataset)))
num_instances = len(golds)
strips = []
for paragraph in golds:
edus = []
for edu in paragraph.edus():
edu_copy = EDU([TEXT(edu.text)])
setattr(edu_copy, "words", edu.words)
setattr(edu_copy, "tags", edu.tags)
edus.append(edu_copy)
strips.append(edus)
parses = []
for edus in strips:
parse = parser.parse(edus)
parses.append(parse)
return num_instances, parse_eval(parses, golds)
def model_score(scores):
eval_score = sum(score[2] for score in scores)
return eval_score
def main(args):
# set seed for reproducibility
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# load dataset
cdtb = CDTB(args.data, "TRAIN", "VALIDATE", "TEST", ctb_dir=args.ctb_dir, preprocess=True, cache_dir=args.cache_dir)
# build vocabulary
word_vocab, pos_vocab, nuc_label, rel_label = build_vocab(cdtb.train)
trainset = numericalize(cdtb.train, word_vocab, pos_vocab, nuc_label, rel_label)
logging.info("num of instances trainset: %d" % len(trainset))
logging.info("args: %s" % str(args))
# build model
model = PartitionPtr(hidden_size=args.hidden_size, dropout=args.dropout,
word_vocab=word_vocab, pos_vocab=pos_vocab, nuc_label=nuc_label, rel_label=rel_label,
pretrained=args.pretrained, w2v_size=args.w2v_size, w2v_freeze=args.w2v_freeze,
pos_size=args.pos_size,
split_mlp_size=args.split_mlp_size, nuc_mlp_size=args.nuc_mlp_size,
rel_mlp_size=args.rel_mlp_size,
use_gpu=args.use_gpu)
if args.use_gpu:
model.cuda()
logging.info("model:\n%s" % str(model))
# train and evaluate
niter = 0
log_splits_loss = 0.
log_nucs_loss = 0.
log_rels_loss = 0.
log_loss = 0.
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
writer = SummaryWriter(args.log_dir)
logging.info("hint: run 'tensorboard --logdir %s' to observe training status" % args.log_dir)
best_model = None
best_model_score = 0.
for nepoch in range(1, args.epoch + 1):
batch_iter = gen_batch_iter(trainset, args.batch_size, args.use_gpu)
for nbatch, (e_inputs, d_inputs, grounds) in enumerate(batch_iter, start=1):
niter += 1
model.train()
optimizer.zero_grad()
splits_loss, nucs_loss, rels_loss = model.loss(e_inputs, d_inputs, grounds)
loss = args.a_split_loss * splits_loss + args.a_nuclear_loss * nucs_loss + args.a_relation_loss * rels_loss
loss.backward()
optimizer.step()
log_splits_loss += splits_loss.item()
log_nucs_loss += nucs_loss.item()
log_rels_loss += rels_loss.item()
log_loss += loss.item()
if niter % args.log_every == 0:
logging.info("[iter %-6d]epoch: %-3d, batch %-5d,"
"train splits loss:%.5f, nuclear loss %.5f, relation loss %.5f, loss %.5f" %
(niter, nepoch, nbatch, log_splits_loss, log_nucs_loss, log_rels_loss, log_loss))
writer.add_scalar("train/split_loss", log_splits_loss, niter)
writer.add_scalar("train/nuclear_loss", log_nucs_loss, niter)
writer.add_scalar("train/relation_loss", log_rels_loss, niter)
writer.add_scalar("train/loss", log_loss, niter)
log_splits_loss = 0.
log_nucs_loss = 0.
log_rels_loss = 0.
log_loss = 0.
if niter % args.validate_every == 0:
num_instances, validate_scores = parse_and_eval(cdtb.validate, model)
logging.info("validation on %d instances" % num_instances)
logging.info(gen_parse_report(*validate_scores))
writer.add_scalar("validate/span_f1", validate_scores[0][2], niter)
writer.add_scalar("validate/nuclear_f1", validate_scores[1][2], niter)
writer.add_scalar("validate/coarse_relation_f1", validate_scores[2][2], niter)
writer.add_scalar("validate/fine_relation_f1", validate_scores[3][2], niter)
new_model_score = model_score(validate_scores)
if new_model_score > best_model_score:
# test on testset with new best model
best_model_score = new_model_score
best_model = copy.deepcopy(model)
logging.info("test on new best model")
num_instances, test_scores = parse_and_eval(cdtb.test, best_model)
logging.info("test on %d instances" % num_instances)
logging.info(gen_parse_report(*test_scores))
writer.add_scalar("test/span_f1", test_scores[0][2], niter)
writer.add_scalar("test/nuclear_f1", test_scores[1][2], niter)
writer.add_scalar("test/coarse_relation_f1", test_scores[2][2], niter)
writer.add_scalar("test/fine_relation_f1", test_scores[3][2], niter)
if best_model:
# evaluation and save best model
logging.info("final test result")
num_instances, test_scores = parse_and_eval(cdtb.test, best_model)
logging.info("test on %d instances" % num_instances)
logging.info(gen_parse_report(*test_scores))
logging.info("save best model to %s" % args.model_save)
with open(args.model_save, "wb+") as model_fd:
torch.save(best_model, model_fd)
writer.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
arg_parser = argparse.ArgumentParser()
# dataset parameters
arg_parser.add_argument("--data", default="data/CDTB")
arg_parser.add_argument("--ctb_dir", default="data/CTB")
arg_parser.add_argument("--cache_dir", default="data/cache")
# model parameters
arg_parser.add_argument("-hidden_size", default=512, type=int)
arg_parser.add_argument("-dropout", default=0.33, type=float)
# w2v_group = arg_parser.add_mutually_exclusive_group(required=True)
arg_parser.add_argument("-pretrained", default="data/pretrained/sgns.renmin.word")
arg_parser.add_argument("-w2v_size", type=int)
arg_parser.add_argument("-pos_size", default=30, type=int)
arg_parser.add_argument("-split_mlp_size", default=64, type=int)
arg_parser.add_argument("-nuc_mlp_size", default=32, type=int)
arg_parser.add_argument("-rel_mlp_size", default=128, type=int)
arg_parser.add_argument("--w2v_freeze", dest="w2v_freeze", action="store_true")
arg_parser.set_defaults(w2v_freeze=True)
# train parameters
arg_parser.add_argument("-epoch", default=20, type=int)
arg_parser.add_argument("-batch_size", default=64, type=int)
arg_parser.add_argument("-lr", default=0.001, type=float)
arg_parser.add_argument("-l2", default=0.0, type=float)
arg_parser.add_argument("-log_every", default=10, type=int)
arg_parser.add_argument("-validate_every", default=10, type=int)
arg_parser.add_argument("-a_split_loss", default=0.3, type=float)
arg_parser.add_argument("-a_nuclear_loss", default=1.0, type=float)
arg_parser.add_argument("-a_relation_loss", default=1.0, type=float)
arg_parser.add_argument("-log_dir", default="data/log")
arg_parser.add_argument("-model_save", default="data/models/treebuilder.partptr.model")
arg_parser.add_argument("--seed", default=21, type=int)
arg_parser.add_argument("--use_gpu", dest="use_gpu", action="store_true")
arg_parser.set_defaults(use_gpu=True)
main(arg_parser.parse_args())
| [
"torch.save",
"torch.from_numpy",
"torch.manual_seed"
] | 0.4 | NLP-Discourse-SoochowU/TDDiscourseParser | 2f9c7cef85c564c47b368ee4935caf1fad7c598d |
1.1 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms, datasets
import logging
import argparse
import sys
import asyncio
import numpy as np
import syft as sy
from syft import workers
from syft.frameworks.torch.federated import utils
logger = logging.getLogger(__name__)
LOG_INTERVAL = 25
# Loss function
@torch.jit.script
def loss_fn(pred, target):
return F.nll_loss(input=pred, target=target)
# Model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
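        # Shape flow for MNIST input (N, 1, 28, 28):
        # conv1 -> (N, 20, 24, 24), pool -> (N, 20, 12, 12),
        # conv2 -> (N, 50, 8, 8), pool -> (N, 50, 4, 4), flattened to 800 features.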
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def define_and_get_arguments(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
description="Run federated learning using websocket client workers."
)
parser.add_argument("--batch_size", type=int, default=32, help="batch size of the training")
parser.add_argument(
"--test_batch_size", type=int, default=128, help="batch size used for the test data"
)
parser.add_argument(
"--training_rounds", type=int, default=40, help="number of federated learning rounds"
)
parser.add_argument(
"--federate_after_n_batches",
type=int,
default=10,
help="number of training steps performed on each remote worker before averaging",
)
parser.add_argument("--lr", type=float, default=0.1, help="learning rate")
parser.add_argument("--cuda", action="store_true", help="use cuda")
parser.add_argument("--seed", type=int, default=1, help="seed used for randomization")
parser.add_argument("--save_model", action="store_true", help="if set, model will be saved")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="if set, websocket client workers will be started in verbose mode",
)
args = parser.parse_args(args=args)
return args
async def fit_model_on_worker(
worker: workers.WebsocketClientWorker,
traced_model: torch.jit.ScriptModule,
batch_size: int,
curr_round: int,
max_nr_batches: int,
lr: float,
):
"""Send the model to the worker and fit the model on the worker's training data.
Args:
worker: Remote location, where the model shall be trained.
traced_model: Model which shall be trained.
batch_size: Batch size of each training step.
curr_round: Index of the current training round (for logging purposes).
max_nr_batches: If > 0, training on worker will stop at min(max_nr_batches, nr_available_batches).
lr: Learning rate of each training step.
Returns:
A tuple containing:
* worker_id: Union[int, str], id of the worker.
* improved model: torch.jit.ScriptModule, model after training at the worker.
* loss: Loss on last training batch, torch.tensor.
"""
train_config = sy.TrainConfig(
model=traced_model,
loss_fn=loss_fn,
batch_size=batch_size,
shuffle=True,
max_nr_batches=max_nr_batches,
epochs=1,
lr=lr,
)
train_config.send(worker)
logger.info(
"Training round %s, calling fit on worker: %s, lr = %s",
curr_round,
worker.id,
"{:.3f}".format(train_config.lr),
)
loss = await worker.async_fit(dataset_key="mnist", return_ids=[0])
logger.info("Training round: %s, worker: %s, avg_loss: %s", curr_round, worker.id, loss.mean())
model = train_config.model_ptr.get().obj
return worker.id, model, loss
def evaluate_models_on_test_data(test_loader, results):
np.set_printoptions(formatter={"float": "{: .0f}".format})
for worker_id, worker_model, _ in results:
evaluate_model(worker_id, worker_model, "cpu", test_loader, print_target_hist=False)
def evaluate_model(worker_id, model, device, test_loader, print_target_hist=False):
model.eval()
test_loss = 0.0
correct = 0
hist_target = np.zeros(10)
hist_pred = np.zeros(10)
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
hist, _ = np.histogram(target, bins=10, range=(0, 10))
hist_target += hist
output = model(data)
test_loss += loss_fn(output, target).item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
hist, _ = np.histogram(pred, bins=10, range=(0, 10))
hist_pred += hist
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
if print_target_hist:
logger.info("Target histogram: %s", hist_target)
logger.info("Prediction hist.: %s", hist_pred)
logger.info(
"%s: Test set: Average loss: %s, Accuracy: %s/%s (%s)",
worker_id,
"{:.4f}".format(test_loss),
correct,
len(test_loader.dataset),
"{:.2f}".format(100.0 * correct / len(test_loader.dataset)),
)
async def main():
args = define_and_get_arguments()
hook = sy.TorchHook(torch)
kwargs_websocket = {"host": "localhost", "hook": hook, "verbose": args.verbose}
alice = workers.WebsocketClientWorker(id="alice", port=8777, **kwargs_websocket)
bob = workers.WebsocketClientWorker(id="bob", port=8778, **kwargs_websocket)
charlie = workers.WebsocketClientWorker(id="charlie", port=8779, **kwargs_websocket)
worker_instances = [alice, bob, charlie]
use_cuda = args.cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
),
batch_size=args.test_batch_size,
shuffle=False,
drop_last=False,
**kwargs,
)
model = Net().to(device)
(data, target) = test_loader.__iter__().next()
traced_model = torch.jit.trace(model, data)
learning_rate = args.lr
for curr_round in range(1, args.training_rounds + 1):
logger.info("Starting training round %s/%s", curr_round, args.training_rounds)
results = await asyncio.gather(
*[
fit_model_on_worker(
worker=worker,
traced_model=traced_model,
batch_size=args.batch_size,
curr_round=curr_round,
max_nr_batches=args.federate_after_n_batches,
lr=learning_rate,
)
for worker in worker_instances
]
)
models = {}
loss_values = {}
test_models = curr_round % 10 == 1 or curr_round == args.training_rounds
if test_models:
evaluate_models_on_test_data(test_loader, results)
for worker_id, worker_model, worker_loss in results:
if worker_model is not None:
models[worker_id] = worker_model
loss_values[worker_id] = worker_loss
traced_model = utils.federated_avg(models)
if test_models:
evaluate_model(
"Federated model", traced_model, "cpu", test_loader, print_target_hist=True
)
# decay learning rate
learning_rate = max(0.98 * learning_rate, args.lr * 0.01)
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == "__main__":
# Logging setup
logger = logging.getLogger("run_websocket_server")
FORMAT = "%(asctime)s %(levelname)s %(filename)s(l:%(lineno)d, p:%(process)d) - %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(level=logging.DEBUG)
# Websockets setup
websockets_logger = logging.getLogger("websockets")
websockets_logger.setLevel(logging.INFO)
websockets_logger.addHandler(logging.StreamHandler())
# Run main
asyncio.get_event_loop().run_until_complete(main())
| [
"torch.nn.Linear",
"torch.device",
"torch.no_grad",
"torch.nn.functional.log_softmax",
"torch.manual_seed",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.jit.trace",
"torch.nn.functional.nll_loss",
"torch.nn.functional.max_pool2d"
] | 1.1 | theoptips/PySyft | 4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc |
1.1 | import torch as th
from torch.utils.data import BatchSampler, RandomSampler, SequentialSampler
from syft.generic import ObjectStorage
from syft.federated.train_config import TrainConfig
class FederatedClient(ObjectStorage):
"""A Client able to execute federated learning in local datasets."""
def __init__(self, datasets=None):
super().__init__()
self.datasets = datasets if datasets is not None else dict()
self.optimizer = None
self.train_config = None
def add_dataset(self, dataset, key: str):
self.datasets[key] = dataset
def remove_dataset(self, key: str):
if key in self.datasets:
del self.datasets[key]
def set_obj(self, obj: object):
"""Registers objects checking if which objects it should cache.
Args:
obj: An object to be registered.
"""
if isinstance(obj, TrainConfig):
self.train_config = obj
self.optimizer = None
else:
super().set_obj(obj)
def _build_optimizer(
self, optimizer_name: str, model, optimizer_args: dict
) -> th.optim.Optimizer:
"""Build an optimizer if needed.
Args:
optimizer_name: A string indicating the optimizer name.
optimizer_args: A dict containing the args used to initialize the optimizer.
Returns:
A Torch Optimizer.
"""
if self.optimizer is not None:
return self.optimizer
if optimizer_name in dir(th.optim):
optimizer = getattr(th.optim, optimizer_name)
self.optimizer = optimizer(model.parameters(), **optimizer_args)
else:
raise ValueError("Unknown optimizer: {}".format(optimizer_name))
return self.optimizer
def fit(self, dataset_key: str, **kwargs):
"""Fits a model on the local dataset as specified in the local TrainConfig object.
Args:
dataset_key: Identifier of the local dataset that shall be used for training.
**kwargs: Unused.
Returns:
loss: Training loss on the last batch of training data.
"""
if self.train_config is None:
raise ValueError("TrainConfig not defined.")
if dataset_key not in self.datasets:
raise ValueError("Dataset {} unknown.".format(dataset_key))
model = self.get_obj(self.train_config._model_id).obj
loss_fn = self.get_obj(self.train_config._loss_fn_id).obj
self._build_optimizer(
self.train_config.optimizer, model, optimizer_args=self.train_config.optimizer_args
)
return self._fit(model=model, dataset_key=dataset_key, loss_fn=loss_fn)
def _create_data_loader(self, dataset_key: str, shuffle: bool = False):
data_range = range(len(self.datasets[dataset_key]))
if shuffle:
sampler = RandomSampler(data_range)
else:
sampler = SequentialSampler(data_range)
data_loader = th.utils.data.DataLoader(
self.datasets[dataset_key],
batch_size=self.train_config.batch_size,
sampler=sampler,
num_workers=0,
)
return data_loader
def _fit(self, model, dataset_key, loss_fn):
model.train()
data_loader = self._create_data_loader(
dataset_key=dataset_key, shuffle=self.train_config.shuffle
)
loss = None
iteration_count = 0
for _ in range(self.train_config.epochs):
for (data, target) in data_loader:
# Set gradients to zero
self.optimizer.zero_grad()
# Update model
output = model(data)
loss = loss_fn(target=target, pred=output)
loss.backward()
self.optimizer.step()
                # Update and check iteration count
iteration_count += 1
if iteration_count >= self.train_config.max_nr_batches >= 0:
break
return loss
| [
"torch.utils.data.SequentialSampler",
"torch.utils.data.RandomSampler",
"torch.utils.data.DataLoader"
] | 1.1 | theoptips/PySyft | 4b68c3c6fbe0c18cdf87dfe6ddc3c2071a71f1cc |
0.3 | from __future__ import division, print_function
from conllu.parser import parse, parse_tree
from tags import Tags, Tag, Label
import os
import re
import math
import numpy as np
import itertools
import pdb
import pickle
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
import torch.nn.functional as F
np.set_printoptions(threshold=np.nan)
FROZEN_TAG = "__frozen__"
def freeze_dict(obj):
if isinstance(obj, dict):
dict_items = list(obj.items())
dict_items.append((FROZEN_TAG, True))
return tuple([(k, freeze_dict(v)) for k, v in dict_items])
return obj
def unfreeze_dict(obj):
if isinstance(obj, tuple):
if (FROZEN_TAG, True) in obj:
out = dict((k, unfreeze_dict(v)) for k, v in obj)
del out[FROZEN_TAG]
return out
return obj
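# Illustrative example: freeze_dict({'POS': 'NOUN'}) returns the hashable tuple
# (('POS', 'NOUN'), ('__frozen__', True)), and unfreeze_dict recovers the original dict,
# which lets morphological tag dicts be used as set members and dictionary keys.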
def get_lang_code_dicts():
"""
Returns lang_to_code, code_to_lang dictionaries
"""
lang_to_code = {}
code_to_lang = {}
bad_chars = ",''"
rgx = re.compile('[%s]' % bad_chars)
with open("data/lang_codes.txt") as f:
data = f.read()
lines = data.split("\n")
split_line = [line.split() for line in lines]
for line in split_line[:-2]:
lang = rgx.sub('', line[0])
code = rgx.sub('', line[2])
lang_to_code[lang] = code
        code_to_lang = {v: k for k, v in lang_to_code.items()}
return lang_to_code, code_to_lang
def read_conll(treebank_path, langs, code_to_lang, train_or_dev, tgt_size=None, test=False):
"""
    Reads CoNLL-U formatted treebank files
    langs: list of language codes
    train_or_dev: which split to read when test is False
    tgt_size: optional cap on the number of sentences for the last (target) language
    returns: dict with data for each language as a list of (sentence, morph-tags) tuples,
    together with the list of unique tag dictionaries encountered
"""
annot_sents = {}
unique = []
for lang in langs:
train = train_or_dev if not test else "test"
if not test:
for file in os.listdir(treebank_path + "UD_" + code_to_lang[lang]):
if file.endswith("train.conllu"):
filepath = os.path.join(treebank_path + "UD_" + code_to_lang[lang], file)
break
else:
for file in os.listdir(treebank_path + "UD_" + code_to_lang[lang]):
if file.endswith("dev.conllu"):
filepath = os.path.join(treebank_path+ "UD_" + code_to_lang[lang], file)
break
with open(filepath) as f:
data = f.readlines()[:-1]
data = [line for line in data if line[0]!='#']
split_data = " ".join(data).split("\n \n")
ud = [parse(sent)[0] for sent in split_data]
all_text = []
all_tags = []
if langs[-1]==lang and tgt_size:
tgt_size = min(tgt_size, len(ud))
ud = ud[:tgt_size]
for sent in ud:
sent_text = []
sent_tags = []
for word in sent:
word_tags = {}
if word['feats']:
word_tags = dict(word['feats'])
if word['upostag']:
if word_tags:
word_tags.update({'POS':word['upostag']})
else:
word_tags = {'POS':word['upostag']}
if word_tags:
word_tags = freeze_dict(word_tags)
if word_tags not in unique:
unique.append(word_tags)
sent_text.append(word['form'])
sent_tags.append(freeze_dict(word_tags))
all_text.append(sent_text)
all_tags.append(sent_tags)
annot_sents[lang] = [(w, m) for w, m in zip(all_text, all_tags)]
return annot_sents, unique
def addNullLabels(annot_sents, langs, unique_tags):
for lang in langs:
i = 0
for w, m in annot_sents[lang]:
new_tags = []
for tags in m:
tag_dict = unfreeze_dict(tags)
for tag in unique_tags:
if tag.name not in tag_dict:
tag_dict[tag.name] = "NULL"
new_tags.append(freeze_dict(tag_dict))
annot_sents[lang][i] = (w, new_tags)
i += 1
return annot_sents
def sortbylength(data, lang_ids, maxlen=500):
"""
:param data: List of tuples of source sentences and morph tags
:param lang_ids: List of lang IDs for each sentence
:param maxlen: Maximum sentence length permitted
:return: Sorted data and sorted langIDs
"""
src = [elem[0] for elem in data]
tgt = [elem[1] for elem in data]
indexed_src = [(i,src[i]) for i in range(len(src))]
sorted_indexed_src = sorted(indexed_src, key=lambda x: -len(x[1]))
sorted_src = [item[1] for item in sorted_indexed_src if len(item[1])<maxlen]
sort_order = [item[0] for item in sorted_indexed_src if len(item[1])<maxlen]
sorted_tgt = [tgt[i] for i in sort_order]
sorted_lang_ids = [lang_ids[i] for i in sort_order]
sorted_data = [(src, tgt) for src, tgt in zip(sorted_src, sorted_tgt)]
return sorted_data, sorted_lang_ids
def get_train_order(training_data, batch_size, startIdx=0):
"""
    :param training_data: List of tuples of source sentences and morph tags
    :param batch_size: Upper bound on the number of sentences per batch
    :return: List of (start, end) index pairs (inclusive) delimiting batches of equal-length sentences
"""
lengths = [len(elem[0]) for elem in training_data]
start_idxs = []
end_idxs = []
prev_length=-1
batch_counter = 0
for i, length in enumerate(lengths, start=startIdx):
if length!=prev_length or batch_counter>batch_size:
start_idxs.append(i)
if prev_length!=-1:
end_idxs.append(i-1)
batch_counter = 1
batch_counter += 1
prev_length = length
end_idxs.append(startIdx + len(lengths)-1)
return [(s,e) for s,e in zip(start_idxs, end_idxs)]
def find_unique_tags(train_data_tags, null_label=False):
unique_tags = Tags()
for tags in train_data_tags:
for tag, label in unfreeze_dict(tags).items():
if not unique_tags.tagExists(tag):
unique_tags.addTag(tag)
curTag = unique_tags.getTagbyName(tag)
if not curTag.labelExists(label):
curTag.addLabel(label)
# Add null labels to unseen tags in each tag set
if null_label:
for tag in unique_tags:
tag.addLabel("NULL")
return unique_tags
def plot_heatmap(uniqueTags, weights, kind):
font = {'family' : 'normal',
'size' : 14,
'weight' : 'bold'}
matplotlib.rc('font', **font)
pairs = list(itertools.combinations(range(uniqueTags.size()), 2))
# weights is a ParameterList
for k, weight in enumerate(weights):
if kind=="pair":
i, j = pairs[k]
tag1 = uniqueTags.getTagbyIdx(i)
tag2 = uniqueTags.getTagbyIdx(j)
tag1_labels = [label.name for label in tag1.labels]
tag2_labels = [label.name for label in tag2.labels]
plt.figure(figsize=(20, 18), dpi=80)
plt.xticks(range(0, len(tag2_labels)), tag2_labels)
plt.yticks(range(0, len(tag1_labels)), tag1_labels)
plt.tick_params(labelsize=25)
plt.xlabel(tag2.name, fontsize=40)
plt.ylabel(tag1.name, fontsize=50)
plt.imshow(weight.data.cpu().numpy(), cmap='Reds', interpolation='nearest')
plt.savefig("figures/" + tag1.name + "_" + tag2.name + ".png", bbox_inches='tight')
plt.close()
elif kind=="trans":
tag = uniqueTags.getTagbyIdx(k)
tag_labels = [label.name for label in tag.labels]
plt.figure(figsize=(20, 18), dpi=80)
plt.xticks(range(0, len(tag_labels)), tag_labels, rotation=45)
plt.yticks(range(0, len(tag_labels)), tag_labels)
plt.tick_params(labelsize=40)
plt.xlabel(tag.name, fontsize=50)
plt.ylabel(tag.name, fontsize=50)
plt.imshow(weight.data.cpu().numpy(), cmap='Greys', interpolation='nearest')
plt.savefig("figures/" + tag.name + "_" + tag.name + ".png", bbox_inches='tight')
plt.close()
def get_var(x, gpu=False, volatile=False):
x = Variable(x, volatile=volatile)
if gpu:
x = x.cuda()
return x
def prepare_sequence(seq, to_ix, gpu=False):
if isinstance(to_ix, dict):
idxs = [to_ix[w] if w in to_ix else to_ix["UNK"] for w in seq]
elif isinstance(to_ix, list):
idxs = [to_ix.index(w) if w in to_ix else to_ix.index("UNK") for w in seq]
tensor = torch.LongTensor(idxs)
return get_var(tensor, gpu)
def to_scalar(var):
# returns a python scalar (the first element of the flattened tensor)
return var.view(-1).data.tolist()[0]
def argmax(vec):
# return the argmax as a python int
_, idx = torch.max(vec, 1)
return to_scalar(idx)
def logSumExp(a, b):
maxi = np.maximum(a, b)
aexp = a - maxi
bexp = b - maxi
sumOfExp = np.exp(aexp) + np.exp(bexp)
return maxi + np.log(sumOfExp)
def logSumExpTensor(vec):
# vec -> (batch_size, tag_size)
batch_size = vec.size()[0]
vec = vec.view(batch_size, -1)
max_score = torch.max(vec, 1)[0]
max_score_broadcast = max_score.view(-1, 1).expand(-1, vec.size()[1])
return max_score + \
torch.log(torch.sum(torch.exp(vec - max_score_broadcast), 1))
def logSumExpTensors(a, b):
maxi = torch.max(a, b)
aexp = a - maxi
bexp = b - maxi
sumOfExp = torch.exp(aexp) + torch.exp(bexp)
return maxi + torch.log(sumOfExp)
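# Illustrative sketch (not part of the original source): the log-sum-exp helpers above agree
# with straightforward references (np.logaddexp and a naive log(sum(exp(...))), which is safe
# for the small random values used here). torch.allclose assumes a PyTorch version >= 0.4.
def _demo_logsumexp():
    import numpy as np
    import torch
    a, b = np.random.randn(4), np.random.randn(4)
    assert np.allclose(logSumExp(a, b), np.logaddexp(a, b))
    vec = torch.randn(5, 7)
    naive = torch.log(torch.exp(vec).sum(dim=1))
    assert torch.allclose(logSumExpTensor(vec), naive, atol=1e-5)
    ta, tb = torch.randn(3), torch.randn(3)
    assert torch.allclose(logSumExpTensors(ta, tb), torch.log(torch.exp(ta) + torch.exp(tb)), atol=1e-5)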
def logDot(a, b, redAxis=None):
if redAxis==1:
b = b.transpose()
max_a = np.amax(a)
max_b = np.amax(b)
C = np.dot(np.exp(a - max_a), np.exp(b - max_b))
np.log(C, out=C)
# else:
# np.log(C + 1e-300, out=C)
C += max_a + max_b
return C.transpose() if redAxis==1 else C
def logMax(a, b, redAxis=None):
if redAxis==1:
b = b.transpose()
max_a = np.amax(a)
max_b = np.amax(b)
C = np.max(np.exp(a[:, :, None]-max_a) * np.exp(b[None, :, :]-max_b), axis=1)
# if np.isfinite(C).all():
np.log(C, out=C)
# else:
# np.log(C + 1e-300, out=C)
C += max_a + max_b
return C.transpose() if redAxis==1 else C
def logNormalize(a):
denom = np.logaddexp.reduce(a, 1)
return (a.transpose()- denom).transpose()
def logNormalizeTensor(a):
denom = logSumExpTensor(a)
if len(a.size())==2:
denom = denom.view(-1, 1).expand(-1, a.size()[1])
elif len(a.size())==3:
denom = denom.view(a.size()[0], 1, 1).expand(-1, a.size()[1], a.size()[2])
return (a-denom)
def computeF1(hyps, golds, prefix, labels_to_ix=None, baseline=False, write_results=False):
"""
hyps: List of dicts for predicted morphological tags
golds: List of dicts for gold morphological tags
"""
f1_precision_scores = {}
f1_precision_total = {}
f1_recall_scores = {}
f1_recall_total = {}
f1_average = 0.0
if baseline:
hyps = [unfreeze_dict(h) for h in hyps]
golds = [unfreeze_dict(t) for t in golds]
# calculate precision
for i, word_tags in enumerate(hyps, start=0):
for k, v in word_tags.items():
if v=="NULL":
continue
if k not in f1_precision_scores:
f1_precision_scores[k] = 0
f1_precision_total[k] = 0
if k in golds[i]:
if v==golds[i][k]:
f1_precision_scores[k] += 1
f1_precision_total[k] += 1
f1_micro_precision = sum(f1_precision_scores.values())/sum(f1_precision_total.values())
for k in f1_precision_scores.keys():
f1_precision_scores[k] = f1_precision_scores[k]/f1_precision_total[k]
# calculate recall
for i, word_tags in enumerate(golds, start=0):
for k, v in word_tags.items():
if v=="NULL":
continue
if k not in f1_recall_scores:
f1_recall_scores[k] = 0
f1_recall_total[k] = 0
if k in hyps[i]:
if v==hyps[i][k]:
f1_recall_scores[k] += 1
f1_recall_total[k] += 1
f1_micro_recall = sum(f1_recall_scores.values())/sum(f1_recall_total.values())
f1_scores = {}
for k in f1_recall_scores.keys():
f1_recall_scores[k] = f1_recall_scores[k]/f1_recall_total[k]
if f1_recall_scores[k]==0 or k not in f1_precision_scores:
f1_scores[k] = 0
else:
f1_scores[k] = 2 * (f1_precision_scores[k] * f1_recall_scores[k]) / (f1_precision_scores[k] + f1_recall_scores[k])
f1_average += f1_recall_total[k] * f1_scores[k]
f1_average /= sum(f1_recall_total.values())
f1_micro_score = 2 * (f1_micro_precision * f1_micro_recall) / (f1_micro_precision + f1_micro_recall)
if write_results:
print("Writing F1 scores...")
with open(prefix + '_results_f1.txt', 'ab') as file:
file.write(pickle.dumps(f1_scores))
file.write("\nMacro-averaged F1 Score: " + str(f1_average))
file.write("\nMicro-averaged F1 Score: " + str(f1_micro_score))
return f1_average, f1_micro_score
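# Illustrative sketch (not part of the original source): computeF1 on two toy analyses,
# with write_results=False so nothing is written to disk. POS matches and Number does not,
# so both the macro and micro averages come out to 0.5 here.
def _demo_computeF1():
    hyps = [{"POS": "NOUN", "Number": "Sing"}]
    golds = [{"POS": "NOUN", "Number": "Plur"}]
    macro_f1, micro_f1 = computeF1(hyps, golds, prefix="demo", write_results=False)
    return macro_f1, micro_f1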
def getCorrectCount(golds, hyps):
correct = 0
for i, word_tags in enumerate(golds, start=0):
allCorrect = True
for k, v in word_tags.items():
if k in hyps[i]:
if v!=hyps[i][k]:
allCorrect = False
break
if allCorrect==True:
correct += 1
return correct
| [
"torch.LongTensor",
"torch.exp",
"torch.autograd.Variable",
"torch.max",
"torch.log"
] | 0.3.0 | chaitanyamalaviya/NeuralFactorGraph | 6cd664b7edc43d56c6f1165baa7e7625eb0f7cd8 |
1.9 | import json
from transformers.tokenization_utils import PreTrainedTokenizer
from yacs.config import CfgNode
from openprompt.data_utils.data_utils import InputFeatures
import re
from openprompt import Verbalizer
from typing import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from openprompt.utils.logging import logger
class One2oneVerbalizer(Verbalizer):
r"""
The basic manually defined verbalizer class. It inherits from the :obj:`Verbalizer` class and
restricts the use of label words to one word per label. For a verbalizer with fewer constraints,
please use the basic ManualVerbalizer.
Args:
tokenizer (:obj:`PreTrainedTokenizer`): The tokenizer of the current pre-trained model to point out the vocabulary.
classes (:obj:`classes`): The classes (or labels) of the current task.
num_classes (:obj:`int`): Optional. The number of classes of the verbalizer. Only one of `classes` and `num_classes` should be used.
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer. (used in PLMs like RoBERTa, which is sensitive to prefix space)
multi_token_handler (:obj:`str`, optional): The handling strategy for multiple tokens produced by the tokenizer.
"""
def __init__(self,
tokenizer: PreTrainedTokenizer,
num_classes: Optional[int] = None,
classes: Optional[List] = None,
label_words: Optional[Union[Sequence[str], Mapping[str, str]]] = None,
prefix: Optional[str] = " ",
multi_token_handler: Optional[str] = "first",
):
super().__init__(tokenizer=tokenizer, num_classes=num_classes, classes=classes)
self.prefix = prefix
self.multi_token_handler = multi_token_handler
self.label_words = label_words
def on_label_words_set(self):
super().on_label_words_set()
self.label_words = self.add_prefix(self.label_words, self.prefix)
self.generate_parameters()
@staticmethod
def add_prefix(label_words, prefix):
r"""Add prefix to label words. For example, if a label words is in the middle of a template,
the prefix should be ``' '``.
Args:
label_words (:obj:`Union[Sequence[str], Mapping[str, str]]`, optional): The label words that are projected by the labels.
prefix (:obj:`str`, optional): The prefix string of the verbalizer.
Returns:
:obj:`Sequence[str]`: New label words with prefix.
"""
new_label_words = []
if isinstance(label_words[0], list):
assert max([len(w) for w in label_words]) == 1, "Providing multiple label words, you should use other verbalizers instead."
label_words = [w[0] for w in label_words]
for word in label_words:
if word.startswith("<!>"):
new_label_words.append(word.split("<!>")[1])
else:
new_label_words.append(prefix + word)
return new_label_words
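# Illustrative example (not part of the original source): add_prefix is a @staticmethod,
# so it can be exercised without constructing a verbalizer, e.g.
#   One2oneVerbalizer.add_prefix(["good", "<!>bad"], " ")  ->  [" good", "bad"]
# A leading "<!>" marks a label word that should not receive the prefix.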
def generate_parameters(self) -> List:
r"""In basic manual template, the parameters are generated from label words directly.
In this implementation, the label_words should not be tokenized into more than one token.
"""
words_ids = []
for word in self.label_words:
word_ids = self.tokenizer.encode(word, add_special_tokens=False)
if len(word_ids) > 1:
logger.warning("Word {} is split into multiple tokens: {}. \
If this is not what you expect, try using another word for this verbalizer" \
.format(word, self.tokenizer.convert_ids_to_tokens(word_ids)))
words_ids.append(word_ids)
max_len = max([len(ids) for ids in words_ids])
words_ids_mask = [[1]*len(ids) + [0]*(max_len-len(ids)) for ids in words_ids]
words_ids = [ids+[0]*(max_len-len(ids)) for ids in words_ids]
words_ids_tensor = torch.tensor(words_ids)
words_ids_mask = torch.tensor(words_ids_mask)
self.label_words_ids = nn.Parameter(words_ids_tensor, requires_grad=False)
self.label_words_mask = nn.Parameter(words_ids_mask, requires_grad=False)
def project(self,
logits: torch.Tensor,
**kwargs,
) -> torch.Tensor:
r"""
Project the logits over the vocabulary onto the label words; the return value is the logits of the label words (one per class).
Args:
logits (:obj:`torch.Tensor`): The original logits over the vocabulary.
Returns:
:obj:`torch.Tensor`: The logits of the label words.
"""
label_words_logits = logits[:, self.label_words_ids]
label_words_logits = self.handle_multi_token(label_words_logits, self.label_words_mask)
return label_words_logits
def process_logits(self, logits: torch.Tensor, **kwargs):
r"""A whole framework to process the original logits over the vocabulary, which contains four steps:
(1) Project the logits into logits of label words
(2) Normalize over all label words
(3) Calibrate (optional)
(4) Convert the label-word probabilities back to logits
Args:
logits (:obj:`torch.Tensor`): The original logits.
Returns:
(:obj:`torch.Tensor`): The final processed logits over the label words set.
"""
# project
label_words_logits = self.project(logits, **kwargs) #Output: (batch_size, num_classes) or (batch_size, num_classes, num_label_words_per_label)
# normalize
label_words_probs = self.normalize(label_words_logits)
# calibrate
if hasattr(self, "_calibrate_logits") and self._calibrate_logits is not None:
label_words_probs = self.calibrate(label_words_probs=label_words_probs)
# convert to logits
label_words_logits = torch.log(label_words_probs+1e-15)
return label_words_logits
def normalize(self, logits: torch.Tensor) -> torch.Tensor:
"""
Given the logits of the label words, return their normalized probabilities (summing to 1 per instance).
Args:
logits (:obj:`Tensor`): The logits of the label words.
Returns:
:obj:`Tensor`: The probabilities over the label words set.
"""
batch_size = logits.shape[0]
return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)
def calibrate(self, label_words_probs: torch.Tensor, **kwargs) -> torch.Tensor:
r"""
Args:
label_words_probs (:obj:`torch.Tensor`): The probability distribution of the label words with the shape of [``batch_size``, ``num_classes``, ``num_label_words_per_class``]
Returns:
:obj:`torch.Tensor`: The calibrated probability of label words.
"""
shape = label_words_probs.shape
assert self._calibrate_logits.dim() == 1, "self._calibrate_logits are not 1-d tensor"
calibrate_label_words_probs = self.normalize(self.project(self._calibrate_logits.unsqueeze(0), **kwargs))
assert calibrate_label_words_probs.shape[1:] == label_words_probs.shape[1:] \
and calibrate_label_words_probs.shape[0]==1, "shape not match"
label_words_probs /= (calibrate_label_words_probs+1e-15)
# normalize # TODO Test the performance
norm = label_words_probs.reshape(shape[0], -1).sum(dim=-1,keepdim=True) # TODO Test the performance of detaching()
label_words_probs /= norm
return label_words_probs
| [
"torch.log",
"torch.tensor",
"torch.nn.Parameter"
] | 1.9.0 | hlzhang109/OpenPrompt | 8a1ec1ceac3805a11b09dda9b96ad7406d222f26 |
1.8 | import torch
import torch.nn as nn
class ChamferLoss(nn.Module):
def __init__(self):
super(ChamferLoss, self).__init__()
self.use_cuda = torch.cuda.is_available()
def forward(self, preds, gts, reverse=True, bidirectional=True):
def compute_loss(preds, gts):
P = self.batch_pairwise_dist(gts, preds)
mins, _ = torch.min(P, 1)
loss_1 = torch.sum(mins)
mins, _ = torch.min(P, 2)
loss_2 = torch.sum(mins)
return loss_1 + loss_2
if bidirectional or reverse:
backward_loss = compute_loss(gts, preds)
if reverse:
return backward_loss
else:
forward_loss = compute_loss(preds, gts)
return forward_loss + backward_loss
else:
forward_loss = compute_loss(preds, gts)
return forward_loss
def batch_pairwise_dist(self, x, y):
bs, num_points_x, points_dim = x.size()
_, num_points_y, _ = y.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
if self.use_cuda:
dtype = torch.cuda.LongTensor
else:
dtype = torch.LongTensor
diag_ind_x = torch.arange(0, num_points_x).type(dtype)
diag_ind_y = torch.arange(0, num_points_y).type(dtype)
rx = xx[:, diag_ind_x, diag_ind_x].unsqueeze(1).expand_as(
zz.transpose(2, 1))
ry = yy[:, diag_ind_y, diag_ind_y].unsqueeze(1).expand_as(zz)
P = rx.transpose(2, 1) + ry - 2 * zz
return P
| [
"torch.cuda.is_available",
"torch.min",
"torch.arange",
"torch.sum"
] | 1.8.0 | anglixjtu/MeshCNN_ | 83826e66d8989ed4967047c2ed6d099568c5781c |
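# Illustrative sketch (not part of the original dataset entry): batch_pairwise_dist above relies
# on the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y; the check below compares that
# expansion against torch.cdist on random point clouds (torch.cdist assumes a recent PyTorch).
import torch

def _demo_pairwise_dist():
    x = torch.randn(2, 5, 3)  # batch of 2 clouds with 5 points in 3-D
    y = torch.randn(2, 7, 3)  # batch of 2 clouds with 7 points in 3-D
    xx = (x * x).sum(dim=2, keepdim=True)   # (2, 5, 1): squared norms of x
    yy = (y * y).sum(dim=2).unsqueeze(1)    # (2, 1, 7): squared norms of y
    P = xx + yy - 2 * torch.bmm(x, y.transpose(2, 1))
    assert torch.allclose(P, torch.cdist(x, y) ** 2, atol=1e-4)
    return P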
1.8 | import numpy as np
from faiss import IndexFlatIP, IndexFlatL2
import pyvista as pv
import os
import time
from torch_geometric.nn import global_mean_pool, global_add_pool, global_max_pool, global_sort_pool
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from .util import get_labels_from_path
def add_subplot(plotter, coord_y, coord_x,
mesh, font_size, label=None,
dissm=None, filename=None,
show_edges=True):
plotter.subplot(coord_y, coord_x)
text = ''
if label is not None:
text += label + '\n'
if dissm is not None:
text += "distance: %.3f \n" % (dissm)
if filename is not None:
text += filename
if label or dissm or filename:
plotter.add_text(text, font_size=font_size, color='black')
plotter.set_background('white')
plotter.add_mesh(mesh, color="tan", show_edges=show_edges)
def visualize_retrieval(paths_q, paths_retr, dissm=None, show_self=False,
sub_size=(220, 150), font_size=10, out_path=None,
camera_pos=[4, 4, 4]):
num_query = len(paths_q)
if show_self:
start_ri = 0
else:
start_ri = 1
num_retr = len(paths_retr[0][start_ri:])
num_subplot = (num_query, num_retr+1)
fig_size = ((num_retr+1)*sub_size[1], num_query*sub_size[0])
p = pv.Plotter(shape=num_subplot,
window_size=fig_size, border_color='gray')
for qi, path_q in enumerate(paths_q):
mesh_q = pv.read(path_q)
_, filename = os.path.split(path_q)
label = get_labels_from_path(path_q)
label = 'Query - ' + label
add_subplot(p, qi, 0, mesh_q, font_size,
label=label, filename=filename)
p.set_position(camera_pos)
for ri, path_r in enumerate(paths_retr[qi][start_ri:]):
mesh_r = pv.read(path_r)
_, filename = os.path.split(path_r)
label = get_labels_from_path(path_r)
dissm_r = dissm[qi, ri+start_ri]
add_subplot(p, qi, ri+1, mesh_r,
font_size, dissm=dissm_r,
label=label, filename=filename)
p.set_position(camera_pos)
p.show(screenshot=out_path)
def show_embedding(self, features, idx_list):
label_list = self.get_labels_from_index(idx_list)
writer = SummaryWriter('runs/embedding')
writer.add_embedding(features,
metadata=label_list)
writer.close()
| [
"torch.utils.tensorboard.SummaryWriter"
] | 1.8.0 | anglixjtu/MeshCNN_ | 83826e66d8989ed4967047c2ed6d099568c5781c |
1.0 | # coding=utf-8
# Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch T5 model. """
import copy
import math
import os
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
from .configuration_t5 import T5Config
from .file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from .modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput
from .modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer
from .utils import logging
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
_TOKENIZER_FOR_DOC = "T5Tokenizer"
####################################################
# This dict contains shortcut names and associated url
# for the pretrained weights provided with the models
####################################################
T5_PRETRAINED_MODEL_ARCHIVE_LIST = [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
# See all T5 models at https://huggingface.co/models?filter=t5
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_t5(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
tf_weights[name] = array
for txt_name in names:
name = txt_name.split("/")
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info("Skipping {}".format("/".join(name)))
tf_weights.pop(txt_name, None)
continue
if "_slot_" in name[-1]:
logger.info("Skipping {}".format("/".join(name)))
tf_weights.pop(txt_name, None)
continue
pointer = model
array = tf_weights[txt_name]
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
# elif scope_names[0] == 'scale':
# pointer = getattr(pointer, 'weight')
# elif scope_names[0] == 'output_bias' or scope_names[0] == 'beta':
# pointer = getattr(pointer, 'bias')
# elif scope_names[0] == 'squad':
# pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if scope_names[0] not in ["kernel", "scale", "embedding"]:
pointer = getattr(pointer, "weight")
if scope_names[0] != "embedding":
logger.info("Transposing numpy weight of shape {} for {}".format(array.shape, name))
array = np.transpose(array)
try:
assert (
pointer.shape == array.shape
), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array.astype(np.float32))
tf_weights.pop(txt_name, None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
# logger.info("Weights not copied to PyTorch model: {}".format(', '.join(tf_weights.keys())))
return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module)
####################################################
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
# layer norm should always be calculated in float32
variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
x = x / torch.sqrt(variance + self.variance_epsilon)
if self.weight.dtype == torch.float16:
x = x.to(torch.float16)
return self.weight * x
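# Illustrative sketch (not part of the original file): T5LayerNorm is an RMS-style norm,
# i.e. it rescales by the root-mean-square of the features without centering them.
def _demo_t5_layer_norm():
    ln = T5LayerNorm(hidden_size=8, eps=1e-6)
    x = torch.randn(2, 3, 8)
    ref = ln.weight * x / torch.sqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6)
    assert torch.allclose(ln(x), ref)
    return ln(x)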
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
h = self.wi(hidden_states)
h = F.relu(h)
h = self.dropout(h)
h = self.wo(h)
return h
class T5LayerFF(nn.Module):
def __init__(self, config):
super().__init__()
self.DenseReluDense = T5DenseReluDense(config)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
norm_x = self.layer_norm(hidden_states)
y = self.DenseReluDense(norm_x)
layer_output = hidden_states + self.dropout(y)
return layer_output
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False, is_bidirectional=False):
super().__init__()
self.is_bidirectional = is_bidirectional
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.d_kv = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.d_kv
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(heads, self.n_heads, self.d_kv, self.pruned_heads)
# Prune linear layers
self.q = prune_linear_layer(self.q, index)
self.k = prune_linear_layer(self.k, index)
self.v = prune_linear_layer(self.v, index)
self.o = prune_linear_layer(self.o, index, dim=1)
# Update hyper params
self.n_heads = self.n_heads - len(heads)
self.inner_dim = self.d_kv * self.n_heads
self.pruned_heads = self.pruned_heads.union(heads)
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).to(torch.long) * num_buckets # mtf.to_int32(mtf.less(n, 0)) * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
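# Illustrative note (not part of the original file): with the defaults above (num_buckets=32,
# max_distance=128) and bidirectional=True, the buckets split into two halves of 16; relative
# positions <= 0 use the lower half and positive ones the upper half, offsets smaller than 8
# each get their own bucket, and larger offsets are binned logarithmically up to max_distance,
# e.g. T5Attention._relative_position_bucket(torch.tensor([0, -1, 1])) -> tensor([0, 1, 17]).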
def compute_bias(self, qlen, klen):
""" Compute binned relative position bias """
context_position = torch.arange(qlen, dtype=torch.long)[:, None]
memory_position = torch.arange(klen, dtype=torch.long)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position, # shape (qlen, klen)
bidirectional=self.is_bidirectional,
num_buckets=self.relative_attention_num_buckets,
)
rp_bucket = rp_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(rp_bucket) # shape (qlen, klen, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, qlen, klen)
return values
def forward(
self,
input,
mask=None,
kv=None,
position_bias=None,
past_key_value=None,
head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if kv is None) or attention over source sentence (provided by kv).
"""
# Input is (bs, qlen, dim)
# Mask is (bs, klen) (non-causal) or (bs, klen, klen)
# past_key_value[0] is (bs, n_heads, q_len - 1, dim_per_head)
bs, qlen, dim = input.size()
if past_key_value is not None:
assert self.is_decoder is True, "Encoder cannot cache past key value states"
assert (
len(past_key_value) == 2
), "past_key_value should have 2 past states: keys and values. Got {} past states".format(
len(past_key_value)
)
real_qlen = qlen + past_key_value[0].shape[2] if query_length is None else query_length
else:
real_qlen = qlen
if kv is None:
klen = real_qlen
else:
klen = kv.size(1)
def shape(x):
""" projection """
return x.view(bs, -1, self.n_heads, self.d_kv).transpose(1, 2)
def unshape(x):
""" compute context """
return x.transpose(1, 2).contiguous().view(bs, -1, self.inner_dim)
q = shape(self.q(input)) # (bs, n_heads, qlen, dim_per_head)
if kv is None:
k = shape(self.k(input)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(input)) # (bs, n_heads, qlen, dim_per_head)
elif past_key_value is None:
k = v = kv
k = shape(self.k(k)) # (bs, n_heads, qlen, dim_per_head)
v = shape(self.v(v)) # (bs, n_heads, qlen, dim_per_head)
if past_key_value is not None:
if kv is None:
k_, v_ = past_key_value
k = torch.cat([k_, k], dim=2) # (bs, n_heads, klen, dim_per_head)
v = torch.cat([v_, v], dim=2) # (bs, n_heads, klen, dim_per_head)
else:
k, v = past_key_value
if self.is_decoder and use_cache is True:
present_key_value_state = ((k, v),)
else:
present_key_value_state = (None,)
# (bs, n_heads, qlen, klen)
scores = torch.matmul(
q, k.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", q, k), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
raise ValueError("No position_bias provided and no weights to compute position_bias")
position_bias = self.compute_bias(real_qlen, klen)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -qlen:, :]
if mask is not None:
position_bias = position_bias + mask # (bs, n_heads, qlen, klen)
scores += position_bias
weights = F.softmax(scores.float(), dim=-1).type_as(scores) # (bs, n_heads, qlen, klen)
weights = F.dropout(weights, p=self.dropout, training=self.training) # (bs, n_heads, qlen, klen)
# Mask heads if we want to
if head_mask is not None:
weights = weights * head_mask
context = torch.matmul(weights, v) # (bs, n_heads, qlen, dim_per_head)
context = unshape(context) # (bs, qlen, dim)
context = self.o(context)
outputs = (context,) + present_key_value_state
if output_attentions:
outputs = outputs + (weights,)
if self.has_relative_attention_bias:
outputs = outputs + (position_bias,)
return outputs
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(
config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=not config.is_decoder
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
norm_x = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
norm_x,
mask=attention_mask,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y)
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.EncDecAttention = T5Attention(
config, has_relative_attention_bias=has_relative_attention_bias, is_bidirectional=True
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
kv,
attention_mask=None,
position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
norm_x = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
norm_x,
mask=attention_mask,
kv=kv,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
y = attention_output[0]
layer_output = hidden_states + self.dropout(y)
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config, has_relative_attention_bias=has_relative_attention_bias))
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
if past_key_value is not None:
assert self.is_decoder, "Only decoder can use `past_key_values`"
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
error_message = "There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states".format(
expected_num_past_key_values,
"2 (past / key) for cross attention" if expected_num_past_key_values == 4 else "",
len(past_key_value),
)
assert len(past_key_value) == expected_num_past_key_values, error_message
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
head_mask=head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
if self.is_decoder and encoder_hidden_states is not None:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
kv=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
head_mask=head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
outputs = (hidden_states,)
# Add attentions if we output them
outputs = outputs + (present_key_value_state,) + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
class T5PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = T5Config
load_tf_weights = load_tf_weights_in_t5
base_model_prefix = "transformer"
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"decoder_input_ids": input_ids,
"input_ids": input_ids,
"decoder_attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
""" Initialize the weights """
factor = self.config.initializer_factor # Used for testing weights initialization
if isinstance(module, T5LayerNorm):
module.weight.data.fill_(factor * 1.0)
elif isinstance(module, (T5Model, T5ForConditionalGeneration)):
# Mesh TensorFlow embeddings initialization
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624
module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0)
elif isinstance(module, T5DenseReluDense):
# Mesh TensorFlow FF initialization
# See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56
# and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89
module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5))
if hasattr(module.wi, "bias") and module.wi.bias is not None:
module.wi.bias.data.zero_()
module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5))
if hasattr(module.wo, "bias") and module.wo.bias is not None:
module.wo.bias.data.zero_()
elif isinstance(module, T5Attention):
# Mesh TensorFlow attention initialization to avoid scaling before softmax
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136
d_model = self.config.d_model
d_kv = self.config.d_kv
n_heads = self.config.num_heads
module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * d_kv) ** -0.5))
module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5))
module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * d_kv) ** -0.5))
if module.has_relative_attention_bias:
module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
def _shift_right(self, input_ids):
decoder_start_token_id = self.config.decoder_start_token_id
pad_token_id = self.config.pad_token_id
assert (
decoder_start_token_id is not None
), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information"
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values"
return shifted_input_ids
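# Illustrative sketch (not part of the original file): the same right-shift on a toy batch,
# assuming pad_token_id = 0 and decoder_start_token_id = 0 as in the released T5 configs.
def _demo_shift_right():
    labels = torch.tensor([[37, -100, -100, 5]])
    shifted = labels.new_zeros(labels.shape)
    shifted[..., 1:] = labels[..., :-1].clone()
    shifted[..., 0] = 0                       # decoder_start_token_id
    shifted.masked_fill_(shifted == -100, 0)  # replace label padding (-100) with pad_token_id
    return shifted                            # tensor([[0, 37, 0, 0]])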
class T5Stack(T5PreTrainedModel):
def __init__(self, config, embed_tokens=None):
super().__init__(config)
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.init_weights()
def get_input_embeddings(self):
return self.embed_tokens
def get_output_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
if use_cache is True:
assert self.is_decoder, ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format(
self
)
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is not None:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.num_layers)
present_key_value_states = () if use_cache else None
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
head_mask=head_mask[i],
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
hidden_states, present_key_value_state = layer_outputs[:2]
if i == 0:
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
position_bias = layer_outputs[3 if output_attentions else 2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[5 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[2],) # We keep only self-attention weights for now
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, present_key_value_states, all_hidden_states, all_attentions]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=present_key_value_states,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
T5_START_DOCSTRING = r"""
The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer
<https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang,
Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text
denoising generative setting.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
T5_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you
should be able to pad the inputs on both the right and the left.
Indices can be obtained using :class:`~transformers.T5Tokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
detail.
To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training
<./t5.html#training>`__.
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Provide for sequence to sequence training. T5 uses the :obj:`pad_token_id` as the starting token for
:obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last
:obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`).
To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training
<./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset,
:obj:`decoder_input_ids` takes the value of :obj:`input_ids`.
decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, tgt_seq_len)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`:
`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a
sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of
the decoder.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare T5 Model transformer outputting raw hidden-states" "without any specific head on top.",
T5_START_DOCSTRING,
)
class T5Model(T5PreTrainedModel):
def __init__(self, config: T5Config):
super().__init__(config)
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
head_mask=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
Returns:
Example::
>>> from transformers import T5Tokenizer, T5Model
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5Model.from_pretrained('t5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, return_dict=True)
>>> last_hidden_states = outputs.last_hidden_state
"""
if "decoder_past_key_value_states" in kwargs:
warnings.warn(
"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_value_states")
if "decoder_past_key_values" in kwargs:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_values")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING)
class T5ForConditionalGeneration(T5PreTrainedModel):
authorized_missing_keys = [r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", r"lm_head\.weight"]
def __init__(self, config):
super().__init__(config)
self.model_dim = config.d_model
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
encoder_outputs=None,
past_key_values=None,
head_mask=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,
config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for
labels in ``[0, ..., config.vocab_size]``
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Examples::
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('t5-small', return_dict=True)
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
>>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you ", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
"""
if "lm_labels" in kwargs:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("lm_labels")
if "decoder_past_key_value_states" in kwargs:
warnings.warn(
"The `decoder_past_key_value_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_value_states")
if "decoder_past_key_values" in kwargs:
warnings.warn(
"The `decoder_past_key_values` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
past_key_values = kwargs.pop("decoder_past_key_values")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# If decoding with past key value states, only the last tokens
# should be given as an input
if past_key_values is not None:
assert labels is None, "Decoder should not use cached key value states when training."
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
if decoder_inputs_embeds is not None:
decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim ** -0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, past, attention_mask, use_cache, encoder_outputs, **kwargs):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"use_cache": use_cache,
}
def _reorder_cache(self, past, beam_idx):
# if decoder past is not included in output
# speedy decoding is disabled and no need to reorder
if past is None:
logger.warning("You might want to consider setting `use_cache=True` to speed up decoding")
return past
reordered_decoder_past = ()
for layer_past_states in past:
# get the correct batch idx from layer past batch dim
# batch dim of `past` is at 2nd position
reordered_layer_past_states = ()
for layer_past_state in layer_past_states:
# need to set correct `past` for each of the four key / value states
reordered_layer_past_states = reordered_layer_past_states + (
layer_past_state.index_select(0, beam_idx),
)
assert reordered_layer_past_states[0].shape == layer_past_states[0].shape
assert len(reordered_layer_past_states) == len(layer_past_states)
reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)
return reordered_decoder_past
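# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the upstream module): a minimal
# conditional-generation call, assuming the public `t5-small` checkpoint is
# available. Beam search exercises `prepare_inputs_for_generation` and
# `_reorder_cache` defined above.
def _example_t5_generation():
    from transformers import T5Tokenizer
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    model = T5ForConditionalGeneration.from_pretrained("t5-small")
    input_ids = tokenizer(
        "translate English to German: The house is wonderful.", return_tensors="pt"
    ).input_ids
    # Beam search reorders the cached key/value states at every decoding step.
    generated = model.generate(input_ids, num_beams=4, max_length=32)
    return tokenizer.decode(generated[0], skip_special_tokens=True)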
| [
"torch.nn.Linear",
"torch.cat",
"torch.nn.ModuleList",
"torch.ones",
"torch.nn.CrossEntropyLoss",
"torch.where",
"torch.sqrt",
"torch.abs",
"torch.tensor",
"torch.zeros_like",
"torch.nn.functional.relu",
"torch.nn.functional.dropout",
"torch.full_like",
"torch.matmul",
"torch.nn.Dropout",
"torch.arange",
"torch.all",
"torch.nn.Embedding"
] | 1.0 | kushalj001/transformers | 0538820737bd8fb9ba1eb3a772412c6bbe2433ab |
1.4 | import torch
import numpy as np
from torch.distributions import Categorical
from typing import Any, Dict, Tuple, Union, Optional
from tianshou.policy import SACPolicy
from tianshou.data import Batch, ReplayBuffer, to_torch
class DiscreteSACPolicy(SACPolicy):
"""Implementation of SAC for Discrete Action Settings. arXiv:1910.07207.
:param torch.nn.Module actor: the actor network following the rules in
:class:`~tianshou.policy.BasePolicy`. (s -> logits)
:param torch.optim.Optimizer actor_optim: the optimizer for actor network.
:param torch.nn.Module critic1: the first critic network. (s -> Q(s))
:param torch.optim.Optimizer critic1_optim: the optimizer for the first
critic network.
:param torch.nn.Module critic2: the second critic network. (s -> Q(s))
:param torch.optim.Optimizer critic2_optim: the optimizer for the second
critic network.
:param float tau: param for soft update of the target network, defaults to
0.005.
:param float gamma: discount factor, in [0, 1], defaults to 0.99.
:param (float, torch.Tensor, torch.optim.Optimizer) or float alpha: entropy
regularization coefficient, default to 0.2.
If a tuple (target_entropy, log_alpha, alpha_optim) is provided, then
alpha is automatically tuned.
:param bool reward_normalization: normalize the reward to Normal(0, 1),
defaults to ``False``.
:param bool ignore_done: ignore the done flag while training the policy,
defaults to ``False``.
.. seealso::
Please refer to :class:`~tianshou.policy.BasePolicy` for more detailed
explanation.
"""
def __init__(
self,
actor: torch.nn.Module,
actor_optim: torch.optim.Optimizer,
critic1: torch.nn.Module,
critic1_optim: torch.optim.Optimizer,
critic2: torch.nn.Module,
critic2_optim: torch.optim.Optimizer,
tau: float = 0.005,
gamma: float = 0.99,
alpha: Union[
float, Tuple[float, torch.Tensor, torch.optim.Optimizer]
] = 0.2,
reward_normalization: bool = False,
ignore_done: bool = False,
estimation_step: int = 1,
**kwargs: Any,
) -> None:
super().__init__(actor, actor_optim, critic1, critic1_optim, critic2,
critic2_optim, (-np.inf, np.inf), tau, gamma, alpha,
reward_normalization, ignore_done, estimation_step,
**kwargs)
self._alpha: Union[float, torch.Tensor]
def forward( # type: ignore
self,
batch: Batch,
state: Optional[Union[dict, Batch, np.ndarray]] = None,
input: str = "obs",
**kwargs: Any,
) -> Batch:
obs = batch[input]
logits, h = self.actor(obs, state=state, info=batch.info)
dist = Categorical(logits=logits)
act = dist.sample()
return Batch(logits=logits, act=act, state=h, dist=dist)
def _target_q(
self, buffer: ReplayBuffer, indice: np.ndarray
) -> torch.Tensor:
batch = buffer[indice] # batch.obs: s_{t+n}
with torch.no_grad():
obs_next_result = self(batch, input="obs_next")
dist = obs_next_result.dist
target_q = dist.probs * torch.min(
self.critic1_old(batch.obs_next),
self.critic2_old(batch.obs_next),
)
target_q = target_q.sum(dim=-1) + self._alpha * dist.entropy()
return target_q
def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
weight = batch.pop("weight", 1.0)
target_q = batch.returns.flatten()
act = to_torch(
batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)
# critic 1
current_q1 = self.critic1(batch.obs).gather(1, act).flatten()
td1 = current_q1 - target_q
critic1_loss = (td1.pow(2) * weight).mean()
self.critic1_optim.zero_grad()
critic1_loss.backward()
self.critic1_optim.step()
# critic 2
current_q2 = self.critic2(batch.obs).gather(1, act).flatten()
td2 = current_q2 - target_q
critic2_loss = (td2.pow(2) * weight).mean()
self.critic2_optim.zero_grad()
critic2_loss.backward()
self.critic2_optim.step()
batch.weight = (td1 + td2) / 2.0 # prio-buffer
# actor
dist = self(batch).dist
entropy = dist.entropy()
with torch.no_grad():
current_q1a = self.critic1(batch.obs)
current_q2a = self.critic2(batch.obs)
q = torch.min(current_q1a, current_q2a)
actor_loss = -(self._alpha * entropy
+ (dist.probs * q).sum(dim=-1)).mean()
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
if self._is_auto_alpha:
log_prob = -entropy.detach() + self._target_entropy
alpha_loss = -(self._log_alpha * log_prob).mean()
self._alpha_optim.zero_grad()
alpha_loss.backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.detach().exp()
self.sync_weight()
result = {
"loss/actor": actor_loss.item(),
"loss/critic1": critic1_loss.item(),
"loss/critic2": critic2_loss.item(),
}
if self._is_auto_alpha:
result["loss/alpha"] = alpha_loss.item()
result["alpha"] = self._alpha.item() # type: ignore
return result
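# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not from the upstream repository): wiring up a
# DiscreteSACPolicy with tiny, hypothetical actor/critic networks that follow
# the interfaces assumed by `forward` (actor returns (logits, state)) and
# `_target_q` (critics return Q(s, .) over all discrete actions).
def _example_build_discrete_sac(obs_dim: int = 4, act_dim: int = 2) -> "DiscreteSACPolicy":
    import torch.nn as nn

    class _Actor(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, act_dim))

        def forward(self, obs, state=None, info=None):
            obs = torch.as_tensor(obs, dtype=torch.float32)
            return self.net(obs), state  # (logits, hidden state)

    class _Critic(nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.net = nn.Sequential(
                nn.Linear(obs_dim, 64), nn.ReLU(), nn.Linear(64, act_dim))

        def forward(self, obs):
            return self.net(torch.as_tensor(obs, dtype=torch.float32))  # Q(s, .)

    actor, critic1, critic2 = _Actor(), _Critic(), _Critic()
    return DiscreteSACPolicy(
        actor, torch.optim.Adam(actor.parameters(), lr=3e-4),
        critic1, torch.optim.Adam(critic1.parameters(), lr=3e-4),
        critic2, torch.optim.Adam(critic2.parameters(), lr=3e-4),
        tau=0.005, gamma=0.99, alpha=0.05,
    )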
| [
"torch.no_grad",
"torch.distributions.Categorical",
"torch.min"
] | 1.4.0 | danagi/tianshou | c97aa4065ee8464bd5897bb86f1f81abd8e2cff9 |
0.4 | from models.base_model import BaseModel
import torch.nn as nn
import torch.nn.functional as F
import os, sys
import torch
import numpy as np
import itertools
from torch.autograd import Variable
from optimizers import get_optimizer
from schedulers import get_scheduler
from models.sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback
from models.deeplab_multimodal import DeepLab
from models.decoder import Decoder
from models.aspp import ASPP
from models.discriminator import FCDiscriminator, FCDiscriminator_low, FCDiscriminator_out, FCDiscriminator_class
from loss import get_loss_function
from .utils import freeze_bn, GradReverse, normalisation_pooling
from metrics import runningScore
import pdb
def multimodal_merger(multi_modal_data, is_upsample=False, up_size=None):
"""
[Func Handler] multimodal_merger:
@Input Params:
multi_modal_data: dict.
examples: {
"feat_cls": feat_cls,
"output": output,
}
@Return:
merge_out: dict.
examples: {
"feat_cls": feat_cls,
"output_comb": output_comb,
"output": output,
}
"""
feat_cls = multi_modal_data['feat_cls']
# merge class features
feat_cls_cat = torch.cat(feat_cls, 1) # concat
# merge output pred
output = multi_modal_data['output']
output_comb = 0
for _i in range(len(output)):
if is_upsample:
output[_i] = F.interpolate(output[_i], size=up_size, mode='bilinear', align_corners=True)
output_comb += output[_i]
merge_out = {
'feat_cls': feat_cls,
'feat_cls_cat': feat_cls_cat,
'output_comb': output_comb,
'output': output,
}
return merge_out
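# ---------------------------------------------------------------------------
# Editor's self-check sketch for `multimodal_merger` (not from the upstream
# repository). The shapes are hypothetical: two modalities, batch 2, 19 classes,
# 8x8 feature maps upsampled to 16x16.
def _example_multimodal_merger():
    feat_cls = [torch.randn(2, 256, 8, 8) for _ in range(2)]
    output = [torch.randn(2, 19, 8, 8) for _ in range(2)]
    merged = multimodal_merger({'feat_cls': feat_cls, 'output': output},
                               is_upsample=True, up_size=(16, 16))
    # feat_cls_cat: 2 x 512 x 8 x 8; output_comb: 2 x 19 x 16 x 16 (sum of upsampled logits)
    return merged['feat_cls_cat'].shape, merged['output_comb'].shape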
class CustomMetricsMultimodalMerger():
"""
[Func Handler] objective_vectors_multimodal_merger:
@Input Params:
multi_modal_data: dict.
examples: {
"class_threshold_group": [model.class_threshold_group[modal_idx][i], ...]
"objective_vectors_group": [model.objective_vectors_group[modal_idx][i], ...],
}
cate_idx: int. 0 ~ 18
modal_ids: list.
examples: [0, 1] or [0,]
@Return:
merge_out: dict.
examples: {
"class_threshold": class_threshold,
"objective_vectors": objective_vectors,
}
"""
def __init__(self, modal_num, category_num, model):
self.modal_num = modal_num
self.category_num = category_num
self._model = model
def initialize_model(self, model):
self._model = model
def merge_class_threshold(self, modal_ids=[]):
assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
_class_threshold_group = self._model.class_threshold_group[modal_ids]
return torch.mean(_class_threshold_group, dim=0) # modal_num x 19 --> 19
def merge_clu_threshold(self, clu_threshold, modal_ids=[]):
_clu_threshold_group = clu_threshold[modal_ids]
return torch.mean(_clu_threshold_group, dim=0)
def merge_objective_vectors(self, modal_ids=[]):
assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
_modal_num, _cate_num, _feat_dim = self._model.objective_vectors_group.size()
_objective_vectors = self._model.objective_vectors_group[modal_ids]
# modal_num x 19 x 256 --> 19 x modal_num x 256 --> 19 x (modal_num x 256)
assert _objective_vectors.dim() == 3, "objective_vector dimension != 3"
_objective_vectors = _objective_vectors.permute(1, 0, 2).contiguous()
return _objective_vectors.view(_cate_num, -1)
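# ---------------------------------------------------------------------------
# Editor's shape sketch for the merger above (not from the upstream repository).
# A hypothetical stub stands in for the DeepLab model; only the attributes the
# merger reads are provided, kept on CPU for simplicity.
def _example_objective_vector_merge():
    class _ModelStub:
        objective_vectors_group = torch.randn(3, 19, 256)  # (modal_num + 1) x classes x feat_dim
        class_threshold_group = torch.full((3, 19), 0.95)
    merger = CustomMetricsMultimodalMerger(modal_num=2, category_num=19, model=_ModelStub())
    merged_vectors = merger.merge_objective_vectors(modal_ids=[0, 1])  # 19 x (2 * 256)
    merged_threshold = merger.merge_class_threshold(modal_ids=[0, 1])  # 19
    return merged_vectors.shape, merged_threshold.shape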
class CustomMetrics():
def __init__(self, numbers=19, modal_num=3, model=None):
self.class_numbers = numbers
self.classes_recall_thr = np.zeros([19, 3])
self.classes_recall_thr_num = np.zeros([19])
self.classes_recall_clu = np.zeros([19, 3])
self.classes_recall_clu_num = np.zeros([19])
self.running_metrics_val_threshold = runningScore(self.class_numbers)
self.running_metrics_val_clusters = runningScore(self.class_numbers)
self.clu_threshold = torch.full((modal_num + 1, 19), 2.5).cuda()
self.multimodal_merger = CustomMetricsMultimodalMerger(
modal_num=modal_num + 1, category_num=numbers, model=model
)
def update(self, feat_cls, outputs, labels, modal_ids=[0,]):
'''Calculate accuracy; we care about recall rather than IoU.'''
batch, width, height = labels.shape
labels = labels.reshape([batch, 1, width, height]).float()
labels = F.interpolate(labels, size=feat_cls.size()[2:], mode='nearest')
outputs_threshold = outputs.clone()
outputs_threshold = F.softmax(outputs_threshold, dim=1)
#self.running_metrics_val_threshold.update(labels.cpu().numpy(), outputs_threshold.argmax(1).cpu().numpy())
self.running_metrics_val_threshold.update(labels, outputs_threshold.argmax(1))
_class_threshold_set = self.multimodal_merger.merge_class_threshold(modal_ids=modal_ids)
for i in range(19):
outputs_threshold[:, i, :, :] = torch.where(outputs_threshold[:, i, :, :] > _class_threshold_set[i], torch.Tensor([1]).cuda(), torch.Tensor([0]).cuda())
_batch, _channel, _w, _h = outputs_threshold.shape
_tmp = torch.full([_batch, 1, _w, _h], 0.2,).cuda()
_tmp = torch.cat((outputs_threshold, _tmp), 1)
threshold_arg = _tmp.argmax(1, keepdim=True)
threshold_arg[threshold_arg == 19] = 250 #ignore index
truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), threshold_arg.cpu().int().numpy())
self.classes_recall_thr[:, 0] += truth
self.classes_recall_thr[:, 2] += pred_all
self.classes_recall_thr[:, 1] += truth_all
outputs_cluster = outputs.clone()
_objective_vectors_set = self.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
for i in range(19):
outputs_cluster[:, i, :, :] = torch.norm( _objective_vectors_set[i].reshape(-1,1,1).expand(-1,128,256) - feat_cls, 2, dim=1,)
outputs_cluster_min, outputs_cluster_arg = outputs_cluster.min(dim=1, keepdim=True)
outputs_cluster_second = outputs_cluster.scatter_(1, outputs_cluster_arg, 100)
if torch.unique(outputs_cluster_second.argmax(1) - outputs_cluster_arg.squeeze()).squeeze().item() != 0:
raise NotImplementedError('wrong when computing L2 norm!!')
outputs_cluster_secondmin, outputs_cluster_secondarg = outputs_cluster_second.min(dim=1, keepdim=True)
#self.running_metrics_val_clusters.update(labels.cpu().numpy(), outputs_cluster_arg.cpu().numpy())
self.running_metrics_val_clusters.update(labels, outputs_cluster_arg)
tmp_arg = outputs_cluster_arg.clone()
# pdb.set_trace()  # leftover debugging breakpoint, disabled so evaluation is not interrupted
_clu_thresholds = self.multimodal_merger.merge_clu_threshold(self.clu_threshold, modal_ids=modal_ids)
outputs_cluster_arg[(outputs_cluster_secondmin - outputs_cluster_min) < _clu_thresholds] = 250
truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), outputs_cluster_arg.cpu().int().numpy())
self.classes_recall_clu[:, 0] += truth
self.classes_recall_clu[:, 2] += pred_all
self.classes_recall_clu[:, 1] += truth_all
return threshold_arg, outputs_cluster_arg
def calc_recall(self, gt, argmax):
truth = np.zeros([self.class_numbers])
pred_all = np.zeros([self.class_numbers])
truth_all = np.zeros([self.class_numbers])
for i in range(self.class_numbers):
truth[i] = (gt == i)[argmax == i].sum()
pred_all[i] = (argmax == i).sum()
truth_all[i] = (gt == i).sum()
pass
return truth, pred_all, truth_all
def calc_mean_Clu_recall(self, ):
return np.mean(self.classes_recall_clu[:, 0] / self.classes_recall_clu[:, 1])
def calc_mean_Thr_recall(self, ):
return np.mean(self.classes_recall_thr[:, 0] / self.classes_recall_thr[:, 1])
def reset(self, ):
self.running_metrics_val_clusters.reset()
self.running_metrics_val_threshold.reset()
self.classes_recall_clu = np.zeros([19, 3])
self.classes_recall_thr = np.zeros([19, 3])
class CustomModel():
def __init__(self, cfg, writer, logger, use_pseudo_label=False, modal_num=3):
self.cfg = cfg
self.writer = writer
self.class_numbers = 19
self.logger = logger
cfg_model = cfg['model']
self.cfg_model = cfg_model
self.best_iou = -100
self.iter = 0
self.nets = []
self.split_gpu = 0
self.default_gpu = cfg['model']['default_gpu']
self.PredNet_Dir = None
self.valid_classes = cfg['training']['valid_classes']
self.G_train = True
self.cls_feature_weight = cfg['training']['cls_feature_weight']
self.use_pseudo_label = use_pseudo_label
self.modal_num = modal_num
# cluster vectors & cuda initialization
self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
self.class_threshold_group = torch.full([self.modal_num + 1, 19], 0.95).cuda()
#self.metrics = CustomMetrics(self.class_numbers)
self.metrics = CustomMetrics(self.class_numbers, modal_num=self.modal_num, model=self)
bn = cfg_model['bn']
if bn == 'sync_bn':
BatchNorm = SynchronizedBatchNorm2d
elif bn == 'bn':
BatchNorm = nn.BatchNorm2d
elif bn == 'gn':
BatchNorm = nn.GroupNorm
else:
raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))
if use_pseudo_label:
self.PredNet = DeepLab(
num_classes=19,
backbone=cfg_model['basenet']['version'],
output_stride=16,
bn=cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num
).cuda()
self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)
self.PredNet_DP = self.init_device(self.PredNet, gpu_id=self.default_gpu, whether_DP=True)
self.PredNet.eval()
self.PredNet_num = 0
self.BaseNet = DeepLab(
num_classes=19,
backbone=cfg_model['basenet']['version'],
output_stride=16,
bn=cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num
)
logger.info('the backbone is {}'.format(cfg_model['basenet']['version']))
self.BaseNet_DP = self.init_device(self.BaseNet, gpu_id=self.default_gpu, whether_DP=True)
self.nets.extend([self.BaseNet])
self.nets_DP = [self.BaseNet_DP]
# Discriminator
self.SOURCE_LABEL = 0
self.TARGET_LABEL = 1
self.DNets = []
self.DNets_DP = []
for _ in range(self.modal_num+1):
_net_d = FCDiscriminator(inplanes=19)
self.DNets.append(_net_d)
_net_d_DP = self.init_device(_net_d, gpu_id=self.default_gpu, whether_DP=True)
self.DNets_DP.append(_net_d_DP)
self.nets.extend(self.DNets)
self.nets_DP.extend(self.DNets_DP)
self.optimizers = []
self.schedulers = []
optimizer_cls = torch.optim.SGD
optimizer_params = {k:v for k, v in cfg['training']['optimizer'].items()
if k != 'name'}
optimizer_cls_D = torch.optim.Adam
optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items()
if k != 'name'}
if self.use_pseudo_label:
self.BaseOpti = optimizer_cls(self.BaseNet.parameters(), **optimizer_params)
else:
self.BaseOpti = optimizer_cls(self.BaseNet.optim_parameters(cfg['training']['optimizer']['lr']), **optimizer_params)
self.optimizers.extend([self.BaseOpti])
self.DiscOptis = []
for _d_net in self.DNets:
self.DiscOptis.append(
optimizer_cls_D(_d_net.parameters(), **optimizer_params_D)
)
self.optimizers.extend(self.DiscOptis)
self.schedulers = []
if self.use_pseudo_label:
self.BaseSchedule = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
self.schedulers.extend([self.BaseSchedule])
else:
"""BaseSchedule detail see FUNC: scheduler_step()"""
self.learning_rate = cfg['training']['optimizer']['lr']
self.gamma = cfg['training']['lr_schedule']['gamma']
self.num_steps = cfg['training']['lr_schedule']['max_iter']
self._BaseSchedule_nouse = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
self.schedulers.extend([self._BaseSchedule_nouse])
self.DiscSchedules = []
for _disc_opt in self.DiscOptis:
self.DiscSchedules.append(
get_scheduler(_disc_opt, cfg['training']['lr_schedule'])
)
self.schedulers.extend(self.DiscSchedules)
self.setup(cfg, writer, logger)
self.adv_source_label = 0
self.adv_target_label = 1
self.bceloss = nn.BCEWithLogitsLoss(reduce=False)
self.loss_fn = get_loss_function(cfg)
self.mseloss = nn.MSELoss()
self.l1loss = nn.L1Loss()
self.smoothloss = nn.SmoothL1Loss()
self.triplet_loss = nn.TripletMarginLoss()
def create_PredNet(self,):
ss = DeepLab(
num_classes=19,
backbone=self.cfg_model['basenet']['version'],
output_stride=16,
bn=self.cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num,
).cuda()
ss.eval()
return ss
def setup(self, cfg, writer, logger):
'''
set optimizer and load pretrained model
'''
for net in self.nets:
# name = net.__class__.__name__
self.init_weights(cfg['model']['init'], logger, net)
print("Initializition completed")
if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:
print("loading pretrained model for {}".format(net.__class__.__name__))
net._load_pretrained_model()
'''load pretrained model
'''
if cfg['training']['resume_flag']:
self.load_nets(cfg, writer, logger)
pass
def lr_poly(self):
return self.learning_rate * ((1 - float(self.iter) / self.num_steps) ** (self.gamma))
def adjust_basenet_learning_rate(self):
lr = self.lr_poly()
self.BaseOpti.param_groups[0]['lr'] = lr
if len(self.BaseOpti.param_groups) > 1:
self.BaseOpti.param_groups[1]['lr'] = lr * 10
def forward(self, input):
feat, feat_low, att_mask, feat_cls, output = self.BaseNet_DP(input)
return feat, feat_low, feat_cls, output
def forward_Up(self, input):
feat, feat_low, feat_cls, outputs = self.forward(input)
output = F.interpolate(outputs[-1], size=input.size()[2:], mode='bilinear', align_corners=True)
return feat, feat_low, feat_cls, output
def PredNet_Forward(self, input):
with torch.no_grad():
_, _, att_mask, feat_cls, output_result = self.PredNet_DP(input)
return _, _, feat_cls, output_result
def calculate_mean_vector(self, feat_cls, outputs, labels, ):
outputs_softmax = F.softmax(outputs, dim=1)
outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)
outputs_argmax = self.process_label(outputs_argmax.float())
labels_expanded = self.process_label(labels)
outputs_pred = labels_expanded * outputs_argmax
scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)
vectors = []
ids = []
for n in range(feat_cls.size()[0]):
for t in range(self.class_numbers):
if scale_factor[n][t].item()==0:
continue
if (outputs_pred[n][t] > 0).sum() < 10:
continue
s = feat_cls[n] * outputs_pred[n][t]
scale = torch.sum(outputs_pred[n][t]) / labels.shape[2] / labels.shape[3] * 2
s = normalisation_pooling()(s, scale)
s = F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t]
vectors.append(s)
ids.append(t)
return vectors, ids
def step(self, source_x, source_label, source_modal_ids, target_x, target_label, target_modal_ids, use_pseudo_loss=False):
assert len(source_modal_ids) == source_x.size(0), "modal_ids' batchsize != source_x's batchsize"
_, _, source_feat_cls, source_output = self.forward(input=source_x)
"""source_output: [B x 19 x W x H, ...]
select modal-branch output in each batchsize
Specific-modal output
"""
source_output_modal_k = torch.stack(
[
source_output[_modal_i][_batch_i]
for _batch_i, _modal_i in enumerate(source_modal_ids)
],
dim=0,
)
# attention output & specific-modal output
source_output_comb = torch.cat([source_output_modal_k, source_output[-1]], dim=0)
source_label_comb = torch.cat([source_label, source_label.clone()], dim=0)
source_outputUp = F.interpolate(source_output_comb, size=source_x.size()[-2:], mode='bilinear', align_corners=True)
loss_GTA = self.loss_fn(input=source_outputUp, target=source_label_comb)
#self.PredNet.eval()
# adversarial loss
# -----------------------------
"""Generator (segmentation)"""
# -----------------------------
# On Source Domain
loss_adv = torch.Tensor([0]).cuda()
_batch_size = 0
_, _, _, target_output = self.forward(target_x)
target_modal_ids_tensor = torch.Tensor(target_modal_ids).cuda()
for t_out, _d_net_DP, _d_net, modal_idx in zip(target_output, self.DNets_DP, self.DNets, range(len(target_output))):
# set grad false
self.set_requires_grad(self.logger, _d_net, requires_grad = False)
# true/false discriminator
t_D_out = _d_net_DP(F.softmax(t_out))
#source_modal_ids
loss_temp = torch.mean(self.bceloss(
t_D_out,
torch.FloatTensor(t_D_out.data.size()).fill_(1.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_adv += torch.mean(loss_temp)
elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_adv += 0.0
else:
loss_adv += torch.mean(torch.masked_select(loss_temp, target_modal_ids_tensor==modal_idx))
_batch_size += t_out.size(0)
#loss_adv /= _batch_size
loss_adv *= self.cfg['training']['loss_adv_lambda']
loss_G = torch.Tensor([0]).cuda()
loss_G = loss_G + loss_GTA + loss_adv
self.BaseOpti.zero_grad()
if loss_G.item() != 0:
loss_G.backward()
self.BaseOpti.step()
# -----------------------------
"""Discriminator """
# -----------------------------
_batch_size = 0
loss_D_comb = torch.Tensor([0]).cuda()
source_modal_ids_tensor = torch.Tensor(source_modal_ids).cuda()
for s_out, t_out, _d_net_DP, _d_net, _disc_opt, modal_idx in zip(source_output, target_output, self.DNets_DP, self.DNets, self.DiscOptis, range(len(source_output))):
self.set_requires_grad(self.logger, _d_net, requires_grad = True)
_batch_size = 0
loss_D = torch.Tensor([0]).cuda()
# source domain
s_D_out = _d_net_DP(F.softmax(s_out.detach()))
loss_temp_s = torch.mean(self.bceloss(
s_D_out,
torch.FloatTensor(s_D_out.data.size()).fill_(1.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_D += torch.mean(loss_temp_s)
elif torch.mean(torch.as_tensor((modal_idx==source_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_D += 0.0
else:
loss_D += torch.mean(torch.masked_select(loss_temp_s, source_modal_ids_tensor==modal_idx))
# target domain
_batch_size += (s_out.size(0) + t_out.size(0))
t_D_out = _d_net_DP(F.softmax(t_out.detach()))
loss_temp_t = torch.mean(self.bceloss(
t_D_out,
torch.FloatTensor(t_D_out.data.size()).fill_(0.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_D += torch.mean(loss_temp_t)
elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_D += 0.0
else:
loss_D += torch.mean(torch.masked_select(loss_temp_t, target_modal_ids_tensor==modal_idx))
loss_D *= self.cfg['training']['loss_adv_lambda']*0.5
loss_D_comb += loss_D
_disc_opt.zero_grad()
if loss_D_comb.item() != 0:
loss_D_comb.backward()
_disc_opt.step()
return loss_GTA, loss_adv, loss_D_comb
def process_label(self, label):
batch, channel, w, h = label.size()
pred1 = torch.zeros(batch, 20, w, h).cuda()
id = torch.where(label < 19, label, torch.Tensor([19]).cuda())
pred1 = pred1.scatter_(1, id.long(), 1)
return pred1
def class_vectors_alignment(self, ids, vectors, modal_ids=[0,]):
#loss = torch.Tensor([0]).cuda(self.default_gpu)
loss = torch.Tensor([0]).cuda()
"""construct category objective vectors"""
# objective_vectors_group 2 x 19 x 256 --> 19 x 512
_objective_vectors_set = self.metrics.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
for i in range(len(ids)):
if ids[i] not in self.valid_classes:
continue
new_loss = self.smoothloss(vectors[i].squeeze().cuda(), _objective_vectors_set[ids[i]])
while (new_loss.item() > 5):
new_loss = new_loss / 10
loss = loss + new_loss
loss = loss / len(ids) * 10
return loss
def freeze_bn_apply(self):
for net in self.nets:
net.apply(freeze_bn)
for net in self.nets_DP:
net.apply(freeze_bn)
def scheduler_step(self):
if self.use_pseudo_label:
for scheduler in self.schedulers:
scheduler.step()
else:
"""skipped _BaseScheduler_nouse"""
for scheduler in self.schedulers[1:]:
scheduler.step()
# baseNet scheduler
self.adjust_basenet_learning_rate()
def optimizer_zerograd(self):
for optimizer in self.optimizers:
optimizer.zero_grad()
def optimizer_step(self):
for opt in self.optimizers:
opt.step()
def init_device(self, net, gpu_id=None, whether_DP=False):
gpu_id = gpu_id or self.default_gpu
device = torch.device("cuda:{}".format(gpu_id) if torch.cuda.is_available() else 'cpu')
net = net.to(device)
# if torch.cuda.is_available():
if whether_DP:
net = DataParallelWithCallback(net, device_ids=range(torch.cuda.device_count()))
return net
def eval(self, net=None, logger=None):
"""Make specific models eval mode during test time"""
if net == None:
for net in self.nets:
net.eval()
for net in self.nets_DP:
net.eval()
if logger!=None:
logger.info("Successfully set the model eval mode")
else:
net.eval()
if logger!=None:
logger("Successfully set {} eval mode".format(net.__class__.__name__))
return
def train(self, net=None, logger=None):
if net==None:
for net in self.nets:
net.train()
for net in self.nets_DP:
net.train()
else:
net.train()
return
def set_requires_grad(self, logger, net, requires_grad = False):
"""Set requires_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
net (BaseModel) -- the network which will be operated on
requires_grad (bool) -- whether the networks require gradients or not
"""
for parameter in net.parameters():
parameter.requires_grad = requires_grad
def set_requires_grad_layer(self, logger, net, layer_type='batchnorm', requires_grad=False):
''' set specific type of layers whether needing grad
'''
# print('Warning: all the BatchNorm params are fixed!')
# logger.info('Warning: all the BatchNorm params are fixed!')
for net in self.nets:
for _i in net.modules():
if _i.__class__.__name__.lower().find(layer_type.lower()) != -1:
_i.weight.requires_grad = requires_grad
return
def init_weights(self, cfg, logger, net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
init_type = cfg.get('init_type', init_type)
init_gain = cfg.get('init_gain', init_gain)
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 \
or isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_() # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
print('initialize {} with {}'.format(net.__class__.__name__, init_type))
logger.info('initialize {} with {}'.format(net.__class__.__name__, init_type))
net.apply(init_func) # apply the initialization function <init_func>
pass
def adaptive_load_nets(self, net, model_weight):
model_dict = net.state_dict()
pretrained_dict = {k : v for k, v in model_weight.items() if k in model_dict}
# print("[INFO] Pretrained dict:", pretrained_dict.keys())
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict)
def load_nets(self, cfg, writer, logger): # load pretrained weights on the net
if os.path.isfile(cfg['training']['resume']):
logger.info(
"Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume'])
)
checkpoint = torch.load(cfg['training']['resume'])
_k = -1
net_state_no = {}
for net in self.nets:
name = net.__class__.__name__
if name not in net_state_no:
net_state_no[name] = 0
else:
net_state_no[name] += 1
_k += 1
if checkpoint.get(name) == None:
continue
if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
continue
if isinstance(checkpoint[name], list):
self.adaptive_load_nets(net, checkpoint[name][net_state_no[name]]["model_state"])
else:
print("*****************************************")
print("[WARNING] Using depreciated load version! Model {}".format(name))
print("*****************************************")
self.adaptive_load_nets(net, checkpoint[name]["model_state"])
if cfg['training']['optimizer_resume']:
if isinstance(checkpoint[name], list):
self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][net_state_no[name]]["optimizer_state"])
self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][net_state_no[name]]["scheduler_state"])
else:
self.adaptive_load_nets(self.optimizers[_k], checkpoint[name]["optimizer_state"])
self.adaptive_load_nets(self.schedulers[_k], checkpoint[name]["scheduler_state"])
self.iter = checkpoint["iter"]
#self.best_iou = checkpoint['best_iou']
logger.info(
"Loaded checkpoint '{}' (iter {})".format(
cfg['training']['resume'], checkpoint["iter"]
)
)
else:
raise Exception("No checkpoint found at '{}'".format(cfg['training']['resume']))
def load_PredNet(self, cfg, writer, logger, dir=None, net=None): # load pretrained weights on the net
dir = dir or cfg['training']['Pred_resume']
best_iou = 0
if os.path.isfile(dir):
logger.info(
"Loading model and optimizer from checkpoint '{}'".format(dir)
)
checkpoint = torch.load(dir)
name = net.__class__.__name__
if checkpoint.get(name) == None:
return
if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
return
if isinstance(checkpoint[name], list):
self.adaptive_load_nets(net, checkpoint[name][0]["model_state"])
else:
self.adaptive_load_nets(net, checkpoint[name]["model_state"])
iter = checkpoint["iter"]
best_iou = checkpoint['best_iou']
logger.info(
"Loaded checkpoint '{}' (iter {}) (best iou {}) for PredNet".format(
dir, checkpoint["iter"], best_iou
)
)
else:
raise Exception("No checkpoint found at '{}'".format(dir))
if hasattr(net, 'best_iou'):
#net.best_iou = best_iou
pass
return best_iou
def set_optimizer(self, optimizer): #set optimizer to all nets
pass
def reset_objective_SingleVector(self,):
self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
def update_objective_SingleVector(self, vectors, vectors_num, name='moving_average'):
#vector = vector.squeeze().detach()
if torch.sum(vectors) == 0:
return
if name == 'moving_average':
self.objective_vectors_group = self.objective_vectors_group * 0.9999 + 0.0001 * vectors
self.objective_vectors_num_group += vectors_num
self.objective_vectors_num_group = torch.clamp(self.objective_vectors_num_group, max=3000)
elif name == 'mean':
self.objective_vectors_group = self.objective_vectors_group * self.objective_vectors_num_group + vectors
self.objective_vectors_num_group += vectors_num
self.objective_vectors_group = self.objective_vectors_group / self.objective_vectors_num_group
self.objective_vectors_num_group = torch.clamp(self.objective_vectors_num_group, max=3000)
else:
raise NotImplementedError('no such updating way of objective vectors {}'.format(name))
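# ---------------------------------------------------------------------------
# Editor's standalone sketch of the 'moving_average' rule used above (not from
# the upstream repository): an exponential moving average with momentum 0.9999,
# skipped entirely when the incoming vectors are all zero.
def _example_vector_ema(old_vectors, new_vectors, momentum=0.9999):
    if torch.sum(new_vectors) == 0:
        return old_vectors
    return old_vectors * momentum + (1.0 - momentum) * new_vectors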
def grad_reverse(x):
return GradReverse()(x)
| [
"torch.cat",
"torch.nn.init.kaiming_normal_",
"torch.nn.SmoothL1Loss",
"torch.cuda.is_available",
"torch.nn.BCEWithLogitsLoss",
"torch.load",
"torch.sum",
"torch.nn.init.constant_",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.init.normal_",
"torch.as_tensor",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_normal_",
"torch.Tensor",
"torch.masked_select",
"torch.zeros",
"torch.nn.TripletMarginLoss",
"torch.cuda.device_count",
"torch.full",
"torch.nn.functional.softmax",
"torch.nn.MSELoss",
"torch.nn.functional.interpolate",
"torch.no_grad",
"torch.nn.L1Loss",
"torch.mean"
] | 0.4.1 | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 |
1.4 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import argparse
import logging
import math
import os
import time
import warnings
from benchmark_dataset import BenchmarkLMDataset, collate_sentences_lm
import torch
from torch.distributed import rpc
import torch.multiprocessing as mp
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
import torchtext
from torchtext.data.utils import get_tokenizer
from fairscale.nn import Pipe
from fairscale.nn.model_parallel import initialize_model_parallel
from fairscale.nn.model_parallel.initialize import get_data_parallel_group, get_pipeline_parallel_group
from fairscale.nn.pipe import LazyModule, pipe
from fairscale.optim import GradScaler
from fairscale.optim.oss import OSS
from fairscale.utils.testing import dist_init, get_worker_map
try:
from fairscale.optim import Adam # type: ignore
can_benchmark = True
except ImportError:
from torch.optim import Adam # type: ignore
can_benchmark = False
def init_random_seed(seed: int):
import numpy
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
numpy.random.seed(seed)
PIPE_CHUNKS = 2
iteration_count = 0
class EmbeddingLayer(nn.Embedding):
def __init__(self, ntoken, ninp, initrange):
super().__init__(ntoken, ninp)
self.ninp = ninp
self.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
return super().forward(src) * math.sqrt(self.ninp)
class PositionalEncodingLayer(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncodingLayer, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
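# Editor's shape-check sketch (not part of the upstream benchmark): the layer
# expects inputs laid out as (seq_len, batch, d_model) so that the positional
# buffer slice pe[:seq_len] broadcasts over the batch dimension.
def _example_positional_encoding():
    pe_layer = PositionalEncodingLayer(d_model=16, dropout=0.0, max_len=128)
    x = torch.zeros(10, 2, 16)  # seq_len=10, batch=2
    return pe_layer(x).shape    # torch.Size([10, 2, 16])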
class TransformerDecoderLayer(nn.TransformerEncoderLayer):
"""Though this class inherits from torch.nn.TransformerEncoderLayer,
it functions as a decoder in this model"""
def __init__(self, ninp, nhead, nhid, dropout):
super().__init__(ninp, nhead, nhid, dropout)
self.src_mask = None
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
return mask
def forward(self, src):
global iteration_count
iteration_count += 1
# if iteration_count == 196:
# dump_cuda_tensors()
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
return super().forward(src, self.src_mask)
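# Editor's sketch (not part of the upstream benchmark): the causal mask produced
# above lets position i attend only to positions <= i; entries above the
# diagonal are -inf, all others are 0.
def _example_causal_mask(sz: int = 4):
    layer = TransformerDecoderLayer(8, 2, 16, 0.0)  # small hypothetical sizes
    return layer._generate_square_subsequent_mask(sz)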
class LinearLayer(nn.Linear):
def __init__(self, ninp, ntoken, initrange):
super().__init__(ninp, ntoken)
self.bias.data.zero_()
self.weight.data.uniform_(-initrange, initrange)
class TransformerLMSequential(nn.Sequential):
"""A small language model based on the design of GPT-2 using nn.Sequential
for compatibility with Pipe"""
def __init__(self, ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder):
layers = [
EmbeddingLayer(ntokens, ninp, initrange),
PositionalEncodingLayer(ninp, dropout),
]
for _ in range(ndecoder):
layers.append(TransformerDecoderLayer(ninp, nhead, nhid, dropout))
layers.append(LinearLayer(ninp, ntokens, initrange))
super(TransformerLMSequential, self).__init__(*layers)
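# Editor's sketch (not part of the upstream benchmark): building the toy LM with
# small, hypothetical hyper-parameters and running one forward pass on CPU.
def _example_tiny_lm():
    ntokens, ninp, nhead, nhid = 100, 32, 4, 64
    model = TransformerLMSequential(ntokens, ninp, nhead, nhid,
                                    dropout=0.1, initrange=0.1, ndecoder=2)
    src = torch.randint(0, ntokens, (35, 8))  # (seq_len, batch) of token ids
    return model(src).shape                   # (35, 8, ntokens) logits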
def get_data(device):
with warnings.catch_warnings(record=True):
TEXT = torchtext.data.Field(
tokenize=get_tokenizer("basic_english"), init_token="<sos>", eos_token="<eos>", lower=True
)
train_txt, val_txt, test_txt = torchtext.datasets.WikiText2.splits(TEXT)
TEXT.build_vocab(train_txt)
ntokens = len(TEXT.vocab.stoi)
batch_size = 20
eval_batch_size = 10
train_data = batchify(train_txt, batch_size, TEXT, device)
val_data = batchify(val_txt, eval_batch_size, TEXT, device)
test_data = batchify(test_txt, eval_batch_size, TEXT, device)
return ntokens, train_data, val_data, test_data
def batchify(data, bsz, TEXT, device):
data = TEXT.numericalize([data.examples[0].text])
nbatch = data.size(0) // bsz
data = data.narrow(0, 0, nbatch * bsz)
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
def get_batch(source, i, bptt):
seq_len = min(bptt, len(source) - 1 - i)
data = source[i : i + seq_len]
target = source[i + 1 : i + 1 + seq_len].view(-1)
return data, target
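# Editor's sketch (not part of the upstream benchmark) of the batching scheme:
# tokens are laid out column-wise per stream, and get_batch returns a window of
# bptt inputs together with the next-token targets, flattened for the loss.
def _example_batching(bptt: int = 5):
    stream = torch.arange(0, 26).unsqueeze(1)  # fake numericalized corpus
    bsz = 4
    nbatch = stream.size(0) // bsz
    source = stream.narrow(0, 0, nbatch * bsz).view(bsz, -1).t().contiguous()
    data, target = get_batch(source, 0, bptt)
    return data.shape, target.shape            # (5, 4) and (20,)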
def make_model(args, device, ntokens):
ninp = 2048 # embedding dimension
nhid = 2048 # the dimension of the feedforward network model in nn.TransformerEncoder
nhead = 32 # the number of heads in the multiheadattention models
dropout = 0
initrange = 0.1
ndecoder = args.num_decoder_layers
if args.lazy_construction:
layers = [
LazyModule(lambda: EmbeddingLayer(ntokens, ninp, initrange)),
LazyModule(lambda: PositionalEncodingLayer(ninp, dropout)),
]
for _ in range(ndecoder):
layers.append(LazyModule(lambda: TransformerDecoderLayer(ninp, nhead, nhid, dropout)))
layers.append(LazyModule(lambda: LinearLayer(ninp, ntokens, initrange)))
model = layers
else:
model = TransformerLMSequential(ntokens, ninp, nhead, nhid, dropout, initrange, ndecoder).to(device)
criterion = nn.CrossEntropyLoss()
lr = 0.01 # learning rate
def make_adam(model):
if args.ddp_zero:
return OSS(params=model.parameters(), optim=Adam, group=get_data_parallel_group(), lr=lr)
else:
return Adam(model.parameters(), lr=lr)
optimizer = make_adam
scaler = GradScaler()
return model, criterion, optimizer, scaler
def get_tensors_by_size_bucket():
from collections import defaultdict
import gc
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
return size_buckets
def dump_size_buckets(size_buckets, prefix=""):
from functools import reduce
import operator
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(prefix + f"{key} : {value}, {this}")
print(prefix + f"total = {total}")
last_size_buckets = None
once = True
def safe_rank():
try:
return torch.distributed.get_rank()
except AssertionError:
return 0
def check_size_buckets():
global last_size_buckets
global once
size_buckets = get_tensors_by_size_bucket()
if last_size_buckets is not None:
if size_buckets != last_size_buckets:
print(f"difference is oustanding tensors: {safe-rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
if once:
print(f"dumping buckets for: {safe_rank()}")
dump_size_buckets(last_size_buckets, "old: ")
dump_size_buckets(size_buckets, "new: ")
once = False
else:
print(f"size buckets none on {safe_rank()}")
last_size_buckets = size_buckets
def dump_cuda_tensors():
print(f"dumping cuda tensors...")
from collections import defaultdict
from functools import reduce
import gc
import operator
size_buckets = defaultdict(int)
for obj in gc.get_objects():
if not isinstance(obj, torch.Tensor):
continue
if obj.device.type == "cuda":
size_buckets[(*obj.size(),) + (obj.element_size(),)] += 1
print(f"outstanding cuda tensors:")
total = 0
for key, value in size_buckets.items():
this = reduce(operator.mul, key) * value
total += this
print(f"{key} : {value}, {this}")
print(f"total size = {total}")
import pprint
pprint.pprint(torch.cuda.memory_stats())
def train(lm_dataloader, model, criterion, optimizer, vocab_size, args):
model.train()
from functools import reduce
import operator
num_params = reduce(operator.add, (reduce(operator.mul, x.size()) for x in model.parameters()))
if model.group:
total = torch.Tensor([num_params])
if torch.cuda.is_available():
total = total.cuda()
torch.distributed.all_reduce(total, group=model.group)
logging.info(
f"training model, #prams = {num_params}, group: {model.group.rank()}, grank:"
f" {torch.distributed.get_rank()}, sizes {model.group.size()}"
)
torch.distributed.barrier()
if model.group.rank() == 0:
logging.info(f"total #prams = {total.item()}")
else:
logging.info(f"training model, #prams = {num_params}")
vocab_size = 10000 # FIXME
total_loss = 0.0
start_time = time.time()
word_counter = 0
optimizer = optimizer(model)
def get_first_device(model):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if model.devices:
return model.devices[0]
else:
return torch.cuda.current_device()
def get_last_device(model):
if isinstance(model, DDP):
model = model.module
if not torch.cuda.is_available():
return torch.device("cpu")
if model.devices:
return model.devices[-1]
else:
return torch.cuda.current_device()
pipe_group = model.group
if args.ddp_zero:
model = DDP(
model,
device_ids=[torch.cuda.current_device()],
process_group=get_data_parallel_group(),
find_unused_parameters=False,
)
if pipe_group and pipe_group.rank() != 0 and pipe_group.rank() != (pipe_group.size() - 1):
thing = {"input": torch.zeros(args.batch_size)}
class FakeDataset:
def __getitem__(self, index):
return thing
def __len__(self):
return len(lm_dataloader)
lm_dataloader = FakeDataset()
for i, batch in enumerate(lm_dataloader):
bi = batch["input"]
if args.max_batch and i > args.max_batch:
break
optimizer.zero_grad()
try:
if (pipe_group is None or pipe_group.rank() == 0) and not args.ddp_zero:
tmp = batch["input"].to(get_first_device(model))
output = model(tmp)
else:
output = model(batch["input"])
except Exception as e:
raise RuntimeError(f"training failed on {torch.distributed.get_rank()}") from e
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
target = batch["target"].to(get_last_device(model))
output = output.to(target.device)
loss = criterion(output.view(-1, vocab_size), target.view(-1))
if args.ddp_zero:
ddp_group = get_data_parallel_group()
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM, group=ddp_group)
loss /= ddp_group.size()
loss.backward()
del target
else:
if args.ddp_zero:
model.module.back_helper(output)
else:
model.back_helper(output)
del output
torch.nn.utils.clip_grad_value_(model.parameters(), 0.05)
optimizer.step()
if pipe_group is None or pipe_group.rank() == pipe_group.size() - 1:
total_loss += loss.item()
log_interval = 1
word_counter += batch["ntokens"]
if i % log_interval == 0 and i > 0:
cur_loss = total_loss / log_interval
elapsed = time.time() - start_time
print(
"| batch {:5d} | wps {:5.2f} | loss {:5.2f} | ppl {:8.2f}".format(
i, word_counter / elapsed, cur_loss, math.exp(cur_loss)
)
)
word_counter = 0
total_loss = 0
start_time = time.time()
# if i >= 10:
# break
# torch.cuda.empty_cache()
# check_size_buckets()
def evaluate(eval_model, data_source, criterion, bptt, ntokens):
eval_model.eval()
total_loss = 0.0
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, bptt):
data, targets = get_batch(data_source, i, bptt)
output = eval_model(data)
output = output.to(targets.device)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
return total_loss / (len(data_source) - 1)
def get_number_of_words(data):
return data.size()[0] * data.size()[1]
def benchmark_language_model(train_data, val_data, test_data, model, criterion, optimizer, ntokens, args):
epoch = 1
bptt = 35
start_time = time.time()
print("-" * 110)
print("| start of epoch {:1d}".format(epoch))
print("-" * 110)
epoch_start_time = time.time()
train(train_data, model, criterion, optimizer, ntokens, args)
val_loss = 1 # evaluate(model, val_data, criterion, bptt, ntokens)
print("-" * 89)
print(
"| end of epoch {:1d} | time: {:5.2f}s | valid loss {:5.2f} ".format(
epoch, (time.time() - epoch_start_time), val_loss
)
)
print("-" * 110)
elapsed_time = time.time() - start_time
nwords = get_number_of_words(train_data) + get_number_of_words(val_data)
wps = nwords / elapsed_time
test_loss = 1 # evaluate(model, test_data, criterion, bptt, ntokens)
print("=" * 89)
print(
"| end of training | test loss {:5.2f} \n| time: {:5.2f}s | words: {:3d} | wps: {:5.2f}".format(
test_loss, elapsed_time, nwords, wps
)
)
print("=" * 110)
if can_benchmark and len(model.balance) == 4:
# Assert that words per second is within 3 standard deviations of the average
# of six golden runs
assert wps > 36954.4 - (3 * 116.825)
print("Peak allocated bytes on cuda:0: {:1d}".format(torch.cuda.memory_stats(0)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:1: {:1d}".format(torch.cuda.memory_stats(1)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:2: {:1d}".format(torch.cuda.memory_stats(2)["allocated_bytes.all.peak"]))
print("Peak allocated bytes on cuda:3: {:1d}".format(torch.cuda.memory_stats(3)["allocated_bytes.all.peak"]))
# Assert that memory usage on each GPU is within 10% of golden run
# Right-hand-side is golden run bytes * 110%
assert torch.cuda.memory_stats(0)["allocated_bytes.all.peak"] < 4061909504 * 1.1
assert torch.cuda.memory_stats(1)["allocated_bytes.all.peak"] < 4050944 * 1.1
assert torch.cuda.memory_stats(2)["allocated_bytes.all.peak"] < 10427392 * 1.1
assert torch.cuda.memory_stats(3)["allocated_bytes.all.peak"] < 2031824896 * 1.1
print("No regression detected")
def generate_balance_weighted(num_devices, num_layers, fraction=0.5):
balance = []
layers_assigned = 0
average_count = num_layers / num_devices
last_layers = int(average_count * fraction)
balance = generate_balance(num_devices - 1, num_layers - last_layers)
balance.append(last_layers)
return balance
def generate_balance(num_devices, num_layers):
balance = []
layers_assigned = 0
for i in range(num_devices):
x = (num_layers - layers_assigned) / (num_devices - i)
if x.is_integer():
balance.append(int(x))
layers_assigned += x
else:
balance.append(math.ceil(x))
layers_assigned += math.ceil(x)
return balance
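# Editor's sketch (not part of the upstream benchmark): with 10 decoder layers on
# 4 devices, generate_balance yields [3, 3, 2, 2], while the weighted variant
# deliberately assigns a smaller share to the last device.
def _example_balance():
    return generate_balance(4, 10), generate_balance_weighted(4, 10, fraction=0.5)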
def make_model_and_data(args, device, new_data: bool = True):
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if new_data:
vocab_size = 10000
model, criterion, optimizer, scaler = make_model(args, device, vocab_size)
lm_dataset = BenchmarkLMDataset()
lm_dataloader = DataLoader(
lm_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0, collate_fn=collate_sentences_lm
)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": lm_dataloader,
"vocab_size": vocab_size,
}
else:
data = get_data(device)
ntokens, train_data, val_data, test_data = data
model, criterion, optimizer, scaler = make_model(args, device, ntokens)
return {
"model": model,
"criterion": criterion,
"optimizer": optimizer,
"data": data,
}
def bench_single_process(args):
num_devices = torch.cuda.device_count() if torch.cuda.is_available() else 1
assert num_devices > 0
init_random_seed(0)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance(min(num_devices, 4), len(model))
p = pipe.Pipe(
model, balance, chunks=args.chunks, pipelined_backward=args.pipelined_backward, checkpoint=args.checkpoint
)
del model
del blob["model"]
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_mp_worker(args, available_workers):
new_data = True
blob = make_model_and_data(args, None, new_data=new_data)
model = blob["model"]
balance = generate_balance_weighted(get_pipeline_parallel_group().size(), len(model), 0.8)
p = pipe.Pipe(
model,
balance,
style=Pipe.AsyncSchedule,
chunks=args.chunks,
worker_map=get_worker_map(),
input_device=torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu"),
pipelined_backward=args.pipelined_backward,
checkpoint=args.checkpoint,
# loss_fn=blob["criterion"],
)
if torch.cuda.is_available():
p = p.cuda()
if args.all_at_once and p.pipeline:
print(f"running all at once")
p.pipeline.all_at_once = True
if new_data:
train(blob["data"], p, blob["criterion"], blob["optimizer"], blob["vocab_size"], args)
else:
ntokens, train_data, val_data, test_data = blob["data"]
benchmark_language_model(train_data, val_data, test_data, p, blob["criterion"], blob["optimizer"], ntokens, args)
def run_worker(rank, world_size, args):
if args.world_size != 0:
world_size = args.world_size
dist_init(rank + args.rank_base, world_size, hostname=args.host)
initialize_model_parallel(1, world_size)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
def bench_multi_process(args, all_at_once=False):
if args.local_world_size != 0:
world_size = args.local_world_size
else:
world_size = min(torch.cuda.device_count(), 2)
mp.spawn(run_worker, args=(world_size, args), nprocs=world_size, join=True)
best_device_map = {
0: "mlx5_0:1",
1: "mlx5_0:1",
2: "mlx5_1:1",
3: "mlx5_1:1",
4: "mlx5_2:1",
5: "mlx5_2:1",
6: "mlx5_3:1",
7: "mlx5_3:1",
}
def bench_mpi(args):
guess_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
os.environ["UCX_NET_DEVICES"] = best_device_map[local_rank]
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10638"
if args.socket_name:
os.environ["GLOO_SOCKET_IFNAME"] = args.socket_name
os.environ["TP_SOCKET_IFNAME"] = args.socket_name
torch.distributed.init_process_group(backend="gloo", rank=guess_rank, world_size=world_size)
os.environ["MASTER_ADDR"] = args.host
os.environ["MASTER_PORT"] = "10639"
init_method = f"tcp://{os.environ['MASTER_ADDR']}:{os.environ['MASTER_PORT']}"
rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
torch.cuda.set_device(local_rank % torch.cuda.device_count())
rpc.init_rpc(
f"Test{rank}",
rank=rank,
world_size=world_size,
backend=rpc.BackendType.PROCESS_GROUP,
rpc_backend_options=rpc.ProcessGroupRpcBackendOptions(rpc_timeout=20, init_method=init_method),
)
backends = {"model_parallel_backend": "nccl", "pipeline_backend": "mpi", "ddp_backend": "nccl"}
if args.ddp_zero:
initialize_model_parallel(1, 4, **backends)
else:
initialize_model_parallel(1, world_size, **backends)
init_random_seed(0)
run_mp_worker(args, world_size)
rpc.shutdown()
torch.distributed.destroy_process_group()
parser = argparse.ArgumentParser(description="benchmark")
parser.add_argument("--local-world-size", "-l", type=int, default=0, help="local world size")
parser.add_argument("--world-size", "-w", type=int, default=0, help="world size")
parser.add_argument("--rank-base", "-r", type=int, help="rank base", default=0)
parser.add_argument("--host", "-o", type=str, default="localhost", help="hostname")
parser.add_argument("--no-mpi", action="store_true", default=False, help="disable mpi")
parser.add_argument("--chunks", type=int, default=1, help="number of microbatches per batch")
parser.add_argument("--batch-size", type=int, default=8, help="size of a batch")
parser.add_argument("--all-at-once", action="store_true", default=False, help="do backward pass on whole batch at once")
parser.add_argument("--max-batch", type=int, default=4, help="Max number of batches")
parser.add_argument("--socket-name", type=str, default=None, help="socket ifname for gloo/tp")
parser.add_argument("--num-decoder-layers", type=int, default=10, help="Number of decoder layers in the model")
parser.add_argument("--ddp-zero", action="store_true", default=False, help="enable ddp")
parser.add_argument(
"--lazy-construction", action="store_true", default=False, help="Number of decoder layers in the model"
)
parser.add_argument(
"--checkpoint", default="never", choices=["always", "except_last", "never"], help="Checkpointing strategy for pipe"
)
parser.add_argument(
"--pipelined-backward", dest="pipelined_backward", action="store_true", help="Pipelined backward pass"
)
parser.add_argument(
"--no-pipelined-backward", dest="pipelined_backward", action="store_false", help="Pipelined backward pass"
)
parser.set_defaults(pipelined_backward=True)
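# --- Editor's hedged usage note (not part of the original benchmark script) ---
# Illustrative invocations built only from the flags defined above; the file name
# "pipe_benchmark.py" is a placeholder, and the MPI path additionally assumes an
# Open MPI launcher plus the NIC mapping in `best_device_map`.
#
#   # single-process run (no MPI):
#   python pipe_benchmark.py --no-mpi --chunks 4 --batch-size 8 --num-decoder-layers 10
#
#   # MPI run across ranks (rank 0 prints the parsed args):
#   mpirun -np 8 python pipe_benchmark.py --host <master-hostname> --chunks 4 --checkpoint except_last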
if __name__ == "__main__":
args = parser.parse_args()
# bench_multi_process(args, all_at_once=True)
if args.no_mpi or "OMPI_COMM_WORLD_RANK" not in os.environ:
print(f"Running benchmark with args: {args}")
bench_single_process(args)
else:
if os.environ["OMPI_COMM_WORLD_RANK"] == "0":
print(f"Running benchmark with args: {args}")
bench_mpi(args)
| [
"torch.distributed.get_world_size",
"torch.cuda.manual_seed",
"torch.multiprocessing.spawn",
"torch.cuda.current_device",
"torch.ones",
"torch.cuda.is_available",
"torch.cuda.memory_stats",
"torch.distributed.rpc.shutdown",
"torch.nn.CrossEntropyLoss",
"torch.distributed.init_process_group",
"torch.manual_seed",
"torch.utils.data.DataLoader",
"torch.distributed.get_rank",
"torch.Tensor",
"torch.zeros",
"torch.cos",
"torch.device",
"torch.distributed.rpc.ProcessGroupRpcBackendOptions",
"torch.cuda.device_count",
"torch.distributed.barrier",
"torch.nn.Dropout",
"torch.sin",
"torch.distributed.destroy_process_group",
"torch.arange",
"torch.no_grad",
"torch.distributed.all_reduce"
] | 1.4.0 | jessijzhao/fairscale | d6a8fc6dadc5d5ab4e3ee3f42f8cd570d70d30ec |
1.4 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict
import torch
from torch.cuda.amp import GradScaler as TorchGradScaler
import torch.distributed as dist
from torch.optim import Optimizer
from .oss import OSS
class GradScaler(TorchGradScaler):
def _unscale_grads_(
self, optimizer: Optimizer, inv_scale: torch.Tensor, found_inf: torch.Tensor, allow_fp16: bool
) -> Dict[torch.device, torch.Tensor]:
return super()._unscale_grads_(optimizer, inv_scale, found_inf, True)
class ShardedGradScaler(TorchGradScaler):
"""
A shard-aware :class:`GradScaler<torch.cuda.amp.GradScaler>`, to be used in conjunction with
:class:`OSS` and :class:`ShardedOptimizer`.
    The interface and use cases are unchanged; more explanation can be found in the corresponding PyTorch
    documentation: https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
"""
def __init__(self) -> None:
super().__init__()
self.display_warning = True
def unscale_(self, optimizer: Optimizer) -> None:
# Could be a mistake, this scaler is supposed to work with ZeroRedundancyOptimizer only
if self.display_warning and not isinstance(optimizer, OSS):
logging.warning(
"ShardedGradScaler is to be used in combination with a sharded optimizer, this could not be checked"
)
self.display_warning = False # Only warn once
# Call the upstream unscale_ method which will only act on this rank's gradients
super().unscale_(optimizer)
# Synchronize the detected inf across the ranks
optimizer_state = self._per_optimizer_states[id(optimizer)]
handles = [dist.all_reduce(v, async_op=True) for v in optimizer_state["found_inf_per_device"].values()]
# Make sure that the calls are done before moving out
_ = list(map(lambda x: x.wait(), handles))
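# --- Editor's hedged usage sketch (not part of the original fairscale source). ---
# Assumes a single CUDA device and a one-process "gloo" group so that the all_reduce in
# unscale_ above has something to run against; OSS is already imported at the top of this file.
if __name__ == "__main__":
    import os

    import torch
    import torch.nn as nn

    os.environ.setdefault("MASTER_ADDR", "localhost")
    os.environ.setdefault("MASTER_PORT", "29500")
    dist.init_process_group(backend="gloo", rank=0, world_size=1)

    model = nn.Linear(8, 2).cuda()
    optimizer = OSS(model.parameters(), optim=torch.optim.SGD, lr=0.1)
    scaler = ShardedGradScaler()

    data = torch.randn(4, 8).cuda()
    target = torch.randint(0, 2, (4,)).cuda()
    with torch.cuda.amp.autocast():
        loss = nn.functional.cross_entropy(model(data), target)

    scaler.scale(loss).backward()
    scaler.unscale_(optimizer)  # all-reduces the per-device found_inf flags (see above)
    scaler.step(optimizer)
    scaler.update()
    dist.destroy_process_group()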
| [
"torch.distributed.all_reduce"
] | 1.4.0 | jessijzhao/fairscale | d6a8fc6dadc5d5ab4e3ee3f42f8cd570d70d30ec |
1.0 | """
Unit tests for RNN decoders.
"""
import unittest
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from texar.torch.hyperparams import HParams
from texar.torch.modules.decoders.decoder_helpers import get_helper
from texar.torch.modules.decoders.rnn_decoders import (
AttentionRNNDecoder, AttentionRNNDecoderOutput, BasicRNNDecoder,
BasicRNNDecoderOutput)
from texar.torch.modules.embedders.embedders import WordEmbedder
from texar.torch.utils.utils import map_structure
class BasicRNNDecoderTest(unittest.TestCase):
r"""Tests :class:`~texar.torch.modules.decoders.rnn_decoders.BasicRNNDecoder`.
"""
def setUp(self):
self._vocab_size = 4
self._max_time = 8
self._batch_size = 16
self._emb_dim = 20
self._inputs = torch.randint(
self._vocab_size, size=(self._batch_size, self._max_time))
embedding = torch.rand(
self._vocab_size, self._emb_dim, dtype=torch.float)
self._embedder = WordEmbedder(init_value=embedding)
self._hparams = HParams(None, BasicRNNDecoder.default_hparams())
def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,
test_mode=False):
hidden_size = decoder.hparams.rnn_cell.kwargs.num_units
self.assertIsInstance(outputs, BasicRNNDecoderOutput)
max_time = (self._max_time if not test_mode
else max(sequence_lengths).item())
self.assertEqual(
outputs.logits.shape,
(self._batch_size, max_time, self._vocab_size))
if not test_mode:
np.testing.assert_array_equal(
sequence_lengths, [max_time] * self._batch_size)
self.assertEqual(final_state[0].shape, (self._batch_size, hidden_size))
def test_decode_train(self):
r"""Tests decoding in training mode.
"""
decoder = BasicRNNDecoder(
token_embedder=self._embedder, input_size=self._emb_dim,
vocab_size=self._vocab_size, hparams=self._hparams)
sequence_length = torch.tensor([self._max_time] * self._batch_size)
# Helper by default HParams
helper_train = decoder.create_helper()
outputs, final_state, sequence_lengths = decoder(
helper=helper_train, inputs=self._inputs,
sequence_length=sequence_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths)
# Helper by decoding strategy
helper_train = decoder.create_helper(decoding_strategy='train_greedy')
outputs, final_state, sequence_lengths = decoder(
helper=helper_train, inputs=self._inputs,
sequence_length=sequence_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths)
# Implicit helper
outputs, final_state, sequence_lengths = decoder(
inputs=self._inputs, sequence_length=sequence_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths)
# Eval helper through forward args
outputs, final_state, sequence_lengths = decoder(
embedding=self._embedder,
start_tokens=torch.tensor([1] * self._batch_size),
end_token=2, infer_mode=True)
self._test_outputs(
decoder, outputs, final_state, sequence_lengths, test_mode=True)
@staticmethod
def _assert_tensor_equal(a: torch.Tensor, b: torch.Tensor) -> bool:
if torch.is_tensor(a):
a = a.detach().numpy()
if torch.is_tensor(b):
b = b.detach().numpy()
if any(np.issubdtype(array.dtype, np.floating) for array in [a, b]):
return np.testing.assert_allclose(a, b, rtol=1e-5, atol=1e-8)
return np.testing.assert_array_equal(a, b)
def test_decode_train_with_torch(self):
r"""Compares decoding results with PyTorch built-in decoder.
"""
decoder = BasicRNNDecoder(
token_embedder=self._embedder, input_size=self._emb_dim,
vocab_size=self._vocab_size, hparams=self._hparams)
input_size = self._emb_dim
hidden_size = decoder.hparams.rnn_cell.kwargs.num_units
num_layers = decoder.hparams.rnn_cell.num_layers
torch_lstm = nn.LSTM(input_size, hidden_size, num_layers,
batch_first=True)
# match parameters
for name in ['weight_ih', 'weight_hh', 'bias_ih', 'bias_hh']:
setattr(torch_lstm, f'{name}_l0',
getattr(decoder._cell._cell, name))
torch_lstm.flatten_parameters()
output_layer = decoder._output_layer
input_lengths = torch.tensor([self._max_time] * self._batch_size)
inputs = torch.randint(
self._vocab_size, size=(self._batch_size, self._max_time))
# decoder outputs
helper_train = decoder.create_helper()
outputs, final_state, sequence_lengths = decoder(
inputs=inputs,
sequence_length=input_lengths,
helper=helper_train)
# torch LSTM outputs
lstm_inputs = F.embedding(inputs, self._embedder.embedding)
torch_outputs, torch_states = torch_lstm(lstm_inputs)
torch_outputs = output_layer(torch_outputs)
torch_sample_id = torch.argmax(torch_outputs, dim=-1)
self.assertEqual(final_state[0].shape,
(self._batch_size, hidden_size))
self._assert_tensor_equal(outputs.logits, torch_outputs)
self._assert_tensor_equal(outputs.sample_id, torch_sample_id)
self._assert_tensor_equal(final_state[0], torch_states[0].squeeze(0))
self._assert_tensor_equal(final_state[1], torch_states[1].squeeze(0))
self._assert_tensor_equal(sequence_lengths, input_lengths)
def test_decode_infer(self):
r"""Tests decoding in inference mode."""
decoder = BasicRNNDecoder(
token_embedder=self._embedder, input_size=self._emb_dim,
vocab_size=self._vocab_size, hparams=self._hparams)
decoder.eval()
start_tokens = torch.tensor([self._vocab_size - 2] * self._batch_size)
helpers = []
for strategy in ['infer_greedy', 'infer_sample']:
helper = decoder.create_helper(
decoding_strategy=strategy,
start_tokens=start_tokens,
end_token=self._vocab_size - 1)
helpers.append(helper)
for klass in ['TopKSampleEmbeddingHelper', 'SoftmaxEmbeddingHelper',
'GumbelSoftmaxEmbeddingHelper']:
helper = get_helper(
klass, start_tokens=start_tokens,
end_token=self._vocab_size - 1,
top_k=self._vocab_size // 2, tau=2.0,
straight_through=True)
helpers.append(helper)
for helper in helpers:
max_length = 100
outputs, final_state, sequence_lengths = decoder(
helper=helper, max_decoding_length=max_length)
self.assertLessEqual(max(sequence_lengths), max_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths,
test_mode=True)
class AttentionRNNDecoderTest(unittest.TestCase):
r"""Tests :class:`~texar.torch.modules.decoders.rnn_decoders.AttentionRNNDecoder`.
"""
def setUp(self):
self._vocab_size = 10
self._max_time = 16
self._batch_size = 8
self._emb_dim = 20
self._attention_dim = 256
self._inputs = torch.randint(
self._vocab_size, size=(self._batch_size, self._max_time))
embedding = torch.rand(
self._vocab_size, self._emb_dim, dtype=torch.float)
self._embedder = WordEmbedder(init_value=embedding)
self._encoder_output = torch.rand(
self._batch_size, self._max_time, 64)
self._test_hparams = {} # (cell_type, is_multi) -> hparams
for cell_type in ["RNNCell", "LSTMCell", "GRUCell"]:
hparams = {
"rnn_cell": {
'type': cell_type,
'kwargs': {
'num_units': 256,
},
},
"attention": {
"kwargs": {
"num_units": self._attention_dim
},
}
}
self._test_hparams[(cell_type, False)] = HParams(
hparams, AttentionRNNDecoder.default_hparams())
hparams = {
"rnn_cell": {
'type': 'LSTMCell',
'kwargs': {
'num_units': 256,
},
'num_layers': 3,
},
"attention": {
"kwargs": {
"num_units": self._attention_dim
},
}
}
self._test_hparams[("LSTMCell", True)] = HParams(
hparams, AttentionRNNDecoder.default_hparams())
def _test_outputs(self, decoder, outputs, final_state, sequence_lengths,
test_mode=False):
hidden_size = decoder.hparams.rnn_cell.kwargs.num_units
cell_type = decoder.hparams.rnn_cell.type
is_multi = decoder.hparams.rnn_cell.num_layers > 1
self.assertIsInstance(outputs, AttentionRNNDecoderOutput)
max_time = (self._max_time if not test_mode
else max(sequence_lengths).item())
self.assertEqual(
outputs.logits.shape,
(self._batch_size, max_time, self._vocab_size))
if not test_mode:
np.testing.assert_array_equal(
sequence_lengths, [max_time] * self._batch_size)
map_structure(
lambda t: self.assertEqual(
t.size(), (self._batch_size, hidden_size)),
final_state.cell_state)
state = final_state.cell_state
if is_multi:
self.assertIsInstance(state, list)
state = state[0]
if cell_type == "LSTMCell":
self.assertIsInstance(state, tuple)
state = state[0]
self.assertIsInstance(state, torch.Tensor)
def test_decode_infer(self):
r"""Tests decoding in inference mode.
"""
seq_length = np.random.randint(
self._max_time, size=[self._batch_size]) + 1
encoder_values_length = torch.tensor(seq_length)
for (cell_type, is_multi), hparams in self._test_hparams.items():
decoder = AttentionRNNDecoder(
encoder_output_size=64,
token_embedder=self._embedder,
vocab_size=self._vocab_size,
input_size=self._emb_dim,
hparams=hparams)
decoder.eval()
helper_infer = decoder.create_helper(
start_tokens=torch.tensor([1] * self._batch_size), end_token=2)
outputs, final_state, sequence_lengths = decoder(
memory=self._encoder_output,
memory_sequence_length=encoder_values_length,
helper=helper_infer)
self._test_outputs(decoder, outputs, final_state, sequence_lengths,
test_mode=True)
def test_decode_train(self):
r"""Tests decoding in training mode.
"""
seq_length = np.random.randint(
self._max_time, size=[self._batch_size]) + 1
encoder_values_length = torch.tensor(seq_length)
for (cell_type, is_multi), hparams in self._test_hparams.items():
decoder = AttentionRNNDecoder(
encoder_output_size=64,
token_embedder=self._embedder,
vocab_size=self._vocab_size,
input_size=self._emb_dim,
hparams=hparams)
sequence_length = torch.tensor([self._max_time] * self._batch_size)
helper_train = decoder.create_helper()
outputs, final_state, sequence_lengths = decoder(
memory=self._encoder_output,
memory_sequence_length=encoder_values_length,
helper=helper_train,
inputs=self._inputs,
sequence_length=sequence_length)
self._test_outputs(decoder, outputs, final_state, sequence_lengths)
if __name__ == "__main__":
unittest.main()
| [
"torch.rand",
"torch.nn.LSTM",
"torch.is_tensor",
"torch.nn.functional.embedding",
"torch.randint",
"torch.tensor",
"torch.argmax"
] | 1.0.0 | wwt17/texar-pytorch | 9fb3ae8f7b541da5c808357033a93fba1817bfbd |
1.5 | import os
import torch
from pathlib import Path
from nn_interpretability.model.definition.am_mnist_classifier import AMCNN
from nn_interpretability.model.definition.mc_dropout_cnn import CNN_Dropout
from nn_interpretability.model.definition.general_mnist_cnn import GeneralCNN
from nn_interpretability.model.definition.mnist_generator import MNISTGenerator
from nn_interpretability.model.definition.mnist_discriminator import MNISTDiscriminator
from nn_interpretability.model.definition.cam_mnist_classifier import CAMMNISTClassifier
from nn_interpretability.model.definition.pretrained_dc_generator import PretrainedDCGANGenerator
from nn_interpretability.model.definition.cam_mnist_classifier_2 import CAMMNISTExtendedClassifier
class ModelRepository:
MODELS_PATH = str(Path(__file__).parent.parent.parent.joinpath('models')) + "/"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@staticmethod
def get_general_mnist_cnn(path: str = None):
model = GeneralCNN()
if path is not None:
if os.path.exists(ModelRepository.MODELS_PATH + path):
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_cnn_dropout(path: str = None):
model = CNN_Dropout()
if path is not None:
if os.path.exists(ModelRepository.MODELS_PATH + path):
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_cam_classifier(path: str = None):
model = CAMMNISTClassifier()
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_cam_extended_classifier(path: str = None):
model = CAMMNISTExtendedClassifier()
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_am_classifier(path: str = None):
model = AMCNN()
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_pretrained_dcgan_generator():
"""
Source of the pretrained model is:
https://github.com/csinva/gan-vae-pretrained-pytorch
:return:
"""
path = 'pretrained_dcgan_generator.pth'
model = PretrainedDCGANGenerator()
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_mnist_generator(latent_dim: int = 128, path: str = None):
model = MNISTGenerator(latent_dim=latent_dim)
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def get_mnist_discriminator(path: str = None):
model = MNISTDiscriminator()
if path is not None:
model = ModelRepository._load(model, path)
return model.to(ModelRepository.device)
@staticmethod
def save(model, model_name):
torch.save(model.state_dict(), ModelRepository.MODELS_PATH + model_name)
return model
@staticmethod
def _load(model, model_name):
model.load_state_dict(torch.load(ModelRepository.MODELS_PATH + model_name, map_location=ModelRepository.device))
return model.to(ModelRepository.device)
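# --- Editor's hedged usage sketch (not part of the original repository code). ---
# Round-trips an untrained model through ModelRepository.save and the matching getter;
# assumes the package's models/ directory (MODELS_PATH) exists, and the file name below
# is illustrative only.
if __name__ == "__main__":
    model = ModelRepository.get_general_mnist_cnn()                 # fresh GeneralCNN on CPU/GPU
    ModelRepository.save(model, "general_mnist_cnn_demo.pt")        # writes the state_dict to MODELS_PATH
    restored = ModelRepository.get_general_mnist_cnn("general_mnist_cnn_demo.pt")
    print(type(restored).__name__)                                  # -> "GeneralCNN"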
| [
"torch.cuda.is_available",
"torch.load"
] | 1.5.0 | miquelmn/nn_interpretability | 2b5d2b4102016189743e09f1f3a56f2ecddfde98 |
1.7 | # coding: utf-8
# Copyright 2020 Tarkan Temizoz
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from Models.linearnet import LinearNet
class Optimization:
""" A helper class to train, test and diagnose Cost-sensitive Logistic Regression
Attributes:
model: CSLR model.
optimizer: Optimizer of the network.
train_return: List of train returns.
val_return: List of validation returns.
validation: Whether there is validation data.
batch_size: Batch-size of the network.
n_epochs: Total number of epochs.
        n_steps: Interval, in epochs, at which training progress is reported.
"""
def __init__(self, model, optimizer, config):
"""Initialises CLSR.
Args:
model: CSLR model.
optimizer: Optimizer of the network.
config: Configuration of the network.
"""
self.model = model
self.optimizer = optimizer
self.train_return = []
self.val_return = []
self.validation = False
self.batch_size = config.get("batch_size",32)
self.n_epochs = config.get("n_epochs", 1000)
self.n_steps = config.get("n_steps", self.n_epochs)
@staticmethod
def batch(iterable, n):
"""Creates batches."""
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
def train(self, x_train, r_train, x_val=None, r_val=None):
"""Applies simple feed-forward network to an input.
Args:
x_train: train features
r_train: train returns
x_val: validation features
r_val: validation returns
"""
if x_val is not None or r_val is not None:
self.validation = True
start_time = time.time()
for epoch in range(self.n_epochs):
x_shuff, r_shuff = shuffle(x_train, r_train)
self.model.train()
for j in self.batch(range(0, len(x_shuff)),self.batch_size):
if len(j) < 2:
break
x_batch = x_shuff[j]
r_batch = r_shuff[j]
self.optimizer.zero_grad()
outputs, _, _ = self.model(x_batch)
loss = -torch.mul(outputs, r_batch).sum()
loss.backward()
self.optimizer.step()
returns_train, _, _ = self.evaluate(x_train, r_train)
self.train_return.append(returns_train)
if self.validation is True:
returns_val, _, _ = self.evaluate(x_val, r_val)
self.val_return.append(returns_val)
if ((epoch+1) % self.n_steps == 0):
elapsed = time.time() - start_time
print(
("Epoch %d Train Return: %.3f.") % (epoch + 1, self.train_return[-1]),
((" Validation Return: %.3f. Elapsed time: %.3fs.")
% (self.val_return[-1], elapsed)
if self.validation is True else
" Elapsed time: %.3fs."
% elapsed)
)
start_time = time.time()
def evaluate(self, x_test, r_test):
"""Evaluates simple feed-forward network to an input.
Args:
x_test: features of the evaluated data
r_test: returns of the evaluated data
Returns:
Triple of Tensors for: (Total returns, decision variables, probabilities)
"""
with torch.no_grad():
outputs, probs, _ = self.model(x_test)
returns = torch.mul(outputs, r_test).sum()
return returns, outputs, probs
def plot_return(self):
"""Draws a plot, Trains Returns vs Test Returns"""
plt.plot(self.train_return, label="Train Return")
plt.plot(self.val_return, label="Test Return")
plt.legend()
plt.title("Returns")
| [
"torch.mul",
"torch.no_grad"
] | 1.7.1 | tarkantemizoz/Cost-Sensitive-Learning | 083f8dfd2950b7e3874df34bf61c2ca1e4a91fbb |
1.5 | from model import DeepJIT
import torch
from tqdm import tqdm
from utils import mini_batches_train, save
import torch.nn as nn
import os, datetime
def train_model(data, params):
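    # --- Editor's note, derived from the body below (not part of the original file) ---
    # `data` is a 5-tuple: (padded commit messages, padded code changes, labels,
    # message dictionary, code dictionary).
    # `params` must at least provide: no_cuda, filter_sizes (comma-separated string),
    # batch_size, num_epochs, l2_reg_lambda and save_dir, plus whatever DeepJIT itself expects.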
data_pad_msg, data_pad_code, data_labels, dict_msg, dict_code = data
# set up parameters
params.cuda = (not params.no_cuda) and torch.cuda.is_available()
del params.no_cuda
params.filter_sizes = [int(k) for k in params.filter_sizes.split(',')]
# params.save_dir = os.path.join(params.save_dir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
params.vocab_msg, params.vocab_code = len(dict_msg), len(dict_code)
if len(data_labels.shape) == 1:
params.class_num = 1
else:
params.class_num = data_labels.shape[1]
params.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# create and train the defect model
model = DeepJIT(args=params)
if torch.cuda.is_available():
model = model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=params.l2_reg_lambda)
criterion = nn.BCELoss()
for epoch in range(1, params.num_epochs + 1):
total_loss = 0
# building batches for training model
batches = mini_batches_train(X_msg=data_pad_msg, X_code=data_pad_code, Y=data_labels, mini_batch_size=params.batch_size)
for i, (batch) in enumerate(tqdm(batches)):
pad_msg, pad_code, labels = batch
if torch.cuda.is_available():
pad_msg, pad_code, labels = torch.tensor(pad_msg).cuda(), torch.tensor(
pad_code).cuda(), torch.cuda.FloatTensor(labels)
else:
pad_msg, pad_code, labels = torch.tensor(pad_msg).long(), torch.tensor(pad_code).long(), torch.tensor(
labels).float()
optimizer.zero_grad()
predict = model.forward(pad_msg, pad_code)
loss = criterion(predict, labels)
total_loss += loss
loss.backward()
optimizer.step()
print('Epoch %i / %i -- Total loss: %f' % (epoch, params.num_epochs, total_loss))
save(model, params.save_dir, 'epoch', epoch)
| [
"torch.cuda.is_available",
"torch.tensor",
"torch.nn.BCELoss",
"torch.cuda.FloatTensor"
] | 1.5.0 | ZZR0/ISSTA21-JIT-DP | c2916f7c3b1d235ff2858220886d6a7da068bf8a |
1.5 | import math
import random
import time
import argparse
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc
import pandas as pd
import numpy as np
import torch.nn as nn
import torch
from LR import LR
from DBN import DBN
parser = argparse.ArgumentParser()
parser.add_argument('-project', type=str,
default='qt')
parser.add_argument('-data', type=str,
default='k')
parser.add_argument('-algorithm', type=str,
default='lr')
parser.add_argument('-drop', type=str,
default='')
parser.add_argument('-only', nargs='+',
default=[])
def evaluation_metrics(y_true, y_pred):
fpr, tpr, thresholds = roc_curve(y_true=y_true, y_score=y_pred, pos_label=1)
auc_ = auc(fpr, tpr)
y_pred = [1 if p >= 0.5 else 0 for p in y_pred]
acc = accuracy_score(y_true=y_true, y_pred=y_pred)
prc = precision_score(y_true=y_true, y_pred=y_pred)
rc = recall_score(y_true=y_true, y_pred=y_pred)
# f1 = 2 * prc * rc / (prc + rc)
f1 = 0
return acc, prc, rc, f1, auc_
def replace_value_dataframe(df):
df = df.replace({True: 1, False: 0})
df = df.fillna(df.mean())
if args.drop:
df = df.drop(columns=[args.drop])
elif args.only:
df = df[['Unnamed: 0','_id','date','bug','__'] + args.only]
return df.values
def get_features(data):
# return the features of yasu data
return data[:, 5:]
def get_ids(data):
# return the labels of yasu data
return data[:, 1:2].flatten().tolist()
def get_label(data):
data = data[:, 3:4].flatten().tolist()
data = [1 if int(d) > 0 else 0 for d in data]
return data
def load_df_yasu_data(path_data):
data = pd.read_csv(path_data)
data = replace_value_dataframe(df=data)
ids, labels, features = get_ids(data=data), get_label(data=data), get_features(data=data)
indexes = list()
cnt_noexits = 0
for i in range(0, len(ids)):
try:
indexes.append(i)
except FileNotFoundError:
print('File commit id no exits', ids[i], cnt_noexits)
cnt_noexits += 1
ids = [ids[i] for i in indexes]
labels = [labels[i] for i in indexes]
features = features[indexes]
return (ids, np.array(labels), features)
def load_yasu_data(args):
train_path_data = 'data/{}/{}_train.csv'.format(args.project, args.data)
test_path_data = 'data/{}/{}_test.csv'.format(args.project, args.data)
train, test = load_df_yasu_data(train_path_data), load_df_yasu_data(test_path_data)
return train, test
def train_and_evl(data, label, args):
size = int(label.shape[0]*0.2)
auc_ = []
for i in range(5):
idx = size * i
X_e = data[idx:idx+size]
y_e = label[idx:idx+size]
X_t = np.vstack((data[:idx], data[idx+size:]))
y_t = np.hstack((label[:idx], label[idx+size:]))
model = LogisticRegression(max_iter=7000).fit(X_t, y_t)
y_pred = model.predict_proba(X_e)[:, 1]
fpr, tpr, thresholds = roc_curve(y_true=y_e, y_score=y_pred, pos_label=1)
auc_.append(auc(fpr, tpr))
return np.mean(auc_)
def mini_batches_update(X, Y, mini_batch_size=64, seed=0):
m = X.shape[0] # number of training examples
mini_batches = list()
np.random.seed(seed)
# Step 1: No shuffle (X, Y)
shuffled_X, shuffled_Y = X, Y
Y = Y.tolist()
Y_pos = [i for i in range(len(Y)) if Y[i] == 1]
Y_neg = [i for i in range(len(Y)) if Y[i] == 0]
# Step 2: Randomly pick mini_batch_size / 2 from each of positive and negative labels
num_complete_minibatches = int(math.floor(m / float(mini_batch_size))) + 1
for k in range(0, num_complete_minibatches):
indexes = sorted(
random.sample(Y_pos, int(mini_batch_size / 2)) + random.sample(Y_neg, int(mini_batch_size / 2)))
mini_batch_X, mini_batch_Y = shuffled_X[indexes], shuffled_Y[indexes]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def mini_batches(X, Y, mini_batch_size=64, seed=0):
m = X.shape[0] # number of training examples
mini_batches = list()
np.random.seed(seed)
# Step 1: No shuffle (X, Y)
shuffled_X, shuffled_Y = X, Y
# Step 2: Partition (X, Y). Minus the end case.
# number of mini batches of size mini_batch_size in your partitioning
num_complete_minibatches = int(math.floor(m / float(mini_batch_size)))
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
if len(Y.shape) == 1:
mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size]
else:
mini_batch_Y = shuffled_Y[k * mini_batch_size: k * mini_batch_size + mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size: m, :]
if len(Y.shape) == 1:
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m]
else:
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
def DBN_JIT(train_features, train_labels, test_features, test_labels, hidden_units=[20, 12, 12], num_epochs_LR=200):
# training DBN model
#################################################################################################
starttime = time.time()
dbn_model = DBN(visible_units=train_features.shape[1],
hidden_units=hidden_units,
use_gpu=False)
dbn_model.train_static(train_features, train_labels, num_epochs=10)
# Finishing the training DBN model
# print('---------------------Finishing the training DBN model---------------------')
# using DBN model to construct features
DBN_train_features, _ = dbn_model.forward(train_features)
DBN_test_features, _ = dbn_model.forward(test_features)
DBN_train_features = DBN_train_features.numpy()
DBN_test_features = DBN_test_features.numpy()
train_features = np.hstack((train_features, DBN_train_features))
test_features = np.hstack((test_features, DBN_test_features))
if len(train_labels.shape) == 1:
num_classes = 1
else:
num_classes = train_labels.shape[1]
# lr_model = LR(input_size=hidden_units, num_classes=num_classes)
lr_model = LR(input_size=train_features.shape[1], num_classes=num_classes)
optimizer = torch.optim.Adam(lr_model.parameters(), lr=0.00001)
steps = 0
batches_test = mini_batches(X=test_features, Y=test_labels)
for epoch in range(1, num_epochs_LR + 1):
# building batches for training model
batches_train = mini_batches_update(X=train_features, Y=train_labels)
for batch in batches_train:
x_batch, y_batch = batch
x_batch, y_batch = torch.tensor(x_batch).float(), torch.tensor(y_batch).float()
optimizer.zero_grad()
predict = lr_model.forward(x_batch)
loss = nn.BCELoss()
loss = loss(predict, y_batch)
loss.backward()
optimizer.step()
# steps += 1
# if steps % 100 == 0:
# print('\rEpoch: {} step: {} - loss: {:.6f}'.format(epoch, steps, loss.item()))
endtime = time.time()
dtime = endtime - starttime
print("Train Time: %.8s s" % dtime) #显示到微秒
starttime = time.time()
y_pred, lables = lr_model.predict(data=batches_test)
endtime = time.time()
dtime = endtime - starttime
print("Eval Time: %.8s s" % dtime) #显示到微秒
return y_pred
def baseline_algorithm(train, test, algorithm, only=False):
_, y_train, X_train = train
_, y_test, X_test = test
X_train, X_test = preprocessing.scale(X_train), preprocessing.scale(X_test)
acc, prc, rc, f1, auc_ = 0, 0, 0, 0, 0
if algorithm == 'lr':
starttime = time.time()
model = LogisticRegression(max_iter=7000).fit(X_train, y_train)
endtime = time.time()
dtime = endtime - starttime
print("Train Time: %.8s s" % dtime) #显示到微秒
starttime = time.time()
y_pred = model.predict_proba(X_test)[:, 1]
endtime = time.time()
dtime = endtime - starttime
print("Eval Time: %.8s s" % dtime) #显示到微秒
acc, prc, rc, f1, auc_ = evaluation_metrics(y_true=y_test, y_pred=y_pred)
if only and not "cross" in args.data:
auc_ = train_and_evl(X_train, y_train, args)
print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' % (acc, prc, rc, f1, auc_))
elif algorithm =='dbn':
y_pred = DBN_JIT(X_train, y_train, X_test, y_test)
acc, prc, rc, f1, auc_ = evaluation_metrics(y_true=y_test, y_pred=y_pred)
acc, prc, rc, f1 = 0, 0, 0, 0
print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' % (acc, prc, rc, f1, auc_))
else:
print('You need to give the correct algorithm name')
return
return y_test, y_pred
def save_result(labels, predicts, path):
results = []
for lable, predict in zip(labels, predicts):
results.append('{}\t{}\n'.format(lable, predict))
with open(path, 'w', encoding='utf-8') as f:
f.writelines(results)
if __name__ == '__main__':
args = parser.parse_args()
save_path = 'result/{}/{}_{}_{}.result'.format(args.project, args.project, args.algorithm, args.data.replace("/","_"))
only = True if args.only else False
if args.algorithm == 'la':
args.algorithm = 'lr'
args.only = ['la']
if "all" in args.only:
args.only.remove("all")
train, test = load_yasu_data(args)
labels, predicts = baseline_algorithm(train=train, test=test, algorithm=args.algorithm, only=only)
if not only:
save_result(labels, predicts, save_path) | [
"torch.tensor",
"torch.nn.BCELoss"
] | 1.5.0 | ZZR0/ISSTA21-JIT-DP | c2916f7c3b1d235ff2858220886d6a7da068bf8a |
1.5 | import torch
import torch.nn as nn
class LR(nn.Module):
def __init__(self, input_size, num_classes):
super(LR, self).__init__()
# self.fc = nn.Linear(input_size, 128)
# self.fc1 = nn.Linear(128, 256)
# self.fc2 = nn.Linear(256, 64)
# self.fc3 = nn.Linear(64, num_classes)
self.fc = nn.Linear(input_size, num_classes)
self.sigmoid = nn.Sigmoid()
def forward(self, input_size):
# out = self.fc(input_size)
# out = self.fc1(out)
# out = self.fc2(out)
# out = self.fc3(out)
out = self.fc(input_size)
out = self.sigmoid(out).squeeze(1)
return out
def predict(self, data):
with torch.no_grad():
self.eval() # since we use drop out
all_predict, all_label = list(), list()
for batch in data:
x, y = batch
x = torch.tensor(x).float()
predict = self.forward(x).detach().numpy().tolist()
all_predict += predict
all_label += y.tolist()
# acc, prc, rc, f1, auc_ = evaluation_metrics(y_pred=all_predict, y_true=all_label)
# print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' % (acc, prc, rc, f1, auc_))
return all_predict, all_label
| [
"torch.nn.Linear",
"torch.no_grad",
"torch.tensor",
"torch.nn.Sigmoid"
] | 1.5.0 | ZZR0/ISSTA21-JIT-DP | c2916f7c3b1d235ff2858220886d6a7da068bf8a |
1.3 | from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from pyro.distributions.torch import TransformedDistribution
class ReflectedDistribution(TransformedDistribution):
"""
Equivalent to ``TransformedDistribution(base_dist, AbsTransform())``,
but additionally supports :meth:`log_prob` .
:param ~torch.distributions.Distribution base_dist: The distribution to
reflect.
"""
support = constraints.positive
def __init__(self, base_dist, validate_args=None):
if base_dist.event_shape:
raise ValueError("Only univariate distributions can be reflected.")
super().__init__(base_dist, AbsTransform(), validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(type(self), _instance)
return super().expand(batch_shape, _instance=new)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
dim = max(len(self.batch_shape), value.dim())
plus_minus = value.new_tensor([1., -1.]).reshape((2,) + (1,) * dim)
return self.base_dist.log_prob(plus_minus * value).logsumexp(0)
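# --- Editor's hedged example (not part of the original pyro source). ---
# For a symmetric base such as Normal(0, 1), reflecting folds the mass onto the positive
# half-line, so log_prob(x) = log(2) + Normal(0, 1).log_prob(x) for x > 0.
if __name__ == "__main__":
    import math

    import torch
    from pyro.distributions import Normal

    base = Normal(0.0, 1.0)
    folded = ReflectedDistribution(base)
    x = torch.tensor([0.5, 1.0, 2.0])
    assert torch.allclose(folded.log_prob(x), base.log_prob(x) + math.log(2.0))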
| [
"torch.distributions.transforms.AbsTransform"
] | 1.3.0 | ajrcampbell/pyro | 37680e6d08f20cda95729427143f17875484b21d |
1.2 | import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
from convlab2.policy.larl.multiwoz.latent_dialog.enc2dec.base_modules import BaseRNN
from convlab2.policy.larl.multiwoz.latent_dialog.utils import cast_type, LONG, FLOAT
from convlab2.policy.larl.multiwoz.latent_dialog.corpora import DECODING_MASKED_TOKENS, EOS
TEACH_FORCE = 'teacher_forcing'
TEACH_GEN = 'teacher_gen'
GEN = 'gen'
GEN_VALID = 'gen_valid'
class Attention(nn.Module):
def __init__(self, dec_cell_size, ctx_cell_size, attn_mode, project):
super(Attention, self).__init__()
self.dec_cell_size = dec_cell_size
self.ctx_cell_size = ctx_cell_size
self.attn_mode = attn_mode
if project:
self.linear_out = nn.Linear(
dec_cell_size+ctx_cell_size, dec_cell_size)
else:
self.linear_out = None
if attn_mode == 'general':
self.dec_w = nn.Linear(dec_cell_size, ctx_cell_size)
elif attn_mode == 'cat':
self.dec_w = nn.Linear(dec_cell_size, dec_cell_size)
self.attn_w = nn.Linear(ctx_cell_size, dec_cell_size)
self.query_w = nn.Linear(dec_cell_size, 1)
def forward(self, output, context):
# output: (batch_size, output_seq_len, dec_cell_size)
# context: (batch_size, max_ctx_len, ctx_cell_size)
batch_size = output.size(0)
max_ctx_len = context.size(1)
if self.attn_mode == 'dot':
# (batch_size, output_seq_len, max_ctx_len)
attn = th.bmm(output, context.transpose(1, 2))
elif self.attn_mode == 'general':
# (batch_size, output_seq_len, ctx_cell_size)
mapped_output = self.dec_w(output)
# (batch_size, output_seq_len, max_ctx_len)
attn = th.bmm(mapped_output, context.transpose(1, 2))
elif self.attn_mode == 'cat':
# (batch_size, output_seq_len, dec_cell_size)
mapped_output = self.dec_w(output)
# (batch_size, max_ctx_len, dec_cell_size)
mapped_attn = self.attn_w(context)
# (batch_size, output_seq_len, max_ctx_len, dec_cell_size)
tiled_output = mapped_output.unsqueeze(
2).repeat(1, 1, max_ctx_len, 1)
# (batch_size, 1, max_ctx_len, dec_cell_size)
tiled_attn = mapped_attn.unsqueeze(1)
# (batch_size, output_seq_len, max_ctx_len, dec_cell_size)
fc1 = F.tanh(tiled_output+tiled_attn)
# (batch_size, otuput_seq_len, max_ctx_len)
attn = self.query_w(fc1).squeeze(-1)
else:
raise ValueError('Unknown attention mode')
# TODO mask
# if self.mask is not None:
# (batch_size, output_seq_len, max_ctx_len)
attn = F.softmax(attn.view(-1, max_ctx_len),
dim=1).view(batch_size, -1, max_ctx_len)
# (batch_size, output_seq_len, ctx_cell_size)
mix = th.bmm(attn, context)
# (batch_size, output_seq_len, dec_cell_size+ctx_cell_size)
combined = th.cat((mix, output), dim=2)
if self.linear_out is None:
return combined, attn
else:
output = F.tanh(
self.linear_out(combined.view(-1, self.dec_cell_size+self.ctx_cell_size))).view(
batch_size, -1, self.dec_cell_size) # (batch_size, output_seq_len, dec_cell_size)
return output, attn
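# --- Editor's note (illustrative shapes, not part of the original file) ---
# With dec_cell_size = ctx_cell_size = 512, output of shape (2, 3, 512) and context of
# shape (2, 7, 512):
#   * 'dot' and 'general' score decoder/context pairs with a batched matmul, giving
#     attn of shape (2, 3, 7);
#   * 'cat' scores every (decoder step, context step) pair with a small MLP instead;
#   * with project=True the returned output keeps shape (2, 3, 512); otherwise the
#     concatenated (2, 3, 1024) tensor is returned together with attn.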
class DecoderRNN(BaseRNN):
def __init__(self, input_dropout_p, rnn_cell, input_size, hidden_size, num_layers, output_dropout_p,
bidirectional, vocab_size, use_attn, ctx_cell_size, attn_mode, sys_id, eos_id, use_gpu,
max_dec_len, embedding=None):
super(DecoderRNN, self).__init__(input_dropout_p=input_dropout_p,
rnn_cell=rnn_cell,
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
output_dropout_p=output_dropout_p,
bidirectional=bidirectional)
# TODO embedding is None or not
if embedding is None:
self.embedding = nn.Embedding(vocab_size, input_size)
else:
self.embedding = embedding
# share parameters between encoder and decoder
# self.rnn = ctx_encoder.rnn
# self.FC = nn.Linear(input_size, utt_encoder.output_size)
self.use_attn = use_attn
if self.use_attn:
self.attention = Attention(dec_cell_size=hidden_size,
ctx_cell_size=ctx_cell_size,
attn_mode=attn_mode,
project=True)
self.dec_cell_size = hidden_size
self.output_size = vocab_size
self.project = nn.Linear(self.dec_cell_size, self.output_size)
self.log_softmax = F.log_softmax
self.sys_id = sys_id
self.eos_id = eos_id
self.use_gpu = use_gpu
self.max_dec_len = max_dec_len
def forward(self, batch_size, dec_inputs, dec_init_state, attn_context, mode, gen_type, beam_size, goal_hid=None):
# dec_inputs: (batch_size, response_size-1)
# attn_context: (batch_size, max_ctx_len, ctx_cell_size)
# goal_hid: (batch_size, goal_nhid)
ret_dict = dict()
if self.use_attn:
ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list()
if mode == GEN:
dec_inputs = None
if gen_type != 'beam':
beam_size = 1
if dec_inputs is not None:
decoder_input = dec_inputs
else:
# prepare the BOS inputs
with th.no_grad():
bos_var = Variable(th.LongTensor([self.sys_id]))
bos_var = cast_type(bos_var, LONG, self.use_gpu)
decoder_input = bos_var.expand(
batch_size*beam_size, 1) # (batch_size, 1)
if mode == GEN and gen_type == 'beam':
# TODO if beam search, repeat the initial states of the RNN
pass
else:
decoder_hidden_state = dec_init_state
# list of logprob | max_dec_len*(batch_size, 1, vocab_size)
prob_outputs = []
symbol_outputs = [] # list of word ids | max_dec_len*(batch_size, 1)
# back_pointers = []
# lengths = blabla...
def decode(step, cum_sum, step_output, step_attn):
prob_outputs.append(step_output)
step_output_slice = step_output.squeeze(
1) # (batch_size, vocab_size)
if self.use_attn:
ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn)
if gen_type == 'greedy':
_, symbols = step_output_slice.topk(1) # (batch_size, 1)
elif gen_type == 'sample':
# TODO FIXME
# symbols = self.gumbel_max(step_output_slice)
pass
elif gen_type == 'beam':
# TODO
pass
else:
raise ValueError('Unsupported decoding mode')
symbol_outputs.append(symbols)
return cum_sum, symbols
if mode == TEACH_FORCE:
prob_outputs, decoder_hidden_state, attn = self.forward_step(
input_var=decoder_input, hidden_state=decoder_hidden_state, encoder_outputs=attn_context, goal_hid=goal_hid)
else:
# do free running here
cum_sum = None
for step in range(self.max_dec_len):
# Input:
# decoder_input: (batch_size, 1)
# decoder_hidden_state: tuple: (h, c)
# attn_context: (batch_size, max_ctx_len, ctx_cell_size)
# goal_hid: (batch_size, goal_nhid)
# Output:
# decoder_output: (batch_size, 1, vocab_size)
# decoder_hidden_state: tuple: (h, c)
# step_attn: (batch_size, 1, max_ctx_len)
decoder_output, decoder_hidden_state, step_attn = self.forward_step(
decoder_input, decoder_hidden_state, attn_context, goal_hid=goal_hid)
cum_sum, symbols = decode(
step, cum_sum, decoder_output, step_attn)
decoder_input = symbols
# (batch_size, max_dec_len, vocab_size)
prob_outputs = th.cat(prob_outputs, dim=1)
# back tracking to recover the 1-best in beam search
# if gen_type == 'beam':
ret_dict[DecoderRNN.KEY_SEQUENCE] = symbol_outputs
# prob_outputs: (batch_size, max_dec_len, vocab_size)
# decoder_hidden_state: tuple: (h, c)
# ret_dict[DecoderRNN.KEY_ATTN_SCORE]: max_dec_len*(batch_size, 1, max_ctx_len)
# ret_dict[DecoderRNN.KEY_SEQUENCE]: max_dec_len*(batch_size, 1)
return prob_outputs, decoder_hidden_state, ret_dict
def forward_step(self, input_var, hidden_state, encoder_outputs, goal_hid):
# input_var: (batch_size, response_size-1 i.e. output_seq_len)
# hidden_state: tuple: (h, c)
# encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)
# goal_hid: (batch_size, goal_nhid)
batch_size, output_seq_len = input_var.size()
# (batch_size, output_seq_len, embedding_dim)
embedded = self.embedding(input_var)
# add goals
if goal_hid is not None:
# (batch_size, 1, goal_nhid)
goal_hid = goal_hid.view(goal_hid.size(0), 1, goal_hid.size(1))
# (batch_size, output_seq_len, goal_nhid)
goal_rep = goal_hid.repeat(1, output_seq_len, 1)
# (batch_size, output_seq_len, embedding_dim+goal_nhid)
embedded = th.cat([embedded, goal_rep], dim=2)
embedded = self.input_dropout(embedded)
# ############
# embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)
# output: (batch_size, output_seq_len, dec_cell_size)
# hidden: tuple: (h, c)
output, hidden_s = self.rnn(embedded, hidden_state)
attn = None
if self.use_attn:
# output: (batch_size, output_seq_len, dec_cell_size)
# encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)
# attn: (batch_size, output_seq_len, max_ctx_len)
output, attn = self.attention(output, encoder_outputs)
# (batch_size*output_seq_len, vocab_size)
logits = self.project(output.contiguous().view(-1, self.dec_cell_size))
prediction = self.log_softmax(logits, dim=logits.dim(
)-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)
return prediction, hidden_s, attn
# special for rl
def _step(self, input_var, hidden_state, encoder_outputs, goal_hid):
# input_var: (1, 1)
# hidden_state: tuple: (h, c)
# encoder_outputs: (1, max_dlg_len, dlg_cell_size)
# goal_hid: (1, goal_nhid)
batch_size, output_seq_len = input_var.size()
embedded = self.embedding(input_var) # (1, 1, embedding_dim)
if goal_hid is not None:
goal_hid = goal_hid.view(goal_hid.size(
0), 1, goal_hid.size(1)) # (1, 1, goal_nhid)
goal_rep = goal_hid.repeat(
1, output_seq_len, 1) # (1, 1, goal_nhid)
# (1, 1, embedding_dim+goal_nhid)
embedded = th.cat([embedded, goal_rep], dim=2)
embedded = self.input_dropout(embedded)
# ############
# embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)
# output: (1, 1, dec_cell_size)
# hidden: tuple: (h, c)
output, hidden_s = self.rnn(embedded, hidden_state)
attn = None
if self.use_attn:
# output: (1, 1, dec_cell_size)
# encoder_outputs: (1, max_dlg_len, dlg_cell_size)
# attn: (1, 1, max_dlg_len)
output, attn = self.attention(output, encoder_outputs)
# (1*1, vocab_size)
logits = self.project(output.view(-1, self.dec_cell_size))
prediction = logits.view(
batch_size, output_seq_len, -1) # (1, 1, vocab_size)
# prediction = self.log_softmax(logits, dim=logits.dim()-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)
return prediction, hidden_s
# special for rl
def write(self, input_var, hidden_state, encoder_outputs, max_words, vocab, stop_tokens, goal_hid=None, mask=True,
decoding_masked_tokens=DECODING_MASKED_TOKENS):
# input_var: (1, 1)
# hidden_state: tuple: (h, c)
# encoder_outputs: max_dlg_len*(1, 1, dlg_cell_size)
# goal_hid: (1, goal_nhid)
logprob_outputs = [] # list of logprob | max_dec_len*(1, )
symbol_outputs = [] # list of word ids | max_dec_len*(1, )
decoder_input = input_var
decoder_hidden_state = hidden_state
if type(encoder_outputs) is list:
# (1, max_dlg_len, dlg_cell_size)
encoder_outputs = th.cat(encoder_outputs, 1)
# print('encoder_outputs.size() = {}'.format(encoder_outputs.size()))
if mask:
special_token_mask = Variable(th.FloatTensor(
[-999. if token in decoding_masked_tokens else 0. for token in vocab]))
special_token_mask = cast_type(
special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )
def _sample(dec_output, num_i):
# dec_output: (1, 1, vocab_size), need to softmax and log_softmax
dec_output = dec_output.view(-1) # (vocab_size, )
# TODO temperature
prob = F.softmax(dec_output/0.6, dim=0) # (vocab_size, )
logprob = F.log_softmax(dec_output, dim=0) # (vocab_size, )
symbol = prob.multinomial(num_samples=1).detach() # (1, )
# _, symbol = prob.topk(1) # (1, )
_, tmp_symbol = prob.topk(1) # (1, )
# print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))
# print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))
logprob = logprob.gather(0, symbol) # (1, )
return logprob, symbol
for i in range(max_words):
decoder_output, decoder_hidden_state = self._step(
decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)
# disable special tokens from being generated in a normal turn
if mask:
decoder_output += special_token_mask.expand(1, 1, -1)
logprob, symbol = _sample(decoder_output, i)
logprob_outputs.append(logprob)
symbol_outputs.append(symbol)
decoder_input = symbol.view(1, -1)
if vocab[symbol.item()] in stop_tokens:
break
assert len(logprob_outputs) == len(symbol_outputs)
# logprob_list = [t.item() for t in logprob_outputs]
logprob_list = logprob_outputs
symbol_list = [t.item() for t in symbol_outputs]
return logprob_list, symbol_list
# For MultiWoz RL
def forward_rl(self, batch_size, dec_init_state, attn_context, vocab, max_words, goal_hid=None, mask=True, temp=0.1):
# prepare the BOS inputs
with th.no_grad():
bos_var = Variable(th.LongTensor([self.sys_id]))
bos_var = cast_type(bos_var, LONG, self.use_gpu)
decoder_input = bos_var.expand(batch_size, 1) # (1, 1)
decoder_hidden_state = dec_init_state # tuple: (h, c)
encoder_outputs = attn_context # (1, ctx_len, ctx_cell_size)
logprob_outputs = [] # list of logprob | max_dec_len*(1, )
symbol_outputs = [] # list of word ids | max_dec_len*(1, )
if mask:
special_token_mask = Variable(th.FloatTensor(
[-999. if token in DECODING_MASKED_TOKENS else 0. for token in vocab]))
special_token_mask = cast_type(
special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )
def _sample(dec_output, num_i):
# dec_output: (1, 1, vocab_size), need to softmax and log_softmax
# (batch_size, vocab_size, )
dec_output = dec_output.view(batch_size, -1)
# (batch_size, vocab_size, )
prob = F.softmax(dec_output/temp, dim=1)
# (batch_size, vocab_size, )
logprob = F.log_softmax(dec_output, dim=1)
symbol = prob.multinomial(
num_samples=1).detach() # (batch_size, 1)
# _, symbol = prob.topk(1) # (1, )
_, tmp_symbol = prob.topk(1) # (1, )
# print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))
# print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))
logprob = logprob.gather(1, symbol) # (1, )
return logprob, symbol
stopped_samples = set()
for i in range(max_words):
decoder_output, decoder_hidden_state = self._step(
decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)
# disable special tokens from being generated in a normal turn
if mask:
decoder_output += special_token_mask.expand(1, 1, -1)
logprob, symbol = _sample(decoder_output, i)
logprob_outputs.append(logprob)
symbol_outputs.append(symbol)
decoder_input = symbol.view(batch_size, -1)
for b_id in range(batch_size):
if vocab[symbol[b_id].item()] == EOS:
stopped_samples.add(b_id)
if len(stopped_samples) == batch_size:
break
assert len(logprob_outputs) == len(symbol_outputs)
symbol_outputs = th.cat(
symbol_outputs, dim=1).cpu().data.numpy().tolist()
logprob_outputs = th.cat(logprob_outputs, dim=1)
logprob_list = []
symbol_list = []
for b_id in range(batch_size):
b_logprob = []
b_symbol = []
for t_id in range(logprob_outputs.shape[1]):
symbol = symbol_outputs[b_id][t_id]
if vocab[symbol] == EOS and t_id != 0:
break
b_symbol.append(symbol_outputs[b_id][t_id])
b_logprob.append(logprob_outputs[b_id][t_id])
logprob_list.append(b_logprob)
symbol_list.append(b_symbol)
# TODO backward compatible, if batch_size == 1, we remove the nested structure
if batch_size == 1:
logprob_list = logprob_list[0]
symbol_list = symbol_list[0]
return logprob_list, symbol_list
| [
"torch.nn.Linear",
"torch.cat",
"torch.no_grad",
"torch.FloatTensor",
"torch.bmm",
"torch.nn.functional.log_softmax",
"torch.LongTensor",
"torch.nn.functional.softmax",
"torch.nn.Embedding",
"torch.nn.functional.tanh"
] | 1.2.0 | ljw23/ConvLab-2 | 13d48ea0e441701bd66100689b6c25b561f15525 |
1.2 | # Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.autograd import Variable
from convlab2.e2e.rnn_rollout.engines import EngineBase, Criterion
class SelectionEngine(EngineBase):
def __init__(self, model, args, verbose=False):
super(SelectionEngine, self).__init__(model, args, verbose)
self.sel_crit = Criterion(
self.model.item_dict,
bad_toks=['<disconnect>', '<disagree>'],
reduction='mean' if args.sep_sel else 'none')
def _forward(model, batch, sep_sel=False):
ctx, _, inpts, lens, _, sel_tgt, rev_idxs, hid_idxs, _ = batch
ctx = Variable(ctx)
inpts = [Variable(inpt) for inpt in inpts]
rev_idxs = [Variable(idx) for idx in rev_idxs]
hid_idxs = [Variable(idx) for idx in hid_idxs]
if sep_sel:
sel_tgt = Variable(sel_tgt)
else:
sel_tgt = [Variable(t) for t in sel_tgt]
# remove YOU:/THEM: from the end
sel_out = model(inpts[:-1], lens[:-1], rev_idxs[:-1], hid_idxs[:-1], ctx)
return sel_out, sel_tgt
def train_batch(self, batch):
sel_out, sel_tgt = SelectionEngine._forward(self.model, batch,
sep_sel=self.args.sep_sel)
loss = 0
if self.args.sep_sel:
loss = self.sel_crit(sel_out, sel_tgt)
else:
for out, tgt in zip(sel_out, sel_tgt):
loss += self.sel_crit(out, tgt)
loss /= sel_out[0].size(0)
self.opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
self.opt.step()
return loss.item()
def valid_batch(self, batch):
with torch.no_grad():
sel_out, sel_tgt = SelectionEngine._forward(self.model, batch,
sep_sel=self.args.sep_sel)
loss = 0
if self.args.sep_sel:
loss = self.sel_crit(sel_out, sel_tgt)
else:
for out, tgt in zip(sel_out, sel_tgt):
loss += self.sel_crit(out, tgt)
loss /= sel_out[0].size(0)
return 0, loss.item(), 0
| [
"torch.autograd.Variable",
"torch.no_grad"
] | 1.2.0 | ljw23/ConvLab-2 | 13d48ea0e441701bd66100689b6c25b561f15525 |
1.9 | import os
import torch
import torch.nn.functional as F
import glob
import numpy as np
import numpy.matlib  # np.matlib.repmat below needs this explicit submodule import
import tensorflow as tf  # the option-model block below uses TF1-style ops (tf.stop_gradient, tf.train.AdamOptimizer)
from torch.optim import Adam
from utils.utils import soft_update, hard_update
from utils.model import GaussianPolicy, QNetwork, DeterministicPolicy
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, merge, Lambda, Activation
from keras.layers.merge import Add, Multiply, Concatenate, concatenate
from keras.initializers import RandomUniform
from keras.optimizers import Adam as KerasAdam  # aliased so it does not shadow torch.optim.Adam imported above
import keras.backend as K
from keras import metrics
def weighted_entropy(p, w_norm):
# w = tf.divide(tf.exp(A - np.max(A)), prob)
# w_norm = w / K.sum(w)
return K.sum(w_norm * p * K.log(p + 1e-8))
def weighted_mean(p, w_norm):
# w = tf.exp(A- np.max(A))
# w_norm = w / K.sum(w)
p_weighted = np.multiply(w_norm, p)
return K.mean(p_weighted, axis=0)
def weighted_mse(Q_target, Q_pred, w_norm):
# w = tf.exp(A- np.max(A))
# w_norm = w / K.sum(w)
error = K.square(Q_target - Q_pred)
return K.mean(w_norm * error)
def softmax(x):
col = x.shape[1]
x_max = np.reshape(np.amax(x, axis=1), (-1, 1))
e_x = np.exp(x - np.matlib.repmat(x_max, 1, col) )
e_x_sum = np.reshape( np.sum(e_x, axis=1), (-1, 1))
out = e_x / np.matlib.repmat(e_x_sum, 1, col)
return out
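# --- Editor's note (worked example, not part of the original file) ---
# softmax(np.array([[1.0, 2.0]])) subtracts the row-wise max before exponentiating for
# numerical stability and returns roughly [[0.2689, 0.7311]]; every row sums to 1.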
def weighted_mean_array(x, weights):
weights_mean = np.mean(weights, axis=1)
x_weighted = np.multiply(x, weights)
mean_weighted = np.divide(np.mean(x_weighted, axis=1), weights_mean)
return np.reshape(mean_weighted, (-1, 1))
def p_sample(p):
row, col = p.shape
p_sum = np.reshape(np.sum(p, axis=1), (row, 1))
p_normalized = p/np.matlib.repmat(p_sum, 1, col)
p_cumsum = np.matrix(np.cumsum( p_normalized, axis=1))
# print(p_cumsum[0])
rand = np.matlib.repmat(np.random.random((row, 1)), 1, col)
# print(rand[0])
o_softmax = np.argmax(p_cumsum >= rand, axis=1)
return o_softmax
def entropy(p):
return K.sum(p * K.log((p + 1e-8)))
def add_normal(x_input, outshape, at_eps):
"""
add normal noise to the input
"""
epsilon = K.random_normal(shape=outshape, mean=0., stddev=1.)
x_out = x_input + at_eps * np.multiply(epsilon, np.absolute(x_input))
return x_out
def kl(p, q):
return K.sum(p * K.log((p + 1e-8) / (q + 1e-8)))
class Multi_SAC(object):
def __init__(self, state_dim, action_dim, option_dim, max_action, action_space):
self.alpha = 0.2
self.lr = 0.0003
self.option_num = option_dim
self.policy_type = "Gaussian"
self.target_update_interval = 1
self.automatic_entropy_tuning = True
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
""" critic network """
self.critic = QNetwork(state_dim, action_dim, 400).to(device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
self.critic_target = QNetwork(state_dim, action_dim, 400).to(self.device)
hard_update(self.critic_target, self.critic)
self.sampling_prob = torch.FloatTensor(state).to(self.device)
# ===================================================================== #
# Option Model #
# ===================================================================== #
self.option_state_input, self.option_action_input, self.option_input_concat, self.option_out_dec, \
self.option_out, self.option_out_noise, self.option_model = self.create_option_model()
        Advantage = tf.stop_gradient(self.target_q_value - self.predicted_v_value)  # stop_gradient is a TF op (numpy has none), matching the lines below
Weight = np.divide(np.exp(Advantage - np.max(Advantage)), self.sampling_prob)
W_norm = Weight/K.mean(Weight)
critic_conditional_entropy = weighted_entropy(self.option_out, tf.stop_gradient(W_norm))
p_weighted_ave = weighted_mean(self.option_out, tf.stop_gradient(W_norm))
self.critic_entropy = critic_conditional_entropy - self.c_ent * entropy(p_weighted_ave)
self.vat_loss = kl(self.option_out, self.option_out_noise)
self.reg_loss = metrics.mean_absolute_error(self.option_input_concat, self.option_out_dec)
self.option_loss = self.reg_loss + self.entropy_coeff * (self.critic_entropy) + self.c_reg * self.vat_loss
self.option_optimize = tf.train.AdamOptimizer(self.option_lr).minimize(self.option_loss)
""" option network """
self.it = 0
if self.policy_type == "Gaussian":
# Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
if self.automatic_entropy_tuning == True:
self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
self.alpha_optim = Adam([self.log_alpha], lr=self.lr)
self.policy = GaussianPolicy(state_dim, action_dim, 400, max_action).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)
elif self.policy_type == "Multi_Gaussian":
if self.automatic_entropy_tuning == True:
self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
self.alpha_optim = Adam([self.log_alpha], lr=self.lr)
self.policy = GaussianPolicy(state_dim, action_dim, 400, max_action).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)
else:
self.alpha = 0
self.automatic_entropy_tuning = False
self.policy = DeterministicPolicy(state_dim, action_dim, 400, max_action).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=self.lr)
def select_action(self, state, eval=True):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if eval == False:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
return action.detach().cpu().numpy()[0]
def train_actor_option(self, inputs, a_gradient, option):
self.sess.run(self.actor_optimizer_list[option], feed_dict={
self.actor_state_input_list[option]: inputs,
self.action_gradient_list[option]: a_gradient
})
def train_critic(self, inputs, action, target_q_value, predicted_v_value, sampling_prob):
return self.sess.run([self.critic_optimize], feed_dict={
self.critic_state_input: inputs,
self.critic_action_input: action,
self.target_q_value: target_q_value,
self.predicted_v_value: predicted_v_value,
self.sampling_prob: sampling_prob
})
def train_option(self, inputs, action, target_q_value, predicted_v_value, sampling_prob):
return self.sess.run([self.option_optimize], feed_dict={
self.option_state_input: inputs,
self.option_action_input: action,
self.target_q_value: target_q_value,
self.predicted_v_value: predicted_v_value,
self.sampling_prob: sampling_prob
})
def max_option(self, inputs):
Q_predict = []
n = inputs.shape[0]
for o in range(int(self.option_num)):
action_i = self.predict_actor_target(inputs, o)
Q_predict_i, _ = self.predict_critic_target(inputs, action_i)
if o == 0:
Q_predict = np.reshape(Q_predict_i, (-1, 1))
else:
Q_predict = np.concatenate((Q_predict, np.reshape(Q_predict_i, (-1, 1))), axis=1)
o_max = np.argmax(Q_predict, axis=1)
Q_max = np.max(Q_predict, axis=1)
return o_max, Q_max, Q_predict
def softmax_option_target(self, inputs):
Q_predict = []
n = inputs.shape[0]
for o in range(int(self.option_num)):
action_i = self.predict_actor_target(inputs, o)
Q_predict_i, _ = self.predict_critic_target(inputs, action_i)
if o == 0:
Q_predict = np.reshape(Q_predict_i, (-1, 1))
else:
Q_predict = np.concatenate((Q_predict, np.reshape(Q_predict_i, (-1, 1))), axis=1)
p = softmax(Q_predict)
o_softmax = p_sample(p)
n = Q_predict.shape[0]
Q_softmax = Q_predict[np.arange(n), o_softmax.flatten()]
return o_softmax, np.reshape(Q_softmax, (n, 1)), Q_predict
def predict_actor_option(self, inputs, option):
return self.sess.run(self.actor_out_list[option], feed_dict={self.actor_state_input_list[option]: inputs})
def predict_actor(self, inputs, options):
action_list = []
for o in range(self.option_num):
action_o = self.predict_actor_option(inputs, o)
action_list.append(action_o)
n = inputs.shape[0]
action = 0
if n == 1 or np.isscalar(options):
action = action_list[options]
# calculate the action
else:
for i in range(n):
if i == 0:
action = action_list[int(options[i])][i, :]
else:
action = np.vstack((action, action_list[int(options[i])][i, :]))
return action
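# `softmax` and `p_sample` are called by `softmax_option_target` above but are not
# defined in this snippet; in the original repository they are provided elsewhere.
# A minimal sketch of the assumed behaviour (row-wise softmax over per-option
# Q-values, then sampling one option index per row) is:
def softmax(q_values, axis=1):
    # numerically stable row-wise softmax
    z = q_values - np.max(q_values, axis=axis, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=axis, keepdims=True)
def p_sample(p):
    # draw one option index per row according to the probabilities in p
    return np.array([np.random.choice(len(row), p=row) for row in p])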
| [
"torch.zeros",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.Tensor"
] | 1.9.0 | hzm2016/assistive-gym-robosuite | 5c529f4444cc386383618bfa584341740a8468f9 |
1.3 | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ runner_mockingjay.py ]
# Synopsis [ runner for the mockingjay model ]
# Author [ Andy T. Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import yaml
import torch
import random
import argparse
import numpy as np
from utility.timer import Timer
#############################
# MOCKINGJAY CONFIGURATIONS #
#############################
def get_mockingjay_args():
parser = argparse.ArgumentParser(description='Argument Parser for the mockingjay project.')
# setting
parser.add_argument('--config', default='config/mockingjay_libri.yaml', type=str, help='Path to experiment config.')
parser.add_argument('--seed', default=1337, type=int, help='Random seed for reproducible results.', required=False)
# Logging
parser.add_argument('--logdir', default='log/log_mockingjay/', type=str, help='Logging path.', required=False)
parser.add_argument('--name', default=None, type=str, help='Name for logging.', required=False)
# model ckpt
parser.add_argument('--load', action='store_true', help='Load pre-trained model to restore training, no need to specify this during testing.')
parser.add_argument('--ckpdir', default='result/result_mockingjay/', type=str, help='Checkpoint/Result path.', required=False)
parser.add_argument('--ckpt', default='mockingjay_libri_sd1337_LinearLarge/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)
# parser.add_argument('--ckpt', default='mockingjay_libri_sd1337_MelBase/mockingjay-500000.ckpt', type=str, help='path to mockingjay model checkpoint.', required=False)
parser.add_argument('--dckpt', default='baseline_sentiment_libri_sd1337/baseline_sentiment-500000.ckpt', type=str, help='path to downstream checkpoint.', required=False)
parser.add_argument('--apc_path', default='./result/result_apc/apc_libri_sd1337_standard/apc-500000.ckpt', type=str, help='path to the apc model checkpoint.', required=False)
# mockingjay
parser.add_argument('--train', action='store_true', help='Train the model.')
parser.add_argument('--run_mockingjay', action='store_true', help='train and test the downstream tasks using mockingjay representations.')
parser.add_argument('--run_apc', action='store_true', help='train and test the downstream tasks using apc representations.')
parser.add_argument('--fine_tune', action='store_true', help='fine tune the mockingjay model with downstream task.')
parser.add_argument('--plot', action='store_true', help='Plot model generated results during testing.')
# phone task
parser.add_argument('--train_phone', action='store_true', help='Train the phone classifier on mel or mockingjay representations.')
parser.add_argument('--test_phone', action='store_true', help='Test mel or mockingjay representations using the trained phone classifier.')
# sentiment task
parser.add_argument('--train_sentiment', action='store_true', help='Train the sentiment classifier on mel or mockingjay representations.')
parser.add_argument('--test_sentiment', action='store_true', help='Test mel or mockingjay representations using the trained sentiment classifier.')
# speaker verification task
parser.add_argument('--train_speaker', action='store_true', help='Train the speaker classifier on mel or mockingjay representations.')
parser.add_argument('--test_speaker', action='store_true', help='Test mel or mockingjay representations using the trained speaker classifier.')
# Options
parser.add_argument('--with_head', action='store_true', help='inference with the spectrogram head, the model outputs spectrogram.')
parser.add_argument('--output_attention', action='store_true', help='plot attention')
parser.add_argument('--load_ws', default='result/result_mockingjay_sentiment/10111754-10170300-weight_sum/best_val.ckpt', help='load weighted-sum weights from trained downstream model')
parser.add_argument('--cpu', action='store_true', help='Disable GPU training.')
parser.add_argument('--no-msg', action='store_true', help='Hide all messages.')
args = parser.parse_args()
setattr(args,'gpu', not args.cpu)
setattr(args,'verbose', not args.no_msg)
config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
config['timer'] = Timer()
return config, args
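# Example invocations (illustrative):
#   python runner_mockingjay.py --train --config config/mockingjay_libri.yaml --name my_run
#   python runner_mockingjay.py --run_mockingjay --train_phone
#   python runner_mockingjay.py --run_mockingjay --test_phone --dckpt <path-to-downstream-ckpt>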
########
# MAIN #
########
def main():
# get arguments
config, args = get_mockingjay_args()
# Fix seed and make backends deterministic
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available(): torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
# Train Mockingjay
if args.train:
from mockingjay.solver import Trainer
trainer = Trainer(config, args)
trainer.load_data(split='train')
trainer.set_model(inference=False)
trainer.exec()
##################################################################################
# Train Phone Task
elif args.train_phone:
from downstream.solver import Downstream_Trainer
task = 'mockingjay_phone' if args.run_mockingjay \
else 'apc_phone' if args.run_apc else 'baseline_phone'
trainer = Downstream_Trainer(config, args, task=task)
trainer.load_data(split='train', load='phone')
trainer.set_model(inference=False)
trainer.exec()
# Test Phone Task
elif args.test_phone:
from downstream.solver import Downstream_Tester
task = 'mockingjay_phone' if args.run_mockingjay \
else 'apc_phone' if args.run_apc else 'baseline_phone'
tester = Downstream_Tester(config, args, task=task)
tester.load_data(split='test', load='phone')
tester.set_model(inference=True)
tester.exec()
##################################################################################
# Train Sentiment Task
elif args.train_sentiment:
from downstream.solver import Downstream_Trainer
task = 'mockingjay_sentiment' if args.run_mockingjay \
else 'apc_sentiment' if args.run_apc else 'baseline_sentiment'
trainer = Downstream_Trainer(config, args, task=task)
trainer.load_data(split='train', load='sentiment')
trainer.set_model(inference=False)
trainer.exec()
# Test Sentiment Task
elif args.test_sentiment:
from downstream.solver import Downstream_Tester
task = 'mockingjay_sentiment' if args.run_mockingjay \
else 'apc_sentiment' if args.run_apc else 'baseline_sentiment'
tester = Downstream_Tester(config, args, task=task)
tester.load_data(split='test', load='sentiment')
tester.set_model(inference=True)
tester.exec()
##################################################################################
# Train Speaker Task
elif args.train_speaker:
from downstream.solver import Downstream_Trainer
task = 'mockingjay_speaker' if args.run_mockingjay \
else 'apc_speaker' if args.run_apc else 'baseline_speaker'
trainer = Downstream_Trainer(config, args, task=task)
trainer.load_data(split='train', load='speaker')
# trainer.load_data(split='train', load='speaker_large') # Deprecated
trainer.set_model(inference=False)
trainer.exec()
# Test Speaker Task
elif args.test_speaker:
from downstream.solver import Downstream_Tester
task = 'mockingjay_speaker' if args.run_mockingjay \
else 'apc_speaker' if args.run_apc else 'baseline_speaker'
tester = Downstream_Tester(config, args, task=task)
tester.load_data(split='test', load='speaker')
# tester.load_data(split='test', load='speaker_large') # Deprecated
tester.set_model(inference=True)
tester.exec()
##################################################################################
# Visualize Mockingjay
elif args.plot:
from mockingjay.solver import Tester
tester = Tester(config, args)
tester.load_data(split='test', load_mel_only=True)
tester.set_model(inference=True, with_head=args.with_head, output_attention=args.output_attention)
tester.plot(with_head=args.with_head)
config['timer'].report()
########################
# GET MOCKINGJAY MODEL #
########################
def get_mockingjay_model(from_path='result/result_mockingjay/mockingjay_libri_sd1337_best/mockingjay-500000.ckpt', display_settings=False):
''' Wrapper that loads the mockingjay model from checkpoint path '''
# load config and paras
all_states = torch.load(from_path, map_location='cpu')
config = all_states['Settings']['Config']
paras = all_states['Settings']['Paras']
# display checkpoint settings
if display_settings:
for cluster in config:
print(cluster + ':')
for item in config[cluster]:
print('\t' + str(item) + ': ', config[cluster][item])
print('paras:')
v_paras = vars(paras)
for item in v_paras:
print('\t' + str(item) + ': ', v_paras[item])
# load model with Tester
from mockingjay.solver import Tester
mockingjay = Tester(config, paras)
mockingjay.set_model(inference=True, with_head=False, from_path=from_path)
return mockingjay
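# Minimal usage sketch (checkpoint path is illustrative): load a trained
# Mockingjay checkpoint and print the settings stored inside it.
#   mockingjay = get_mockingjay_model(
#       from_path='result/result_mockingjay/mockingjay_libri_sd1337_best/mockingjay-500000.ckpt',
#       display_settings=True)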
if __name__ == '__main__':
main()
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.load"
] | 1.3.0 | andi611/Mockingjay-Speech-Representation | e77df17a7f63a983c3757140c7a1e8c199cac614 |
1.8 | import torch
import torch.nn as nn
import torchvision
class ResNeXtBlock(nn.Module):
def __init__(self,in_places,places, stride=1,downsampling=False, expansion = 2, cardinality=32):
super(ResNeXtBlock,self).__init__()
self.expansion = expansion
self.downsampling = downsampling
self.bottleneck = nn.Sequential(
nn.Conv2d(in_channels=in_places, out_channels=places, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(places),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=places, out_channels=places, kernel_size=3, stride=stride, padding=1, bias=False, groups=cardinality),
nn.BatchNorm2d(places),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels=places, out_channels=places * self.expansion, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(places * self.expansion),
)
if self.downsampling:
self.downsample = nn.Sequential(
nn.Conv2d(in_channels=in_places, out_channels=places * self.expansion, kernel_size=1, stride=stride,bias=False),
nn.BatchNorm2d(places * self.expansion)
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
residual = x
out = self.bottleneck(x)
if self.downsampling:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
if __name__ =='__main__':
model = ResNeXtBlock(in_places=256, places=128)
print(model)
input = torch.randn(1,256,64,64)
out = model(input)
print(out.shape) | [
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d",
"torch.randn"
] | 1.8.1 | carlsummer/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 |
1.8 | # !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:5/12/2021 9:00 PM
# @File:DcnV2
"""
conda create -n DCNV2 python=3.8
conda activate DCNV2
git clone https://github.com/jinfagang/DCNv2_latest.git
cd DCNv2_latest/
pip install torch==1.6.0
pip install torchvision==0.7.0
python3 setup.py build develop
./make.sh # build
/home/deploy/anaconda3/envs/yolov5_py38_cu102_conda/lib/python3.8/site-packages/torch/utils/cpp_extension.py
If you get the error subprocess.CalledProcessError: Command '['ninja', '-v']' returned non-zero exit status 1,
then change command = ['ninja', '-v'] to command = ['ninja', '-V'] in the file above.
If you get errors like the following:
g++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/vision.o: No such file or directory
g++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cpu/dcn_v2_cpu.o: No such file or directory
g++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cpu/dcn_v2_im2col_cpu.o: No such file or directory
g++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cpu/dcn_v2_psroi_pooling_cpu.o: No such file or directory
g++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cuda/dcn_v2_cuda.o: No such file or directory
g++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cuda/dcn_v2_im2col_cuda.o: No such file or directory
g++: error: /home/deploy/software/DCNv2-master/build/temp.linux-x86_64-3.8/home/deploy/software/DCNv2-master/src/cuda/dcn_v2_psroi_pooling_cuda.o: No such file or directory
then run:
python3 setup.py build develop
python testcpu.py # run examples and gradient check on cpu
python testcuda.py # run examples and gradient check on gpu
"""
# An Example
# deformable conv
import torch
from dcn_v2 import DCN
input = torch.randn(2, 64, 128, 128).cuda()
# wrap all things (offset and mask) in DCN
dcn = DCN(64, 64, kernel_size=(3,3), stride=1, padding=1, deformable_groups=2).cuda()
output = dcn(input)
print(output.shape)
# deformable roi pooling
from dcn_v2 import DCNPooling
input = torch.randn(2, 32, 64, 64).cuda()
batch_inds = torch.randint(2, (20, 1)).cuda().float()
x = torch.randint(256, (20, 1)).cuda().float()
y = torch.randint(256, (20, 1)).cuda().float()
w = torch.randint(64, (20, 1)).cuda().float()
h = torch.randint(64, (20, 1)).cuda().float()
rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)
# mdformable pooling (V2)
# wrap all things (offset and mask) in DCNPooling
dpooling = DCNPooling(spatial_scale=1.0 / 4,
pooled_size=7,
output_dim=32,
no_trans=False,
group_size=1,
trans_std=0.1).cuda()
dout = dpooling(input, rois)
print(dout.shape) | [
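# Expected shapes, assuming standard DCNv2 semantics (kernel 3, stride 1,
# padding 1 preserves the spatial size; DCNPooling returns one
# pooled_size x pooled_size map with output_dim channels per RoI):
#   output.shape -> torch.Size([2, 64, 128, 128])
#   dout.shape   -> torch.Size([20, 32, 7, 7])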
"torch.randint",
"torch.cat",
"torch.randn"
] | 1.8.1 | carlsummer/python_developer_tools | fc0dcf5c4ef088e2e535206dc82f09bbfd01f280 |
1.9 | from itertools import chain
from pathlib import Path
from typing import Tuple
import torch
from accelerate import Accelerator
from torch.utils.data import DataLoader
from saticl.config import Configuration, SSLConfiguration
from saticl.datasets.icl import ICLDataset
from saticl.datasets.transforms import invariance_transforms, inverse_transform, ssl_transforms
from saticl.datasets.wrappers import SSLDataset
from saticl.logging.tensorboard import TensorBoardLogger
from saticl.losses.regularization import AugmentationInvariance
from saticl.models.icl import ICLSegmenter
from saticl.prepare import prepare_dataset, prepare_metrics, prepare_metrics_ssl, prepare_model, prepare_model_ssl
from saticl.tasks import Task
from saticl.trainer.base import Trainer
from saticl.trainer.callbacks import Checkpoint, DisplaySamples, EarlyStopping, EarlyStoppingCriterion
from saticl.trainer.invariance import AugInvarianceTrainer
from saticl.trainer.ssl import SSLStage, SSLTrainer
from saticl.utils.common import flatten_config, get_logger, git_revision_hash, store_config
from saticl.utils.ml import checkpoint_path, init_experiment, seed_everything, seed_worker
LOG = get_logger(__name__)
def init_from_previous_step(config: Configuration, new_model: ICLSegmenter, old_model: ICLSegmenter,
model_folder: Path, task: Task) -> Tuple[ICLSegmenter, ICLSegmenter]:
if task.step == 0:
LOG.info("Step 0: training from scratch without old model")
return new_model, old_model
LOG.info("Loading checkpoint from step: %d", task.step - 1)
if config.task.step_checkpoint is not None:
ckpt_path = Path(config.task.step_checkpoint)
else:
ckpt_path = checkpoint_path(model_folder, task_name=task.name, step=task.step - 1)
assert ckpt_path.exists() and ckpt_path.is_file(), f"Checkpoint for step {task.step-1} not found at {str(ckpt_path)}"
checkpoint = torch.load(str(ckpt_path), map_location="cpu")
# load checkpoint into the new model, without strict matching because of ICL heads
new_model.load_state_dict(checkpoint, strict=False)
if config.model.init_balanced:
new_model.init_classifier()
# load the same checkpoint into the old model, this time strict since it's the very same
old_model.load_state_dict(checkpoint, strict=True)
old_model.freeze()
old_model.eval()
del checkpoint
return new_model, old_model
def train(config: Configuration):
# assertions before starting
assert config.name is not None or config.task.step == 0, "Specify the experiment name with ICL steps >= 1!"
assert torch.backends.cudnn.enabled, "AMP requires CUDNN backend to be enabled."
# prepare accelerator ASAP
accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)
# Create the directory tree:
# outputs
# |-- dataset
# |--task_name
# |-- exp_name
# |-- models
# |-- logs
accelerator.wait_for_everyone()
log_name = f"output-{config.task.step}.log"
exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)
config_path = out_folder / f"segmenter-config-s{config.task.step}.yaml"
LOG.info("Run started")
LOG.info("Experiment ID: %s", exp_id)
LOG.info("Output folder: %s", out_folder)
LOG.info("Models folder: %s", model_folder)
LOG.info("Logs folder: %s", logs_folder)
LOG.info("Configuration: %s", config_path)
# seeding everything
LOG.info("Using seed: %d", config.seed)
seed_everything(config.seed)
# prepare datasets
LOG.info("Loading datasets...")
train_set, valid_set = prepare_dataset(config=config, partial_transforms=False)
LOG.info("Full sets - train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
add_background = not train_set.has_background()
task = Task(dataset=config.dataset,
name=config.task.name,
step=config.task.step,
add_background=add_background)
train_mask, valid_mask = 0, 255
train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)
valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)
# construct data loaders
train_loader = DataLoader(dataset=train_set,
batch_size=config.trainer.batch_size,
shuffle=True,
num_workers=config.trainer.num_workers,
worker_init_fn=seed_worker,
drop_last=True)
valid_loader = DataLoader(dataset=valid_set,
batch_size=config.trainer.batch_size,
shuffle=False,
num_workers=config.trainer.num_workers,
worker_init_fn=seed_worker)
LOG.info("ICL sets - Train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
# prepare models
LOG.info("Preparing model...")
new_model = prepare_model(config=config, task=task)
new_model = new_model.to(accelerator.device)
if task.step > 0:
old_task = Task(dataset=config.dataset,
name=config.task.name,
step=task.step - 1,
add_background=add_background)
old_model = prepare_model(config=config, task=old_task)
old_model = old_model.to(accelerator.device)
else:
old_model = None
new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)
LOG.info("Done preparing models")
# prepare optimizer and scheduler
optimizer = config.optimizer.instantiate(new_model.parameters())
scheduler = config.scheduler.instantiate(optimizer)
# prepare losses
weights = None
if config.class_weights:
weights = train_set.load_class_weights(Path(config.class_weights),
device=accelerator.device,
normalize=config.ce.tversky)
LOG.info("Using class weights: %s", str(weights))
segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count(), weight=weights)
distill_loss = config.kd.instantiate()
if task.step > 0 and config.ce.unbiased:
seg_loss_name = str(type(segment_loss))
kdd_loss_name = str(type(distill_loss))
if "Unbiased" not in seg_loss_name:
LOG.warn(f"Non-unbiased segmentation loss '{seg_loss_name}' for step {task.step}!")
if "Unbiased" not in kdd_loss_name:
LOG.warn(f"Non-unbiased KD loss '{kdd_loss_name}' for step {task.step}")
# prepare metrics and logger
monitored = config.trainer.monitor.name
train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)
logger = TensorBoardLogger(log_folder=logs_folder,
filename_suffix=f"step-{task.step}",
icl_step=task.step,
comment=config.comment)
# logging configuration to tensorboard
LOG.debug("Logging flattened config. to TensorBoard")
logger.log_table("config", flatten_config(config.dict()))
# prepare trainer
LOG.info("Visualize: %s, num. batches for visualization: %s", str(config.visualize), str(config.num_samples))
num_samples = int(config.visualize) * config.num_samples
# choose trainer class depending on task or regularization
trainer_class = Trainer
kwargs = dict()
if config.aug.apply:
inv_transforms = invariance_transforms(config.aug)
LOG.info("Invariance transforms: ")
LOG.info(str(inv_transforms))
kwargs.update(aug_criterion=AugmentationInvariance(transform=inv_transforms),
aug_lambda=config.aug.factor,
aug_lambda_icl=config.aug.factor_icl,
temperature=config.trainer.temperature,
temp_epochs=config.trainer.temp_epochs)
trainer_class = AugInvarianceTrainer
trainer = trainer_class(accelerator=accelerator,
task=task,
new_model=new_model,
old_model=old_model,
optimizer=optimizer,
scheduler=scheduler,
train_metrics=train_metrics,
val_metrics=valid_metrics,
old_classes=train_set.old_categories(),
new_classes=train_set.new_categories(),
seg_criterion=segment_loss,
kdd_criterion=distill_loss,
kde_criterion=None,
kdd_lambda=config.kd.decoder_factor,
kde_lambda=config.kd.encoder_factor,
logger=logger,
samples=num_samples,
debug=config.debug,
**kwargs)
trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,
criterion=EarlyStoppingCriterion.maximum,
patience=config.trainer.patience)) \
.add_callback(Checkpoint(call_every=1,
model_folder=model_folder,
name_format=f"task{task.name}_step-{task.step}",
save_best=True)) \
.add_callback(DisplaySamples(inverse_transform=inverse_transform(),
color_palette=train_set.palette()))
# storing config and starting training
config.version = git_revision_hash()
store_config(config, path=config_path)
trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)
LOG.info(f"Training completed at epoch {trainer.current_epoch:<2d} "
f"(best {monitored}: {trainer.best_score:.4f})")
LOG.info("Experiment %s (step %d) completed!", exp_id, task.step)
def train_ssl(config: SSLConfiguration):
# assertions before starting
assert config.name is not None or config.task.step == 0, "Specify the experiment name with ICL steps >= 1!"
assert torch.backends.cudnn.enabled, "AMP requires CUDNN backend to be enabled."
if config.in_channels != 4:
LOG.warn("Forcing input channels to 4 (previous value: %d)", config.in_channels)
config.in_channels = 4
# prepare accelerator ASAP
accelerator = Accelerator(fp16=config.trainer.amp, cpu=config.trainer.cpu)
# Create the directory tree:
# outputs
# |-- dataset
# |--task_name
# |-- exp_name
# |-- models
# |-- logs
accelerator.wait_for_everyone()
log_name = f"output-{config.task.step}.log"
exp_id, out_folder, model_folder, logs_folder = init_experiment(config=config, log_name=log_name)
config_path = out_folder / f"segmenter-config-s{config.task.step}.yaml"
store_config(config, path=config_path)
LOG.info("Run started")
LOG.info("Experiment ID: %s", exp_id)
LOG.info("Output folder: %s", out_folder)
LOG.info("Models folder: %s", model_folder)
LOG.info("Logs folder: %s", logs_folder)
LOG.info("Configuration: %s", config_path)
# seeding everything
LOG.info("Using seed: %d", config.seed)
seed_everything(config.seed)
# prepare datasets
LOG.info("Loading datasets...")
train_set, valid_set = prepare_dataset(config=config)
train_set = SSLDataset(train_set, transform=ssl_transforms())
LOG.info("Full sets - train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
add_background = not train_set.has_background()
task = Task(dataset=config.dataset,
name=config.task.name,
step=config.task.step,
add_background=add_background)
train_mask, valid_mask = 0, 255
train_set = ICLDataset(dataset=train_set, task=task, mask_value=train_mask, filter_mode=config.task.filter_mode)
valid_set = ICLDataset(dataset=valid_set, task=task, mask_value=valid_mask, filter_mode=config.task.filter_mode)
train_loader = DataLoader(dataset=train_set,
batch_size=config.trainer.batch_size,
shuffle=True,
num_workers=config.trainer.num_workers,
worker_init_fn=seed_worker,
drop_last=True)
valid_loader = DataLoader(dataset=valid_set,
batch_size=config.trainer.batch_size,
shuffle=False,
num_workers=config.trainer.num_workers,
worker_init_fn=seed_worker)
LOG.info("ICL sets - Train set: %d samples, validation set: %d samples", len(train_set), len(valid_set))
# prepare models
LOG.info("Preparing model...")
new_model, ssl_model = prepare_model_ssl(config=config, task=task)
new_model = new_model.to(accelerator.device)
ssl_model = ssl_model.to(accelerator.device)
if task.step > 0:
old_task = Task(dataset=config.dataset,
name=config.task.name,
step=task.step - 1,
add_background=add_background)
old_model = prepare_model(config=config, task=old_task)
old_model = old_model.to(accelerator.device)
else:
old_model = None
new_model, old_model = init_from_previous_step(config, new_model, old_model, model_folder, task)
LOG.info("Done preparing models")
# prepare optimizer and scheduler
parameters = chain(new_model.parameters(), ssl_model.head.parameters())
optimizer = config.optimizer.instantiate(parameters)
scheduler = config.scheduler.instantiate(optimizer)
# prepare losses, including SSL
segment_loss = config.ce.instantiate(ignore_index=255, old_class_count=task.old_class_count())
distill_loss = config.kd.instantiate()
pretext_loss = config.ssl_loss()
# asserts to verify their validity
if task.step > 0 and config.ce.unbiased:
seg_loss_name = str(type(segment_loss))
kdd_loss_name = str(type(distill_loss))
assert "Unbiased" in seg_loss_name, f"Wrong loss '{seg_loss_name}' for step {task.step}"
assert "Unbiased" in kdd_loss_name, f"Wrong loss '{kdd_loss_name}' for step {task.step}"
# prepare metrics and logger
monitored = config.trainer.monitor.name
train_metrics, valid_metrics = prepare_metrics(task=task, device=accelerator.device)
ssl_metrics = prepare_metrics_ssl(num_classes=config.model.pretext_classes, device=accelerator.device)
logger = TensorBoardLogger(log_folder=logs_folder,
filename_suffix=f"step-{task.step}",
icl_step=task.step,
comment=config.comment)
# logging configuration to tensorboard
LOG.debug("Logging flattened config. to TensorBoard")
logger.log_table("config", flatten_config(config.dict()))
# prepare trainer
LOG.info("Visualize: %s, num. batches for visualization: %s", str(config.visualize), str(config.num_samples))
num_samples = int(config.visualize) * config.num_samples
trainer = SSLTrainer(accelerator=accelerator,
task=task,
new_model=new_model,
old_model=old_model,
ssl_model=ssl_model,
optimizer=optimizer,
scheduler=scheduler,
train_metrics=train_metrics,
val_metrics=valid_metrics,
old_classes=train_set.old_categories(),
new_classes=train_set.new_categories(),
seg_criterion=segment_loss,
ssl_criterion=pretext_loss,
kdd_criterion=distill_loss,
kde_criterion=None,
kdd_lambda=config.kd.decoder_factor,
kde_lambda=config.kd.encoder_factor,
logger=logger,
samples=num_samples,
debug=config.debug)
trainer.add_metrics(SSLStage.ssl, metrics=ssl_metrics)
trainer.add_callback(EarlyStopping(call_every=1, metric=monitored,
criterion=EarlyStoppingCriterion.maximum,
patience=config.trainer.patience)) \
.add_callback(Checkpoint(call_every=1,
model_folder=model_folder,
name_format=f"task{task.name}_step-{task.step}",
save_best=True)) \
.add_callback(DisplaySamples(inverse_transform=inverse_transform(),
color_palette=train_set.palette()))
trainer.fit(train_dataloader=train_loader, val_dataloader=valid_loader, max_epochs=config.trainer.max_epochs)
LOG.info(f"Training completed at epoch {trainer.current_epoch:<2d} "
f"(best {monitored}: {trainer.best_score:.4f})")
LOG.info("Experiment %s (step %d) completed!", exp_id, task.step)
| [
"torch.utils.data.DataLoader"
] | 1.9.0 | edornd/multimodal-icl | f79bfa73665db471c12ee9cb57bbee1bcabb0467 |
1.2 | import copy
import os
import torch
from . import geom
from .cell import WaveCell
from .probe import WaveIntensityProbe
from .rnn import WaveRNN
from .source import WaveSource
from .utils import set_dtype
def save_model(model,
name,
savedir='./study/',
history=None,
history_geom_state=None,
cfg=None,
verbose=True):
"""Save the model state and history to a file
"""
str_filename = name + '.pt'
if not os.path.exists(savedir):
os.makedirs(savedir)
str_savepath = savedir + str_filename
if history_geom_state is None:
history_geom_state = [model.cell.geom.state_reconstruction_args()]
data = {'model_geom_class_str': model.cell.geom.__class__.__name__,
# Class name so we know which constructor to call in load()
'model_state': model.state_dict(),
# For now just store model state without history (only geom is likely to change)
'history': history,
'history_geom_state': history_geom_state, # Full history of the geometry state,
'cfg': cfg}
if verbose:
print("Saving model to %s" % str_savepath)
torch.save(data, str_savepath)
def new_geometry(class_str, state):
WaveGeometryClass = getattr(geom, class_str)
geom_state = copy.deepcopy(state)
return WaveGeometryClass(**geom_state)
def load_model(str_filename, which_iteration=-1):
"""Load a previously saved model and its history from a file
"""
print("Loading model from %s" % str_filename)
data = torch.load(str_filename)
# Set the type for floats from the save
set_dtype(data['cfg']['dtype'])
# Reconstruct Geometry
new_geom = new_geometry(data['model_geom_class_str'], data['history_geom_state'][which_iteration])
# Get model state to recreate probes and sources
model_state = copy.deepcopy(data['model_state'])
# Parse out the probe and source coords
px = [model_state[k].item() for k in model_state if 'probes' in k and 'x' in k]
py = [model_state[k].item() for k in model_state if 'probes' in k and 'y' in k]
sx = [model_state[k].item() for k in model_state if 'sources' in k and 'x' in k]
sy = [model_state[k].item() for k in model_state if 'sources' in k and 'y' in k]
# Manually add the probes and sources
new_probes = []
for (x, y) in zip(px, py):
new_probes.append(WaveIntensityProbe(x, y))
# TODO(ian): here we should actually try to infer the type of probe (e.g. intensity or not)
new_sources = []
for (x, y) in zip(sx, sy):
new_sources.append(WaveSource(x, y))
new_cell = WaveCell(model_state['cell.dt'].item(), new_geom)
new_model = WaveRNN(new_cell, new_sources, new_probes)
# Put into eval mode (doesn't really matter for us but whatever)
new_model.eval()
return new_model, data['history'], data['history_geom_state'], data['cfg']
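# Round-trip usage sketch (names are illustrative): persist a trained model and
# restore it later for analysis.
#   save_model(model, name='run01', savedir='./study/', history=history,
#              history_geom_state=None, cfg=cfg)
#   model2, history2, geom_states, cfg2 = load_model('./study/run01.pt')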
| [
"torch.save",
"torch.load"
] | 1.2 | Kshitiz-Bansal/wavetorch | 927ad02dc9db83f72b8df1d91418a6681e60fd56 |
1.8 | import networkx as nx
import numpy as np
import torch
from torch.utils.data import Dataset
from dsloader.util import kron_graph, random_binary, make_fractional
class KroneckerDataset (Dataset):
def __init__(self, kron_iter=4, seed_size=4, fixed_seed=None, num_graphs=1, perms_per_graph=256, progress_bar=False):
self.kron_iter = kron_iter
self.seed_size = seed_size
self.num_nodes = seed_size ** (kron_iter + 1)
self.seeds = []
self.matrices = []
num_iter = range(num_graphs)
if progress_bar:
from tqdm import tqdm
num_iter = tqdm(num_iter)
for i in num_iter:
seed = random_binary(seed_size, use_sparsity=False)
self.seeds.append(seed)
if fixed_seed is not None:
k_g = kron_graph(fixed_seed, n=kron_iter).astype(float)
else:
k_g = kron_graph(seed, n=kron_iter).astype(float)
for j in range(perms_per_graph):
self.matrices.append(make_fractional(k_g, inplace=False))
def __len__(self):
return len(self.matrices)
def __getitem__(self, idx):
return torch.tensor(self.matrices[idx])
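# Minimal usage sketch (hypothetical sizes): build a small set of permuted
# Kronecker graphs and iterate over it in batches.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    ds = KroneckerDataset(kron_iter=2, seed_size=2, num_graphs=2, perms_per_graph=8)
    loader = DataLoader(ds, batch_size=4, shuffle=True)
    batch = next(iter(loader))
    # each item is a num_nodes x num_nodes fractional adjacency matrix,
    # with num_nodes = seed_size ** (kron_iter + 1) = 8 here
    print(batch.shape)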
| [
"torch.tensor"
] | 1.8.0 | willshiao/brgan | 99d1627176a59811bf9032ef1f99d6e7261095fb |
1.8 | import torch
import torch.nn as nn
class ModdedSharedSvdGenerator(nn.Module):
def __init__(self, latent_dim=100, layer_size=128, num_nodes=500, rank=30, extra_dim=False):
super(ModdedSharedSvdGenerator, self).__init__()
self.num_nodes = num_nodes
self.rank = rank
self.latent_dim = latent_dim
self.extra_dim = extra_dim
self.output_factors = False
shared_layers = [
nn.Linear(latent_dim, layer_size),
nn.Linear(layer_size, layer_size * 2),
nn.BatchNorm1d(layer_size * 2),
nn.ReLU(inplace=True),
# New block
nn.Linear(layer_size * 2, layer_size * 4),
nn.BatchNorm1d(layer_size * 4),
]
mat_output_layers = [
[
nn.Linear(layer_size * 4, num_nodes * rank)
] for _ in range(2)
]
sigma_output_layers = [
nn.Linear(layer_size * 4, rank)
]
self.shared = nn.Sequential(*shared_layers)
self.output1 = nn.Sequential(*mat_output_layers[0])
self.output2 = nn.Sequential(*mat_output_layers[1])
self.output_sigma = nn.Sequential(*sigma_output_layers)
def set_factor_output(self, new_val):
self.output_factors = new_val
return True
def sample_latent(self, num_samples):
return torch.randn((num_samples, self.latent_dim))
def forward(self, noise):
batch_sz = noise.shape[0]
S = self.shared(noise)
U = self.output1(S).view(batch_sz, self.num_nodes, self.rank)
Vt = self.output2(S).view(batch_sz, self.rank, self.num_nodes)
sig = self.output_sigma(S).view(batch_sz, self.rank)
sig_diag = torch.diag_embed(sig)
U_scaled = torch.bmm(U, sig_diag)
res = torch.bmm(U_scaled, Vt)
if self.extra_dim:
out = res.view(batch_sz, 1, self.num_nodes, self.num_nodes)
elif not self.output_factors:
out = res.view(batch_sz, self.num_nodes, self.num_nodes)
if self.output_factors:
return (out, (U, Vt))
else:
return out
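# Minimal usage sketch: sample latent codes and generate a batch of low-rank
# adjacency matrices (shape: batch x num_nodes x num_nodes with these settings).
if __name__ == '__main__':
    gen = ModdedSharedSvdGenerator(latent_dim=100, layer_size=128, num_nodes=500, rank=30)
    z = gen.sample_latent(4)
    adj = gen(z)
    print(adj.shape)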
| [
"torch.diag_embed",
"torch.nn.Linear",
"torch.nn.Sequential",
"torch.bmm",
"torch.nn.ReLU",
"torch.nn.BatchNorm1d",
"torch.randn"
] | 1.8.0 | willshiao/brgan | 99d1627176a59811bf9032ef1f99d6e7261095fb |
0.4 | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from math import floor, log2
from functools import partial
from linear_attention_transformer import ImageLinearAttention
###
from random import random
import numpy as np
import torch.nn.functional as F
###
from models.networks_SPADE.base_network import BaseNetwork
from models.networks_SPADE.architecture import ResnetBlock as ResnetBlock
from models.networks_SPADE.architecture import SPADEResnetBlock as SPADEResnetBlock
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x): return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
return scheduler
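# Worked example for the 'linear' policy (illustrative numbers): with
# epoch_count=1, n_epochs=100 and n_epochs_decay=100, lr_l stays at 1.0 through
# epoch 99, then decays linearly, e.g. lr_l = 1 - (150 + 1 - 100) / 101 ~= 0.495
# at epoch 150, reaching 0 at epoch 200.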
def define_SPADE(opt,gpu_ids):
if('spade8' in opt.netG):
net = SPADE8Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
elif('spade6' in opt.netG):
net = SPADE6Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
else:
net = SPADEGenerator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
#net = torch.nn.DataParallel(net, gpu_ids)
net.init_weights()
return net
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
#net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_9blocksup':
net = ResnetGeneratorUp(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_768':
net = UNet768(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_768_sigm':
net = UNet768Sigm(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_spade':
net = UNet768PIXSPADE(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_spade8sm':
net = UNet768PIXSPADE8SM(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
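# Usage sketch (pix2pix-style settings are illustrative):
#   netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='unet_256',
#                   norm='batch', use_dropout=True, init_type='normal',
#                   init_gain=0.02, gpu_ids=[])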
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == 'conditional': #conditional patchGAN
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'unet':
net = UnetDiscriminator()
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
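# Usage sketch: for a conditional (pix2pix-style) discriminator the input is the
# channel-wise concatenation of the condition and the real/generated image, hence
# input_nc = in_channels + out_channels (values below are illustrative).
#   netD = define_D(input_nc=3 + 3, ndf=64, netD='basic', n_layers_D=3,
#                   norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[])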
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and ground truth labels.
Parameters:
prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
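# Usage sketch (shapes are illustrative): score PatchGAN logits against
# real/fake targets with the vanilla (BCE-with-logits) objective.
#   criterion = GANLoss('vanilla')
#   pred_fake = netD(torch.cat((condition, fake_image), dim=1))
#   loss_D_fake = criterion(pred_fake, False)   # D should call fakes fake
#   loss_G_GAN = criterion(pred_fake, True)     # G wants fakes to look real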
class UnetGANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(UnetGANLoss, self).__init__()
self.register_buffer('real_label_1', torch.tensor(target_real_label))
self.register_buffer('real_label_2', torch.tensor(np.ones((1,256,256))))
self.register_buffer('fake_label_1', torch.tensor(target_fake_label))
self.register_buffer('fake_label_2', torch.tensor(np.zeros((1,256,256))))
self.loss_1 = nn.BCEWithLogitsLoss()
self.loss_2 = nn.BCEWithLogitsLoss()
def get_target_tensor(self, prediction_1, prediction_2, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor_1 = self.real_label_1
target_tensor_2 = self.real_label_2
else:
target_tensor_1 = self.fake_label_1
target_tensor_2 = self.fake_label_2
return target_tensor_1.expand_as(prediction_1), target_tensor_2.expand_as(prediction_2)
def __call__(self, prediction_1, prediction_2, target_is_real):
"""Calculate loss given Discriminator's output and ground truth labels.
Parameters:
prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
target_tensor_1, target_tensor_2 = self.get_target_tensor(prediction_1, prediction_2, target_is_real)
loss_1 = self.loss_1(prediction_1, target_tensor_1)
loss_2 = self.loss_2(prediction_2, target_tensor_2)
loss = loss_1.mean()+loss_2.mean()
return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
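# Typical call site (sketch): inside a WGAN-GP discriminator update,
#   gp, _ = cal_gradient_penalty(netD, real_imgs, fake_imgs.detach(), device,
#                                type='mixed', constant=1.0, lambda_gp=10.0)
#   loss_D = netD(fake_imgs.detach()).mean() - netD(real_imgs).mean() + gp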
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetGeneratorUp(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGeneratorUp, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.Upsample(scale_factor = 2, mode='nearest'),
nn.ReflectionPad2d(1),
nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0),]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
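# --- Hedged sketch (illustration only, not in the original file) ---
# The conv block is shape-preserving (3x3 convs with effective padding 1 and an unchanged
# channel count), which is exactly what makes the `x + self.conv_block(x)` residual
# addition in forward() well defined.
def _demo_resnet_block():
    block = ResnetBlock(dim=64, padding_type='reflect', norm_layer=nn.BatchNorm2d,
                        use_dropout=False, use_bias=False)
    x = torch.randn(2, 64, 32, 32)
    assert block(x).shape == x.shape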
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
num_downs (int) -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
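# --- Hedged usage sketch (illustration only, not in the original file) ---
# Because the generator stacks num_downs stride-2 convolutions, the input side length
# should be divisible by 2 ** num_downs; with num_downs=8 a 256x256 image shrinks to
# 1x1 at the bottleneck and is upsampled back to 256x256.
def _demo_unet_generator():
    net = UnetGenerator(input_nc=3, output_nc=3, num_downs=8, ngf=64)
    x = torch.randn(1, 3, 256, 256)
    assert net(x).shape == (1, 3, 256, 256)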
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
#%%% Unet from DeepMact
class ConvBnRelu2d(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, output_padding=1, dilation=1, stride=1, groups=1, is_bn=True, is_relu=True, is_decoder=False):
super(ConvBnRelu2d, self).__init__()
if is_decoder:
self.transpConv = torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, output_padding=output_padding, stride=stride, dilation=dilation, groups=groups, bias=False)
self.conv = None
else:
self.transpConv = None
self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, groups=groups, bias=False)
self.bn = torch.nn.BatchNorm2d(out_channels, eps=1e-4)
self.relu = torch.nn.ReLU(inplace=True)
if is_bn is False: self.bn = None
if is_relu is False: self.relu = None
def forward(self, x):
if self.conv is None:
x = self.transpConv(x)
elif self.transpConv is None:
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class StackEncoder(torch.nn.Module):
def __init__(self, x_channels, y_channels, kernel_size=3, stride=1):
super(StackEncoder, self).__init__()
padding = (kernel_size - 1) // 2
self.encode = torch.nn.Sequential(
ConvBnRelu2d(x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
)
def forward(self, x):
y = self.encode(x)
y_small = torch.nn.functional.max_pool2d(y, kernel_size=2, stride=2)
return y, y_small
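# --- Hedged sketch (illustration only, not in the original file) ---
# StackEncoder returns two tensors: the full-resolution feature map (kept as the skip
# connection) and the 2x max-pooled map that feeds the next encoder stage.
def _demo_stack_encoder():
    enc = StackEncoder(1, 24, kernel_size=3)
    skip, down = enc(torch.randn(1, 1, 300, 300))
    assert skip.shape == (1, 24, 300, 300)
    assert down.shape == (1, 24, 150, 150)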
class StackDecoder(torch.nn.Module):
def __init__(self, x_big_channels, x_channels, y_channels, kernel_size=3, stride=1):
super(StackDecoder, self).__init__()
padding = (kernel_size - 1) // 2
self.decode = torch.nn.Sequential(
ConvBnRelu2d(x_big_channels + x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
)
def forward(self, x_big, x):
N, C, H, W = x_big.size()
y = torch.nn.functional.upsample(x, size=(H, W), mode='bilinear', align_corners=True)  # note: F.upsample is deprecated; F.interpolate is the modern equivalent
y = torch.cat([y, x_big], 1)
y = self.decode(y)
return y
# 768
class UNet768(torch.nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UNet768, self).__init__()
# def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
# C, H, W = in_shape
# assert(C==3)
self.output_nc = output_nc
# 1024
self.down1 = StackEncoder(input_nc, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out
self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out
self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out
self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out
self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out
self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out
self.center = torch.nn.Sequential(
ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out
)
# x_big_channels, x_channels, y_channels
self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out
self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out
self.up4 = StackDecoder(256, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out
self.up3 = StackDecoder(128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out
self.up2 = StackDecoder(64, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out
self.up1 = StackDecoder(24, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out
self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out
self.final_out = torch.nn.Tanh()
def _crop_concat(self, upsampled, bypass):
"""
Crop `bypass` to the (h, w) of `upsampled` and concatenate them along the channel dimension.
Used for the expansive path.
Returns:
The concatenated tensor
"""
c = (bypass.size()[2] - upsampled.size()[2]) // 2
bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
return torch.cat((upsampled, bypass), 1)
def forward(self, x):
out = x # ;print('x ',x.size())
#
down1, out = self.down1(out) ##;
#print('down1',down1.shape) #256
down2, out = self.down2(out) # ;
#print('down2',down2.shape) #128
down3, out = self.down3(out) # ;
#print('down3',down3.shape) #64
down4, out = self.down4(out) # ;
#print('down4',down4.shape) #32
down5, out = self.down5(out) # ;
#print('down5',down5.shape) #16
down6, out = self.down6(out) # ;
#print('down6',down6.shape) #8
pass # ;
#print('out ',out.shape)
out = self.center(out)
#print('0',out.shape)
out = self.up6(down6, out)
#print('1',out.shape)
out = self.up5(down5, out)
#print('2',out.shape)
out = self.up4(down4, out)
#print('3',out.shape)
out = self.up3(down3, out)
#print('4',out.shape)
out = self.up2(down2, out)
#print('5',out.shape)
out = self.up1(down1, out)
# 1024
#print('6',out.shape)
out = self.final_out(self.classify(out))
out = torch.reshape(out, (-1, self.output_nc, x.shape[2], x.shape[3]))
return out
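# --- Hedged usage sketch (illustration only, not in the original file) ---
# Because StackDecoder upsamples to the stored skip tensor's (H, W), UNet768 accepts
# inputs such as 256x256 or 300x300 and returns output_nc channels at the input size
# (num_downs and ngf are accepted for interface compatibility but unused here).
def _demo_unet768():
    net = UNet768(input_nc=1, output_nc=1, num_downs=6, ngf=64)
    x = torch.randn(1, 1, 256, 256)
    assert net(x).shape == (1, 1, 256, 256)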
#%%Unet_spade_768_300
#%%sigm
class UNet768Sigm(torch.nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UNet768Sigm, self).__init__()
# def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
# C, H, W = in_shape
# assert(C==3)
self.output_nc = output_nc
# 1024
self.down1 = StackEncoder(input_nc, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out
self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out
self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out
self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out
self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out
self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out
self.center = torch.nn.Sequential(
ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out
)
# x_big_channels, x_channels, y_channels
self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out
self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out
self.up4 = StackDecoder(256, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out
self.up3 = StackDecoder(128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out
self.up2 = StackDecoder(64, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out
self.up1 = StackDecoder(24, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out
self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out
self.final_out = torch.nn.Sigmoid()
def _crop_concat(self, upsampled, bypass):
"""
Crop `bypass` to the (h, w) of `upsampled` and concatenate them along the channel dimension.
Used for the expansive path.
Returns:
The concatenated tensor
"""
c = (bypass.size()[2] - upsampled.size()[2]) // 2
bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
return torch.cat((upsampled, bypass), 1)
def forward(self, x):
out = x # ;print('x ',x.size())
#
down1, out = self.down1(out) ##;print('down1',down1.size()) #256
down2, out = self.down2(out) # ;print('down2',down2.size()) #128
down3, out = self.down3(out) # ;print('down3',down3.size()) #64
down4, out = self.down4(out) # ;print('down4',down4.size()) #32
down5, out = self.down5(out) # ;print('down5',down5.size()) #16
down6, out = self.down6(out) # ;print('down6',down6.size()) #8
pass # ;print('out ',out.size())
out = self.center(out)
out = self.up6(down6, out)
out = self.up5(down5, out)
out = self.up4(down4, out)
out = self.up3(down3, out)
out = self.up2(down2, out)
out = self.up1(down1, out)
# 1024
out = self.final_out(self.classify(out))
out = torch.reshape(out, (1, self.output_nc, 256, 256))  # note: hardcoded to batch size 1 and a 256x256 output
return out
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
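# --- Hedged usage sketch (illustration only, not in the original file) ---
# With the default n_layers=3 this is the classic 70x70 PatchGAN: a 256x256 input is
# mapped to a 30x30 grid of logits, each judging one overlapping image patch.
def _demo_patchgan():
    d = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3)
    assert d(torch.randn(1, 3, 256, 256)).shape == (1, 1, 30, 30)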
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
#%% Unet as Discriminator
def DiffAugment(x, types=[]):
for p in types:
for f in AUGMENT_FNS[p]:
x = f(x)
return x.contiguous(memory_format = torch.contiguous_format)
def rand_brightness(x):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x
def rand_saturation(x):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x
def rand_contrast(x):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2).contiguous(memory_format = torch.contiguous_format)
return x
def rand_cutout(x, ratio=0.5):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'translation': [rand_translation],
'cutout': [rand_cutout],
}
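# --- Hedged usage sketch (illustration only, not in the original file; it assumes
# torch.nn.functional is imported as F at the top of this module) ---
# DiffAugment applies every augmentation registered for the requested types; all ops
# are differentiable and shape-preserving, so gradients can flow back to the generator.
def _demo_diffaugment():
    imgs = torch.rand(4, 3, 64, 64)
    out = DiffAugment(imgs, types=['color', 'translation', 'cutout'])
    assert out.shape == imgs.shape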
def random_float(lo, hi):
return lo + (hi - lo) * random()
def random_crop_and_resize(tensor, scale):
b, c, h, _ = tensor.shape
new_width = int(h * scale)
delta = h - new_width
h_delta = int(random() * delta)
w_delta = int(random() * delta)
cropped = tensor[:, :, h_delta:(h_delta + new_width), w_delta:(w_delta + new_width)].clone()
return F.interpolate(cropped, size=(h, h), mode='bilinear')
def random_hflip(tensor, prob):
if prob > random():
return tensor
return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
def __init__(self, D, image_size, types):
super().__init__()
self.D = D
self.types = types
def forward(self, images, prob = 0., detach = False):
if random() < prob:
images = random_hflip(images, prob=0.5)
images = DiffAugment(images, types=self.types)
if detach:
images.detach_()
return self.D(images), images
def leaky_relu(p=0.2):
return nn.LeakyReLU(p)
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class Flatten(nn.Module):
def __init__(self, index):
super().__init__()
self.index = index
def forward(self, x):
return x.flatten(self.index)
class Rezero(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.zeros(1))
def forward(self, x):
return self.fn(x) * self.g
def double_conv(chan_in, chan_out):
return nn.Sequential(
nn.Conv2d(chan_in, chan_out, 3, padding=1),
leaky_relu(),
nn.Conv2d(chan_out, chan_out, 3, padding=1),
leaky_relu()
)
class DownBlock(nn.Module):
def __init__(self, input_channels, filters, downsample=True):
super().__init__()
self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))
self.net = double_conv(input_channels, filters)
self.down = nn.Conv2d(filters, filters, 3, padding = 1, stride = 2) if downsample else None
def forward(self, x):
res = self.conv_res(x)
x = self.net(x)
unet_res = x
if self.down is not None:
x = self.down(x)
x = x + res
return x, unet_res
# one layer of self-attention and feedforward, for images
attn_and_ff = lambda chan: nn.Sequential(*[
Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),
Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))
])
class UpBlock(nn.Module):
def __init__(self, input_channels, filters):
super().__init__()
self.conv_res = nn.ConvTranspose2d(input_channels // 2, filters, 1, stride = 2)
self.net = double_conv(input_channels, filters)
self.up = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False)
self.input_channels = input_channels
self.filters = filters
def forward(self, x, res):
*_, h, w = x.shape
conv_res = self.conv_res(x, output_size = (h * 2, w * 2))
x = self.up(x)
x = torch.cat((x, res), dim=1)
x = self.net(x)
x = x + conv_res
return x
class UnetDiscriminator(nn.Module):
def __init__(self, image_size=256, network_capacity = 16, transparent = False, fmap_max = 256):
super().__init__()
num_layers = int(log2(image_size) - 3)
num_init_filters = 2  # if not transparent else 4
blocks = []
filters = [num_init_filters] + [(network_capacity) * (2 ** i) for i in range(num_layers + 1)]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
filters[-1] = filters[-2]
chan_in_out = list(zip(filters[:-1], filters[1:]))
chan_in_out = list(map(list, chan_in_out))
print('Channels',chan_in_out)
down_blocks = []
attn_blocks = []
for ind, (in_chan, out_chan) in enumerate(chan_in_out):
num_layer = ind + 1
is_not_last = ind != (len(chan_in_out) - 1)
block = DownBlock(in_chan, out_chan, downsample = is_not_last)
down_blocks.append(block)
attn_fn = attn_and_ff(out_chan)
attn_blocks.append(attn_fn)
self.down_blocks = nn.ModuleList(down_blocks)
self.attn_blocks = nn.ModuleList(attn_blocks)
last_chan = filters[-1]
self.to_logit = nn.Sequential(
leaky_relu(),
nn.AvgPool2d(image_size // (2 ** num_layers)),
Flatten(1),
nn.Linear(last_chan, 1)
)
self.conv = double_conv(last_chan, last_chan)
dec_chan_in_out = chan_in_out[:-1][::-1]
self.up_blocks = nn.ModuleList(list(map(lambda c: UpBlock(c[1] * 2, c[0]), dec_chan_in_out)))
self.conv_out = nn.Conv2d(2, 1, 1)
def forward(self, x):
#print('Input shape:', x.shape)
b, *_ = x.shape
residuals = []
i=0
for (down_block, attn_block) in zip(self.down_blocks, self.attn_blocks):
#print('Step', i, x.shape)
i=i+1
x, unet_res = down_block(x)
residuals.append(unet_res)
if attn_block is not None:
x = attn_block(x)
x = self.conv(x) + x
enc_out = self.to_logit(x)
for (up_block, res) in zip(self.up_blocks, residuals[:-1][::-1]):
#print('in up blocK', x.shape)
x = up_block(x, res)
dec_out = self.conv_out(x)
return enc_out.squeeze(), dec_out
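# --- Hedged usage sketch (illustration only, not in the original file; it assumes the
# ImageLinearAttention module imported elsewhere in this file is shape-preserving) ---
# The U-Net discriminator returns two things: a per-image realness logit from the
# encoder head and a per-pixel decision map from the decoder head.
def _demo_unet_discriminator():
    d = UnetDiscriminator(image_size=256, network_capacity=16, fmap_max=256)
    enc_out, dec_out = d(torch.randn(2, 2, 256, 256))  # 2 input channels by construction
    assert enc_out.shape == (2,)
    assert dec_out.shape == (2, 1, 256, 256)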
#%% SPADE RESNET
class SPADEGenerator(BaseNetwork):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
super(SPADEGenerator, self).__init__()
self.opt = opt
self.opt.num_upsampling_layers = 'normal'
self.opt.norm_G = 'spectralspadesyncbatch3x3'
self.opt.ngf = 64
self.opt.semantic_nc = 2
self.opt.use_vae = False
self.opt.crop_size = 256
self.opt.normG = 'spectralinstance'
self.opt.aspect_ratio = 1.0
nf = self.opt.ngf
opt = self.opt
self.sw, self.sh = self.compute_latent_vector_size(opt)
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == 'most':
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == 'normal':
num_up_layers = 5
elif opt.num_upsampling_layers == 'more':
num_up_layers = 6
elif opt.num_upsampling_layers == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
opt.num_upsampling_layers)
sw = self.opt.crop_size // (2**num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
#print('0,', x.shape)
x = self.head_0(x, seg)
#print('1,', x.shape)
x = self.up(x)
#print('2', x.shape)
x = self.G_middle_0(x, seg)
#print('3,', x.shape)
if self.opt.num_upsampling_layers == 'more' or \
self.opt.num_upsampling_layers == 'most':
x = self.up(x)
#print('4,', x.shape)
#x = self.G_middle_1(x, seg)
output_5 = x
#print('5,', x.shape)
x = self.up(x)
output_6 = x
#print('6,', x.shape)
x = self.up_0(x, seg)
#print('7,', x.shape)
x = self.up(x)
#print('8,', x.shape)
x = self.up_1(x, seg)
output_9 = x
#print('9,', x.shape)
x = self.up(x)
#print('10,', x.shape)
x = self.up_2(x, seg)
#print('11,', x.shape)
output_11 = x
x = self.up(x)
# print('12,', x.shape)
x = self.up_3(x, seg)
#print('13,', x.shape)
if self.opt.num_upsampling_layers == 'most':
x = self.up(x)
x = self.up_4(x, seg)
#print('14,', x.shape)
x = self.conv_img(F.leaky_relu(x, 2e-1))
# print('15,', x.shape)
output_15 = x
#x = F.tanh(x)
#print('16,', x.shape)
return output_5,output_6,output_9,output_11,output_15
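# Note added for readability (hedged; exact shapes depend on the external SPADEResnetBlock,
# which is assumed to preserve spatial size as in the reference SPADE implementation):
# with crop_size=256 and ngf=64 the five returned feature maps are roughly
# 1024@16x16 (output_5), 1024@32x32 (output_6), 256@64x64 (output_9),
# 128@128x128 (output_11) and 3@256x256 (output_15), i.e. channels@HxW.
# These are exactly the widths that UNet768PIXSPADE below concatenates into its decoder.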
#%% spade8
class SPADE8Generator(BaseNetwork):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
super(SPADE8Generator, self).__init__()
self.opt = opt
self.opt.num_upsampling_layers = 'normal'
self.opt.norm_G = 'spectralspadesyncbatch3x3'
self.opt.ngf = 8
self.opt.semantic_nc = 2
self.opt.use_vae = False
self.opt.crop_size = 256
self.opt.normG = 'spectralinstance'
self.opt.aspect_ratio = 1.0
nf = self.opt.ngf
opt = self.opt
self.sw, self.sh = self.compute_latent_vector_size(opt)
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == 'most':
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == 'normal':
num_up_layers = 5
elif opt.num_upsampling_layers == 'more':
num_up_layers = 6
elif opt.num_upsampling_layers == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
opt.num_upsampling_layers)
sw = self.opt.crop_size // (2**num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
#print('0,', x.shape)
x = self.head_0(x, seg)
#print('1,', x.shape)
x = self.up(x)
#print('2', x.shape)
x = self.G_middle_0(x, seg)
#print('3,', x.shape)
if self.opt.num_upsampling_layers == 'more' or \
self.opt.num_upsampling_layers == 'most':
x = self.up(x)
#print('4,', x.shape)
x = self.G_middle_1(x, seg)
output_5 = x
#print('5,', x.shape)
x = self.up(x)
output_6 = x
#print('6,', x.shape)
x = self.up_0(x, seg)
#print('7,', x.shape)
x = self.up(x)
#print('8,', x.shape)
x = self.up_1(x, seg)
output_9 = x
#print('9,', x.shape)
x = self.up(x)
#print('10,', x.shape)
x = self.up_2(x, seg)
#print('11,', x.shape)
output_11 = x
'''this can be removed'''
x = self.up(x)
#print('12,', x.shape)
x = self.up_3(x, seg)
#print('13,', x.shape)
if self.opt.num_upsampling_layers == 'most':
x = self.up(x)
x = self.up_4(x, seg)
#print('14,', x.shape)
x = self.conv_img(F.leaky_relu(x, 2e-1))
#print('15,', x.shape)
output_15 = x
#x = F.tanh(x)
#print('16,', x.shape)
'''til here'''
return output_5,output_6,output_9,output_11,output_15
#%%
class SPADE6Generator(BaseNetwork):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
super(SPADE6Generator, self).__init__()
self.opt = opt
self.opt.num_upsampling_layers = 'normal'
self.opt.norm_G = 'spectralspadesyncbatch3x3'
self.opt.ngf = 6
self.opt.semantic_nc = 2
self.opt.use_vae = False
self.opt.crop_size = 300
self.opt.normG = 'spectralinstance'
self.opt.aspect_ratio = 1.0
nf = self.opt.ngf
opt = self.opt
self.sw, self.sh = self.compute_latent_vector_size(opt)
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == 'most':
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == 'normal':
num_up_layers = 5
elif opt.num_upsampling_layers == 'more':
num_up_layers = 6
elif opt.num_upsampling_layers == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
opt.num_upsampling_layers)
sw = 10  # hardcoded latent width; the original formula was self.opt.crop_size // (2 ** num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
print('0,', x.shape)
x = self.head_0(x, seg)
print('1,', x.shape)
x = self.up(x)
print('2', x.shape)
x = self.G_middle_0(x, seg)
print('3,', x.shape)
if self.opt.num_upsampling_layers == 'more' or \
self.opt.num_upsampling_layers == 'most':
x = self.up(x)
print('4,', x.shape)
x = self.G_middle_1(x, seg)
output_5 = x
print('5,', x.shape)
x = self.up(x)
output_6 = x
print('6,', x.shape)
x = self.up_0(x, seg)
print('7,', x.shape)
x = self.up(x)
print('8,', x.shape)
x = self.up_1(x, seg)
output_9 = x
print('9,', x.shape)
x = self.up(x)
print('10,', x.shape)
x = self.up_2(x, seg)
print('11,', x.shape)
output_11 = x
x = self.up(x)
print('12,', x.shape)
x = self.up_3(x, seg)
print('13,', x.shape)
if self.opt.num_upsampling_layers == 'most':
x = self.up(x)
x = self.up_4(x, seg)
print('14,', x.shape)
x = self.conv_img(F.leaky_relu(x, 2e-1))
print('15,', x.shape)
output_15 = x
#x = F.tanh(x)
print('16,', x.shape)
return output_5,output_6,output_9,output_11,output_15
#%% For the PIX2SPADE
class UNet768PIXSPADE(torch.nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UNet768PIXSPADE, self).__init__()
# def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
# C, H, W = in_shape
# assert(C==3)
print('UNET 768 SPADE')
self.output_nc = output_nc
# 1024
self.down1 = StackEncoder(1, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out
self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out
self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out
self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out
self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out
self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out
self.center = torch.nn.Sequential(
ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out
)
# x_big_channels, x_channels, y_channels
self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out
self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out
self.up4 = StackDecoder(256+1024, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out
self.up3 = StackDecoder(128+1024, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out
self.up2 = StackDecoder(64+256, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out
self.up1 = StackDecoder(24+128, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out
self.classify = torch.nn.Conv2d(24+3, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out
self.final_out = torch.nn.Tanh()
def _crop_concat(self, upsampled, bypass):
"""
Crop `bypass` to the (h, w) of `upsampled` and concatenate them along the channel dimension.
Used for the expansive path.
Returns:
The concatenated tensor
"""
c = (bypass.size()[2] - upsampled.size()[2]) // 2
bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
return torch.cat((upsampled, bypass), 1)
def forward(self,x, input_to_net):
#print(input_to_net.shape)
output_5,output_6,output_9,output_11,output_15 = input_to_net
#print(x.shape)
out = x # ;print('x ',x.size())
#
down1, out = self.down1(out) ##;
#print('down1',down1.shape) #256
down2, out = self.down2(out) # ;
#print('down2',down2.shape) #128
down3, out = self.down3(out) # ;
#print('down3',down3.shape) #64
down4, out = self.down4(out) # ;
#print('down4',down4.shape) #32
down5, out = self.down5(out) # ;
#print('down5',down5.shape) #16
down6, out = self.down6(out) # ;
#print('down6',down6.shape) #8
pass # ;
#print('out ',out.shape)
out = self.center(out)
#print('0',out.shape)
out = self.up6(down6, out)
#print('1',out.shape)
out = self.up5(down5, out)
out = torch.cat((out,output_5 ),1 )
#print('2',out.shape)
out = self.up4(down4, out)
out = torch.cat((out,output_6 ),1 )
#print('3',out.shape)
out = self.up3(down3, out)
out = torch.cat((out,output_9 ),1 )
#print('4',out.shape)
out = self.up2(down2, out)
out = torch.cat((out,output_11 ),1 )
#print('5',out.shape)
out = self.up1(down1, out)
# 1024
out = torch.cat((out,output_15 ),1 )
#print('6',out.shape)
out = self.final_out(self.classify(out))
out = torch.reshape(out, (-1, self.output_nc, 256, 256))  # note: hardcoded to a 256x256 output
return out
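# --- Hedged usage sketch (illustration only, not in the original file) ---
# The second argument is the 5-tuple of SPADE feature maps; the shapes below match what
# SPADEGenerator produces for a 256x256 segmentation map with ngf=64 (see the note above).
def _demo_unet768_pixspade():
    net = UNet768PIXSPADE(input_nc=1, output_nc=1, num_downs=6, ngf=64)
    x = torch.randn(1, 1, 256, 256)
    spade_feats = (torch.randn(1, 1024, 16, 16),   # output_5
                   torch.randn(1, 1024, 32, 32),   # output_6
                   torch.randn(1, 256, 64, 64),    # output_9
                   torch.randn(1, 128, 128, 128),  # output_11
                   torch.randn(1, 3, 256, 256))    # output_15
    assert net(x, spade_feats).shape == (1, 1, 256, 256)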
#%%Unet for spade8
class UNet768PIXSPADE8SM(torch.nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UNet768PIXSPADE8SM, self).__init__()
# def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
# C, H, W = in_shape
# assert(C==3)
print('UNET 768 SPADE')
self.output_nc = output_nc
# 1024
self.down1 = StackEncoder(1, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out
self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out
self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out
self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out
self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out
self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out
self.center = torch.nn.Sequential(
ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out
)
# x_big_channels, x_channels, y_channels
self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out
self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out
self.up4 = StackDecoder(256+128, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out
self.up3 = StackDecoder(128+128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out
self.up2 = StackDecoder(64+32, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out
self.up1 = StackDecoder(24+16, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out
self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out
self.final_out = torch.nn.Tanh()
def _crop_concat(self, upsampled, bypass):
"""
Crop `bypass` to the (h, w) of `upsampled` and concatenate them along the channel dimension.
Used for the expansive path.
Returns:
The concatenated tensor
"""
c = (bypass.size()[2] - upsampled.size()[2]) // 2
bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
return torch.cat((upsampled, bypass), 1)
def forward(self,x, input_to_net):
#print(input_to_net.shape)
output_5,output_6,output_9,output_11,output_15 = input_to_net
#print(x.shape)
out = x # ;print('x ',x.size())
#
down1, out = self.down1(out) ##;
#print('down1',down1.shape) #256
down2, out = self.down2(out) # ;
#print('down2',down2.shape) #128
down3, out = self.down3(out) # ;
#print('down3',down3.shape) #64
down4, out = self.down4(out) # ;
#print('down4',down4.shape) #32
down5, out = self.down5(out) # ;
#print('down5',down5.shape) #16
down6, out = self.down6(out) # ;
#print('down6',down6.shape) #8
pass # ;
#print('out ',out.shape)
out = self.center(out)
#print('0',out.shape)
out = self.up6(down6, out)
#print('1',out.shape)
out = self.up5(down5, out)
out = torch.cat((out,output_5 ),1 )
#print('2',out.shape)
out = self.up4(down4, out)
out = torch.cat((out,output_6 ),1 )
#print('3',out.shape)
out = self.up3(down3, out)
out = torch.cat((out,output_9 ),1 )
#print('4',out.shape)
out = self.up2(down2, out)
out = torch.cat((out,output_11 ),1 )
#print('5',out.shape)
out = self.up1(down1, out)
# 1024
#out = torch.cat((out,output_15 ),1 )
#print('6',out.shape)
out = self.final_out(self.classify(out))
out = torch.reshape(out, (-1, self.output_nc, 256, 256))  # note: hardcoded to a 256x256 output
return out
| [
"torch.nn.Linear",
"torch.cat",
"torch.optim.lr_scheduler.StepLR",
"torch.nn.ModuleList",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm2d",
"torch.nn.init.kaiming_normal_",
"torch.cuda.is_available",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.functional.pad",
"torch.flip",
"torch.reshape",
"torch.nn.init.constant_",
"torch.nn.AvgPool2d",
"torch.nn.ConvTranspose2d",
"torch.nn.init.normal_",
"torch.tensor",
"torch.nn.ReflectionPad2d",
"torch.nn.init.orthogonal_",
"torch.nn.init.xavier_normal_",
"torch.zeros",
"torch.nn.functional.upsample",
"torch.nn.Sequential",
"torch.nn.Tanh",
"torch.nn.ReplicationPad2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.rand",
"torch.nn.Dropout",
"torch.nn.MSELoss",
"torch.arange",
"torch.nn.Sigmoid",
"torch.nn.functional.interpolate",
"torch.nn.Upsample",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.functional.max_pool2d",
"torch.nn.functional.leaky_relu"
] | 0.4.1 | izhorvath/MetGAN | aca85fb3306d2515a65c8d525cd78e1147ba7e1b |
1.8 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from collections import Counter
from pathlib import Path
from statistics import mean
import torch
import torch.nn as nn
from opacus import PrivacyEngine
from opacus.layers import DPGRU, DPLSTM, DPRNN
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
parser = argparse.ArgumentParser(
description="PyTorch Name language classification DP Training",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--data-root",
required=True,
type=str,
help="Path to training set of names (ie. ~/data/names/)",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
help="GPU ID for this process",
)
parser.add_argument(
"-b",
"--batch-size",
default=800,
type=int,
metavar="N",
help="mini-batch size",
)
parser.add_argument(
"--mode",
default="lstm",
choices=["lstm", "gru", "rnn"],
help="recursive network type",
)
parser.add_argument(
"--embedding-size", default=64, type=int, help="Character embedding dimension"
)
parser.add_argument(
"--hidden-size", default=128, type=int, help="hidden state dimensions"
)
parser.add_argument("--n-layers", default=1, type=int, help="How many layers to use")
parser.add_argument(
"--test-every",
default=0,
type=int,
help="Run evaluation on the test every these many epochs",
)
parser.add_argument(
"--bidirectional",
action="store_true",
default=False,
help="If turned on, makes the RNN bidirectional",
)
parser.add_argument(
"--learning-rate",
default=2.0,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument("--epochs", type=int, default=10, help="Number of training epochs")
parser.add_argument(
"--train-split",
type=float,
default=0.8,
help="Fraction of data to utilize for training (rest for evaluation)",
)
parser.add_argument(
"--sigma",
type=float,
default=1.0,
metavar="S",
help="Noise multiplier",
)
parser.add_argument(
"-c",
"--max-per-sample-grad-norm",
type=float,
default=1.5,
metavar="C",
help="Clip per-sample gradients to this norm",
)
parser.add_argument(
"--disable-dp",
action="store_true",
default=False,
help="Disable privacy training and just train with vanilla SGD",
)
parser.add_argument(
"--secure-rng",
action="store_true",
default=False,
help="Enable Secure RNG to have trustworthy privacy guarantees. Comes at a performance cost",
)
parser.add_argument(
"--delta",
type=float,
default=8e-5,
metavar="D",
help="Target delta",
)
parser.add_argument(
"--print-every",
type=int,
default=5,
help="Print the evaluation accuracy every these many iterations",
)
class CharByteEncoder(nn.Module):
"""
This encoder takes a UTF-8 string and encodes its bytes into a Tensor. It can also
perform the opposite operation to check a result.
Examples:
>>> encoder = CharByteEncoder()
>>> t = encoder('Ślusàrski') # returns tensor([256, 197, 154, 108, 117, 115, 195, 160, 114, 115, 107, 105, 257])
>>> encoder.decode(t) # returns "<s>Ślusàrski</s>"
"""
def __init__(self):
super().__init__()
self.start_token = "<s>"
self.end_token = "</s>"
self.pad_token = "<pad>"
self.start_idx = 256
self.end_idx = 257
self.pad_idx = 258
def forward(self, s: str, pad_to=0) -> torch.LongTensor:
"""
Encodes a string. It will append a start token <s> (id=self.start_idx) and an end token </s>
(id=self.end_idx).
Args:
s: The string to encode.
pad_to: If not zero, pad by appending self.pad_idx until string is of length `pad_to`.
Defaults to 0.
Returns:
The encoded LongTensor of indices.
"""
encoded = s.encode()
n_pad = pad_to - len(encoded) if pad_to > len(encoded) else 0
return torch.LongTensor(
[self.start_idx]
+ [c for c in encoded] # noqa
+ [self.end_idx]
+ [self.pad_idx for _ in range(n_pad)]
)
def decode(self, char_ids_tensor: torch.LongTensor) -> str:
"""
The inverse of `forward`. Keeps the start, end and pad indices.
"""
char_ids = char_ids_tensor.cpu().detach().tolist()
out = []
buf = []
for c in char_ids:
if c < 256:
buf.append(c)
else:
if buf:
out.append(bytes(buf).decode())
buf = []
if c == self.start_idx:
out.append(self.start_token)
elif c == self.end_idx:
out.append(self.end_token)
elif c == self.pad_idx:
out.append(self.pad_token)
if buf: # in case some are left
out.append(bytes(buf).decode())
return "".join(out)
def __len__(self):
"""
The length of our encoder space. This is fixed to 256 (one byte) + 3 special chars
(start, end, pad).
Returns:
259
"""
return 259
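# --- Hedged sketch (illustration only, not part of the original example) ---
# Round trip: multi-byte UTF-8 characters survive encode -> decode, and the decoded
# string comes back wrapped in the start/end markers.
def _demo_char_byte_encoder():
    enc = CharByteEncoder()
    ids = enc("Ślusàrski")
    assert enc.decode(ids) == "<s>Ślusàrski</s>"
    assert int(ids[0]) == enc.start_idx and int(ids[-1]) == enc.end_idx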
class NamesDataset(Dataset):
def __init__(self, root):
self.root = Path(root)
self.labels = list({langfile.stem for langfile in self.root.iterdir()})
self.labels_dict = {label: i for i, label in enumerate(self.labels)}
self.encoder = CharByteEncoder()
self.samples = self.construct_samples()
def __getitem__(self, i):
return self.samples[i]
def __len__(self):
return len(self.samples)
def construct_samples(self):
samples = []
for langfile in self.root.iterdir():
label_name = langfile.stem
label_id = self.labels_dict[label_name]
with open(langfile, "r") as fin:
for row in fin:
samples.append(
(self.encoder(row.strip()), torch.tensor(label_id).long())
)
return samples
def label_count(self):
cnt = Counter()
for _x, y in self.samples:
label = self.labels[int(y)]
cnt[label] += 1
return cnt
VOCAB_SIZE = 256 + 3 # 256 alternatives in one byte, plus 3 special characters.
class CharNNClassifier(nn.Module):
def __init__(
self,
rnn_type,
embedding_size,
hidden_size,
output_size,
num_layers=1,
bidirectional=False,
vocab_size=VOCAB_SIZE,
):
super().__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.output_size = output_size
self.vocab_size = vocab_size
self.embedding = nn.Embedding(vocab_size, embedding_size)
self.rnn = rnn_type(
embedding_size,
hidden_size,
num_layers=num_layers,
bidirectional=bidirectional,
batch_first=True,
)
self.out_layer = nn.Linear(hidden_size, output_size)
def forward(self, x, hidden=None):
x = self.embedding(x) # -> [B, T, D]
x, _ = self.rnn(x, hidden) # -> [B, T, H]
x = x[:, -1, :] # -> [B, H]
x = self.out_layer(x) # -> [B, C]
return x
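# --- Hedged sketch (illustration only, not part of the original example) ---
# The classifier embeds byte ids, runs the DP-compatible recurrent core, and classifies
# from the final time step, giving one row of logits per (padded) name.
def _demo_char_nn_classifier():
    clf = CharNNClassifier(DPLSTM, embedding_size=64, hidden_size=128, output_size=18)
    x = torch.randint(0, VOCAB_SIZE, (4, 12))  # batch of 4 names padded to length 12
    assert clf(x).shape == (4, 18)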
def padded_collate(batch, padding_idx=0):
x = pad_sequence(
[elem[0] for elem in batch], batch_first=True, padding_value=padding_idx
)
y = torch.stack([elem[1] for elem in batch]).long()
return x, y
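# --- Hedged sketch (illustration only, not part of the original example) ---
# padded_collate right-pads each encoded name to the longest one in the batch and
# stacks the integer labels.
def _demo_padded_collate():
    batch = [(torch.LongTensor([256, 65, 257]), torch.tensor(0)),
             (torch.LongTensor([256, 66, 67, 257]), torch.tensor(1))]
    x, y = padded_collate(batch)
    assert x.shape == (2, 4)
    assert y.tolist() == [0, 1]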
def train(
model,
criterion,
optimizer,
train_loader,
epoch,
privacy_engine,
target_delta,
device="cuda:0",
):
model.train()
accs = []
losses = []
for x, y in tqdm(train_loader):
x = x.to(device)
y = y.to(device)
logits = model(x)
loss = criterion(logits, y)
loss.backward()
optimizer.step()
optimizer.zero_grad()
preds = logits.argmax(-1)
n_correct = float(preds.eq(y).sum())
batch_accuracy = n_correct / len(y)
accs.append(batch_accuracy)
losses.append(float(loss))
printstr = (
f"\t Epoch {epoch}. Accuracy: {mean(accs):.6f} | Loss: {mean(losses):.6f}"
)
try:
epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(
delta=target_delta
)
printstr += f" | (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}"
except AttributeError:
pass
print(printstr)
return
def test(model, test_loader, privacy_engine, target_delta, device="cuda:0"):
model.eval()
accs = []
with torch.no_grad():
for x, y in tqdm(test_loader):
x = x.to(device)
y = y.to(device)
preds = model(x).argmax(-1)
n_correct = float(preds.eq(y).sum())
batch_accuracy = n_correct / len(y)
accs.append(batch_accuracy)
mean_acc = mean(accs)
printstr = "\n----------------------------\n" f"Test Accuracy: {mean_acc:.6f}"
if privacy_engine:
epsilon, best_alpha = privacy_engine.accountant.get_privacy_spent(
delta=target_delta
)
printstr += f" (ε = {epsilon:.2f}, δ = {target_delta}) for α = {best_alpha}"
print(printstr + "\n----------------------------\n")
return mean_acc
def main():
args = parser.parse_args()
device = torch.device(args.device)
ds = NamesDataset(args.data_root)
train_len = int(args.train_split * len(ds))
test_len = len(ds) - train_len
print(f"{train_len} samples for training, {test_len} for testing")
if args.secure_rng:
try:
import torchcsprng as prng
except ImportError as e:
msg = (
"To use secure RNG, you must install the torchcsprng package! "
"Check out the instructions here: https://github.com/pytorch/csprng#installation"
)
raise ImportError(msg) from e
generator = prng.create_random_device_generator("/dev/urandom")
else:
generator = None
train_ds, test_ds = torch.utils.data.random_split(
ds, [train_len, test_len], generator=generator
)
if args.mode == "rnn":
rnn_type = DPRNN
elif args.mode == "gru":
rnn_type = DPGRU
elif args.mode == "lstm":
rnn_type = DPLSTM
else:
raise ValueError(f"Invalid network type: {args.mode}")
model = CharNNClassifier(
rnn_type,
args.embedding_size,
args.hidden_size,
len(ds.labels),
args.n_layers,
args.bidirectional,
)
model = model.to(device)
train_loader = DataLoader(
train_ds,
batch_size=args.batch_size,
num_workers=1,
pin_memory=True,
collate_fn=padded_collate,
)
test_loader = DataLoader(
test_ds,
batch_size=2 * args.batch_size,
shuffle=False,
num_workers=1,
pin_memory=True,
collate_fn=padded_collate,
)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate)
if not args.disable_dp:
privacy_engine = PrivacyEngine(secure_mode=args.secure_rng)
model, optimizer, train_loader = privacy_engine.make_private(
module=model,
optimizer=optimizer,
data_loader=train_loader,
noise_multiplier=args.sigma,
max_grad_norm=args.max_per_sample_grad_norm,
)
else:
privacy_engine = None
print(f"Train stats ({args.mode}): \n")
for epoch in tqdm(range(args.epochs)):
train(
model,
criterion,
optimizer,
train_loader,
epoch,
privacy_engine,
args.delta,
device=device,
)
if args.test_every:
if epoch % args.test_every == 0:
test(model, test_loader, privacy_engine, args.delta, device=device)
mean_acc = test(model, test_loader, privacy_engine, args.delta, device=device)
torch.save(mean_acc, f"run_results_chr_{args.mode}_classification.pt")
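# Hedged usage note (illustration only): assuming this script is saved as, e.g.,
# char_nn_classification.py (hypothetical name) and the PyTorch "names" dataset has been
# extracted to ~/data/names/, a DP training run uses only the flags defined above:
#   python char_nn_classification.py --data-root ~/data/names/ --mode lstm \
#       --epochs 10 --sigma 1.0 -c 1.5 --batch-size 800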
if __name__ == "__main__":
main()
| [
"torch.nn.Embedding",
"torch.device",
"torch.nn.Linear",
"torch.stack",
"torch.nn.utils.rnn.pad_sequence",
"torch.utils.data.random_split",
"torch.save",
"torch.no_grad",
"torch.tensor",
"torch.utils.data.DataLoader",
"torch.nn.CrossEntropyLoss"
] | 1.8 | iamgroot42/opacus | 51708309e71c030aa2bf15d6dccc7bcbbe9ed570 |
1.0 | import unittest
from transformers import AutoTokenizer, is_torch_available
from transformers.testing_utils import require_torch, slow
if is_torch_available():
import torch
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForNextSentencePrediction,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSOP,
GlueDataset,
GlueDataTrainingArguments,
LineByLineTextDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
default_data_collator,
)
PATH_SAMPLE_TEXT = "./tests/fixtures/sample_text.txt"
PATH_SAMPLE_TEXT_DIR = "./tests/fixtures/tests_samples/wiki_text"
@require_torch
class DataCollatorIntegrationTest(unittest.TestCase):
def test_default_with_dict(self):
features = [{"label": i, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# With label_ids
features = [{"label_ids": [0, 1, 2], "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor([[0, 1, 2]] * 8)))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# Features can already be tensors
features = [{"label": i, "inputs": torch.randint(10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
# Labels can already be tensors
features = [{"label": torch.tensor(i), "inputs": torch.randint(10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertTrue(batch["labels"].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch["labels"].dtype, torch.long)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 10]))
def test_default_with_no_labels(self):
features = [{"label": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
# With label_ids
features = [{"label_ids": None, "inputs": [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue("labels" not in batch)
self.assertEqual(batch["inputs"].shape, torch.Size([8, 6]))
@slow
def test_default_classification(self):
MODEL_ID = "bert-base-cased-finetuned-mrpc"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="mrpc", data_dir="./tests/fixtures/tests_samples/MRPC", overwrite_cache=True
)
dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
data_collator = default_data_collator
batch = data_collator(dataset.features)
self.assertEqual(batch["labels"].dtype, torch.long)
@slow
def test_default_regression(self):
MODEL_ID = "distilroberta-base"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
data_args = GlueDataTrainingArguments(
task_name="sts-b", data_dir="./tests/fixtures/tests_samples/STS-B", overwrite_cache=True
)
dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev")
data_collator = default_data_collator
batch = data_collator(dataset.features)
self.assertEqual(batch["labels"].dtype, torch.float)
@slow
def test_lm_tokenizer_without_padding(self):
tokenizer = AutoTokenizer.from_pretrained("gpt2")
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
# ^ causal lm
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
with self.assertRaises(ValueError):
# Expect error due to padding token missing on gpt2:
data_collator(examples)
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
@slow
def test_lm_tokenizer_with_padding(self):
tokenizer = AutoTokenizer.from_pretrained("distilroberta-base")
data_collator = DataCollatorForLanguageModeling(tokenizer)
# ^ masked lm
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((31, 107)))
self.assertEqual(batch["labels"].shape, torch.Size((31, 107)))
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
@slow
def test_plm(self):
tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased")
data_collator = DataCollatorForPermutationLanguageModeling(tokenizer)
# ^ permutation lm
dataset = LineByLineTextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((31, 112)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((31, 112, 112)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((31, 112, 112)))
self.assertEqual(batch["labels"].shape, torch.Size((31, 112)))
dataset = TextDataset(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512, overwrite_cache=True)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
self.assertEqual(batch["input_ids"].shape, torch.Size((2, 512)))
self.assertEqual(batch["perm_mask"].shape, torch.Size((2, 512, 512)))
self.assertEqual(batch["target_mapping"].shape, torch.Size((2, 512, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((2, 512)))
example = [torch.randint(5, [5])]
with self.assertRaises(ValueError):
# Expect error due to odd sequence length
data_collator(example)
@slow
def test_nsp(self):
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
data_collator = DataCollatorForNextSentencePrediction(tokenizer)
dataset = TextDatasetForNextSentencePrediction(tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
# Since there are randomly generated false samples, the total number of samples is not fixed.
total_samples = batch["input_ids"].shape[0]
self.assertEqual(batch["input_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["masked_lm_labels"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["next_sentence_label"].shape, torch.Size((total_samples,)))
@slow
def test_sop(self):
tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")
data_collator = DataCollatorForSOP(tokenizer)
dataset = LineByLineWithSOPTextDataset(tokenizer, file_dir=PATH_SAMPLE_TEXT_DIR, block_size=512)
examples = [dataset[i] for i in range(len(dataset))]
batch = data_collator(examples)
self.assertIsInstance(batch, dict)
# Since there are randomly generated false samples, the total number of samples is not fixed.
total_samples = batch["input_ids"].shape[0]
self.assertEqual(batch["input_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["token_type_ids"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["labels"].shape, torch.Size((total_samples, 512)))
self.assertEqual(batch["sentence_order_label"].shape, torch.Size((total_samples,)))
| [
"torch.Size",
"torch.randint",
"torch.tensor"
] | 1.0 | WERimagin/transformers | cc7d14511c647f8147494df72f8b0575015e37ab |
1.4 | from typing import List
import torch
from torch.nn import ParameterList, Parameter
from allennlp.common.checks import ConfigurationError
class ScalarMix(torch.nn.Module):
"""
Computes a parameterised scalar mixture of N tensors, ``mixture = gamma * sum(s_k * tensor_k)``
where ``s = softmax(w)``, with ``w`` and ``gamma`` scalar parameters.
In addition, if ``do_layer_norm=True`` then apply layer normalization to each tensor
before weighting.
"""
def __init__(self,
mixture_size: int,
do_layer_norm: bool = False,
initial_scalar_parameters: List[float] = None,
trainable: bool = True) -> None:
super(ScalarMix, self).__init__()
self.mixture_size = mixture_size
self.do_layer_norm = do_layer_norm
if initial_scalar_parameters is None:
initial_scalar_parameters = [0.0] * mixture_size
elif len(initial_scalar_parameters) != mixture_size:
raise ConfigurationError("Length of initial_scalar_parameters {} differs "
"from mixture_size {}".format(
initial_scalar_parameters, mixture_size))
self.scalar_parameters = ParameterList(
[Parameter(torch.FloatTensor([initial_scalar_parameters[i]]),
requires_grad=trainable) for i
in range(mixture_size)])
self.gamma = Parameter(torch.FloatTensor([1.0]), requires_grad=trainable)
def forward(self, tensors: List[torch.Tensor], # pylint: disable=arguments-differ
mask: torch.Tensor = None) -> torch.Tensor:
"""
        Compute a weighted average of the ``tensors``. The input tensors can be any shape
        with at least two dimensions, but must all be the same shape.
        When ``do_layer_norm=True``, the ``mask`` is a required input. If the ``tensors`` are
dimensioned ``(dim_0, ..., dim_{n-1}, dim_n)``, then the ``mask`` is dimensioned
``(dim_0, ..., dim_{n-1})``, as in the typical case with ``tensors`` of shape
``(batch_size, timesteps, dim)`` and ``mask`` of shape ``(batch_size, timesteps)``.
When ``do_layer_norm=False`` the ``mask`` is ignored.
"""
if len(tensors) != self.mixture_size:
raise ConfigurationError("{} tensors were passed, but the module was initialized to "
"mix {} tensors.".format(len(tensors), self.mixture_size))
def _do_layer_norm(tensor, broadcast_mask, num_elements_not_masked):
tensor_masked = tensor * broadcast_mask
mean = torch.sum(tensor_masked) / num_elements_not_masked
variance = torch.sum(((tensor_masked - mean) * broadcast_mask)**2) / num_elements_not_masked
return (tensor - mean) / torch.sqrt(variance + 1E-12)
normed_weights = torch.nn.functional.softmax(torch.cat([parameter for parameter
in self.scalar_parameters]), dim=0)
normed_weights = torch.split(normed_weights, split_size_or_sections=1)
if not self.do_layer_norm:
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(weight * tensor)
return self.gamma * sum(pieces)
else:
mask_float = mask.float()
broadcast_mask = mask_float.unsqueeze(-1)
input_dim = tensors[0].size(-1)
num_elements_not_masked = torch.sum(mask_float) * input_dim
pieces = []
for weight, tensor in zip(normed_weights, tensors):
pieces.append(weight * _do_layer_norm(tensor,
broadcast_mask, num_elements_not_masked))
return self.gamma * sum(pieces)
| [
"torch.cat",
"torch.sqrt",
"torch.split",
"torch.FloatTensor",
"torch.sum"
] | 1.4.0 | annaproxy/udify-metalearning | 55206a3aac0aba74a3615a36192d03b6467cfd6f |
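A minimal usage sketch for the ScalarMix module in the record above, assuming it is importable as in stock AllenNLP; the import path, tensor shapes, and mixture size here are illustrative, not taken from the record:
# Mix three equally shaped tensors with learned softmax weights and a gamma scale.
import torch
from allennlp.modules.scalar_mix import ScalarMix  # assumed import path
mixer = ScalarMix(mixture_size=3, do_layer_norm=False, trainable=True)
tensors = [torch.randn(2, 5, 8) for _ in range(3)]  # (batch, timesteps, dim)
mixed = mixer(tensors)                               # same shape as each input tensor
assert mixed.shape == (2, 5, 8)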
1.4 | # pylint: disable=no-self-use,invalid-name
from numpy.testing import assert_almost_equal
import torch
from allennlp.modules import Highway
from allennlp.common.testing import AllenNlpTestCase
class TestHighway(AllenNlpTestCase):
def test_forward_works_on_simple_input(self):
highway = Highway(2, 2)
# pylint: disable=protected-access
highway._layers[0].weight.data.fill_(1)
highway._layers[0].bias.data.fill_(0)
highway._layers[1].weight.data.fill_(2)
highway._layers[1].bias.data.fill_(-2)
input_tensor = torch.FloatTensor([[-2, 1], [3, -2]])
result = highway(input_tensor).data.numpy()
assert result.shape == (2, 2)
# This was checked by hand.
assert_almost_equal(result, [[-0.0394, 0.0197], [1.7527, -0.5550]], decimal=4)
def test_forward_works_on_nd_input(self):
highway = Highway(2, 2)
input_tensor = torch.ones(2, 2, 2)
output = highway(input_tensor)
assert output.size() == (2, 2, 2)
| [
"torch.FloatTensor",
"torch.ones"
] | 1.4.0 | annaproxy/udify-metalearning | 55206a3aac0aba74a3615a36192d03b6467cfd6f |
1.8 | from typing import Optional, Tuple
import torch
def marginal_pdf(
values: torch.Tensor, bins: torch.Tensor, sigma: torch.Tensor, epsilon: float = 1e-10
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Calculate the marginal probability distribution function of the input tensor based on the number of
histogram bins.
Args:
values: shape [BxNx1].
bins: shape [NUM_BINS].
sigma: shape [1], gaussian smoothing factor.
epsilon: scalar, for numerical stability.
Returns:
Tuple[torch.Tensor, torch.Tensor]:
- torch.Tensor: shape [BxN].
- torch.Tensor: shape [BxNxNUM_BINS].
"""
if not isinstance(values, torch.Tensor):
raise TypeError(f"Input values type is not a torch.Tensor. Got {type(values)}")
if not isinstance(bins, torch.Tensor):
raise TypeError(f"Input bins type is not a torch.Tensor. Got {type(bins)}")
if not isinstance(sigma, torch.Tensor):
raise TypeError(f"Input sigma type is not a torch.Tensor. Got {type(sigma)}")
if not values.dim() == 3:
raise ValueError("Input values must be a of the shape BxNx1." " Got {}".format(values.shape))
if not bins.dim() == 1:
raise ValueError("Input bins must be a of the shape NUM_BINS" " Got {}".format(bins.shape))
if not sigma.dim() == 0:
raise ValueError("Input sigma must be a of the shape 1" " Got {}".format(sigma.shape))
residuals = values - bins.unsqueeze(0).unsqueeze(0)
kernel_values = torch.exp(-0.5 * (residuals / sigma).pow(2))
pdf = torch.mean(kernel_values, dim=1)
normalization = torch.sum(pdf, dim=1).unsqueeze(1) + epsilon
pdf = pdf / normalization
return pdf, kernel_values
def joint_pdf(kernel_values1: torch.Tensor, kernel_values2: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:
"""Calculate the joint probability distribution function of the input tensors based on the number of histogram
bins.
Args:
kernel_values1: shape [BxNxNUM_BINS].
kernel_values2: shape [BxNxNUM_BINS].
epsilon: scalar, for numerical stability.
Returns:
shape [BxNUM_BINSxNUM_BINS].
"""
if not isinstance(kernel_values1, torch.Tensor):
raise TypeError(f"Input kernel_values1 type is not a torch.Tensor. Got {type(kernel_values1)}")
if not isinstance(kernel_values2, torch.Tensor):
raise TypeError(f"Input kernel_values2 type is not a torch.Tensor. Got {type(kernel_values2)}")
if not kernel_values1.dim() == 3:
raise ValueError("Input kernel_values1 must be a of the shape BxN." " Got {}".format(kernel_values1.shape))
if not kernel_values2.dim() == 3:
raise ValueError("Input kernel_values2 must be a of the shape BxN." " Got {}".format(kernel_values2.shape))
if kernel_values1.shape != kernel_values2.shape:
raise ValueError(
"Inputs kernel_values1 and kernel_values2 must have the same shape."
" Got {} and {}".format(kernel_values1.shape, kernel_values2.shape)
)
joint_kernel_values = torch.matmul(kernel_values1.transpose(1, 2), kernel_values2)
normalization = torch.sum(joint_kernel_values, dim=(1, 2)).view(-1, 1, 1) + epsilon
pdf = joint_kernel_values / normalization
return pdf
def histogram(x: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10) -> torch.Tensor:
"""Estimate the histogram of the input tensor.
The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.
Args:
x: Input tensor to compute the histogram with shape :math:`(B, D)`.
        bins: Tensor of bin centers with shape :math:`(N_{bins},)`.
        bandwidth: Gaussian smoothing factor with shape [1].
epsilon: A scalar, for numerical stability.
Returns:
Computed histogram of shape :math:`(B, N_{bins})`.
Examples:
>>> x = torch.rand(1, 10)
        >>> bins = torch.linspace(0, 255, 128)
>>> hist = histogram(x, bins, bandwidth=torch.tensor(0.9))
>>> hist.shape
torch.Size([1, 128])
"""
pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)
return pdf
def histogram2d(
x1: torch.Tensor, x2: torch.Tensor, bins: torch.Tensor, bandwidth: torch.Tensor, epsilon: float = 1e-10
) -> torch.Tensor:
"""Estimate the 2d histogram of the input tensor.
The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.
Args:
x1: Input tensor to compute the histogram with shape :math:`(B, D1)`.
x2: Input tensor to compute the histogram with shape :math:`(B, D2)`.
        bins: Tensor of bin centers with shape :math:`(N_{bins},)`.
        bandwidth: Gaussian smoothing factor with shape [1].
epsilon: A scalar, for numerical stability. Default: 1e-10.
Returns:
        Computed histogram of shape :math:`(B, N_{bins}, N_{bins})`.
Examples:
>>> x1 = torch.rand(2, 32)
>>> x2 = torch.rand(2, 32)
        >>> bins = torch.linspace(0, 255, 128)
>>> hist = histogram2d(x1, x2, bins, bandwidth=torch.tensor(0.9))
>>> hist.shape
torch.Size([2, 128, 128])
"""
_, kernel_values1 = marginal_pdf(x1.unsqueeze(2), bins, bandwidth, epsilon)
_, kernel_values2 = marginal_pdf(x2.unsqueeze(2), bins, bandwidth, epsilon)
pdf = joint_pdf(kernel_values1, kernel_values2)
return pdf
def image_histogram2d(
image: torch.Tensor,
min: float = 0.0,
max: float = 255.0,
n_bins: int = 256,
bandwidth: Optional[float] = None,
centers: Optional[torch.Tensor] = None,
return_pdf: bool = False,
kernel: str = "triangular",
eps: float = 1e-10,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Estimate the histogram of the input image(s).
The calculation uses triangular kernel density estimation.
Args:
image: Input tensor to compute the histogram with shape
:math:`(H, W)`, :math:`(C, H, W)` or :math:`(B, C, H, W)`.
min: Lower end of the interval (inclusive).
max: Upper end of the interval (inclusive). Ignored when
:attr:`centers` is specified.
n_bins: The number of histogram bins. Ignored when
:attr:`centers` is specified.
bandwidth: Smoothing factor. If not specified or equal to -1,
:math:`(bandwidth = (max - min) / n_bins)`.
centers: Centers of the bins with shape :math:`(n_bins,)`.
If not specified or empty, it is calculated as centers of
equal width bins of [min, max] range.
return_pdf: If True, also return probability densities for
each bin.
kernel: kernel to perform kernel density estimation
``(`triangular`, `gaussian`, `uniform`, `epanechnikov`)``.
Returns:
Computed histogram of shape :math:`(bins)`, :math:`(C, bins)`,
:math:`(B, C, bins)`.
Computed probability densities of shape :math:`(bins)`, :math:`(C, bins)`,
:math:`(B, C, bins)`, if return_pdf is ``True``. Tensor of zeros with shape
of the histogram otherwise.
"""
if image is not None and not isinstance(image, torch.Tensor):
raise TypeError(f"Input image type is not a torch.Tensor. Got {type(image)}.")
if centers is not None and not isinstance(centers, torch.Tensor):
raise TypeError(f"Bins' centers type is not a torch.Tensor. Got {type(centers)}.")
if centers is not None and len(centers.shape) > 0 and centers.dim() != 1:
raise ValueError(f"Bins' centers must be a torch.Tensor of the shape (n_bins,). Got {centers.shape}.")
    if not isinstance(min, float):
        raise TypeError(f"Type of lower end of the range is not a float. Got {type(min)}.")
    if not isinstance(max, float):
        raise TypeError(f"Type of upper end of the range is not a float. Got {type(max)}.")
if not isinstance(n_bins, int):
raise TypeError(f"Type of number of bins is not an int. Got {type(n_bins)}.")
if bandwidth is not None and not isinstance(bandwidth, float):
raise TypeError(f"Bandwidth type is not a float. Got {type(bandwidth)}.")
if not isinstance(return_pdf, bool):
raise TypeError(f"Return_pdf type is not a bool. Got {type(return_pdf)}.")
if bandwidth is None:
bandwidth = (max - min) / n_bins
if centers is None:
centers = min + bandwidth * (torch.arange(n_bins, device=image.device, dtype=image.dtype) + 0.5)
centers = centers.reshape(-1, 1, 1, 1, 1)
u = torch.abs(image.unsqueeze(0) - centers) / bandwidth
if kernel == "gaussian":
kernel_values = torch.exp(-0.5 * u ** 2)
elif kernel in ("triangular", "uniform", "epanechnikov",):
# compute the mask and cast to floating point
mask = (u <= 1).to(u.dtype)
if kernel == "triangular":
kernel_values = (1. - u) * mask
elif kernel == "uniform":
kernel_values = torch.ones_like(u) * mask
else: # kernel == "epanechnikov"
kernel_values = (1. - u ** 2) * mask
else:
raise ValueError(f"Kernel must be 'triangular', 'gaussian', " f"'uniform' or 'epanechnikov'. Got {kernel}.")
hist = torch.sum(kernel_values, dim=(-2, -1)).permute(1, 2, 0)
if return_pdf:
normalization = torch.sum(hist, dim=-1, keepdim=True) + eps
pdf = hist / normalization
if image.dim() == 2:
hist = hist.squeeze()
pdf = pdf.squeeze()
elif image.dim() == 3:
hist = hist.squeeze(0)
pdf = pdf.squeeze(0)
return hist, pdf
if image.dim() == 2:
hist = hist.squeeze()
elif image.dim() == 3:
hist = hist.squeeze(0)
return hist, torch.zeros_like(hist)
| [
"torch.arange",
"torch.ones_like",
"torch.zeros_like",
"torch.exp",
"torch.mean",
"torch.sum"
] | 1.8.1 | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 |
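A short, hedged sketch of the image_histogram2d function defined in the record above; it assumes the function is exposed under kornia.enhance (as in recent kornia releases), and the shapes and bin count are illustrative:
# Soft per-channel histograms of a batch of images with a triangular kernel.
import torch
from kornia.enhance import image_histogram2d  # assumed import path
images = torch.rand(2, 3, 16, 16) * 255.0  # BxCxHxW, values in [0, 255]
hist, pdf = image_histogram2d(images, min=0.0, max=255.0, n_bins=32,
                              return_pdf=True, kernel="triangular")
print(hist.shape, pdf.shape)  # torch.Size([2, 3, 32]) torch.Size([2, 3, 32])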
1.8 | # based on: https://github.com/ShiqiYu/libfacedetection.train/blob/74f3aa77c63234dd954d21286e9a60703b8d0868/tasks/task1/yufacedetectnet.py # noqa
import math
from enum import Enum
from typing import Callable, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from kornia.geometry.bbox import nms as nms_kornia
__all__ = [
"FaceDetector",
"FaceDetectorResult",
"FaceKeypoint",
]
url: str = "https://github.com/ShiqiYu/libfacedetection.train/raw/master/tasks/task1/weights/yunet_final.pth"
class FaceKeypoint(Enum):
r"""Define the keypoints detected in a face.
The left/right convention is based on the screen viewer.
"""
EYE_LEFT = 0
EYE_RIGHT = 1
NOSE = 2
MOUTH_LEFT = 3
MOUTH_RIGHT = 4
class FaceDetectorResult:
r"""Encapsulate the results obtained by the :py:class:`kornia.contrib.FaceDetector`.
Args:
        data: the encoded results coming from the feature detector with shape :math:`(15,)`.
"""
def __init__(self, data: torch.Tensor) -> None:
if len(data) < 15:
raise ValueError(f"Result must comes as vector of size(14). Got: {data.shape}.")
self._data = data
def to(self, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> "FaceDetectorResult":
"""Like :func:`torch.nn.Module.to()` method."""
self._data = self._data.to(device=device, dtype=dtype)
return self
@property
def xmin(self) -> torch.Tensor:
"""The bounding box top-left x-coordinate."""
return self._data[..., 0]
@property
def ymin(self) -> torch.Tensor:
"""The bounding box top-left y-coordinate."""
return self._data[..., 1]
@property
def xmax(self) -> torch.Tensor:
"""The bounding box bottom-right x-coordinate."""
return self._data[..., 2]
@property
def ymax(self) -> torch.Tensor:
"""The bounding box bottom-right y-coordinate."""
return self._data[..., 3]
def get_keypoint(self, keypoint: FaceKeypoint) -> torch.Tensor:
"""The [x y] position of a given facial keypoint.
Args:
keypoint: the keypoint type to return the position.
"""
if keypoint == FaceKeypoint.EYE_LEFT:
out = self._data[..., (4, 5)]
elif keypoint == FaceKeypoint.EYE_RIGHT:
out = self._data[..., (6, 7)]
elif keypoint == FaceKeypoint.NOSE:
out = self._data[..., (8, 9)]
elif keypoint == FaceKeypoint.MOUTH_LEFT:
out = self._data[..., (10, 11)]
elif keypoint == FaceKeypoint.MOUTH_RIGHT:
out = self._data[..., (12, 13)]
else:
raise ValueError(f"Not valid keypoint type. Got: {keypoint}.")
return out
@property
def score(self) -> torch.Tensor:
"""The detection score."""
return self._data[..., 14]
@property
def width(self) -> torch.Tensor:
"""The bounding box width."""
return self.xmax - self.xmin
@property
def height(self) -> torch.Tensor:
"""The bounding box height."""
return self.ymax - self.ymin
@property
def top_left(self) -> torch.Tensor:
"""The [x y] position of the top-left coordinate of the bounding box."""
return self._data[..., (0, 1)]
@property
def top_right(self) -> torch.Tensor:
"""The [x y] position of the top-left coordinate of the bounding box."""
out = self.top_left
out[..., 0] += self.width
return out
@property
def bottom_right(self) -> torch.Tensor:
"""The [x y] position of the bottom-right coordinate of the bounding box."""
return self._data[..., (2, 3)]
@property
def bottom_left(self) -> torch.Tensor:
"""The [x y] position of the top-left coordinate of the bounding box."""
out = self.top_left
out[..., 1] += self.height
return out
class FaceDetector(nn.Module):
r"""Detect faces in a given image using a CNN.
By default, it uses the method described in :cite:`facedetect-yu`.
Args:
top_k: the maximum number of detections to return before the nms.
confidence_threshold: the threshold used to discard detections.
nms_threshold: the threshold used by the nms for iou.
keep_top_k: the maximum number of detections to return after the nms.
Return:
A tensor of shape :math:`(N,15)` to be used with :py:class:`kornia.contrib.FaceDetectorResult`.
Example:
>>> img = torch.rand(1, 3, 320, 320)
>>> detect = FaceDetector()
>>> res = detect(img)
"""
def __init__(self,
top_k: int = 5000,
confidence_threshold: float = 0.3,
nms_threshold: float = 0.3,
keep_top_k: int = 750) -> None:
super().__init__()
self.top_k = top_k
self.confidence_threshold = confidence_threshold
self.nms_threshold = nms_threshold
self.keep_top_k = keep_top_k
self.config = {
'name': 'YuFaceDetectNet',
'min_sizes': [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]],
'steps': [8, 16, 32, 64],
'variance': [0.1, 0.2],
'clip': False,
}
self.min_sizes: List[List[int]] = [[10, 16, 24], [32, 48], [64, 96], [128, 192, 256]]
self.steps: List[int] = [8, 16, 32, 64]
self.variance: List[float] = [0.1, 0.2]
self.clip: bool = False
self.model = YuFaceDetectNet('test', pretrained=True)
self.nms: Callable = nms_kornia
def preprocess(self, image: torch.Tensor) -> torch.Tensor:
return image
def postprocess(self, data: Dict[str, torch.Tensor], height: int, width: int) -> torch.Tensor:
loc, conf, iou = data['loc'], data['conf'], data['iou']
scale = torch.tensor([
width, height, width, height,
width, height, width, height,
width, height, width, height,
width, height,
], device=loc.device, dtype=loc.dtype) # 14
priors = _PriorBox(self.min_sizes, self.steps, self.clip, image_size=(height, width))
priors = priors.to(loc.device, loc.dtype)
boxes = _decode(loc, priors(), self.variance) # Nx14
boxes = boxes * scale
        # clamp here for compatibility with ONNX
cls_scores, iou_scores = conf[:, 1], iou[:, 0]
scores = (cls_scores * iou_scores.clamp(0., 1.)).sqrt()
# ignore low scores
inds = (scores > self.confidence_threshold)
boxes, scores = boxes[inds], scores[inds]
# keep top-K before NMS
order = scores.sort(descending=True)[1][:self.top_k]
boxes, scores = boxes[order], scores[order]
        # perform NMS
        # NOTE: nms needs to be revised since it does not export well to onnx
dets = torch.cat((boxes, scores[:, None]), dim=-1) # Nx15
keep = self.nms(boxes[:, :4], scores, self.nms_threshold)
if len(keep) > 0:
dets = dets[keep, :]
# keep top-K faster NMS
return dets[:self.keep_top_k]
def forward(self, image: torch.Tensor) -> torch.Tensor:
img = self.preprocess(image)
out = self.model(img)
return self.postprocess(out, img.shape[-2], img.shape[-1])
# utils for the network
class ConvDPUnit(nn.Sequential):
def __init__(self, in_channels, out_channels, withBNRelu=True):
super().__init__()
self.add_module("conv1", nn.Conv2d(in_channels, out_channels, 1, 1, 0, bias=True, groups=1))
self.add_module("conv2", nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=True, groups=out_channels))
if withBNRelu:
self.add_module("bn", nn.BatchNorm2d(out_channels))
self.add_module("relu", nn.ReLU(inplace=True))
class Conv_head(nn.Sequential):
def __init__(self, in_channels: int, mid_channels: int, out_channels: int) -> None:
super().__init__()
self.add_module("conv1", nn.Conv2d(in_channels, mid_channels, 3, 2, 1, bias=True, groups=1))
self.add_module("bn1", nn.BatchNorm2d(mid_channels))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module("conv2", ConvDPUnit(mid_channels, out_channels))
class Conv4layerBlock(nn.Sequential):
def __init__(self, in_channels: int, out_channels: int, withBNRelu: bool = True) -> None:
super().__init__()
self.add_module("conv1", ConvDPUnit(in_channels, in_channels, True))
self.add_module("conv2", ConvDPUnit(in_channels, out_channels, withBNRelu))
class YuFaceDetectNet(nn.Module):
def __init__(self, phase, pretrained: bool):
super().__init__()
self.phase = phase
self.num_classes = 2
self.model0 = Conv_head(3, 16, 16)
self.model1 = Conv4layerBlock(16, 64)
self.model2 = Conv4layerBlock(64, 64)
self.model3 = Conv4layerBlock(64, 64)
self.model4 = Conv4layerBlock(64, 64)
self.model5 = Conv4layerBlock(64, 64)
self.model6 = Conv4layerBlock(64, 64)
self.head = nn.Sequential(
Conv4layerBlock(64, 3 * (14 + 2 + 1), False),
Conv4layerBlock(64, 2 * (14 + 2 + 1), False),
Conv4layerBlock(64, 2 * (14 + 2 + 1), False),
Conv4layerBlock(64, 3 * (14 + 2 + 1), False),
)
if self.phase == 'train':
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
nn.init.xavier_normal_(m.weight.data)
m.bias.data.fill_(0.02)
else:
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# use torch.hub to load pretrained model
if pretrained:
pretrained_dict = torch.hub.load_state_dict_from_url(
url, map_location=lambda storage, loc: storage
)
self.load_state_dict(pretrained_dict, strict=True)
self.eval()
def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
detection_sources, head_list = [], []
x = self.model0(x)
x = F.max_pool2d(x, 2)
x = self.model1(x)
x = self.model2(x)
x = F.max_pool2d(x, 2)
x = self.model3(x)
detection_sources.append(x)
x = F.max_pool2d(x, 2)
x = self.model4(x)
detection_sources.append(x)
x = F.max_pool2d(x, 2)
x = self.model5(x)
detection_sources.append(x)
x = F.max_pool2d(x, 2)
x = self.model6(x)
detection_sources.append(x)
for i, h in enumerate(self.head):
x_tmp = h(detection_sources[i])
head_list.append(x_tmp.permute(0, 2, 3, 1).contiguous())
head_data = torch.cat([o.view(o.size(0), -1) for o in head_list], 1)
head_data = head_data.view(head_data.size(0), -1, 17)
loc_data, conf_data, iou_data = head_data.split((14, 2, 1), dim=-1)
if self.phase == "test":
loc_data = loc_data.view(-1, 14)
conf_data = torch.softmax(conf_data.view(-1, self.num_classes), dim=-1)
iou_data = iou_data.view(-1, 1)
else:
loc_data = loc_data.view(loc_data.size(0), -1, 14)
conf_data = conf_data.view(conf_data.size(0), -1, self.num_classes)
iou_data = iou_data.view(iou_data.size(0), -1, 1)
return {"loc": loc_data, "conf": conf_data, "iou": iou_data}
# utils for post-processing
# Adapted from https://github.com/Hakuyume/chainer-ssd
def _decode(loc: torch.Tensor, priors: torch.Tensor, variances: List[float]) -> torch.Tensor:
"""Decode locations from predictions using priors to undo the encoding we did for offset regression at train
time.
Args:
        loc: location predictions for loc layers. Shape: [num_priors, 14].
priors: Prior boxes in center-offset form. Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes.
Return:
Tensor containing decoded bounding box predictions.
"""
boxes = torch.cat((
priors[:, 0:2] + loc[:, 0:2] * variances[0] * priors[:, 2:4],
priors[:, 2:4] * torch.exp(loc[:, 2:4] * variances[1]),
priors[:, 0:2] + loc[:, 4:6] * variances[0] * priors[:, 2:4],
priors[:, 0:2] + loc[:, 6:8] * variances[0] * priors[:, 2:4],
priors[:, 0:2] + loc[:, 8:10] * variances[0] * priors[:, 2:4],
priors[:, 0:2] + loc[:, 10:12] * variances[0] * priors[:, 2:4],
priors[:, 0:2] + loc[:, 12:14] * variances[0] * priors[:, 2:4]), 1)
# prepare final output
tmp = boxes[:, 0:2] - boxes[:, 2:4] / 2
return torch.cat((tmp, boxes[:, 2:4] + tmp, boxes[:, 4:]), dim=-1)
class _PriorBox:
def __init__(self, min_sizes: List[List[int]], steps: List[int], clip: bool, image_size: Tuple[int, int]) -> None:
self.min_sizes = min_sizes
self.steps = steps
self.clip = clip
self.image_size = image_size
self.device: torch.device = torch.device('cpu')
self.dtype: torch.dtype = torch.float32
for i in range(4):
if(self.steps[i] != math.pow(2, (i + 3))):
raise ValueError("steps must be [8,16,32,64]")
self.feature_map_2th = [int(int((self.image_size[0] + 1) / 2) / 2),
int(int((self.image_size[1] + 1) / 2) / 2)]
self.feature_map_3th = [int(self.feature_map_2th[0] / 2),
int(self.feature_map_2th[1] / 2)]
self.feature_map_4th = [int(self.feature_map_3th[0] / 2),
int(self.feature_map_3th[1] / 2)]
self.feature_map_5th = [int(self.feature_map_4th[0] / 2),
int(self.feature_map_4th[1] / 2)]
self.feature_map_6th = [int(self.feature_map_5th[0] / 2),
int(self.feature_map_5th[1] / 2)]
self.feature_maps = [self.feature_map_3th, self.feature_map_4th,
self.feature_map_5th, self.feature_map_6th]
def to(self, device: torch.device, dtype: torch.dtype) -> '_PriorBox':
self.device = device
self.dtype = dtype
return self
def __call__(self) -> torch.Tensor:
anchors: List[float] = []
for k, f in enumerate(self.feature_maps):
min_sizes: List[int] = self.min_sizes[k]
            # NOTE: the nested loop is to make torchscript happy
for i in range(f[0]):
for j in range(f[1]):
for min_size in min_sizes:
s_kx = min_size / self.image_size[1]
s_ky = min_size / self.image_size[0]
cx = (j + 0.5) * self.steps[k] / self.image_size[1]
cy = (i + 0.5) * self.steps[k] / self.image_size[0]
anchors += [cx, cy, s_kx, s_ky]
# back to torch land
output = torch.tensor(anchors, device=self.device, dtype=self.dtype).view(-1, 4)
if self.clip:
output = output.clamp(max=1, min=0)
return output
| [
"torch.device",
"torch.cat",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.tensor",
"torch.hub.load_state_dict_from_url",
"torch.nn.init.xavier_normal_",
"torch.nn.functional.max_pool2d",
"torch.exp"
] | 1.8.1 | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 |
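An illustrative sketch of running the detector defined in the record above and decoding its output; it assumes the classes are exposed as kornia.contrib.FaceDetector / FaceDetectorResult and that the pretrained weights can be fetched via torch.hub (network access required):
# Detect faces on a dummy image and unpack one result row per detection.
import torch
from kornia.contrib import FaceDetector, FaceDetectorResult  # assumed import path
detector = FaceDetector(confidence_threshold=0.5)
image = torch.rand(1, 3, 320, 320)  # BxCxHxW, values are arbitrary here
with torch.no_grad():
    detections = detector(image)    # (N, 15): bbox, 5 keypoints, score
for row in detections:
    res = FaceDetectorResult(row)
    print(res.top_left, res.width, res.height, res.score)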
1.8 | import os
import cv2
import imageio
import torch
import kornia as K
import kornia.geometry as KG
def load_timg(file_name):
"""Loads the image with OpenCV and converts to torch.Tensor."""
assert os.path.isfile(file_name), f"Invalid file {file_name}" # nosec
# load image with OpenCV
img = cv2.imread(file_name, cv2.IMREAD_COLOR)
# convert image to torch tensor
tensor = K.image_to_tensor(img, None).float() / 255.
return K.color.bgr_to_rgb(tensor)
registrator = KG.ImageRegistrator('similarity')
img1 = K.resize(load_timg('/Users/oldufo/datasets/stewart/MR-CT/CT.png'), (400, 600))
img2 = K.resize(load_timg('/Users/oldufo/datasets/stewart/MR-CT/MR.png'), (400, 600))
model, intermediate = registrator.register(img1, img2, output_intermediate_models=True)
video_writer = imageio.get_writer('medical_registration.gif', fps=2)
timg_dst_first = img1.clone()
timg_dst_first[0, 0, :, :] = img2[0, 0, :, :]
video_writer.append_data(K.tensor_to_image((timg_dst_first * 255.).byte()))
with torch.no_grad():
for m in intermediate:
timg_dst = KG.homography_warp(img1, m, img2.shape[-2:])
timg_dst[0, 0, :, :] = img2[0, 0, :, :]
        video_writer.append_data(K.tensor_to_image((timg_dst * 255.).byte()))
video_writer.close()
| [
"torch.no_grad"
] | 1.8.1 | Ishticode/kornia | 974abb43ec72d12dbd244a2fb247bbbab8498de0 |
1.6 | '''
Code taken from https://github.com/WilhelmT/ClassMix
Slightly modified
'''
import kornia
import torch
import random
import torch.nn as nn
def normalize_rgb(data, dataset):
"""
Args:
data: data to normalize BxCxWxH
dataset: name of the dataset to normalize
Returns:
normalized data as (x-mean)/255
"""
if dataset == 'pascal_voc':
mean = (122.6789143, 116.66876762, 104.00698793) # rgb
elif dataset == 'cityscapes':
mean = (73.15835921, 82.90891754, 72.39239876) # rgb
else:
mean = (127.5, 127.5, 127.5 )
mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()
data_norm = ((data-mean)/255.0)
return data_norm
def normalize_bgr(data, dataset):
"""
Args:
data: data to normalize BxCxWxH
dataset: name of the dataset to normalize
Returns:
normalized data as (x-mean)/255
"""
if dataset == 'pascal_voc':
mean = (104.00698793, 116.66876762, 122.6789143) # bgr
elif dataset == 'cityscapes':
mean = (72.39239876, 82.90891754, 73.15835921) # bgr
else:
mean = (127.5, 127.5, 127.5 )
mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()
data_norm = ((data-mean)/255.0)
return data_norm
def grayscale(grayscale, data = None, target = None, probs = None):
"""
Args:
grayscale: boolean whether to apply grayscale augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data is converted from rgb to grayscale if [grayscale] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if grayscale and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.RandomGrayscale(p=1.) )
data = seq(data)
return data, target, probs
def colorJitter(colorJitter, data = None, target = None, s=0.1, probs = None):
"""
Args:
colorJitter: boolean whether to apply colorJitter augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
s: brightness and contrast strength of the color jitter
Returns:
colorJitter is applied to data if [colorJitter] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if colorJitter and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.ColorJitter(brightness=s,contrast=s,saturation=s/2.,hue=s/3.))
            data = seq(data/255.)*255.  # ColorJitter expects inputs in [0, 1], so scale down and back up
return data, target, probs
def gaussian_blur(blur, data = None, target = None, min_sigma=0.2, max_sigma=3, probs = None):
"""
Args:
blur: boolean whether to apply blur
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
min_sigma: minimum sigma value for the gaussian blur
max_sigma: maximum sigma value for the gaussian blur
Returns:
gaussian blur is applied to data if [blur] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if blur and data.shape[1]==3:
seq = nn.Sequential(kornia.filters.GaussianBlur2d(kernel_size=(23, 23), sigma=(min_sigma, max_sigma)))
data = seq(data)
return data, target, probs
def flip(flip, data = None, target = None, probs = None):
"""
Args:
flip: boolean whether to apply flip augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target and probs are flipped if the boolean flip is True
"""
if flip:
if not (data is None): data = torch.flip(data,(3,))
if not (target is None):
target = torch.flip(target,(2,))
if not (probs is None):
probs = torch.flip(probs,(2,))
return data, target, probs
def solarize(solarize, data = None, target = None, probs = None):
"""
Args:
solarize: boolean whether to apply solarize augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target, probs, where
data is solarized if [solarize] is True
"""
if not (data is None):
if solarize and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.RandomSolarize((0, 1)))
data = seq(data.cpu()/255.).cuda()*255.
return data, target, probs
def mix(mask, data = None, target = None, probs = None):
"""
Applies classMix augmentation:
https://openaccess.thecvf.com/content/WACV2021/papers/Olsson_ClassMix_Segmentation-Based_Data_Augmentation_for_Semi-Supervised_Learning_WACV_2021_paper.pdf
Args:
mask: masks for applying ClassMix. A list of B elements of CxWxH tensors
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target and probs augmented with classMix
"""
if not (data is None):
if mask.shape[0] == data.shape[0]:
data = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * data[i] + mask[(i + 1) % data.shape[0]] * data[(i + 1) % data.shape[0]]).unsqueeze(0) for i in range(data.shape[0])])
if not (target is None):
target = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * target[i] + mask[(i + 1) % data.shape[0]] * target[(i + 1) % target.shape[0]]).unsqueeze(0) for i in range(target.shape[0])])
if not (probs is None):
probs = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * probs[i] + mask[(i + 1) % data.shape[0]] * probs[(i + 1) % probs.shape[0]]).unsqueeze(0) for i in range(probs.shape[0])])
return data, target, probs
def random_scale_crop(scale, data = None, target = None, ignore_label=255, probs = None):
"""
Args:
scale: scale ratio. Float
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
        ignore_label: integer value that defines the ignore class in the datasets for the labels
    Returns:
        data, target and probs, after applying a scaling operation. The output resolution is preserved to match the input resolution WxH
"""
if scale != 1:
init_size_w = data.shape[2]
init_size_h = data.shape[3]
# scale data, labels and probs
data = nn.functional.interpolate(data, scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True)
if target is not None:
target = nn.functional.interpolate(target.unsqueeze(1).float(), scale_factor=scale, mode='nearest', recompute_scale_factor=True).long().squeeze(1)
if probs is not None:
probs = nn.functional.interpolate(probs.unsqueeze(1), scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True).squeeze(1)
final_size_w = data.shape[2]
final_size_h = data.shape[3]
diff_h = init_size_h - final_size_h
diff_w = init_size_w - final_size_w
if scale < 1: # add padding if needed
if diff_h % 2 == 1:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), 0)
else:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), 0)
data = pad(data)
if probs is not None:
probs = pad(probs)
# padding with ignore label to add to labels
if diff_h % 2 == 1:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), ignore_label)
else:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), ignore_label)
if target is not None:
target = pad(target)
else: # crop if needed
w = random.randint(0, data.shape[2] - init_size_w)
h = random.randint(0, data.shape[3] - init_size_h)
data = data [:,:,h:h+init_size_h,w:w + init_size_w]
if probs is not None:
probs = probs [:,h:h+init_size_h,w:w + init_size_w]
if target is not None:
target = target [:,h:h+init_size_h,w:w + init_size_w]
return data, target, probs
| [
"torch.nn.ConstantPad2d",
"torch.nn.functional.interpolate",
"torch.Tensor",
"torch.flip"
] | 1.6.0 | drkostas/SemiSeg-Contrastive | af6b133400368911ef77f401b7673894fe6aa05c |
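A hedged sketch of how the mix() helper in the record above could be driven with a per-sample class mask; the function is assumed to be importable from the repo's augmentation module, and the shapes and the way the mask is built are illustrative:
# Build a binary BxHxW mask from a subset of label classes and apply ClassMix.
import torch
# from utils.transformsgpu import mix  # hypothetical import path within the repo
B, C, H, W = 2, 3, 8, 8
data = torch.rand(B, C, H, W)
target = torch.randint(0, 6, (B, H, W)).float()
# mark pixels whose label falls in the "pasted" class subset (classes 0-2 here)
class_masks = torch.stack([(target[i] < 3).float() for i in range(B)])  # BxHxW
mixed_data, mixed_target, _ = mix(class_masks, data=data, target=target)
print(mixed_data.shape, mixed_target.shape)  # BxCxHxW, BxHxW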
1.9 | from collections import OrderedDict
import torch
from torch import Tensor
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
from ptnetworks.ActivationTracker import ActivationTracker
from typing import Type, Any, Callable, Union, List, Optional
class ResNetCIFAR(nn.Module):
def __init__(self,
variant='resnet050',
n_classes=100,
pretrained=False,
freeze_features_until='', #exclusive
no_gradient_required=False,
enforce_batchnorm_requires_gradient=False,
n_layers_to_be_removed_from_blocks=[],
no_classifier=False,
activation='relu',
init_mode='kaiming_normal',
statedict='',
strict_loading=True):
super().__init__()
arg_dict = {
'pretrained' : pretrained,
'num_classes' : n_classes,
'init_mode' : init_mode,
'activation' : activation,
}
if variant == 'resnet018':
self.embedded_model = resnet18(**arg_dict)
elif variant == 'resnet034':
self.embedded_model = resnet34(**arg_dict)
elif variant == 'resnet050':
self.embedded_model = resnet50(**arg_dict)
elif variant == 'resnet101':
self.embedded_model = resnet101(**arg_dict)
elif variant == 'resnet152':
self.embedded_model = resnet152(**arg_dict)
elif variant == 'resnext050_32x4d':
self.embedded_model = resnext50_32x4d(**arg_dict)
elif variant == 'resnext101_32x8d':
self.embedded_model = resnext101_32x8d(**arg_dict)
elif variant == 'wide_resnet050_2':
self.embedded_model = wide_resnet50_2(**arg_dict)
elif variant == 'wide_resnet101_2':
self.embedded_model = wide_resnet101_2(**arg_dict)
else:
print('select valid model variant')
if no_classifier:
self.embedded_model.classifier = nn.Identity()
module_dict = OrderedDict([
('classifier', self.embedded_model.classifier),
('layer4', self.embedded_model.layer4),
('layer3', self.embedded_model.layer3),
('layer2', self.embedded_model.layer2),
('layer1', self.embedded_model.layer1),
])
if freeze_features_until:
for param in self.embedded_model.parameters():
param.requires_grad = False
if freeze_features_until not in module_dict:
raise ValueError("freeue_features_until does not match any network module")
for key, module in module_dict.items():
for param in module.parameters():
param.requires_grad = True
if freeze_features_until == key:
break
if n_layers_to_be_removed_from_blocks:
modules = [
self.embedded_model.layer1,
self.embedded_model.layer2,
self.embedded_model.layer3,
self.embedded_model.layer4,
]
for n_layers, layer in zip(n_layers_to_be_removed_from_blocks, modules):
for i in range(n_layers):
layer[-i-1] = nn.Identity()
if statedict:
pretrained_dict = torch.load(statedict, map_location=torch.device('cpu'))
missing = self.load_state_dict(pretrained_dict, strict=strict_loading)
print('Loading weights from statedict. Missing and unexpected keys:')
print(missing)
if enforce_batchnorm_requires_gradient:
for m in self.embedded_model.modules():
if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
for param in m.parameters():
param.requires_grad = True
if no_gradient_required:
for param in self.embedded_model.parameters():
param.requires_grad = False
def forward(self, batch):
if isinstance(batch, dict) and 'data' in batch:
logits = self.embedded_model(batch['data'])
out = {'logits' : logits}
return out
else:
return self.embedded_model(batch)
def forward_features(self, batch, module=None):
track_modules = ActivationTracker()
assert isinstance(batch, dict) and 'data' in batch
logits, activation_dict = track_modules.collect_stats(self.embedded_model, batch['data'], module)
out = {'logits' : logits, 'activations' : activation_dict}
return out
def save(self, statedict_name):
torch.save(self.state_dict(), statedict_name)
MODEL_DIR = '/nfshome/linse/NO_INB_BACKUP/ModelZoo'
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation_layer=nn.ReLU
) -> None:
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu_1 = activation_layer(inplace=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.relu_2 = activation_layer(inplace=False)
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu_1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu_2(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
activation_layer=nn.ReLU
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu_1 = activation_layer(inplace=False)
self.relu_2 = activation_layer(inplace=False)
self.relu_3 = activation_layer(inplace=False)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu_1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu_2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu_3(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
init_mode='kaiming_normal',
activation='relu',
) -> None:
super().__init__()
self.ID = 'ResNet'
if activation == 'relu':
activation_layer = nn.ReLU
elif activation == 'leaky_relu':
activation_layer = nn.LeakyReLU
self._activation_layer = activation_layer
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
#for CIFAR we choose a kernel size of 3 in the first convolutional layer
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=2, padding=3,
bias=False)
self.conv1.ID = self.ID + '_first_layer'
self.bn1 = norm_layer(self.inplanes)
self.relu = self._activation_layer(inplace=False)
#we do not apply maxpooling after the first layer for CIFAR
self.maxpool = nn.Identity() #nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(512 * block.expansion, num_classes)
self.reinitialize(init_mode, activation, zero_init_residual)
def reinitialize(self, init_mode, activation, zero_init_residual):
for m in self.modules():
if isinstance(m, nn.Conv2d):
if init_mode == 'kaiming_normal':
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity=activation)
elif init_mode == 'kaiming_uniform':
nn.init.kaiming_uniform_(m.weight, mode='fan_out', nonlinearity=activation)
elif init_mode == 'sparse':
nn.init.sparse_(m.weight, sparsity=0.1, std=0.01)
elif init_mode == 'orthogonal':
nn.init.orthogonal_(m.weight, gain=1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
activation_layer = self._activation_layer
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer, activation_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer, activation_layer=activation_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress, model_dir=MODEL_DIR)
model.load_state_dict(state_dict, strict=False)
return model
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
                   pretrained, progress, **kwargs)
| [
"torch.nn.Linear",
"torch.device",
"torch.nn.Identity",
"torch.nn.init.kaiming_uniform_",
"torch.flatten",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.nn.init.orthogonal_",
"torch.nn.init.kaiming_normal_",
"torch.utils.model_zoo.load_url",
"torch.nn.init.sparse_",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
] | 1.9.1 | Criscraft/pytorch_classification | d5772963e55ce218ae4719fb7f85604263aab65f |
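A brief usage sketch for the ResNetCIFAR wrapper defined in the record above, assuming the class (and its ptnetworks.ActivationTracker dependency) is importable and that no pretrained weights or state dict are requested:
# Instantiate a small CIFAR-style ResNet and run a dummy batch through it.
import torch
model = ResNetCIFAR(variant='resnet018', n_classes=10, pretrained=False)
batch = {'data': torch.randn(4, 3, 32, 32)}
out = model(batch)                  # forward returns {'logits': ...} for dict inputs
print(out['logits'].shape)          # torch.Size([4, 10])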
1.7 | # ===============================================================================
# Author: Xianyuan Liu, [email protected]
# Raivo Koot, [email protected]
# Haiping Lu, [email protected] or [email protected]
# ===============================================================================
"""Data downloading and compressed data extraction functions, Based on
https://github.com/pytorch/vision/blob/master/torchvision/datasets/utils.py
https://github.com/pytorch/pytorch/blob/master/torch/hub.py
"""
import logging
import os
from pathlib import Path
from torch.hub import download_url_to_file
from torchvision.datasets.utils import download_and_extract_archive, download_file_from_google_drive, extract_archive
def download_file_by_url(url, output_directory, output_file_name, file_format=None):
"""Download file/compressed file by url.
Args:
url (string): URL of the object to download
        output_directory (string, optional): Full path where the object will be saved.
            Absolute path recommended. Relative path also works.
        output_file_name (string, optional): File name under which the object will be saved
file_format (string, optional): File format
For compressed file, support ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]
    Example: (Grab the raw link from GitHub. Note the use of "raw" in the URL.)
>>> url = "https://github.com/pykale/data/raw/main/videos/video_test_data/ADL/annotations/labels_train_test/adl_P_04_train.pkl"
>>> download_file_by_url(url, "data", "a.pkl", "pkl")
>>> url = "https://github.com/pykale/data/raw/main/videos/video_test_data.zip"
>>> download_file_by_url(url, "data", "video_test_data.zip", "zip")
"""
output_directory = Path(output_directory).absolute()
file = Path(output_directory).joinpath(output_file_name)
if os.path.exists(file):
logging.info("Skipping Download and Extraction")
return
if not os.path.exists(output_directory):
os.makedirs(output_directory)
if file_format in ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]:
logging.info("Downloading and extracting {}.".format(output_file_name))
download_and_extract_archive(url=url, download_root=output_directory, filename=output_file_name)
logging.info("Datasets downloaded and extracted in {}".format(file))
else:
logging.info("Downloading {}.".format(output_file_name))
download_url_to_file(url, file)
logging.info("Datasets downloaded in {}".format(file))
def download_file_gdrive(id, output_directory, output_file_name, file_format=None):
"""Download file/compressed file by Google Drive id.
Args:
id (string): Google Drive file id of the object to download
        output_directory (string, optional): Full path where the object will be saved.
            Absolute path recommended. Relative path also works.
        output_file_name (string, optional): File name under which the object will be saved
file_format (string, optional): File format
For compressed file, support ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]
Example:
>>> gdrive_id = "1U4D23R8u8MJX9KVKb92bZZX-tbpKWtga"
>>> download_file_gdrive(gdrive_id, "data", "demo_datasets.zip", "zip")
>>> gdrive_id = "1SV7fmAnWj-6AU9X5BGOrvGMoh2Gu9Nih"
>>> download_file_gdrive(gdrive_id, "data", "dummy_data.csv", "csv")
"""
output_directory = Path(output_directory).absolute()
file = Path(output_directory).joinpath(output_file_name)
if os.path.exists(file):
logging.info("Skipping Download and Extraction")
return
os.makedirs(output_directory, exist_ok=True)
logging.info("Downloading {}.".format(output_file_name))
download_file_from_google_drive(id, output_directory, output_file_name)
if file_format is not None and file_format in ["tar.xz", "tar", "tar.gz", "tgz", "gz", "zip"]:
logging.info("Extracting {}.".format(output_file_name))
extract_archive(file.as_posix())
logging.info("Datasets downloaded and extracted in {}".format(file))
else:
logging.info("Datasets downloaded in {}".format(file))
| [
"torch.hub.download_url_to_file"
] | 1.7.0 | SheffieldAI/pykale | 1f5cce57a50f7772520a482e8135a391eb0517f5 |
1.7 | import pytest
import torch
from kale.predict.losses import multitask_topk_accuracy, topk_accuracy
# Dummy data: [batch_size, num_classes]
# Dummy ground truth: batch_size
FIRST_PREDS = torch.tensor(
(
[0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
[0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
[0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
[0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
[0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
)
)
FIRST_LABELS = torch.tensor((0, 2, 4, 5, 5))
SECOND_PREDS = torch.tensor(
(
[0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
[0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
[0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
[0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
[0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1],
)
)
SECOND_LABELS = torch.tensor((0, 0, 4, 4, 5))
MULTI_PREDS = (FIRST_PREDS, SECOND_PREDS)
MULTI_LABELS = (FIRST_LABELS, SECOND_LABELS)
def test_topk_accuracy():
# Test topk_accuracy with single-task input
preds = FIRST_PREDS
labels = FIRST_LABELS
k = (1, 3, 5)
top1, top3, top5 = topk_accuracy(preds, labels, k)
top1_value = top1.double().mean()
top3_value = top3.double().mean()
top5_value = top5.double().mean()
assert top1_value.cpu() == pytest.approx(1 / 5)
assert top3_value.cpu() == pytest.approx(2 / 5)
assert top5_value.cpu() == pytest.approx(3 / 5)
def test_multitask_topk_accuracy():
# Test multitask_topk_accuracy with input for two tasks
preds = MULTI_PREDS
labels = MULTI_LABELS
k = (1, 3, 5)
top1, top3, top5 = multitask_topk_accuracy(preds, labels, k)
top1_value = top1.double().mean()
top3_value = top3.double().mean()
top5_value = top5.double().mean()
assert top1_value.cpu() == pytest.approx(1 / 5)
assert top3_value.cpu() == pytest.approx(2 / 5)
assert top5_value.cpu() == pytest.approx(3 / 5)
| [
"torch.tensor"
] | 1.7.0 | SheffieldAI/pykale | 1f5cce57a50f7772520a482e8135a391eb0517f5 |
1.7 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List
import numpy as np
import torch
import flash
from flash.core.data.io.input import DataKeys, Input, ServeInput
from flash.core.data.utilities.paths import filter_valid_files, has_file_allowed_extension, PATH_TYPE
from flash.core.data.utilities.samples import to_samples
from flash.core.data.utils import image_default_loader
from flash.core.utilities.imports import _TORCHVISION_AVAILABLE, Image, requires
if _TORCHVISION_AVAILABLE:
from torchvision.datasets.folder import IMG_EXTENSIONS
from torchvision.transforms.functional import to_pil_image
else:
IMG_EXTENSIONS = (".jpg", ".jpeg", ".png", ".ppm", ".bmp", ".pgm", ".tif", ".tiff", ".webp")
NP_EXTENSIONS = (".npy",)
def image_loader(filepath: str):
if has_file_allowed_extension(filepath, IMG_EXTENSIONS):
img = image_default_loader(filepath)
elif has_file_allowed_extension(filepath, NP_EXTENSIONS):
img = Image.fromarray(np.load(filepath).astype("uint8"), "RGB")
else:
raise ValueError(
f"File: {filepath} has an unsupported extension. Supported extensions: "
f"{list(IMG_EXTENSIONS + NP_EXTENSIONS)}."
)
return img
class ImageDeserializer(ServeInput):
@requires("image")
def serve_load_sample(self, data: str) -> Dict:
encoded_with_padding = (data + "===").encode("ascii")
img = base64.b64decode(encoded_with_padding)
buffer = BytesIO(img)
img = Image.open(buffer, mode="r")
return {
DataKeys.INPUT: img,
}
@property
def example_input(self) -> str:
with (Path(flash.ASSETS_ROOT) / "fish.jpg").open("rb") as f:
return base64.b64encode(f.read()).decode("UTF-8")
class ImageInput(Input):
@requires("image")
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
w, h = sample[DataKeys.INPUT].size # W x H
if DataKeys.METADATA not in sample:
sample[DataKeys.METADATA] = {}
sample[DataKeys.METADATA]["size"] = (h, w)
return sample
class ImageFilesInput(ImageInput):
def load_data(self, files: List[PATH_TYPE]) -> List[Dict[str, Any]]:
files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS + NP_EXTENSIONS)
return to_samples(files)
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
filepath = sample[DataKeys.INPUT]
sample[DataKeys.INPUT] = image_loader(filepath)
sample = super().load_sample(sample)
sample[DataKeys.METADATA]["filepath"] = filepath
return sample
class ImageTensorInput(ImageInput):
def load_data(self, tensor: Any) -> List[Dict[str, Any]]:
return to_samples(tensor)
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
img = to_pil_image(sample[DataKeys.INPUT])
sample[DataKeys.INPUT] = img
return super().load_sample(sample)
class ImageNumpyInput(ImageInput):
def load_data(self, array: Any) -> List[Dict[str, Any]]:
return to_samples(array)
def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:
img = to_pil_image(torch.from_numpy(sample[DataKeys.INPUT]))
sample[DataKeys.INPUT] = img
return super().load_sample(sample)
| [
"torch.from_numpy"
] | 1.7.1 | dudeperf3ct/lightning-flash | a855cd14cf1cd0301b4a2f82c0c95e4d8d986650 |
1.0 | """ Class for the Sequence to sequence model for ATIS."""
import os
import torch
import torch.nn.functional as F
from . import torch_utils
from . import utils_bert
from data_util.vocabulary import DEL_TOK, UNK_TOK
from .encoder import Encoder, Encoder_Gnn
from .embedder import Embedder
from .token_predictor import construct_token_predictor
import numpy as np
from data_util.atis_vocab import ATISVocabulary
from .gated_graph_conv import GatedGraphConv
def get_token_indices(token, index_to_token):
""" Maps from a gold token (string) to a list of indices.
Inputs:
token (string): String to look up.
index_to_token (list of tokens): Ordered list of tokens.
Returns:
list of int, representing the indices of the token in the probability
distribution.
"""
if token in index_to_token:
if len(set(index_to_token)) == len(index_to_token): # no duplicates
return [index_to_token.index(token)]
else:
indices = []
for index, other_token in enumerate(index_to_token):
if token == other_token:
indices.append(index)
assert len(indices) == len(set(indices))
return indices
else:
return [index_to_token.index(UNK_TOK)]
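# A small illustrative sketch (added for clarity; not part of the original file) of the
# duplicate handling above: every matching position is returned for a duplicated token,
# and unknown tokens fall back to the index of UNK_TOK.
def _example_get_token_indices():
    index_to_token = ["select", "from", "select", UNK_TOK]
    assert get_token_indices("select", index_to_token) == [0, 2]
    assert get_token_indices("where", index_to_token) == [3]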
def flatten_utterances(utterances):
""" Gets a flat sequence from a sequence of utterances.
Inputs:
utterances (list of list of str): Utterances to concatenate.
Returns:
list of str, representing the flattened sequence with separating
delimiter tokens.
"""
sequence = []
for i, utterance in enumerate(utterances):
sequence.extend(utterance)
if i < len(utterances) - 1:
sequence.append(DEL_TOK)
return sequence
def encode_snippets_with_states(snippets, states):
""" Encodes snippets by using previous query states instead.
Inputs:
snippets (list of Snippet): Input snippets.
states (list of dy.Expression): Previous hidden states to use.
        TODO: should this be dy.Expression or vector values?
"""
for snippet in snippets:
snippet.set_embedding(torch.cat([states[snippet.startpos],states[snippet.endpos]], dim=0))
return snippets
def load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params):
# print(output_vocabulary.inorder_tokens)
# print()
def read_glove_embedding(embedding_filename, embedding_size):
glove_embeddings = {}
with open(embedding_filename) as f:
cnt = 1
for line in f:
cnt += 1
if params.debug or not params.train:
if cnt == 1000:
print('Read 1000 word embeddings')
break
l_split = line.split()
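                # Some GloVe entries contain spaces inside the token itself, so everything
                # except the last `embedding_size` fields is treated as the word and the
                # remaining fields as the embedding vector.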
word = " ".join(l_split[0:len(l_split) - embedding_size])
embedding = np.array([float(val) for val in l_split[-embedding_size:]])
glove_embeddings[word] = embedding
return glove_embeddings
print('Loading Glove Embedding from', params.embedding_filename)
glove_embedding_size = 300
glove_embeddings = read_glove_embedding(params.embedding_filename, glove_embedding_size)
print('Done')
input_embedding_size = glove_embedding_size
def create_word_embeddings(vocab):
vocabulary_embeddings = np.zeros((len(vocab), glove_embedding_size), dtype=np.float32)
vocabulary_tokens = vocab.inorder_tokens
glove_oov = 0
para_oov = 0
for token in vocabulary_tokens:
token_id = vocab.token_to_id(token)
if token in glove_embeddings:
vocabulary_embeddings[token_id][:glove_embedding_size] = glove_embeddings[token]
else:
glove_oov += 1
print('Glove OOV:', glove_oov, 'Para OOV', para_oov, 'Total', len(vocab))
return vocabulary_embeddings
input_vocabulary_embeddings = create_word_embeddings(input_vocabulary)
output_vocabulary_embeddings = create_word_embeddings(output_vocabulary)
output_vocabulary_schema_embeddings = None
if output_vocabulary_schema:
output_vocabulary_schema_embeddings = create_word_embeddings(output_vocabulary_schema)
return input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size
class ATISModel(torch.nn.Module):
""" Sequence-to-sequence model for predicting a SQL query given an utterance
and an interaction prefix.
"""
def __init__(
self,
params,
input_vocabulary,
output_vocabulary,
output_vocabulary_schema,
anonymizer):
super().__init__()
self.params = params
if params.use_bert:
self.model_bert, self.tokenizer, self.bert_config = utils_bert.get_bert(params)
self.gnn=None
if 'atis' not in params.data_directory:
if params.use_bert:
if params.use_gnn:
encoder_input_size = self.bert_config.hidden_size
encoder_output_size = params.encoder_state_size
self.gnn = GatedGraphConv(encoder_output_size, 2, 3) #input_dim, num_timesteps, num_edge_types,
input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
initializer=output_vocabulary_embeddings,
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = None
else:
input_vocabulary_embeddings, output_vocabulary_embeddings, output_vocabulary_schema_embeddings, input_embedding_size = load_word_embeddings(input_vocabulary, output_vocabulary, output_vocabulary_schema, params)
params.input_embedding_size = input_embedding_size
self.params.input_embedding_size = input_embedding_size
# Create the input embeddings
self.input_embedder = Embedder(params.input_embedding_size,
name="input-embedding",
initializer=input_vocabulary_embeddings,
vocabulary=input_vocabulary,
anonymizer=anonymizer,
freeze=params.freeze)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
initializer=output_vocabulary_embeddings,
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = Embedder(params.input_embedding_size,
name="schema-embedding",
initializer=output_vocabulary_schema_embeddings,
vocabulary=output_vocabulary_schema,
anonymizer=anonymizer,
freeze=params.freeze)
else:
# Create the input embeddings
self.input_embedder = Embedder(params.input_embedding_size,
name="input-embedding",
vocabulary=input_vocabulary,
anonymizer=anonymizer,
freeze=False)
# Create the output embeddings
self.output_embedder = Embedder(params.output_embedding_size,
name="output-embedding",
vocabulary=output_vocabulary,
anonymizer=anonymizer,
freeze=False)
self.column_name_token_embedder = None
# Create the encoder
encoder_input_size = params.input_embedding_size
encoder_output_size = params.encoder_state_size
if params.use_bert:
encoder_input_size = self.bert_config.hidden_size
if params.discourse_level_lstm:
encoder_input_size += params.encoder_state_size / 2
self.utterance_encoder = Encoder(params.encoder_num_layers, encoder_input_size, encoder_output_size)
# Positional embedder for utterances
attention_key_size = params.encoder_state_size
self.schema_attention_key_size = attention_key_size
if params.state_positional_embeddings:
attention_key_size += params.positional_embedding_size
self.positional_embedder = Embedder(
params.positional_embedding_size,
name="positional-embedding",
num_tokens=params.maximum_utterances)
self.utterance_attention_key_size = attention_key_size
# Create the discourse-level LSTM parameters
if params.discourse_level_lstm:
self.discourse_lstms = torch_utils.create_multilayer_lstm_params(1, params.encoder_state_size, params.encoder_state_size / 2, "LSTM-t")
self.initial_discourse_state = torch_utils.add_params(tuple([params.encoder_state_size / 2]), "V-turn-state-0")
# Snippet encoder
final_snippet_size = 0
if params.use_snippets and not params.previous_decoder_snippet_encoding:
snippet_encoding_size = int(params.encoder_state_size / 2)
final_snippet_size = params.encoder_state_size
if params.snippet_age_embedding:
snippet_encoding_size -= int(
params.snippet_age_embedding_size / 4)
self.snippet_age_embedder = Embedder(
params.snippet_age_embedding_size,
name="snippet-age-embedding",
num_tokens=params.max_snippet_age_embedding)
final_snippet_size = params.encoder_state_size + params.snippet_age_embedding_size / 2
self.snippet_encoder = Encoder(params.snippet_num_layers,
params.output_embedding_size,
snippet_encoding_size)
# Previous query Encoder
if params.use_previous_query:
self.query_encoder = Encoder(params.encoder_num_layers, params.output_embedding_size, params.encoder_state_size)
self.final_snippet_size = final_snippet_size
self.dropout = 0.
def _encode_snippets(self, previous_query, snippets, input_schema):
""" Computes a single vector representation for each snippet.
Inputs:
previous_query (list of str): Previous query in the interaction.
            snippets (list of Snippet): Snippets extracted from the previous query.
Returns:
list of Snippets, where the embedding is set to a vector.
"""
startpoints = [snippet.startpos for snippet in snippets]
endpoints = [snippet.endpos for snippet in snippets]
assert len(startpoints) == 0 or min(startpoints) >= 0
if input_schema:
assert len(endpoints) == 0 or max(endpoints) <= len(previous_query)
else:
assert len(endpoints) == 0 or max(endpoints) < len(previous_query)
snippet_embedder = lambda query_token: self.get_query_token_embedding(query_token, input_schema)
if previous_query and snippets:
_, previous_outputs = self.snippet_encoder(
previous_query, snippet_embedder, dropout_amount=self.dropout)
assert len(previous_outputs) == len(previous_query)
for snippet in snippets:
if input_schema:
embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos-1]], dim=0)
else:
embedding = torch.cat([previous_outputs[snippet.startpos],previous_outputs[snippet.endpos]], dim=0)
if self.params.snippet_age_embedding:
embedding = torch.cat([embedding, self.snippet_age_embedder(min(snippet.age, self.params.max_snippet_age_embedding - 1))], dim=0)
snippet.set_embedding(embedding)
return snippets
def _initialize_discourse_states(self):
discourse_state = self.initial_discourse_state
discourse_lstm_states = []
for lstm in self.discourse_lstms:
hidden_size = lstm.weight_hh.size()[1]
if lstm.weight_hh.is_cuda:
h_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
c_0 = torch.cuda.FloatTensor(1,hidden_size).fill_(0)
else:
h_0 = torch.zeros(1,hidden_size)
c_0 = torch.zeros(1,hidden_size)
discourse_lstm_states.append((h_0, c_0))
return discourse_state, discourse_lstm_states
def _add_positional_embeddings(self, hidden_states, utterances, group=False):
grouped_states = []
start_index = 0
for utterance in utterances:
grouped_states.append(hidden_states[start_index:start_index + len(utterance)])
start_index += len(utterance)
assert len(hidden_states) == sum([len(seq) for seq in grouped_states]) == sum([len(utterance) for utterance in utterances])
new_states = []
flat_sequence = []
num_utterances_to_keep = min(self.params.maximum_utterances, len(utterances))
for i, (states, utterance) in enumerate(zip(
grouped_states[-num_utterances_to_keep:], utterances[-num_utterances_to_keep:])):
positional_sequence = []
index = num_utterances_to_keep - i - 1
for state in states:
positional_sequence.append(torch.cat([state, self.positional_embedder(index)], dim=0))
assert len(positional_sequence) == len(utterance), \
"Expected utterance and state sequence length to be the same, " \
+ "but they were " + str(len(utterance)) \
+ " and " + str(len(positional_sequence))
if group:
new_states.append(positional_sequence)
else:
new_states.extend(positional_sequence)
flat_sequence.extend(utterance)
return new_states, flat_sequence
def build_optim(self):
params_trainer = []
params_bert_trainer = []
for name, param in self.named_parameters():
if param.requires_grad:
if 'model_bert' in name:
params_bert_trainer.append(param)
else:
params_trainer.append(param)
self.trainer = torch.optim.Adam(params_trainer, lr=self.params.initial_learning_rate)
if self.params.fine_tune_bert:
self.bert_trainer = torch.optim.Adam(params_bert_trainer, lr=self.params.lr_bert)
def set_dropout(self, value):
""" Sets the dropout to a specified value.
Inputs:
value (float): Value to set dropout to.
"""
self.dropout = value
def set_learning_rate(self, value):
""" Sets the learning rate for the trainer.
Inputs:
value (float): The new learning rate.
"""
for param_group in self.trainer.param_groups:
param_group['lr'] = value
def save(self, filename):
""" Saves the model to the specified filename.
Inputs:
filename (str): The filename to save to.
"""
torch.save(self.state_dict(), filename)
def load(self, filename):
""" Loads saved parameters into the parameter collection.
Inputs:
filename (str): Name of file containing parameters.
"""
self.load_state_dict(torch.load(filename))
print("Loaded model from file " + filename)
| [
"torch.zeros",
"torch.cat",
"torch.optim.Adam",
"torch.load",
"torch.cuda.FloatTensor"
] | 1.0.1 | sahara2001/editsql | d4325ac996d1ed0069def6d349e43e2a1914e761 |
1.0 | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech recognition task."""
import json
import logging
import os
import numpy as np
import torch
from espnet.asr.asr_utils import add_results_to_json, add_single_results
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import torch_load
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
def recog(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
model.recog_args = args
# read rnnlm
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError(
"use '--api v2' option to decode with non-default language model"
)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(train_args.char_list),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None), # for backward compatibility
)
)
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
# read json data
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
feat = feat[0][0]
if args.prefix_decode:
best, ids, score = model.prefix_recognize(feat, args, train_args, train_args.char_list, rnnlm)
new_js[name] = add_single_results(js[name], best, ids, score)
else:
nbest_hyps = model.recognize(
feat, args, train_args.char_list, rnnlm
)
new_js[name] = add_results_to_json(
js[name], nbest_hyps, train_args.char_list
)
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
def viterbi_decode(args):
set_deterministic_pytorch(args)
idim, odim, train_args = get_model_conf(
args.model, os.path.join(os.path.dirname(args.model), 'model.json'))
model_class = dynamic_import(train_args.model_module)
model = model_class(idim, odim, train_args)
if args.model is not None:
load_params = dict(torch.load(args.model))
if 'model' in load_params:
load_params = dict(load_params['model'])
if 'state_dict' in load_params:
load_params = dict(load_params['state_dict'])
model_params = dict(model.named_parameters())
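        # Checkpoints saved from a (Distributed)DataParallel-wrapped model prefix every
        # parameter name with "module."; the prefix is stripped below so the names match
        # this unwrapped model's parameters.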
for k, v in load_params.items():
k = k.replace('module.', '')
if k in model_params and v.size() == model_params[k].size():
model_params[k].data = v.data
logging.warning('load parameters {}'.format(k))
model.recog_args = args
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
with open(args.recog_json, 'rb') as f:
js = json.load(f)['utts']
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode='asr', load_output=False, sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None else args.preprocess_conf,
preprocess_args={'train': False})
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info('(%d/%d) decoding ' + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
y = np.fromiter(map(int, batch[0][1]['output'][0]['tokenid'].split()), dtype=np.int64)
align = model.viterbi_decode(feat[0][0], y)
assert len(align) == len(y)
new_js[name] = js[name]
new_js[name]['output'][0]['align'] = ' '.join([str(i) for i in list(align)])
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
| [
"torch.is_tensor",
"torch.no_grad",
"torch.load"
] | 1.0.1 | MarkWuNLP/StreamingTransformer | df9bfe348608b7e55ef1ff70464070c0055ea799 |
3 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from itertools import product
import torch
from fvcore.common.benchmark import benchmark
from pytorch3d.ops.interp_face_attrs import (
interpolate_face_attributes,
interpolate_face_attributes_python,
)
def _generate_data(N, S, K, F, D, device, requires_grad=False):
pix_to_face = torch.randint(-10, F, (N, S, S, K), device=device)
barycentric_coords = torch.randn(
N, S, S, K, 3, device=device, requires_grad=requires_grad
)
face_attrs = torch.randn(F, 3, D, device=device, requires_grad=requires_grad)
grad_pix_attrs = torch.randn(N, S, S, K, D, device=device)
return pix_to_face, barycentric_coords, face_attrs, grad_pix_attrs
def _bm_forward(N, S, F, K, D, impl):
# The runtime depends on the values of pix_to_face. So for proper
# benchmarking we should probably take the average of multiple
    # values of pix_to_face. But this doesn't easily fit into fvcore
# benchmarking, so instead we'll just set a manual seed to make sure
# that different impls will use the same data.
torch.manual_seed(0)
device = torch.device("cuda")
data = _generate_data(N, S, K, F, D, device, requires_grad=False)
args = data[:3]
torch.cuda.synchronize()
if impl == "cuda":
fun = interpolate_face_attributes
elif impl == "python":
fun = interpolate_face_attributes_python
return lambda: fun(*args)
def _bm_forward_backward(N, S, F, K, D, impl):
torch.manual_seed(0)
device = torch.device("cuda")
data = _generate_data(N, S, K, F, D, device, requires_grad=True)
args, grad = data[:3], data[3]
torch.cuda.synchronize()
if impl == "cuda":
fun = interpolate_face_attributes
elif impl == "python":
fun = interpolate_face_attributes_python
def run():
out = fun(*args)
out.backward(gradient=grad)
return run
def bm_interpolate_face_attribues() -> None:
# For now only benchmark on GPU
if not torch.cuda.is_available():
return
Ns = [1, 4]
Ss = [128]
Ks = [1, 10, 40]
Fs = [5000]
Ds = [1, 3, 16]
impls = ["python", "cuda"]
test_cases = product(Ns, Ss, Ks, Fs, Ds, impls)
kwargs_list = []
for case in test_cases:
N, S, K, F, D, impl = case
kwargs_list.append({"N": N, "S": S, "K": K, "F": F, "D": D, "impl": impl})
benchmark(_bm_forward, "FORWARD", kwargs_list, warmup_iters=3)
benchmark(_bm_forward_backward, "FORWARD+BACKWARD", kwargs_list, warmup_iters=3)
if __name__ == "__main__":
bm_interpolate_face_attribues()
| [
"torch.device",
"torch.cuda.synchronize",
"torch.manual_seed",
"torch.randint",
"torch.cuda.is_available",
"torch.randn"
] | 3 | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 |
3 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import collections
import os
import pickle
import warnings
import hydra
import numpy as np
import torch
from nerf.dataset import get_nerf_datasets, trivial_collate
from nerf.nerf_renderer import RadianceFieldRenderer, visualize_nerf_outputs
from nerf.stats import Stats
from omegaconf import DictConfig
from visdom import Visdom
CONFIG_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
@hydra.main(config_path=CONFIG_DIR, config_name="lego")
def main(cfg: DictConfig):
# Set the relevant seeds for reproducibility.
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
# Device on which to run.
if torch.cuda.is_available():
device = "cuda"
else:
warnings.warn(
"Please note that although executing on CPU is supported,"
+ "the training is unlikely to finish in resonable time."
)
device = "cpu"
# Initialize the Radiance Field model.
model = RadianceFieldRenderer(
image_size=cfg.data.image_size,
n_pts_per_ray=cfg.raysampler.n_pts_per_ray,
n_pts_per_ray_fine=cfg.raysampler.n_pts_per_ray,
n_rays_per_image=cfg.raysampler.n_rays_per_image,
min_depth=cfg.raysampler.min_depth,
max_depth=cfg.raysampler.max_depth,
stratified=cfg.raysampler.stratified,
stratified_test=cfg.raysampler.stratified_test,
chunk_size_test=cfg.raysampler.chunk_size_test,
n_harmonic_functions_xyz=cfg.implicit_function.n_harmonic_functions_xyz,
n_harmonic_functions_dir=cfg.implicit_function.n_harmonic_functions_dir,
n_hidden_neurons_xyz=cfg.implicit_function.n_hidden_neurons_xyz,
n_hidden_neurons_dir=cfg.implicit_function.n_hidden_neurons_dir,
n_layers_xyz=cfg.implicit_function.n_layers_xyz,
density_noise_std=cfg.implicit_function.density_noise_std,
)
# Move the model to the relevant device.
model.to(device)
# Init stats to None before loading.
stats = None
optimizer_state_dict = None
start_epoch = 0
checkpoint_path = os.path.join(hydra.utils.get_original_cwd(), cfg.checkpoint_path)
if len(cfg.checkpoint_path) > 0:
# Make the root of the experiment directory.
checkpoint_dir = os.path.split(checkpoint_path)[0]
os.makedirs(checkpoint_dir, exist_ok=True)
# Resume training if requested.
if cfg.resume and os.path.isfile(checkpoint_path):
print(f"Resuming from checkpoint {checkpoint_path}.")
loaded_data = torch.load(checkpoint_path)
model.load_state_dict(loaded_data["model"])
stats = pickle.loads(loaded_data["stats"])
print(f" => resuming from epoch {stats.epoch}.")
optimizer_state_dict = loaded_data["optimizer"]
start_epoch = stats.epoch
# Initialize the optimizer.
optimizer = torch.optim.Adam(
model.parameters(),
lr=cfg.optimizer.lr,
)
# Load the optimizer state dict in case we are resuming.
if optimizer_state_dict is not None:
optimizer.load_state_dict(optimizer_state_dict)
optimizer.last_epoch = start_epoch
# Init the stats object.
if stats is None:
stats = Stats(
["loss", "mse_coarse", "mse_fine", "psnr_coarse", "psnr_fine", "sec/it"],
)
# Learning rate scheduler setup.
# Following the original code, we use exponential decay of the
# learning rate: current_lr = base_lr * gamma ** (epoch / step_size)
def lr_lambda(epoch):
return cfg.optimizer.lr_scheduler_gamma ** (
epoch / cfg.optimizer.lr_scheduler_step_size
)
# The learning rate scheduling is implemented with LambdaLR PyTorch scheduler.
lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda, last_epoch=start_epoch - 1, verbose=False
)
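    # LambdaLR multiplies the base learning rate by lr_lambda(epoch), so stepping the
    # scheduler once per epoch (at the end of the epoch loop below) realizes the
    # exponential decay described above.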
    # Initialize the cache for storing variables needed for visualization.
visuals_cache = collections.deque(maxlen=cfg.visualization.history_size)
# Init the visualization visdom env.
if cfg.visualization.visdom:
viz = Visdom(
server=cfg.visualization.visdom_server,
port=cfg.visualization.visdom_port,
use_incoming_socket=False,
)
else:
viz = None
# Load the training/validation data.
train_dataset, val_dataset, _ = get_nerf_datasets(
dataset_name=cfg.data.dataset_name,
image_size=cfg.data.image_size,
)
if cfg.data.precache_rays:
# Precache the projection rays.
model.eval()
with torch.no_grad():
for dataset in (train_dataset, val_dataset):
cache_cameras = [e["camera"].to(device) for e in dataset]
cache_camera_hashes = [e["camera_idx"] for e in dataset]
model.precache_rays(cache_cameras, cache_camera_hashes)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=1,
shuffle=True,
num_workers=0,
collate_fn=trivial_collate,
)
# The validation dataloader is just an endless stream of random samples.
val_dataloader = torch.utils.data.DataLoader(
val_dataset,
batch_size=1,
num_workers=0,
collate_fn=trivial_collate,
sampler=torch.utils.data.RandomSampler(
val_dataset,
replacement=True,
num_samples=cfg.optimizer.max_epochs,
),
)
# Set the model to the training mode.
model.train()
# Run the main training loop.
for epoch in range(start_epoch, cfg.optimizer.max_epochs):
stats.new_epoch() # Init a new epoch.
for iteration, batch in enumerate(train_dataloader):
image, camera, camera_idx = batch[0].values()
image = image.to(device)
camera = camera.to(device)
optimizer.zero_grad()
# Run the forward pass of the model.
nerf_out, metrics = model(
camera_idx if cfg.data.precache_rays else None,
camera,
image,
)
# The loss is a sum of coarse and fine MSEs
loss = metrics["mse_coarse"] + metrics["mse_fine"]
# Take the training step.
loss.backward()
optimizer.step()
# Update stats with the current metrics.
stats.update(
{"loss": float(loss), **metrics},
stat_set="train",
)
if iteration % cfg.stats_print_interval == 0:
stats.print(stat_set="train")
            # Update the visualization cache.
visuals_cache.append(
{
"camera": camera.cpu(),
"camera_idx": camera_idx,
"image": image.cpu().detach(),
"rgb_fine": nerf_out["rgb_fine"].cpu().detach(),
"rgb_coarse": nerf_out["rgb_coarse"].cpu().detach(),
"rgb_gt": nerf_out["rgb_gt"].cpu().detach(),
"coarse_ray_bundle": nerf_out["coarse_ray_bundle"],
}
)
# Adjust the learning rate.
lr_scheduler.step()
# Validation
if epoch % cfg.validation_epoch_interval == 0 and epoch > 0:
# Sample a validation camera/image.
val_batch = next(val_dataloader.__iter__())
val_image, val_camera, camera_idx = val_batch[0].values()
val_image = val_image.to(device)
val_camera = val_camera.to(device)
            # Activate eval mode of the model (allows a full rendering pass).
model.eval()
with torch.no_grad():
val_nerf_out, val_metrics = model(
camera_idx if cfg.data.precache_rays else None,
val_camera,
val_image,
)
# Update stats with the validation metrics.
stats.update(val_metrics, stat_set="val")
stats.print(stat_set="val")
if viz is not None:
                # Plot the loss curves in visdom.
stats.plot_stats(
viz=viz,
visdom_env=cfg.visualization.visdom_env,
plot_file=None,
)
# Visualize the intermediate results.
visualize_nerf_outputs(
val_nerf_out, visuals_cache, viz, cfg.visualization.visdom_env
)
# Set the model back to train mode.
model.train()
# Checkpoint.
if (
epoch % cfg.checkpoint_epoch_interval == 0
and len(cfg.checkpoint_path) > 0
and epoch > 0
):
print(f"Storing checkpoint {checkpoint_path}.")
data_to_store = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"stats": pickle.dumps(stats),
}
torch.save(data_to_store, checkpoint_path)
if __name__ == "__main__":
main()
| [
"torch.utils.data.RandomSampler",
"torch.no_grad",
"torch.save",
"torch.manual_seed",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.optim.lr_scheduler.LambdaLR"
] | 3 | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 |
3 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""This module implements utility functions for loading .mtl files and textures."""
import os
import warnings
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from iopath.common.file_io import PathManager
from pytorch3d.io.utils import _open_file, _read_image
def make_mesh_texture_atlas(
material_properties: Dict,
texture_images: Dict,
face_material_names,
faces_uvs: torch.Tensor,
verts_uvs: torch.Tensor,
texture_size: int,
texture_wrap: Optional[str],
) -> torch.Tensor:
"""
Given properties for materials defined in the .mtl file, and the face texture uv
coordinates, construct an (F, R, R, 3) texture atlas where R is the texture_size
and F is the number of faces in the mesh.
Args:
material_properties: dict of properties for each material. If a material
            does not have any properties it will have an empty dict.
texture_images: dict of material names and texture images
face_material_names: numpy array of the material name corresponding to each
face. Faces which don't have an associated material will be an empty string.
For these faces, a uniform white texture is assigned.
faces_uvs: LongTensor of shape (F, 3,) giving the index into the verts_uvs for
each face in the mesh.
verts_uvs: FloatTensor of shape (V, 2) giving the uv coordinates for each vertex.
texture_size: the resolution of the per face texture map returned by this function.
Each face will have a texture map of shape (texture_size, texture_size, 3).
texture_wrap: string, one of ["repeat", "clamp", None]
If `texture_wrap="repeat"` for uv values outside the range [0, 1] the integer part
is ignored and a repeating pattern is formed.
If `texture_wrap="clamp"` the values are clamped to the range [0, 1].
If None, do nothing.
Returns:
atlas: FloatTensor of shape (F, texture_size, texture_size, 3) giving the per
face texture map.
"""
# Create an R x R texture map per face in the mesh
R = texture_size
F = faces_uvs.shape[0]
# Initialize the per face texture map to a white color.
# TODO: allow customization of this base color?
atlas = torch.ones(size=(F, R, R, 3), dtype=torch.float32, device=faces_uvs.device)
# Check for empty materials.
if not material_properties and not texture_images:
return atlas
# Iterate through the material properties - not
# all materials have texture images so this is
# done first separately to the texture interpolation.
for material_name, props in material_properties.items():
# Bool to indicate which faces use this texture map.
faces_material_ind = torch.from_numpy(face_material_names == material_name).to(
faces_uvs.device
)
if faces_material_ind.sum() > 0:
# For these faces, update the base color to the
# diffuse material color.
if "diffuse_color" not in props:
continue
atlas[faces_material_ind, ...] = props["diffuse_color"][None, :]
# If there are vertex texture coordinates, create an (F, 3, 2)
# tensor of the vertex textures per face.
faces_verts_uvs = verts_uvs[faces_uvs] if len(verts_uvs) > 0 else None
# Some meshes only have material properties and no texture image.
# In this case, return the atlas here.
if faces_verts_uvs is None:
return atlas
if texture_wrap == "repeat":
# If texture uv coordinates are outside the range [0, 1] follow
# the convention GL_REPEAT in OpenGL i.e the integer part of the coordinate
# will be ignored and a repeating pattern is formed.
        # ShapeNet data uses this format; see:
# https://shapenet.org/qaforum/index.php?qa=15&qa_1=why-is-the-texture-coordinate-in-the-obj-file-not-in-the-range # noqa: B950
if (faces_verts_uvs > 1).any() or (faces_verts_uvs < 0).any():
msg = "Texture UV coordinates outside the range [0, 1]. \
The integer part will be ignored to form a repeating pattern."
warnings.warn(msg)
faces_verts_uvs = faces_verts_uvs % 1
elif texture_wrap == "clamp":
# Clamp uv coordinates to the [0, 1] range.
faces_verts_uvs = faces_verts_uvs.clamp(0.0, 1.0)
# Iterate through the materials used in this mesh. Update the
# texture atlas for the faces which use this material.
# Faces without texture are white.
for material_name, image in list(texture_images.items()):
# Only use the RGB colors
if image.shape[2] == 4:
image = image[:, :, :3]
# Reverse the image y direction
image = torch.flip(image, [0]).type_as(faces_verts_uvs)
# Bool to indicate which faces use this texture map.
faces_material_ind = torch.from_numpy(face_material_names == material_name).to(
faces_verts_uvs.device
)
# Find the subset of faces which use this texture with this texture image
uvs_subset = faces_verts_uvs[faces_material_ind, :, :]
# Update the texture atlas for the faces which use this texture.
# TODO: should the texture map values be multiplied
# by the diffuse material color (i.e. use *= as the atlas has
# been initialized to the diffuse color)?. This is
# not being done in SoftRas.
atlas[faces_material_ind, :, :] = make_material_atlas(image, uvs_subset, R)
return atlas
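# A minimal usage sketch (added for illustration; not part of the original module):
# a single-triangle mesh with one material and no texture image, in which case every
# texel of the face's atlas is filled with the material's diffuse color. The dummy
# names and values below are assumptions made purely for the example.
def _example_make_mesh_texture_atlas():
    props = {"mat0": {"diffuse_color": torch.tensor([1.0, 0.0, 0.0])}}
    face_material_names = np.array(["mat0"])
    faces_uvs = torch.tensor([[0, 1, 2]], dtype=torch.int64)
    verts_uvs = torch.tensor([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    atlas = make_mesh_texture_atlas(
        props, {}, face_material_names, faces_uvs, verts_uvs, texture_size=4, texture_wrap=None
    )
    return atlas  # (1, 4, 4, 3), every texel equal to the red diffuse color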
def make_material_atlas(
image: torch.Tensor, faces_verts_uvs: torch.Tensor, texture_size: int
) -> torch.Tensor:
r"""
Given a single texture image and the uv coordinates for all the
face vertices, create a square texture map per face using
the formulation from [1].
For a triangle with vertices (v0, v1, v2) we can create a barycentric coordinate system
with the x axis being the vector (v0 - v2) and the y axis being the vector (v1 - v2).
The barycentric coordinates range from [0, 1] in the +x and +y direction so this creates
a triangular texture space with vertices at (0, 1), (0, 0) and (1, 0).
The per face texture map is of shape (texture_size, texture_size, 3)
which is a square. To map a triangular texture to a square grid, each
triangle is parametrized as follows (e.g. R = texture_size = 3):
The triangle texture is first divided into RxR = 9 subtriangles which each
map to one grid cell. The numbers in the grid cells and triangles show the mapping.
    .. code-block:: python
Triangular Texture Space:
1
|\
|6 \
|____\
|\ 7 |\
|3 \ |4 \
|____\|____\
|\ 8 |\ 5 |\
|0 \ |1 \ |2 \
|____\|____\|____\
0 1
Square per face texture map:
R ____________________
| | | |
| 6 | 7 | 8 |
|______|______|______|
| | | |
| 3 | 4 | 5 |
|______|______|______|
| | | |
| 0 | 1 | 2 |
|______|______|______|
0 R
The barycentric coordinates of each grid cell are calculated using the
xy coordinates:
    .. code-block:: python
The cartesian coordinates are:
Grid 1:
R ____________________
| | | |
| 20 | 21 | 22 |
|______|______|______|
| | | |
| 10 | 11 | 12 |
|______|______|______|
| | | |
| 00 | 01 | 02 |
|______|______|______|
0 R
where 02 means y = 0, x = 2
Now consider this subset of the triangle which corresponds to
grid cells 0 and 8:
    .. code-block:: python
1/R ________
|\ 8 |
| \ |
| 0 \ |
|_______\|
0 1/R
The centroids of the triangles are:
0: (1/3, 1/3) * 1/R
8: (2/3, 2/3) * 1/R
For each grid cell we can now calculate the centroid `(c_y, c_x)`
of the corresponding texture triangle:
    - if `(x + y) < R`, then offset the centroid of
        triangle 0 by `(y, x) * (1/R)`
    - if `(x + y) >= R`, then offset the centroid of
        triangle 8 by `((R-1-y), (R-1-x)) * (1/R)`.
This is equivalent to updating the portion of Grid 1
    above the diagonal, replacing `(y, x)` with `((R-1-y), (R-1-x))`:
    .. code-block:: python
R _____________________
| | | |
| 20 | 01 | 00 |
|______|______|______|
| | | |
| 10 | 11 | 10 |
|______|______|______|
| | | |
| 00 | 01 | 02 |
|______|______|______|
0 R
The barycentric coordinates (w0, w1, w2) are then given by:
    .. code-block:: python
w0 = c_x
w1 = c_y
w2 = 1- w0 - w1
Args:
image: FloatTensor of shape (H, W, 3)
faces_verts_uvs: uv coordinates for each vertex in each face (F, 3, 2)
texture_size: int
Returns:
atlas: a FloatTensor of shape (F, texture_size, texture_size, 3) giving a
per face texture map.
[1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based
3D Reasoning', ICCV 2019
"""
R = texture_size
device = faces_verts_uvs.device
rng = torch.arange(R, device=device)
# Meshgrid returns (row, column) i.e (Y, X)
# Change order to (X, Y) to make the grid.
Y, X = torch.meshgrid(rng, rng)
# pyre-fixme[28]: Unexpected keyword argument `axis`.
grid = torch.stack([X, Y], axis=-1) # (R, R, 2)
# Grid cells below the diagonal: x + y < R.
below_diag = grid.sum(-1) < R
    # map the [0, R] grid to the [0, 1] barycentric coordinates of
# the texture triangle centroids.
bary = torch.zeros((R, R, 3), device=device) # (R, R, 3)
slc = torch.arange(2, device=device)[:, None]
# w0, w1
bary[below_diag, slc] = ((grid[below_diag] + 1.0 / 3.0) / R).T
# w0, w1 for above diagonal grid cells.
# pyre-fixme[16]: `float` has no attribute `T`.
bary[~below_diag, slc] = (((R - 1.0 - grid[~below_diag]) + 2.0 / 3.0) / R).T
# w2 = 1. - w0 - w1
bary[..., -1] = 1 - bary[..., :2].sum(dim=-1)
# Calculate the uv position in the image for each pixel
# in the per face texture map
# (F, 1, 1, 3, 2) * (R, R, 3, 1) -> (F, R, R, 3, 2) -> (F, R, R, 2)
uv_pos = (faces_verts_uvs[:, None, None] * bary[..., None]).sum(-2)
# bi-linearly interpolate the textures from the images
# using the uv coordinates given by uv_pos.
textures = _bilinear_interpolation_vectorized(image, uv_pos)
return textures
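# A minimal sanity-check sketch (added for illustration; not part of the original
# module): for a constant texture image, the per-face atlas produced by the barycentric
# parametrization described above must itself be constant, whatever the uv coordinates.
def _example_make_material_atlas():
    image = torch.full((8, 8, 3), 0.5)  # constant (H, W, 3) texture image
    faces_verts_uvs = torch.tensor([[[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]])  # (1, 3, 2)
    atlas = make_material_atlas(image, faces_verts_uvs, texture_size=2)
    return atlas  # (1, 2, 2, 3), all values 0.5 (up to float rounding)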
def _bilinear_interpolation_vectorized(
image: torch.Tensor, grid: torch.Tensor
) -> torch.Tensor:
"""
    Bilinearly interpolate the image using the uv positions in the flow-field
grid (following the naming conventions for torch.nn.functional.grid_sample).
This implementation uses the same steps as in the SoftRas cuda kernel
to make it easy to compare. This vectorized version requires less memory than
_bilinear_interpolation_grid_sample but is slightly slower.
If speed is an issue and the number of faces in the mesh and texture image sizes
are small, consider using _bilinear_interpolation_grid_sample instead.
Args:
image: FloatTensor of shape (H, W, D) a single image/input tensor with D
channels.
grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
points at which to sample a value in the image. The grid values must
be in the range [0, 1]. u is the x direction and v is the y direction.
Returns:
        out: FloatTensor of shape (N, R, R, D) giving the interpolated
D dimensional value from image at each of the pixel locations in grid.
"""
H, W, _ = image.shape
# Convert [0, 1] to the range [0, W-1] and [0, H-1]
grid = grid * torch.tensor([W - 1, H - 1]).type_as(grid)
weight_1 = grid - grid.int()
weight_0 = 1.0 - weight_1
grid_x, grid_y = grid.unbind(-1)
y0 = grid_y.to(torch.int64)
y1 = (grid_y + 1).to(torch.int64)
x0 = grid_x.to(torch.int64)
x1 = x0 + 1
weight_x0, weight_y0 = weight_0.unbind(-1)
weight_x1, weight_y1 = weight_1.unbind(-1)
# Bi-linear interpolation
    # grid positions = [[y, x],     [(y+1), x],
    #                   [y, (x+1)], [(y+1), (x+1)]]
# weights = [[wx0*wy0, wx0*wy1],
# [wx1*wy0, wx1*wy1]]
out = (
image[y0, x0] * (weight_x0 * weight_y0)[..., None]
+ image[y1, x0] * (weight_x0 * weight_y1)[..., None]
+ image[y0, x1] * (weight_x1 * weight_y0)[..., None]
+ image[y1, x1] * (weight_x1 * weight_y1)[..., None]
)
return out
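# A minimal sketch (added for illustration; not part of the original module) of the
# vectorized bilinear interpolation: sampling a constant image anywhere returns the
# constant, which is a quick check of the weight bookkeeping above.
def _example_bilinear_interpolation():
    image = torch.full((4, 4, 3), 0.25)  # (H, W, D) constant image
    grid = torch.rand(2, 3, 3, 2)  # (N, R, R, 2) uv positions in [0, 1)
    out = _bilinear_interpolation_vectorized(image, grid)
    return out  # (2, 3, 3, 3), all values 0.25 (up to float rounding)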
def _bilinear_interpolation_grid_sample(
image: torch.Tensor, grid: torch.Tensor
) -> torch.Tensor:
"""
    Bilinearly interpolate the image using the uv positions in the flow-field
grid (following the conventions for torch.nn.functional.grid_sample).
This implementation is faster than _bilinear_interpolation_vectorized but
requires more memory so can cause OOMs. If speed is an issue try this function
instead.
Args:
image: FloatTensor of shape (H, W, D) a single image/input tensor with D
channels.
grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
points at which to sample a value in the image. The grid values must
be in the range [0, 1]. u is the x direction and v is the y direction.
Returns:
        out: FloatTensor of shape (N, R, R, D) giving the interpolated
D dimensional value from image at each of the pixel locations in grid.
"""
N = grid.shape[0]
# convert [0, 1] to the range [-1, 1] expected by grid_sample.
grid = grid * 2.0 - 1.0
image = image.permute(2, 0, 1)[None, ...].expand(N, -1, -1, -1) # (N, 3, H, W)
# Align_corners has to be set to True to match the output of the SoftRas
# cuda kernel for bilinear sampling.
out = F.grid_sample(image, grid, mode="bilinear", align_corners=True)
return out.permute(0, 2, 3, 1)
MaterialProperties = Dict[str, Dict[str, torch.Tensor]]
TextureFiles = Dict[str, str]
TextureImages = Dict[str, torch.Tensor]
def _parse_mtl(
f, path_manager: PathManager, device="cpu"
) -> Tuple[MaterialProperties, TextureFiles]:
material_properties = {}
texture_files = {}
material_name = ""
with _open_file(f, path_manager, "r") as f:
for line in f:
tokens = line.strip().split()
if not tokens:
continue
if tokens[0] == "newmtl":
material_name = tokens[1]
material_properties[material_name] = {}
elif tokens[0] == "map_Kd":
# Diffuse texture map
# Account for the case where filenames might have spaces
filename = line.strip()[7:]
texture_files[material_name] = filename
elif tokens[0] == "Kd":
# RGB diffuse reflectivity
kd = np.array(tokens[1:4]).astype(np.float32)
kd = torch.from_numpy(kd).to(device)
material_properties[material_name]["diffuse_color"] = kd
elif tokens[0] == "Ka":
# RGB ambient reflectivity
ka = np.array(tokens[1:4]).astype(np.float32)
ka = torch.from_numpy(ka).to(device)
material_properties[material_name]["ambient_color"] = ka
elif tokens[0] == "Ks":
# RGB specular reflectivity
ks = np.array(tokens[1:4]).astype(np.float32)
ks = torch.from_numpy(ks).to(device)
material_properties[material_name]["specular_color"] = ks
elif tokens[0] == "Ns":
# Specular exponent
ns = np.array(tokens[1:4]).astype(np.float32)
ns = torch.from_numpy(ns).to(device)
material_properties[material_name]["shininess"] = ns
return material_properties, texture_files
def _load_texture_images(
material_names: List[str],
data_dir: str,
material_properties: MaterialProperties,
texture_files: TextureFiles,
path_manager: PathManager,
) -> Tuple[MaterialProperties, TextureImages]:
final_material_properties = {}
texture_images = {}
# Only keep the materials referenced in the obj.
for material_name in material_names:
if material_name in texture_files:
# Load the texture image.
path = os.path.join(data_dir, texture_files[material_name])
if os.path.isfile(path):
image = (
_read_image(path, path_manager=path_manager, format="RGB") / 255.0
)
image = torch.from_numpy(image)
texture_images[material_name] = image
else:
msg = f"Texture file does not exist: {path}"
warnings.warn(msg)
if material_name in material_properties:
final_material_properties[material_name] = material_properties[
material_name
]
return final_material_properties, texture_images
def load_mtl(
f,
*,
material_names: List[str],
data_dir: str,
device="cpu",
path_manager: PathManager,
) -> Tuple[MaterialProperties, TextureImages]:
"""
Load texture images and material reflectivity values for ambient, diffuse
and specular light (Ka, Kd, Ks, Ns).
Args:
f: a file-like object of the material information.
material_names: a list of the material names found in the .obj file.
data_dir: the directory where the material texture files are located.
path_manager: PathManager for interpreting both f and material_names.
Returns:
material_properties: dict of properties for each material. If a material
does not have any properties it will have an empty dict.
{
material_name_1: {
"ambient_color": tensor of shape (1, 3),
"diffuse_color": tensor of shape (1, 3),
"specular_color": tensor of shape (1, 3),
"shininess": tensor of shape (1)
},
material_name_2: {},
...
}
texture_images: dict of material names and texture images
{
material_name_1: (H, W, 3) image,
...
}
"""
material_properties, texture_files = _parse_mtl(f, path_manager, device)
return _load_texture_images(
material_names,
data_dir,
material_properties,
texture_files,
path_manager=path_manager,
)
| [
"torch.zeros",
"torch.stack",
"torch.arange",
"torch.ones",
"torch.from_numpy",
"torch.nn.functional.grid_sample",
"torch.tensor",
"torch.meshgrid",
"torch.flip"
] | 3 | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 |
3 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
This example demonstrates camera parameter optimization with the plain
pulsar interface. For this, a reference image has been pre-generated
(you can find it at `../../tests/pulsar/reference/examples_TestRenderer_test_cam.png`).
The same scene parameterization is loaded and the camera parameters
distorted. Gradient-based optimization is used to converge towards the
original camera parameters.
Output: cam.gif.
"""
import logging
import math
from os import path
import cv2
import imageio
import numpy as np
import torch
from pytorch3d.renderer.points.pulsar import Renderer
from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_rotation_6d
from torch import nn, optim
LOGGER = logging.getLogger(__name__)
N_POINTS = 20
WIDTH = 1_000
HEIGHT = 1_000
DEVICE = torch.device("cuda")
class SceneModel(nn.Module):
"""
A simple scene model to demonstrate use of pulsar in PyTorch modules.
The scene model is parameterized with sphere locations (vert_pos),
    channel content (vert_col), radii (vert_rad), camera position (cam_pos),
camera rotation (cam_rot) and sensor focal length and width (cam_sensor).
The forward method of the model renders this scene description. Any
of these parameters could instead be passed as inputs to the forward
method and come from a different model.
"""
def __init__(self):
super(SceneModel, self).__init__()
self.gamma = 0.1
# Points.
torch.manual_seed(1)
vert_pos = torch.rand(N_POINTS, 3, dtype=torch.float32) * 10.0
vert_pos[:, 2] += 25.0
vert_pos[:, :2] -= 5.0
self.register_parameter("vert_pos", nn.Parameter(vert_pos, requires_grad=False))
self.register_parameter(
"vert_col",
nn.Parameter(
torch.rand(N_POINTS, 3, dtype=torch.float32), requires_grad=False
),
)
self.register_parameter(
"vert_rad",
nn.Parameter(
torch.rand(N_POINTS, dtype=torch.float32), requires_grad=False
),
)
self.register_parameter(
"cam_pos",
nn.Parameter(
torch.tensor([0.1, 0.1, 0.0], dtype=torch.float32), requires_grad=True
),
)
self.register_parameter(
"cam_rot",
# We're using the 6D rot. representation for better gradients.
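            # The 6D representation (Zhou et al., CVPR 2019, "On the Continuity of
            # Rotation Representations in Neural Networks") avoids the discontinuities
            # of Euler angles and quaternions, which helps the optimization of cam_rot.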
nn.Parameter(
matrix_to_rotation_6d(
axis_angle_to_matrix(
torch.tensor(
[
[0.02, math.pi + 0.02, 0.01],
],
dtype=torch.float32,
)
)
)[0],
requires_grad=True,
),
)
self.register_parameter(
"cam_sensor",
nn.Parameter(
torch.tensor([4.8, 1.8], dtype=torch.float32), requires_grad=True
),
)
self.renderer = Renderer(WIDTH, HEIGHT, N_POINTS, right_handed_system=True)
def forward(self):
return self.renderer.forward(
self.vert_pos,
self.vert_col,
self.vert_rad,
torch.cat([self.cam_pos, self.cam_rot, self.cam_sensor]),
self.gamma,
45.0,
)
def cli():
"""
Camera optimization example using pulsar.
Writes to `cam.gif`.
"""
LOGGER.info("Loading reference...")
# Load reference.
ref = (
torch.from_numpy(
imageio.imread(
"../../tests/pulsar/reference/examples_TestRenderer_test_cam.png"
)[:, ::-1, :].copy()
).to(torch.float32)
/ 255.0
).to(DEVICE)
# Set up model.
model = SceneModel().to(DEVICE)
# Optimizer.
optimizer = optim.SGD(
[
{"params": [model.cam_pos], "lr": 1e-4}, # 1e-3
{"params": [model.cam_rot], "lr": 5e-6},
{"params": [model.cam_sensor], "lr": 1e-4},
]
)
LOGGER.info("Writing video to `%s`.", path.abspath("cam.gif"))
writer = imageio.get_writer("cam.gif", format="gif", fps=25)
# Optimize.
for i in range(300):
optimizer.zero_grad()
result = model()
# Visualize.
result_im = (result.cpu().detach().numpy() * 255).astype(np.uint8)
cv2.imshow("opt", result_im[:, :, ::-1])
writer.append_data(result_im)
overlay_img = np.ascontiguousarray(
((result * 0.5 + ref * 0.5).cpu().detach().numpy() * 255).astype(np.uint8)[
:, :, ::-1
]
)
overlay_img = cv2.putText(
overlay_img,
"Step %d" % (i),
(10, 40),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(0, 0, 0),
2,
cv2.LINE_AA,
False,
)
cv2.imshow("overlay", overlay_img)
cv2.waitKey(1)
# Update.
loss = ((result - ref) ** 2).sum()
LOGGER.info("loss %d: %f", i, loss.item())
loss.backward()
optimizer.step()
writer.close()
LOGGER.info("Done.")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
cli()
| [
"torch.device",
"torch.rand",
"torch.cat",
"torch.optim.SGD",
"torch.nn.Parameter",
"torch.manual_seed",
"torch.tensor"
] | 3 | shubham-goel/pytorch3d | e5e6e90af6f81b3eccb35bbdfdc7e64ec6a4df21 |
1.10 | #!/usr/bin/env python
"""Train the Text2Mel network. See: https://arxiv.org/abs/1710.08969"""
__author__ = 'Erdene-Ochir Tuguldur'
import sys
import time
import argparse
from tqdm import *
import numpy as np
import torch
import torch.nn.functional as F
# project imports
from models import Text2Mel
from hyperparams import HParams as hp
from logger import Logger
from utils import get_last_checkpoint_file_name, load_checkpoint, save_checkpoint, load_checkpoint_test
from datasets.data_loader import Text2MelDataLoader
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset", required=True, choices=['ljspeech', 'mbspeech','emovdb'], help='dataset name')
args = parser.parse_args()
if args.dataset == 'ljspeech':
from datasets.lj_speech import vocab, LJSpeech as SpeechDataset
elif args.dataset == 'emovdb':
from datasets.emovdb import vocab, Emovdb as SpeechDataset
else:
from datasets.mb_speech import vocab, MBSpeech as SpeechDataset
use_gpu = torch.cuda.is_available()
print('use_gpu', use_gpu)
if use_gpu:
torch.backends.cudnn.benchmark = True
train_data_loader = Text2MelDataLoader(text2mel_dataset=SpeechDataset(['texts', 'mels', 'mel_gates']), batch_size=64,
mode='train')
valid_data_loader = Text2MelDataLoader(text2mel_dataset=SpeechDataset(['texts', 'mels', 'mel_gates']), batch_size=64,
mode='valid')
text2mel = Text2Mel(vocab).cpu()
start_timestamp = int(time.time() * 1000)
start_epoch = 0
global_step = 0
logger = Logger(args.dataset, 'text2mel')
# load the last checkpoint if exists
last_checkpoint_file_name = get_last_checkpoint_file_name(logger.logdir)
if last_checkpoint_file_name:
print("loading the last checkpoint: %s" % last_checkpoint_file_name)
start_epoch, global_step = load_checkpoint(last_checkpoint_file_name, text2mel, None)
optimizer = torch.optim.Adam(text2mel.parameters(), lr=hp.text2mel_lr)
def get_lr():
return optimizer.param_groups[0]['lr']
def lr_decay(step, warmup_steps=4000):
new_lr = hp.text2mel_lr * warmup_steps ** 0.5 * min((step + 1) * warmup_steps ** -1.5, (step + 1) ** -0.5)
optimizer.param_groups[0]['lr'] = new_lr
def train(train_epoch, phase='train'):
global global_step
lr_decay(global_step)
print("epoch %3d with lr=%.02e" % (train_epoch, get_lr()))
text2mel.train() if phase == 'train' else text2mel.eval()
torch.set_grad_enabled(True) if phase == 'train' else torch.set_grad_enabled(False)
data_loader = train_data_loader if phase == 'train' else valid_data_loader
it = 0
running_loss = 0.0
running_l1_loss = 0.0
running_att_loss = 0.0
pbar = tqdm(data_loader, unit="audios", unit_scale=data_loader.batch_size, disable=hp.disable_progress_bar)
for batch in pbar:
L, S, gates = batch['texts'], batch['mels'], batch['mel_gates']
S = S.permute(0, 2, 1) # TODO: because of preprocessing
B, N = L.size() # batch size and text count
_, n_mels, T = S.size() # number of melspectrogram bins and time
assert gates.size(0) == B # TODO: later remove
assert gates.size(1) == T
S_shifted = torch.cat((S[:, :, 1:], torch.zeros(B, n_mels, 1)), 2)
S.requires_grad = False
S_shifted.requires_grad = False
gates.requires_grad = False
def W_nt(_, n, t, g=0.2):
return 1.0 - np.exp(-((n / float(N) - t / float(T)) ** 2) / (2 * g ** 2))
W = np.fromfunction(W_nt, (B, N, T), dtype=np.float32)
W = torch.from_numpy(W)
L = L.cpu()
S = S.cpu()
S_shifted = S_shifted.cpu()
W = W.cpu()
gates = gates.cpu()
Y_logit, Y, A = text2mel(L, S, monotonic_attention=True)
l1_loss = F.l1_loss(Y, S_shifted)
masks = gates.reshape(B, 1, T).float()
att_loss = (A * W * masks).mean()
loss = l1_loss + att_loss
if phase == 'train':
lr_decay(global_step)
optimizer.zero_grad()
loss.backward()
optimizer.step()
global_step += 1
it += 1
loss, l1_loss, att_loss = loss.item(), l1_loss.item(), att_loss.item()
running_loss += loss
running_l1_loss += l1_loss
running_att_loss += att_loss
if phase == 'train':
# update the progress bar
pbar.set_postfix({
'l1': "%.05f" % (running_l1_loss / it),
'att': "%.05f" % (running_att_loss / it)
})
logger.log_step(phase, global_step, {'loss_l1': l1_loss, 'loss_att': att_loss},
{'mels-true': S[:1, :, :], 'mels-pred': Y[:1, :, :], 'attention': A[:1, :, :]})
if global_step % 1000 == 0:
# checkpoint at every 1000th step
save_checkpoint(logger.logdir, train_epoch, global_step, text2mel, optimizer)
epoch_loss = running_loss / it
epoch_l1_loss = running_l1_loss / it
epoch_att_loss = running_att_loss / it
logger.log_epoch(phase, global_step, {'loss_l1': epoch_l1_loss, 'loss_att': epoch_att_loss})
return epoch_loss
since = time.time()
epoch = start_epoch
while True:
train_epoch_loss = train(epoch, phase='train')
time_elapsed = time.time() - since
time_str = 'total time elapsed: {:.0f}h {:.0f}m {:.0f}s '.format(time_elapsed // 3600, time_elapsed % 3600 // 60,
time_elapsed % 60)
print("train epoch loss %f, step=%d, %s" % (train_epoch_loss, global_step, time_str))
valid_epoch_loss = train(epoch, phase='valid')
print("valid epoch loss %f" % valid_epoch_loss)
epoch += 1
if global_step >= hp.text2mel_max_iteration:
print("max step %d (current step %d) reached, exiting..." % (hp.text2mel_max_iteration, global_step))
sys.exit(0)
| [
"torch.zeros",
"torch.nn.functional.l1_loss",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.set_grad_enabled"
] | 1.10.2 | TraceOnBrainOff/pytorch-dc-tts | 993a0fbace561729b04df2179b41a0a7ea502e93 |
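For reference, the `att_loss` in the row above is a guided-attention penalty: W[n, t] = 1 - exp(-((n/N - t/T)^2) / (2 g^2)) is close to 0 on the text/mel diagonal and close to 1 far from it, so attention mass placed off the diagonal is penalized. Below is a small standalone sketch with made-up shapes (B, N, T and g are illustrative values, and the gate mask used in the row is omitted).
import numpy as np
import torch
B, N, T, g = 2, 50, 200, 0.2  # batch, text length, mel frames, guide width (illustrative)
def W_nt(_, n, t, g=g):
    return 1.0 - np.exp(-((n / float(N) - t / float(T)) ** 2) / (2 * g ** 2))
W = torch.from_numpy(np.fromfunction(W_nt, (B, N, T), dtype=np.float32))
A = torch.rand(B, N, T).softmax(dim=1)  # stand-in attention matrix
att_loss = (A * W).mean()
print(W[0, 0, 0].item(), W[0, 0, T - 1].item())  # ~0 near the diagonal, ~1 far away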
1.8 | #!/usr/bin/python3
"""Recipe for training a speaker verification system based on PLDA using the voxceleb dataset.
The system employs a pre-trained model followed by a PLDA transformation.
The pre-trained model is automatically downloaded from the web if not specified.
To run this recipe, run the following command:
> python speaker_verification_plda.py hyperparams/verification_plda_xvector.yaml
Authors
* Nauman Dawalatabad 2020
* Mirco Ravanelli 2020
"""
import os
import sys
import torch
import torchaudio
import logging
import speechbrain as sb
import numpy
import pickle
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.metric_stats import EER, minDCF
from speechbrain.processing.PLDA_LDA import StatObject_SB
from speechbrain.processing.PLDA_LDA import Ndx
from speechbrain.processing.PLDA_LDA import fast_PLDA_scoring
from speechbrain.utils.data_utils import download_file
from speechbrain.utils.distributed import run_on_main
# Compute embeddings from the waveforms
def compute_embeddings(wavs, wav_lens):
"""Compute speaker embeddings.
Arguments
---------
wavs : Torch.Tensor
Tensor containing the speech waveform (batch, time).
Make sure the sample rate is fs=16000 Hz.
wav_lens: Torch.Tensor
Tensor containing the relative length for each sentence
in the batch (e.g., [0.8 0.6 1.0])
"""
wavs = wavs.to(params["device"])
wav_lens = wav_lens.to(params["device"])
with torch.no_grad():
feats = params["compute_features"](wavs)
feats = params["mean_var_norm"](feats, wav_lens)
embeddings = params["embedding_model"](feats, wav_lens)
embeddings = params["mean_var_norm_emb"](
embeddings, torch.ones(embeddings.shape[0]).to(embeddings.device)
)
return embeddings.squeeze(1)
def emb_computation_loop(split, set_loader, stat_file):
"""Computes the embeddings and saves the in a stat file"""
# Extract embeddings (skip if already done)
if not os.path.isfile(stat_file):
embeddings = numpy.empty(
shape=[0, params["emb_dim"]], dtype=numpy.float64
)
modelset = []
segset = []
with tqdm(set_loader, dynamic_ncols=True) as t:
for batch in t:
ids = batch.id
wavs, lens = batch.sig
mod = [x for x in ids]
seg = [x for x in ids]
modelset = modelset + mod
segset = segset + seg
# Enrollment and test embeddings
embs = compute_embeddings(wavs, lens)
xv = embs.squeeze().cpu().numpy()
embeddings = numpy.concatenate((embeddings, xv), axis=0)
modelset = numpy.array(modelset, dtype="|O")
segset = numpy.array(segset, dtype="|O")
# Initialize variables for start, stop and stat0
s = numpy.array([None] * embeddings.shape[0])
b = numpy.array([[1.0]] * embeddings.shape[0])
# Stat object (used to collect embeddings)
stat_obj = StatObject_SB(
modelset=modelset,
segset=segset,
start=s,
stop=s,
stat0=b,
stat1=embeddings,
)
logger.info(f"Saving stat obj for {split}")
stat_obj.save_stat_object(stat_file)
else:
logger.info(f"Skipping embedding Extraction for {split}")
logger.info(f"Loading previously saved stat_object for {split}")
with open(stat_file, "rb") as input:
stat_obj = pickle.load(input)
return stat_obj
def verification_performance(scores_plda):
"""Computes the Equal Error Rate give the PLDA scores"""
# Create ids, labels, and scoring list for EER evaluation
ids = []
labels = []
positive_scores = []
negative_scores = []
for line in open(veri_file_path):
lab = int(line.split(" ")[0].rstrip().split(".")[0].strip())
enrol_id = line.split(" ")[1].rstrip().split(".")[0].strip()
test_id = line.split(" ")[2].rstrip().split(".")[0].strip()
# Assuming enrol_id and test_id are unique
i = int(numpy.where(scores_plda.modelset == enrol_id)[0][0])
j = int(numpy.where(scores_plda.segset == test_id)[0][0])
s = float(scores_plda.scoremat[i, j])
labels.append(lab)
ids.append(enrol_id + "<>" + test_id)
if lab == 1:
positive_scores.append(s)
else:
negative_scores.append(s)
# Clean variable
del scores_plda
# Final EER computation
eer, th = EER(torch.tensor(positive_scores), torch.tensor(negative_scores))
min_dcf, th = minDCF(
torch.tensor(positive_scores), torch.tensor(negative_scores)
)
return eer, min_dcf
# Function to get mod and seg
def get_utt_ids_for_test(ids, data_dict):
mod = [data_dict[x]["wav1"]["data"] for x in ids]
seg = [data_dict[x]["wav2"]["data"] for x in ids]
return mod, seg
def dataio_prep(params):
"Creates the dataloaders and their data processing pipelines."
data_folder = params["data_folder"]
# 1. Declarations:
# Train data (used for normalization)
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["train_data"], replacements={"data_root": data_folder},
)
train_data = train_data.filtered_sorted(
sort_key="duration", select_n=params["n_train_snts"]
)
# Enrol data
enrol_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["enrol_data"], replacements={"data_root": data_folder},
)
enrol_data = enrol_data.filtered_sorted(sort_key="duration")
# Test data
test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=params["test_data"], replacements={"data_root": data_folder},
)
test_data = test_data.filtered_sorted(sort_key="duration")
datasets = [train_data, enrol_data, test_data]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "start", "stop")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start, stop):
start = int(start)
stop = int(stop)
num_frames = stop - start
sig, fs = torchaudio.load(
wav, num_frames=num_frames, frame_offset=start
)
sig = sig.transpose(0, 1).squeeze(1)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "spk_id"])
# 4 Create dataloaders
train_dataloader = sb.dataio.dataloader.make_dataloader(
train_data, **params["train_dataloader_opts"]
)
enrol_dataloader = sb.dataio.dataloader.make_dataloader(
enrol_data, **params["enrol_dataloader_opts"]
)
test_dataloader = sb.dataio.dataloader.make_dataloader(
test_data, **params["test_dataloader_opts"]
)
return train_dataloader, enrol_dataloader, test_dataloader
if __name__ == "__main__":
# Logger setup
logger = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(current_dir))
# Load hyperparameters file with command-line overrides
params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
with open(params_file) as fin:
params = load_hyperpyyaml(fin, overrides)
# Download verification list (to exclude verification sentences from train)
veri_file_path = os.path.join(
params["save_folder"], os.path.basename(params["verification_file"])
)
download_file(params["verification_file"], veri_file_path)
from voxceleb_prepare import prepare_voxceleb # noqa E402
# Create experiment directory
sb.core.create_experiment_directory(
experiment_directory=params["output_folder"],
hyperparams_to_save=params_file,
overrides=overrides,
)
# Prepare data from dev of Voxceleb1
logger.info("Data preparation")
prepare_voxceleb(
data_folder=params["data_folder"],
save_folder=params["save_folder"],
verification_pairs_file=veri_file_path,
splits=["train", "test"],
split_ratio=[90, 10],
seg_dur=3,
)
# here we create the datasets objects as well as tokenization and encoding
train_dataloader, enrol_dataloader, test_dataloader = dataio_prep(params)
# Initialize PLDA vars
modelset, segset = [], []
embeddings = numpy.empty(shape=[0, params["emb_dim"]], dtype=numpy.float64)
# Embedding file for train data
xv_file = os.path.join(
params["save_folder"], "VoxCeleb1_train_embeddings_stat_obj.pkl"
)
# We download the pretrained LM from HuggingFace (or elsewhere depending on
# the path given in the YAML file). The tokenizer is loaded at the same time.
run_on_main(params["pretrainer"].collect_files)
params["pretrainer"].load_collected()
params["embedding_model"].eval()
params["embedding_model"].to(params["device"])
# Computing training embeddings (skip if already extracted)
if not os.path.exists(xv_file):
logger.info("Extracting embeddings from Training set..")
with tqdm(train_dataloader, dynamic_ncols=True) as t:
for batch in t:
snt_id = batch.id
wav, lens = batch.sig
spk_ids = batch.spk_id
# Flattening speaker ids
modelset = modelset + spk_ids
# For segset
segset = segset + snt_id
# Compute embeddings
emb = compute_embeddings(wav, lens)
xv = emb.squeeze(1).cpu().numpy()
embeddings = numpy.concatenate((embeddings, xv), axis=0)
# Speaker IDs and utterance IDs
modelset = numpy.array(modelset, dtype="|O")
segset = numpy.array(segset, dtype="|O")
# Initialize variables for start, stop and stat0
s = numpy.array([None] * embeddings.shape[0])
b = numpy.array([[1.0]] * embeddings.shape[0])
embeddings_stat = StatObject_SB(
modelset=modelset,
segset=segset,
start=s,
stop=s,
stat0=b,
stat1=embeddings,
)
del embeddings
# Save TRAINING embeddings in StatObject_SB object
embeddings_stat.save_stat_object(xv_file)
else:
# Load the saved stat object for train embedding
logger.info("Skipping embedding Extraction for training set")
logger.info(
"Loading previously saved stat_object for train embeddings.."
)
with open(xv_file, "rb") as input:
embeddings_stat = pickle.load(input)
# Training Gaussian PLDA model
logger.info("Training PLDA model")
params["compute_plda"].plda(embeddings_stat)
logger.info("PLDA training completed")
# Set paths for enrol/test embeddings
enrol_stat_file = os.path.join(params["save_folder"], "stat_enrol.pkl")
test_stat_file = os.path.join(params["save_folder"], "stat_test.pkl")
ndx_file = os.path.join(params["save_folder"], "ndx.pkl")
# Compute enrol and Test embeddings
enrol_obj = emb_computation_loop("enrol", enrol_dataloader, enrol_stat_file)
test_obj = emb_computation_loop("test", test_dataloader, test_stat_file)
# Prepare Ndx Object
if not os.path.isfile(ndx_file):
models = enrol_obj.modelset
testsegs = test_obj.modelset
logger.info("Preparing Ndx")
ndx_obj = Ndx(models=models, testsegs=testsegs)
logger.info("Saving ndx obj...")
ndx_obj.save_ndx_object(ndx_file)
else:
logger.info("Skipping Ndx preparation")
logger.info("Loading Ndx from disk")
with open(ndx_file, "rb") as input:
ndx_obj = pickle.load(input)
# PLDA scoring
logger.info("PLDA scoring...")
scores_plda = fast_PLDA_scoring(
enrol_obj,
test_obj,
ndx_obj,
params["compute_plda"].mean,
params["compute_plda"].F,
params["compute_plda"].Sigma,
)
logger.info("Computing EER... ")
# Cleaning variable
del enrol_dataloader
del test_dataloader
del enrol_obj
del test_obj
del embeddings_stat
# Final EER computation
eer, min_dcf = verification_performance(scores_plda)
logger.info("EER(%%)=%f", eer * 100)
logger.info("min_dcf=%f", min_dcf * 100)
| [
"torch.no_grad",
"torch.ones",
"torch.tensor"
] | 1.8.0 | pnsafari/speechbrain | 3a6956a838f3796ff6d041ee6a20bcdea55794cb |
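As a quick illustration (not part of the row above), the final scoring step only needs two lists of trial scores split by label; with toy random scores the same speechbrain metrics can be exercised directly.
import torch
from speechbrain.utils.metric_stats import EER, minDCF
positive_scores = torch.randn(100) + 1.0  # same-speaker trials (toy values)
negative_scores = torch.randn(100) - 1.0  # different-speaker trials (toy values)
eer, eer_threshold = EER(positive_scores, negative_scores)
min_dcf, dcf_threshold = minDCF(positive_scores, negative_scores)
print("EER(%%)=%f min_dcf=%f" % (eer * 100, min_dcf))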
1.8 | import torch
import torch.nn.functional
from e3nn.o3 import Irreps
from e3nn.util.jit import compile_mode
from nequip.data import AtomicDataDict
from .._graph_mixin import GraphModuleMixin
@compile_mode("script")
class OneHotAtomEncoding(GraphModuleMixin, torch.nn.Module):
num_types: int
set_features: bool
# TODO: use torch.unique?
# TODO: type annotation
# Docstrings
def __init__(
self,
num_types: int,
set_features: bool = True,
irreps_in=None,
):
super().__init__()
self.num_types = num_types
self.set_features = set_features
# Output irreps are num_types even (invariant) scalars
irreps_out = {AtomicDataDict.NODE_ATTRS_KEY: Irreps([(self.num_types, (0, 1))])}
if self.set_features:
irreps_out[AtomicDataDict.NODE_FEATURES_KEY] = irreps_out[
AtomicDataDict.NODE_ATTRS_KEY
]
self._init_irreps(irreps_in=irreps_in, irreps_out=irreps_out)
def forward(self, data: AtomicDataDict.Type):
type_numbers = data[AtomicDataDict.ATOM_TYPE_KEY].squeeze(-1)
one_hot = torch.nn.functional.one_hot(
type_numbers, num_classes=self.num_types
).to(device=type_numbers.device, dtype=data[AtomicDataDict.POSITIONS_KEY].dtype)
data[AtomicDataDict.NODE_ATTRS_KEY] = one_hot
if self.set_features:
data[AtomicDataDict.NODE_FEATURES_KEY] = one_hot
return data
| [
"torch.nn.functional.one_hot"
] | 1.8 | mir-group/nequip | 4e6a0914a289cf000da57a6b6e79678efdf3347f |
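For reference, the encoding in the row above boils down to a single `torch.nn.functional.one_hot` call; a toy example with hypothetical per-atom type indices and 4 types:
import torch
types = torch.tensor([0, 2, 3, 1])  # hypothetical per-atom type indices
one_hot = torch.nn.functional.one_hot(types, num_classes=4).to(torch.float32)
print(one_hot)  # (4, 4) matrix with a single 1 per row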
1.8 | #!/usr/bin/env python3
"""Generate `trn` files for Librispeech
Given a Librispeech directory, parse transcript files,
transcribe the corresponding audio, and generate hypothesis files.
"""
import os
import time
import logging
import argparse
from pathlib import Path
import torch
import torchaudio
import fairseq
import simple_ctc
_LG = logging.getLogger(__name__)
def _parse_args():
def _path(path):
return Path(os.path.normpath(path))
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
'--root-dir',
required=True,
type=_path,
help='The root directory from which data are parsed.'
)
parser.add_argument(
'--output-dir',
required=True,
type=_path,
help='The output directory where trn files are generated.'
)
parser.add_argument(
'--model-file',
required=True,
type=_path,
help='Path to a finetuned weight file.'
)
parser.add_argument(
'--dict-file',
required=True,
type=_path,
help='Path to `dict.ltr.txt` file.'
)
parser.add_argument(
'--num-threads',
type=int,
default=4,
help='Maximum number of threads.'
)
args = parser.parse_args()
for path in [args.root_dir, args.output_dir, args.model_file, args.dict_file]:
if not os.path.exists(path):
raise RuntimeError(f'File or directory does not exist: {path}')
return args
def _parse_transcript(path):
with open(path) as trans_fileobj:
for line in trans_fileobj:
line = line.strip()
if not line:
continue
id, transcription = line.split(' ', maxsplit=1)
yield id, transcription
def _parse_transcriptions(root_dir, output_dir):
_LG.info('Parsing transcriptions')
audios = []
trn = output_dir / 'ref.trn'
txt = output_dir / 'ref.trans.txt'
with open(trn, 'w') as trn_fileobj, open(txt, 'w') as txt_fileobj:
for trans_file in root_dir.glob('**/*.trans.txt'):
trans_dir = trans_file.parent
for id, transcription in _parse_transcript(trans_file):
trn_fileobj.write(f'{transcription} ({id})\n')
txt_fileobj.write(f'{id} {transcription}\n')
audio_path = trans_dir / f'{id}.flac'
audios.append((id, audio_path))
return audios
def _load_vocab(dict_file):
tokens = ["<s>", "<pad>", "</s>", "<unk>"]
with open(dict_file, mode='r', encoding='utf-8') as fileobj:
for line in fileobj:
tokens.append(line.split()[0])
return tokens
def _count_params(model):
return sum(p.numel() for p in model.parameters())
def _load_model(model_file, dict_file):
_LG.info('Loading the model')
labels = _load_vocab(dict_file)
overrides = {'data': str(dict_file.parent)}
models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[str(model_file)], arg_overrides=overrides
)
model = models[0].eval()
encoder = model.w2v_encoder
decoder = simple_ctc.BeamSearchDecoder(
labels,
cutoff_top_n=40,
cutoff_prob=0.8,
beam_size=100,
num_processes=1,
blank_id=0,
is_nll=True,
)
_LG.info('#parameters: %s', _count_params(encoder))
return encoder, decoder
def _decode(audios, encoder, decoder, output_dir):
trn = output_dir / 'hyp.trn'
trans = output_dir / 'hyp.trans.txt'
t_enc, t_dec, num_frames = 0.0, 0.0, 0
with open(trn, 'w') as trn_fileobj, open(trans, 'w') as txt_fileobj:
for i, (id, path) in enumerate(audios):
waveform, _ = torchaudio.load(path)
mask = torch.zeros_like(waveform)
t0 = time.monotonic()
ir = encoder(waveform, mask)['encoder_out'].transpose(1, 0)
t1 = time.monotonic()
result = decoder.decode(ir)
t2 = time.monotonic()
trn = ''.join(result.label_sequences[0][0]).replace('|', ' ')
trn_fileobj.write(f'{trn} ({id})\n')
txt_fileobj.write(f'{id} {trn}\n')
_LG.info('%d/%d: %s: %s', i, len(audios), id, trn)
num_frames += waveform.size(1)
t_enc += t1 - t0
t_dec += t2 - t1
t_audio = num_frames / 16000
_LG.info('Audio duration: %s [sec]', t_audio)
_LG.info('Encoding Time: %s [sec]', t_enc)
_LG.info('Decoding Time: %s [sec]', t_dec)
_LG.info('Total Inference Time: %s [sec]', t_enc + t_dec)
def _main():
args = _parse_args()
torch.set_num_threads(args.num_threads)
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO)
audios = _parse_transcriptions(args.root_dir, args.output_dir)
encoder, decoder = _load_model(args.model_file, args.dict_file)
_decode(audios, encoder, decoder, args.output_dir)
if __name__ == '__main__':
_main()
| [
"torch.zeros_like",
"torch.set_num_threads"
] | 1.8 | mthrok/ctcdecode | b1a30d7a65342012e0d2524d9bae1c5412b24a23 |
1.5 | import torch
from torch import nn
from torch.nn import init
from torch.utils.data import DataLoader
from overrides import overrides
import numpy as np
import time
from models.BaseModel import BaseModel
class PCAModel(BaseModel):
def __init__(self, configs: object):
super().__init__(configs.model.model_name, configs.device)
from sklearn.decomposition import PCA
self.pca_cls = PCA(n_components=30)
from sklearn.svm import SVC
self.svm_cls = SVC(kernel="rbf", probability=True, )
@overrides
def train_epoch(self, epoch_num: int, train_loader: DataLoader):
x = torch.flatten(train_loader.dataset.data, 1).numpy()
y = train_loader.dataset.targets.numpy()
self.pca_cls.fit(x, y)
x_pca = self.pca_cls.transform(x)
# print(x_pca.shape)
self.svm_cls.fit(x_pca, y)
@overrides
def test_epoch(self, epoch_num: int, test_loader: DataLoader):
x = torch.flatten(test_loader.dataset.data, 1).numpy()
y = test_loader.dataset.targets.numpy()
pca_result: np.ndarray = self.pca_cls.transform(x)
predict_score = self.svm_cls.predict(pca_result)
predict_result = predict_score
# predict_result = np.argmax(predict_score,axis=1)
# print(x.shape, predict_score.shape, predict_result.shape, y.shape)
results: np.ndarray = predict_result == y
return sum(results) / len(results)
@overrides
def run_epochs(self, epochs: int, train_loader: DataLoader, test_loader: DataLoader):
t1 = time.time()
self.train_epoch(0, train_loader)
t2 = time.time()
acc = self.test_epoch(0, test_loader)
if self.writer:
self.writer.add_scalar('test_acc', acc, 0)
print(acc, t2 - t1, time.time() - t2)
| [
"torch.flatten"
] | 1.5.0 | madcpt/MachineWontLie | 992156f3916bafeaa01a3685eae285550391132e |
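A tiny self-contained sketch (synthetic data, shapes made up for illustration) of the PCA-then-SVM pipeline that `PCAModel` above wraps:
import numpy as np
from sklearn.decomposition import PCA
from sklearn.svm import SVC
x = np.random.rand(200, 784).astype(np.float32)  # e.g. flattened 28x28 images
y = np.random.randint(0, 10, size=200)
x_pca = PCA(n_components=30).fit_transform(x)
clf = SVC(kernel="rbf", probability=True).fit(x_pca, y)
print((clf.predict(x_pca) == y).mean())  # training accuracy on the toy data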
1.8 | # Copyright (C) 2021, Mindee.
# This program is licensed under the Apache License version 2.
# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
from typing import Any, List, Tuple, Union
import numpy as np
import torch
from torch import nn
from doctr.models.preprocessor import PreProcessor
from ._utils import remap_preds, split_crops
__all__ = ['RecognitionPredictor']
class RecognitionPredictor(nn.Module):
"""Implements an object able to identify character sequences in images
Args:
pre_processor: transform inputs for easier batched model inference
model: core detection architecture
split_wide_crops: whether to use crop splitting for high aspect ratio crops
"""
def __init__(
self,
pre_processor: PreProcessor,
model: nn.Module,
split_wide_crops: bool = True,
) -> None:
super().__init__()
self.pre_processor = pre_processor
self.model = model.eval()
self.split_wide_crops = split_wide_crops
self.critical_ar = 8 # Critical aspect ratio
self.dil_factor = 1.4 # Dilation factor to overlap the crops
self.target_ar = 4 # Target aspect ratio
@torch.no_grad()
def forward(
self,
crops: List[Union[np.ndarray, torch.Tensor]],
**kwargs: Any,
) -> List[Tuple[str, float]]:
if len(crops) == 0:
return []
# Dimension check
if any(crop.ndim != 3 for crop in crops):
raise ValueError("incorrect input shape: all crops are expected to be multi-channel 2D images.")
# Split crops that are too wide
remapped = False
if self.split_wide_crops:
new_crops, crop_map, remapped = split_crops(
crops,
self.critical_ar,
self.target_ar,
self.dil_factor,
isinstance(crops[0], np.ndarray)
)
if remapped:
crops = new_crops
# Resize & batch them
processed_batches = self.pre_processor(crops)
# Forward it
raw = [
self.model(batch, return_preds=True, **kwargs)['preds'] # type: ignore[operator]
for batch in processed_batches
]
# Process outputs
out = [charseq for batch in raw for charseq in batch]
# Remap crops
if self.split_wide_crops and remapped:
out = remap_preds(out, crop_map, self.dil_factor)
return out
| [
"torch.no_grad"
] | 1.8.0 | thentgesMindee/doctr | f97e92ba1b7bcb785a60f2cf549f13f88e510609 |
0.4 | from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
def convbn(in_planes, out_planes, kernel_size, stride, pad, dilation):
return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=dilation if dilation > 1 else pad, dilation=dilation, bias=False),
nn.BatchNorm2d(out_planes)
)
def convbn_3d(in_planes, out_planes, kernel_size, stride, pad):
return nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, padding=pad, stride=stride, bias=False),
nn.BatchNorm3d(out_planes)
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, kernel_size, stride, downsample, pad, dilation):
super(BasicBlock, self).__init__()
self.conv1 = nn.Sequential(convbn(inplanes, planes, kernel_size, stride, pad, dilation),
nn.ReLU(inplace=True)
)
self.conv2 = convbn(planes, planes, kernel_size, 1, pad, dilation)
self.stride = stride
self.downsample = downsample
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
if self.downsample is not None:
x = self.downsample(x)
out += x
return out
class matchshifted(nn.Module):
def __init__(self):
super(matchshifted, self).__init__()
def forward(self, left, right, shift):
batch, filters, height, width = left.size()
shifted_left = F.pad(torch.index_select(left, 3, Variable(torch.LongTensor([i for i in range(shift, width)])).cuda()), (shift, 0, 0, 0))
shifted_right = F.pad(torch.index_select(right, 3, Variable(torch.LongTensor([i for i in range(width-shift)])).cuda()), (shift, 0, 0, 0))
out = torch.cat((shifted_left, shifted_right), 1).view(batch, filters*2, 1, height, width)
return out
class disparityregression(nn.Module):
def __init__(self, maxdisp):
super(disparityregression, self).__init__()
self.disp = Variable(torch.Tensor(np.reshape(np.array(range(maxdisp)), [1, maxdisp, 1, 1])).cuda(), requires_grad=False)
def forward(self, x):
disp = self.disp.repeat(x.size()[0], 1, x.size()[2], x.size()[3])
out = torch.sum(x*disp, 1)
return out
class feature_extraction(nn.Module):
def __init__(self):
super(feature_extraction, self).__init__()
self.inplanes = 32
self.layer0 = nn.Sequential(convbn(in_planes=3, out_planes=32, kernel_size=3, stride=1, pad=1, dilation=1),
nn.ReLU(inplace=True)
)
self.layer1 = self._make_layer(block=BasicBlock, planes=32, blocks=3, kernel_size=3, stride=2, pad=1, dilation=1, order=1)
self.layer2 = self._make_layer(BasicBlock, 64, 8, 3, 2, 1, 1, 1)
self.layer3 = self._make_layer(BasicBlock, 128, 3, 3, 2, 1, 1, 2)
self.layer1_after = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True))
self.layer2_after = nn.Sequential(convbn(32, 64, 3, 2, 1, 1),
nn.ReLU(inplace=True))
self.layer3_after = nn.Sequential(convbn(64, 128, 3, 2, 1, 1),
nn.ReLU(inplace=True))
self.layer1_final = nn.Sequential(convbn(32, 32, 3, 2, 1, 1),
nn.ReLU(inplace=True))
self.dilat1 = nn.Sequential(convbn(128, 32, 3, 1, 1, 32),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.dilat2 = nn.Sequential(convbn(128, 32, 3, 1, 1, 16),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.dilat3 = nn.Sequential(convbn(128, 32, 3, 1, 1, 8),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 4),
nn.ReLU(inplace=True))
self.dilat4 = nn.Sequential(convbn(128, 32, 3, 1, 1, 6),
nn.ReLU(inplace=True),
convbn(32, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True))
self.branch1 = nn.Sequential(nn.AvgPool2d((64, 64), stride=(64, 64)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch2 = nn.Sequential(nn.AvgPool2d((32, 32), stride=(32, 32)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch3 = nn.Sequential(nn.AvgPool2d((16, 16), stride=(16, 16)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.branch4 = nn.Sequential(nn.AvgPool2d((8, 8), stride=(8, 8)),
convbn(128, 32, 1, 1, 0, 1),
nn.ReLU(inplace=True))
self.concat_dilate_pool = nn.Sequential(convbn(64, 32, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False))
self.lastconv = nn.Sequential(convbn(352, 128, 3, 1, 1, 1),
nn.ReLU(inplace=True),
nn.Conv2d(128, 32, kernel_size=1, padding=0, stride=1, bias=False))
def _make_layer(self, block, planes, blocks, kernel_size, stride, pad, dilation, order):
downsample = None
if stride != 1:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes * order, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),)
layers = []
layers.append(block(self.inplanes*order, planes, kernel_size, stride, downsample, pad, dilation))
if blocks != 1:
for i in range(1, blocks):
layers.append(block(planes, planes, kernel_size, 1, None, pad, dilation))
return nn.Sequential(*layers)
def forward(self, x):
out_0 = self.layer0(x)
out_1 = self.layer1(out_0)
out_1_a = self.layer1_after(out_0)
out_1 = out_1 + out_1_a
out_2 = self.layer2(out_1)
out_2_a = self.layer2_after(out_1)
out_2 = out_2 + out_2_a
out_3 = self.layer3(out_2)
out_3_a = self.layer3_after(out_2)
out_3 = out_3 + out_3_a
out_1 = self.layer1_final(out_1)
inPooling = F.upsample(out_3, (out_2.size()[2], out_2.size()[3]), mode='bilinear')
#Pooling
output_dilate1 = self.dilat1(inPooling)
output_dilate2 = self.dilat2(inPooling)
output_dilate3 = self.dilat3(inPooling)
output_dilate4 = self.dilat4(inPooling)
output_branch1 = self.branch1(inPooling)
output_branch1 = F.upsample(output_branch1, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
output_branch2 = self.branch2(inPooling)
output_branch2 = F.upsample(output_branch2, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
output_branch3 = self.branch3(inPooling)
output_branch3 = F.upsample(output_branch3, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
output_branch4 = self.branch4(inPooling)
output_branch4 = F.upsample(output_branch4, (inPooling.size()[2], inPooling.size()[3]), mode='bilinear')
#concat dilate and avgpool
out_fusion1 = torch.cat((output_dilate1, output_branch1), 1)
out_fusion1 = self.concat_dilate_pool(out_fusion1)
out_fusion2 = torch.cat((output_dilate2, output_branch2), 1)
out_fusion2 = self.concat_dilate_pool(out_fusion2)
out_fusion3 = torch.cat((output_dilate3, output_branch3), 1)
out_fusion3 = self.concat_dilate_pool(out_fusion3)
out_fusion4 = torch.cat((output_dilate4, output_branch4), 1)
out_fusion4 = self.concat_dilate_pool(out_fusion4)
output_feature = torch.cat((out_1, out_2, inPooling, out_fusion1, out_fusion2, out_fusion3, out_fusion4), 1)
output_feature = self.lastconv(output_feature)
return output_feature
| [
"torch.cat",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Conv3d",
"torch.nn.BatchNorm3d",
"torch.sum"
] | 0.4.0 | wangqingyu985/OpenStereo | 91d605357d65281b99b0d8cf45e3f15f0543c9fa |
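For context, a standalone sketch with assumed shapes (not part of the row above): `disparityregression` computes the expected disparity under a per-pixel softmax over disparity hypotheses, which is what yields sub-pixel estimates.
import torch
import torch.nn.functional as F
B, D, H, W = 1, 192, 64, 128                      # batch, max disparity, height, width (assumed)
cost = torch.randn(B, D, H, W)                    # stand-in cost volume
prob = F.softmax(cost, dim=1)                     # probabilities over disparity hypotheses
disp_values = torch.arange(D, dtype=torch.float32).view(1, D, 1, 1)
disparity = torch.sum(prob * disp_values, dim=1)  # (B, H, W), what disparityregression computes
print(disparity.shape)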
1.1 | import signal
import time
from typing import Any, Callable
import torch
from easydict import EasyDict
from .time_helper_base import TimeWrapper
from .time_helper_cuda import get_cuda_time_wrapper
def build_time_helper(cfg: EasyDict = None, wrapper_type: str = None) -> Callable[[], 'TimeWrapper']:
r"""
Overview:
Build the timehelper
Arguments:
- cfg (:obj:`dict`):
The config file, which is a multi-level dict, has large domains such as
evaluate, common, model, train etc., and each large domain
has its own smaller domains.
- wrapper_type (:obj:`str`): The type of wrapper returned, support ``['time', 'cuda']``
Returns:
- time_wrapper (:obj:`TimeWrapper`):
Returns the corresponding time wrapper. Reference: ``ding.utils.timehelper.TimeWrapperTime``
and ``ding.utils.timehelper.get_cuda_time_wrapper``.
"""
# Note: wrapper_type has higher priority
if wrapper_type is not None:
time_wrapper_type = wrapper_type
elif cfg is not None:
time_wrapper_type = cfg.common.time_wrapper_type
else:
raise RuntimeError('Either wrapper_type or cfg should be provided.')
if time_wrapper_type == 'time':
return TimeWrapperTime
elif time_wrapper_type == 'cuda':
if torch.cuda.is_available():
# lazy initialize to make code runnable locally
return get_cuda_time_wrapper()
else:
return TimeWrapperTime
else:
raise KeyError('invalid time_wrapper_type: {}'.format(time_wrapper_type))
class EasyTimer:
r"""
Overview:
A decent timer wrapper that can be used easily.
Interface:
``__init__``, ``__enter__``, ``__exit__``
Example:
>>> wait_timer = EasyTimer()
>>> with wait_timer:
>>> func(...)
>>> time_ = wait_timer.value # in second
"""
def __init__(self, cuda=True):
r"""
Overview:
Init class EasyTimer
Arguments:
- cuda (:obj:`bool`): Whether to build timer with cuda type
"""
if torch.cuda.is_available() and cuda:
time_wrapper_type = "cuda"
else:
time_wrapper_type = "time"
self._timer = build_time_helper(wrapper_type=time_wrapper_type)
self.value = 0.0
def __enter__(self):
r"""
Overview:
Enter timer, start timing
"""
self.value = 0.0
self._timer.start_time()
def __exit__(self, *args):
r"""
Overview:
Exit timer, stop timing
"""
self.value = self._timer.end_time()
class TimeWrapperTime(TimeWrapper):
r"""
Overview:
A class method that inherit from ``TimeWrapper`` class
Interface:
``start_time``, ``end_time``
"""
# overwrite
@classmethod
def start_time(cls):
r"""
Overview:
Implement and override the ``start_time`` method of the ``TimeWrapper`` class
"""
cls.start = time.time()
# overwrite
@classmethod
def end_time(cls):
r"""
Overview:
Implement and override the ``end_time`` method of the ``TimeWrapper`` class
Returns:
- time (:obj:`float`): The time elapsed between ``start_time`` and ``end_time``
"""
cls.end = time.time()
return cls.end - cls.start
class WatchDog(object):
"""
Overview:
Simple watchdog timer to detect timeouts
Arguments:
- timeout (:obj:`int`): Timeout value of the ``watchdog [seconds]``.
.. note::
If it is not reset before exceeding this value, a ``TimeoutError`` is raised.
Interface:
``start``, ``stop``
Examples:
>>> watchdog = WatchDog(x) # x is a timeout value
>>> ...
>>> watchdog.start()
>>> ... # Some function
"""
def __init__(self, timeout: int = 1):
self._timeout = timeout + 1
self._failed = False
def start(self):
r"""
Overview:
Start watchdog.
"""
signal.signal(signal.SIGALRM, self._event)
signal.alarm(self._timeout)
@staticmethod
def _event(signum: Any, frame: Any):
raise TimeoutError()
def stop(self):
r"""
Overview:
Stop watchdog with ``alarm(0)``, ``SIGALRM``, and ``SIG_DFL`` signals.
"""
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
| [
"torch.cuda.is_available"
] | 1.1.0 | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 |