repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string, 1 class)
cwn | cwn-main/exp/run_exp.py
import os
import numpy as np
import copy
import pickle
import torch
import torch.optim as optim
import random
from data.data_loading import DataLoader, load_dataset, load_graph_dataset
from torch_geometric.data import DataLoader as PyGDataLoader
from exp.train_utils import train, eval, Evaluator
from exp.parser import get_parser, validate_args
from mp.graph_models import GIN0, GINWithJK
from mp.models import CIN0, Dummy, SparseCIN, CINpp, EdgeOrient, EdgeMPNN, MessagePassingAgnostic
from mp.molec_models import EmbedSparseCIN, EmbedCINpp, OGBEmbedSparseCIN, OGBEmbedCINpp, EmbedSparseCINNoRings, EmbedGIN
from mp.ring_exp_models import RingSparseCIN, RingGIN
def main(args):
"""The common training and evaluation script used by all the experiments."""
# set device
device = torch.device(
"cuda:" + str(args.device)) if torch.cuda.is_available() else torch.device("cpu")
print("==========================================================")
print("Using device", str(device))
print(f"Fold: {args.fold}")
print(f"Seed: {args.seed}")
print("======================== Args ===========================")
print(args)
print("===================================================")
# Set the seed for everything
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
# Set double precision for SR experiments
if args.task_type == 'isomorphism':
assert args.dataset.startswith('sr')
torch.set_default_dtype(torch.float64)
# Create results folder
result_folder = os.path.join(
args.result_folder, f'{args.dataset}-{args.exp_name}', f'seed-{args.seed}')
if args.fold is not None:
result_folder = os.path.join(result_folder, f'fold-{args.fold}')
if not os.path.exists(result_folder):
os.makedirs(result_folder)
filename = os.path.join(result_folder, 'results.txt')
if args.model.startswith('gin'): # load graph dataset
graph_list, train_ids, val_ids, test_ids, num_classes = load_graph_dataset(
args.dataset, fold=args.fold, max_ring_size=args.max_ring_size)
train_graphs = [graph_list[i] for i in train_ids]
val_graphs = [graph_list[i] for i in val_ids]
train_loader = PyGDataLoader(train_graphs, batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers)
valid_loader = PyGDataLoader(val_graphs, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers)
if test_ids is not None:
test_graphs = [graph_list[i] for i in test_ids]
test_loader = PyGDataLoader(test_graphs, batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers)
else:
test_loader = None
if args.dataset.startswith('sr'):
num_features = 1
num_classes = args.emb_dim
else:
num_features = graph_list[0].x.shape[1]
else:
# Data loading
dataset = load_dataset(args.dataset, max_dim=args.max_dim, fold=args.fold,
init_method=args.init_method, emb_dim=args.emb_dim,
flow_points=args.flow_points, flow_classes=args.flow_classes,
max_ring_size=args.max_ring_size,
use_edge_features=args.use_edge_features,
include_down_adj=args.include_down_adj,
simple_features=args.simple_features, n_jobs=args.preproc_jobs,
train_orient=args.train_orient, test_orient=args.test_orient)
if args.tune:
split_idx = dataset.get_tune_idx_split()
else:
split_idx = dataset.get_idx_split()
# Instantiate data loaders
train_loader = DataLoader(dataset.get_split('train'), batch_size=args.batch_size,
shuffle=True, num_workers=args.num_workers, max_dim=dataset.max_dim)
valid_loader = DataLoader(dataset.get_split('valid'), batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, max_dim=dataset.max_dim)
test_split = split_idx.get("test", None)
test_loader = None
if test_split is not None:
test_loader = DataLoader(dataset.get_split('test'), batch_size=args.batch_size,
shuffle=False, num_workers=args.num_workers, max_dim=dataset.max_dim)
# Automatic evaluator, takes dataset name as input
evaluator = Evaluator(args.eval_metric, eps=args.iso_eps)
# Use coboundaries?
use_coboundaries = args.use_coboundaries.lower() == 'true'
# Readout dimensions
readout_dims = tuple(sorted(args.readout_dims))
# Instantiate model
# NB: here we assume to have the same number of features per dim
if args.model == 'cin':
model = CIN0(dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
).to(device)
elif args.model == 'sparse_cin':
model = SparseCIN(dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries, # whether to use coboundaries in up-msg
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'cin++':
model = CINpp(dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries, # whether to use coboundaries in up-msg
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'ring_sparse_cin':
model = RingSparseCIN(
dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
max_dim=dataset.max_dim, # max_dim
nonlinearity=args.nonlinearity, # nonlinearity
use_coboundaries=use_coboundaries, # whether to use coboundaries in up-msg
graph_norm=args.graph_norm, # normalization layer
).to(device)
elif args.model == 'gin':
model = GIN0(num_features, # num_input_features
args.num_layers, # num_layers
args.emb_dim, # hidden
num_classes, # num_classes
dropout_rate=args.drop_rate, # dropout rate
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
).to(device)
elif args.model == 'gin_ring':
model = RingGIN(num_features, # num_input_features
args.num_layers, # num_layers
args.emb_dim, # hidden
num_classes, # num_classes
nonlinearity=args.nonlinearity, # nonlinearity
graph_norm=args.graph_norm, # normalization layer
).to(device)
elif args.model == 'gin_jk':
model = GINWithJK(num_features, # num_input_features
args.num_layers, # num_layers
args.emb_dim, # hidden
num_classes, # num_classes
dropout_rate=args.drop_rate, # dropout rate
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
).to(device)
elif args.model == 'mp_agnostic':
model = MessagePassingAgnostic(
dataset.num_features_in_dim(0), # num_input_features
dataset.num_classes, # num_classes
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
).to(device)
elif args.model == 'dummy':
model = Dummy(dataset.num_features_in_dim(0),
dataset.num_classes,
args.num_layers,
max_dim=dataset.max_dim,
readout=args.readout,
).to(device)
elif args.model == 'edge_orient':
model = EdgeOrient(1,
dataset.num_classes,
args.num_layers,
args.emb_dim, # hidden
readout=args.readout,
nonlinearity=args.nonlinearity, # nonlinearity
dropout_rate=args.drop_rate, # dropout rate
fully_invar=args.fully_orient_invar
).to(device)
elif args.model == 'edge_mpnn':
model = EdgeMPNN(1,
dataset.num_classes,
args.num_layers,
args.emb_dim, # hidden
readout=args.readout,
nonlinearity=args.nonlinearity, # nonlinearity
dropout_rate=args.drop_rate, # dropout rate
fully_invar=args.fully_orient_invar,
).to(device)
elif args.model == 'embed_sparse_cin':
model = EmbedSparseCIN(dataset.num_node_type, # The number of atomic types
dataset.num_edge_type, # The number of bond types
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries,
embed_edge=args.use_edge_features,
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'embed_cin++':
model = EmbedCINpp(atom_types=dataset.num_node_type, # The number of atomic types
bond_types=dataset.num_edge_type, # The number of bond types
out_size=dataset.num_classes, # num_classes
num_layers=args.num_layers, # num_layers
hidden=args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries,
embed_edge=args.use_edge_features,
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'embed_sparse_cin_no_rings':
model = EmbedSparseCINNoRings(dataset.num_node_type, # The number of atomic types
dataset.num_edge_type, # The number of bond types
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries,
embed_edge=args.use_edge_features,
graph_norm=args.graph_norm, # normalization layer
).to(device)
elif args.model == 'embed_gin':
model = EmbedGIN(dataset.num_node_type, # The number of atomic types
dataset.num_edge_type, # The number of bond types
dataset.num_classes, # num_classes
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout rate
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
apply_dropout_before=args.drop_position, # where to apply dropout
embed_edge=args.use_edge_features,
).to(device)
# TODO: handle this as above
elif args.model == 'ogb_embed_sparse_cin':
model = OGBEmbedSparseCIN(dataset.num_tasks, # out_size
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout_rate
indropout_rate=args.indrop_rate, # in-dropout_rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump_mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries, # whether to use coboundaries
embed_edge=args.use_edge_features, # whether to use edge feats
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
elif args.model == 'ogb_embed_cin++':
model = OGBEmbedCINpp(dataset.num_tasks, # out_size
args.num_layers, # num_layers
args.emb_dim, # hidden
dropout_rate=args.drop_rate, # dropout_rate
indropout_rate=args.indrop_rate, # in-dropout_rate
max_dim=dataset.max_dim, # max_dim
jump_mode=args.jump_mode, # jump_mode
nonlinearity=args.nonlinearity, # nonlinearity
readout=args.readout, # readout
final_readout=args.final_readout, # final readout
apply_dropout_before=args.drop_position, # where to apply dropout
use_coboundaries=use_coboundaries, # whether to use coboundaries
embed_edge=args.use_edge_features, # whether to use edge feats
graph_norm=args.graph_norm, # normalization layer
readout_dims=readout_dims # readout_dims
).to(device)
else:
raise ValueError('Invalid model type {}.'.format(args.model))
print("============= Model Parameters =================")
trainable_params = 0
total_params = 0
for name, param in model.named_parameters():
if param.requires_grad:
print(name, param.size())
trainable_params += param.numel()
total_params += param.numel()
print("============= Params stats ==================")
print(f"Trainable params: {trainable_params}")
print(f"Total params : {total_params}")
# instantiate optimiser
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# instantiate learning rate decay
if args.lr_scheduler == 'ReduceLROnPlateau':
mode = 'min' if args.minimize else 'max'
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=mode,
factor=args.lr_scheduler_decay_rate,
patience=args.lr_scheduler_patience,
verbose=True)
elif args.lr_scheduler == 'StepLR':
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_scheduler_decay_steps,
gamma=args.lr_scheduler_decay_rate)
elif args.lr_scheduler == 'None':
scheduler = None
else:
raise NotImplementedError(f'Scheduler {args.lr_scheduler} is not currently supported.')
# (!) start training/evaluation
best_val_epoch = 0
valid_curve = []
test_curve = []
train_curve = []
train_loss_curve = []
params = []
if not args.untrained:
for epoch in range(1, args.epochs + 1):
# perform one epoch
print("=====Epoch {}".format(epoch))
print('Training...')
epoch_train_curve = train(model, device, train_loader, optimizer, args.task_type)
train_loss_curve += epoch_train_curve
epoch_train_loss = float(np.mean(epoch_train_curve))
# evaluate model
print('Evaluating...')
if epoch == 1 or epoch % args.train_eval_period == 0:
train_perf, _ = eval(model, device, train_loader, evaluator, args.task_type)
train_curve.append(train_perf)
valid_perf, epoch_val_loss = eval(model, device,
valid_loader, evaluator, args.task_type)#, dataset[split_idx["valid"]])
valid_curve.append(valid_perf)
if test_loader is not None:
test_perf, epoch_test_loss = eval(model, device, test_loader, evaluator,
args.task_type)
else:
test_perf = np.nan
epoch_test_loss = np.nan
test_curve.append(test_perf)
print(f'Train: {train_perf:.3f} | Validation: {valid_perf:.3f} | Test: {test_perf:.3f}'
f' | Train Loss {epoch_train_loss:.3f} | Val Loss {epoch_val_loss:.3f}'
f' | Test Loss {epoch_test_loss:.3f}')
# decay learning rate
if scheduler is not None:
if args.lr_scheduler == 'ReduceLROnPlateau':
scheduler.step(valid_perf)
# We use a strict inequality here like in the benchmarking GNNs paper code
# https://github.com/graphdeeplearning/benchmarking-gnns/blob/master/main_molecules_graph_regression.py#L217
if args.early_stop and optimizer.param_groups[0]['lr'] < args.lr_scheduler_min:
print("\n!! The minimum learning rate has been reached.")
break
else:
scheduler.step()
i = 0
new_params = []
if epoch % args.train_eval_period == 0:
print("====== Slowly changing params ======= ")
for name, param in model.named_parameters():
# print(f"Param {name}: {param.data.view(-1)[0]}")
# new_params.append(param.data.detach().clone().view(-1)[0])
new_params.append(param.data.detach().mean().item())
if len(params) > 0 and epoch % args.train_eval_period == 0:
if abs(params[i] - new_params[i]) < 1e-6:
print(f"Param {name}: {params[i] - new_params[i]}")
i += 1
params = copy.copy(new_params)
if not args.minimize:
best_val_epoch = np.argmax(np.array(valid_curve))
else:
best_val_epoch = np.argmin(np.array(valid_curve))
else:
train_loss_curve.append(np.nan)
train_curve.append(np.nan)
valid_curve.append(np.nan)
test_curve.append(np.nan)
print('Final Evaluation...')
final_train_perf = np.nan
final_val_perf = np.nan
final_test_perf = np.nan
if not args.dataset.startswith('sr'):
final_train_perf, _ = eval(model, device, train_loader, evaluator, args.task_type)
final_val_perf, _ = eval(model, device, valid_loader, evaluator, args.task_type)
if test_loader is not None:
final_test_perf, _ = eval(model, device, test_loader, evaluator, args.task_type)
# save results
curves = {
'train_loss': train_loss_curve,
'train': train_curve,
'val': valid_curve,
'test': test_curve,
'last_val': final_val_perf,
'last_test': final_test_perf,
'last_train': final_train_perf,
'best': best_val_epoch}
msg = (
f'========== Result ============\n'
f'Dataset: {args.dataset}\n'
f'------------ Best epoch -----------\n'
f'Train: {train_curve[best_val_epoch]}\n'
f'Validation: {valid_curve[best_val_epoch]}\n'
f'Test: {test_curve[best_val_epoch]}\n'
f'Best epoch: {best_val_epoch}\n'
'------------ Last epoch -----------\n'
f'Train: {final_train_perf}\n'
f'Validation: {final_val_perf}\n'
f'Test: {final_test_perf}\n'
'-------------------------------\n\n')
print(msg)
msg += str(args)
with open(filename, 'w') as handle:
handle.write(msg)
if args.dump_curves:
with open(os.path.join(result_folder, 'curves.pkl'), 'wb') as handle:
pickle.dump(curves, handle)
return curves
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
validate_args(args)
main(args)
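# Illustrative invocation (added note, not part of the original script). The
# flags mirror those exercised in exp/test_run_exp.py further below; the exact
# values are placeholders:
#
#   python -m exp.run_exp --dataset DUMMYM --model sparse_cin --num_layers 3 \
#       --emb_dim 8 --batch_size 3 --epochs 1 --max_ring_size 5 \
#       --use_coboundaries True --graph_norm id --lr_scheduler None \
#       --exp_name example_run --readout_dims 0 2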
cwn | cwn-main/exp/count_rings.py
import sys
import numpy as np
import argparse
import time
from data.parallel import ProgressParallel
from data.data_loading import load_graph_dataset
from data.utils import get_rings
from joblib import delayed
parser = argparse.ArgumentParser(description='Ring counting experiment.')
parser.add_argument('--dataset', type=str, default="ZINC",
help='dataset name (default: ZINC)')
parser.add_argument('--n_jobs', type=int, default=4,
help='Number of jobs to use')
parser.add_argument('--max_ring_size', type=int, default=12,
help='maximum ring size to look for')
def get_ring_count_for_graph(edge_index, max_ring, keys):
rings = get_rings(edge_index, max_k=max_ring)
rings_per_graph = {key: 0 for key in keys}
for ring in rings:
k = len(ring)
rings_per_graph[k] += 1
return rings_per_graph
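# Illustrative example (added; the values are hypothetical): for a graph whose
# edge_index (passed as a numpy array) describes a single 4-cycle, and with
# keys = [3, 4, 5], the function returns {3: 0, 4: 1, 5: 0} -- one ring of
# size 4 and none of the other tracked sizes.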
def combine_all_cards(*cards):
keys = cards[0].keys()
ring_cards = {key: [] for key in keys}
for card in cards:
for k in keys:
ring_cards[k].append(card[k])
return ring_cards
def get_ring_counts(dataset, max_ring, jobs):
start = time.time()
keys = list(range(3, max_ring+1))
parallel = ProgressParallel(n_jobs=jobs, use_tqdm=True, total=len(dataset))
# It is important we supply a numpy array here. tensors seem to slow joblib down significantly.
cards = parallel(delayed(get_ring_count_for_graph)(
graph.edge_index.numpy(), max_ring, keys) for graph in dataset)
end = time.time()
print(f'Done ({end - start:.2f} secs).')
return combine_all_cards(*cards)
def combine_all_counts(*stats):
all_stats = dict()
for k in stats[0].keys():
all_stats[k] = []
for stat in stats:
for k, v in stat.items():
# Extend the list
all_stats[k] += v
return all_stats
def print_stats(stats):
for k in stats:
min = np.min(stats[k])
max = np.max(stats[k])
mean = np.mean(stats[k])
med = np.median(stats[k])
sum = np.sum(stats[k])
nz = np.count_nonzero(stats[k])
print(
f'Ring {k:02d} => Min: {min:.3f}, Max: {max:.3f}, Mean:{mean:.3f}, Median: {med:.3f}, '
f'Sum: {sum:05d}, Non-zero: {nz:05d}')
def exp_main(passed_args):
args = parser.parse_args(passed_args)
print('----==== {} ====----'.format(args.dataset))
graph_list, train_ids, val_ids, test_ids, _ = load_graph_dataset(args.dataset)
graph_list = list(graph_list) # Needed to bring OGB in the right format
train = [graph_list[i] for i in train_ids]
val = [graph_list[i] for i in val_ids]
test = None
if test_ids is not None:
test = [graph_list[i] for i in test_ids]
print("Counting rings on the training set ....")
print("First, it will take a while to set up the processes...")
train_stats = get_ring_counts(train, args.max_ring_size, args.n_jobs)
print("Counting rings on the validation set ....")
val_stats = get_ring_counts(val, args.max_ring_size, args.n_jobs)
test_stats = None
if test is not None:
print("Counting rings on the test set ....")
test_stats = get_ring_counts(test, args.max_ring_size, args.n_jobs)
all_stats = combine_all_counts(train_stats, val_stats, test_stats)
else:
all_stats = combine_all_counts(train_stats, val_stats)
print("=============== Train ================")
print_stats(train_stats)
print("=============== Validation ================")
print_stats(val_stats)
if test is not None:
print("=============== Test ================")
print_stats(test_stats)
print("=============== Whole Dataset ================")
print_stats(all_stats)
if __name__ == "__main__":
passed_args = sys.argv[1:]
exp_main(passed_args)
cwn | cwn-main/exp/prepare_sr_tests.py
import os
import sys
import pickle
from data.data_loading import load_dataset, load_graph_dataset
from data.perm_utils import permute_graph, generate_permutation_matrices
from definitions import ROOT_DIR
__families__ = [
'sr16622',
'sr251256',
'sr261034',
'sr281264',
'sr291467',
'sr351668',
'sr351899',
'sr361446',
'sr401224'
]
def prepare(family, jobs, max_ring_size, permute, init, seed):
root = os.path.join(ROOT_DIR, 'datasets')
raw_dir = os.path.join(root, 'SR_graphs', 'raw')
_ = load_dataset(family, max_dim=2, max_ring_size=max_ring_size, n_jobs=jobs, init_method=init)
if permute:
graphs, _, _, _, _ = load_graph_dataset(family)
permuted_graphs = list()
for graph in graphs:
perm = generate_permutation_matrices(graph.num_nodes, 1, seed=seed)[0]
permuted_graph = permute_graph(graph, perm)
permuted_graphs.append((permuted_graph.edge_index, permuted_graph.num_nodes))
with open(os.path.join(raw_dir, f'{family}p{seed}.pkl'), 'wb') as handle:
pickle.dump(permuted_graphs, handle)
_ = load_dataset(f'{family}p{seed}', max_dim=2, max_ring_size=max_ring_size, n_jobs=jobs, init_method=init)
if __name__ == "__main__":
# Standard args
passed_args = sys.argv[1:]
jobs = int(passed_args[0])
max_ring_size = int(passed_args[1])
permute = passed_args[2].lower()
init_method = passed_args[3].lower()
assert max_ring_size > 3
# Execute
for family in __families__:
print('\n==============================================================')
print(f'[i] Preprocessing on family {family}...')
prepare(family, jobs, max_ring_size, permute=='y', init_method, 43)
cwn | cwn-main/exp/prepare_tu_tuning.py
import sys
import yaml
from data.data_loading import load_dataset
if __name__ == "__main__":
# standard args
passed_args = sys.argv[1:]
conf_path = passed_args[0]
# parse grid from yaml
with open(conf_path, 'r') as handle:
conf = yaml.safe_load(handle)
dataset = conf['dataset']
max_dims = conf['max_dim']
max_ring_sizes = conf['max_ring_size']
init_methods = conf['init_method']
# build datasets is not present
for max_dim in max_dims:
for max_ring_size in max_ring_sizes:
for init in init_methods:
_ = load_dataset(dataset, max_dim=max_dim, init_method=init, max_ring_size=max_ring_size)
cwn | cwn-main/exp/run_sr_exp.py
import os
import sys
import copy
import time
import numpy as np
import subprocess
from definitions import ROOT_DIR
from exp.parser import get_parser
from exp.run_exp import main
# python3 -m exp.run_sr_exp --task_type isomorphism --eval_metric isomorphism --untrained --model sparse_cin --nonlinearity id --emb_dim 16 --readout sum --num_layers 5
# python3 -m exp.run_sr_exp --task_type isomorphism --eval_metric isomorphism --untrained --model gin --nonlinearity id --emb_dim 16 --readout sum --num_layers 5
#--jump_mode None
__families__ = [
'sr16622',
'sr251256',
'sr261034',
'sr281264',
'sr291467',
'sr351668',
'sr351899',
'sr361446',
'sr401224'
]
__max_dim__ = [
3,
4,
3,
6,
4,
4,
6,
3,
3]
if __name__ == "__main__":
# Extract the commit sha so we can check the code that was used for each experiment
sha = subprocess.check_output(["git", "describe", "--always"]).strip().decode()
# standard args
passed_args = sys.argv[1:]
assert '--seed' not in passed_args
assert '--dataset' not in passed_args
assert '--readout_dims' not in passed_args
parser = get_parser()
args = parser.parse_args(copy.copy(passed_args))
# set result folder
folder_name = f'SR-{args.exp_name}'
if '--max_ring_size' in passed_args:
folder_name += f'-{args.max_ring_size}'
result_folder = os.path.join(args.result_folder, folder_name)
passed_args += ['--result_folder', result_folder]
# run each experiment separately and gather results
results = [list() for _ in __families__]
for f, family in enumerate(__families__):
for seed in range(args.start_seed, args.stop_seed + 1):
print(f'[i] family {family}, seed {seed}')
current_args = copy.copy(passed_args) + ['--dataset', family, '--seed', str(seed)]
if '--max_dim' not in passed_args:
if '--max_ring_size' not in passed_args:
current_args += ['--max_dim', str(__max_dim__[f])]
max_dim = __max_dim__[f]
else:
current_args += ['--max_dim', str(2)]
max_dim = 2
else:
assert '--max_ring_size' not in passed_args
max_dim = args.max_dim
readout_dims = [str(i) for i in range(max_dim + 1)]
readout_dims = ['--readout_dims'] + readout_dims
current_args += readout_dims
parsed_args = parser.parse_args(current_args)
curves = main(parsed_args)
results[f].append(curves)
msg = (
f"========= Final result ==========\n"
f'Datasets: SR\n'
f'SHA: {sha}\n')
for f, family in enumerate(__families__):
curves = results[f]
test_perfs = [curve['last_test'] for curve in curves]
assert len(test_perfs) == args.stop_seed + 1 - args.start_seed
mean = np.mean(test_perfs)
std_err = np.std(test_perfs) / float(len(test_perfs))
minim = np.min(test_perfs)
maxim = np.max(test_perfs)
msg += (
f'------------------ {family} ------------------\n'
f'Mean failure rate: {mean}\n'
f'StdErr failure rate: {std_err}\n'
f'Min failure rate: {minim}\n'
f'Max failure rate: {maxim}\n'
'-----------------------------------------------\n')
print(msg)
# additionally write msg and configuration on file
msg += str(args)
filename = os.path.join(result_folder, 'result.txt')
print('Writing results at: {}'.format(filename))
with open(filename, 'w') as handle:
handle.write(msg)
cwn | cwn-main/exp/run_mol_exp.py
import sys
import os
import copy
import numpy as np
import subprocess
from exp.parser import get_parser
from exp.run_exp import main
from itertools import product
def exp_main(passed_args):
# Extract the commit sha so we can check the code that was used for each experiment
sha = subprocess.check_output(["git", "describe", "--always"]).strip().decode()
parser = get_parser()
args = parser.parse_args(copy.copy(passed_args))
assert args.stop_seed >= args.start_seed
# run each experiment separately and gather results
results = list()
if args.folds is None:
for seed in range(args.start_seed, args.stop_seed + 1):
current_args = copy.copy(passed_args) + ['--seed', str(seed)]
parsed_args = parser.parse_args(current_args)
curves = main(parsed_args)
results.append(curves)
else:
# Used by CSL only to run experiments across both seeds and folds
assert args.dataset == 'CSL'
for seed, fold in product(range(args.start_seed, args.stop_seed + 1), range(args.folds)):
current_args = copy.copy(passed_args) + ['--seed', str(seed)] + ['--fold', str(fold)]
parsed_args = parser.parse_args(current_args)
curves = main(parsed_args)
results.append(curves)
# Extract results
train_curves = [curves['train'] for curves in results]
val_curves = [curves['val'] for curves in results]
test_curves = [curves['test'] for curves in results]
best_idx = [curves['best'] for curves in results]
last_train = [curves['last_train'] for curves in results]
last_val = [curves['last_val'] for curves in results]
last_test = [curves['last_test'] for curves in results]
# Extract results at the best validation epoch.
best_epoch_train_results = [train_curves[i][best] for i, best in enumerate(best_idx)]
best_epoch_train_results = np.array(best_epoch_train_results, dtype=float)
best_epoch_val_results = [val_curves[i][best] for i, best in enumerate(best_idx)]
best_epoch_val_results = np.array(best_epoch_val_results, dtype=float)
best_epoch_test_results = [test_curves[i][best] for i, best in enumerate(best_idx)]
best_epoch_test_results = np.array(best_epoch_test_results, dtype=float)
# Compute stats for the best validation epoch
mean_train_perf = np.mean(best_epoch_train_results)
std_train_perf = np.std(best_epoch_train_results, ddof=1) # ddof=1 makes the estimator unbiased
mean_val_perf = np.mean(best_epoch_val_results)
std_val_perf = np.std(best_epoch_val_results, ddof=1) # ddof=1 makes the estimator unbiased
mean_test_perf = np.mean(best_epoch_test_results)
std_test_perf = np.std(best_epoch_test_results, ddof=1) # ddof=1 makes the estimator unbiased
min_perf = np.min(best_epoch_test_results)
max_perf = np.max(best_epoch_test_results)
# Compute stats for the last epoch
mean_final_train_perf = np.mean(last_train)
std_final_train_perf = np.std(last_train, ddof=1)
mean_final_val_perf = np.mean(last_val)
std_final_val_perf = np.std(last_val, ddof=1)
mean_final_test_perf = np.mean(last_test)
std_final_test_perf = np.std(last_test, ddof=1)
final_test_min = np.min(last_test)
final_test_max = np.max(last_test)
msg = (
f"========= Final result ==========\n"
f'Dataset: {args.dataset}\n'
f'SHA: {sha}\n'
f'----------- Best epoch ----------\n'
f'Train: {mean_train_perf} ± {std_train_perf}\n'
f'Valid: {mean_val_perf} ± {std_val_perf}\n'
f'Test: {mean_test_perf} ± {std_test_perf}\n'
f'Test Min: {min_perf}\n'
f'Test Max: {max_perf}\n'
f'----------- Last epoch ----------\n'
f'Train: {mean_final_train_perf} ± {std_final_train_perf}\n'
f'Valid: {mean_final_val_perf} ± {std_final_val_perf}\n'
f'Test: {mean_final_test_perf} ± {std_final_test_perf}\n'
f'Test Min: {final_test_min}\n'
f'Test Max: {final_test_max}\n'
f'---------------------------------\n\n')
print(msg)
# additionally write msg and configuration on file
msg += str(args)
filename = os.path.join(args.result_folder, f'{args.dataset}-{args.exp_name}/result.txt')
print('Writing results at: {}'.format(filename))
with open(filename, 'w') as handle:
handle.write(msg)
if __name__ == "__main__":
passed_args = sys.argv[1:]
assert '--seed' not in passed_args
assert '--fold' not in passed_args
exp_main(passed_args)
cwn | cwn-main/exp/train_utils.py
import os
import torch
import numpy as np
import logging
from tqdm import tqdm
from sklearn import metrics as met
from data.complex import ComplexBatch
from ogb.graphproppred import Evaluator as OGBEvaluator
cls_criterion = torch.nn.CrossEntropyLoss()
bicls_criterion = torch.nn.BCEWithLogitsLoss()
reg_criterion = torch.nn.L1Loss()
msereg_criterion = torch.nn.MSELoss()
def train(model, device, loader, optimizer, task_type='classification', ignore_unlabeled=False):
"""
Performs one training epoch, i.e. one optimization pass over the batches of a data loader.
"""
if task_type == 'classification':
loss_fn = cls_criterion
elif task_type == 'bin_classification':
loss_fn = bicls_criterion
elif task_type == 'regression':
loss_fn = reg_criterion
elif task_type == 'mse_regression':
loss_fn = msereg_criterion
else:
raise NotImplementedError('Training on task type {} not yet supported.'.format(task_type))
curve = list()
model.train()
num_skips = 0
for step, batch in enumerate(tqdm(loader, desc="Training iteration")):
batch = batch.to(device)
if isinstance(batch, ComplexBatch):
num_samples = batch.cochains[0].x.size(0)
for dim in range(1, batch.dimension+1):
num_samples = min(num_samples, batch.cochains[dim].num_cells)
else:
# This is graph.
num_samples = batch.x.size(0)
if num_samples <= 1:
# Skip batch if it only comprises one sample (could cause problems with BN)
num_skips += 1
if float(num_skips) / len(loader) >= 0.25:
logging.warning("Warning! 25% of the batches were skipped this epoch")
continue
# (DEBUG)
if num_samples < 10:
logging.warning("Warning! BatchNorm applied on a batch "
"with only {} samples".format(num_samples))
optimizer.zero_grad()
pred = model(batch)
if isinstance(loss_fn, torch.nn.CrossEntropyLoss):
targets = batch.y.view(-1,)
else:
targets = batch.y.to(torch.float32).view(pred.shape)
# In some ogbg-mol* datasets we may have null targets.
# When the cross entropy loss is used and targets are of shape (N,)
# the mask is broadcast automatically to the shape of the predictions.
mask = ~torch.isnan(targets)
loss = loss_fn(pred[mask], targets[mask])
loss.backward()
optimizer.step()
curve.append(loss.detach().cpu().item())
return curve
def infer(model, device, loader):
"""
Runs inference over all the batches of a data loader.
"""
model.eval()
y_pred = list()
for step, batch in enumerate(tqdm(loader, desc="Inference iteration")):
batch = batch.to(device)
with torch.no_grad():
pred = model(batch)
y_pred.append(pred.detach().cpu())
y_pred = torch.cat(y_pred, dim=0).numpy()
return y_pred
def eval(model, device, loader, evaluator, task_type):
"""
Evaluates a model over all the batches of a data loader.
"""
if task_type == 'classification':
loss_fn = cls_criterion
elif task_type == 'bin_classification':
loss_fn = bicls_criterion
elif task_type == 'regression':
loss_fn = reg_criterion
elif task_type == 'mse_regression':
loss_fn = msereg_criterion
else:
loss_fn = None
model.eval()
y_true = []
y_pred = []
losses = []
for step, batch in enumerate(tqdm(loader, desc="Eval iteration")):
# Cast features to double precision if that is used
if torch.get_default_dtype() == torch.float64:
for dim in range(batch.dimension + 1):
batch.cochains[dim].x = batch.cochains[dim].x.double()
assert batch.cochains[dim].x.dtype == torch.float64, batch.cochains[dim].x.dtype
batch = batch.to(device)
with torch.no_grad():
pred = model(batch)
if task_type != 'isomorphism':
if isinstance(loss_fn, torch.nn.CrossEntropyLoss):
targets = batch.y.view(-1,)
y_true.append(batch.y.detach().cpu())
else:
targets = batch.y.to(torch.float32).view(pred.shape)
y_true.append(batch.y.view(pred.shape).detach().cpu())
mask = ~torch.isnan(targets) # In some ogbg-mol* datasets we may have null targets.
loss = loss_fn(pred[mask], targets[mask])
losses.append(loss.detach().cpu().item())
else:
assert loss_fn is None
y_pred.append(pred.detach().cpu())
y_true = torch.cat(y_true, dim=0).numpy() if len(y_true) > 0 else None
y_pred = torch.cat(y_pred, dim=0).numpy()
input_dict = {'y_pred': y_pred, 'y_true': y_true}
mean_loss = float(np.mean(losses)) if len(losses) > 0 else np.nan
return evaluator.eval(input_dict), mean_loss
class Evaluator(object):
def __init__(self, metric, **kwargs):
if metric == 'isomorphism':
self.eval_fn = self._isomorphism
self.eps = kwargs.get('eps', 0.01)
self.p_norm = kwargs.get('p', 2)
elif metric == 'accuracy':
self.eval_fn = self._accuracy
elif metric == 'ap':
self.eval_fn = self._ap
elif metric == 'mae':
self.eval_fn = self._mae
elif metric.startswith('ogbg-mol'):
self._ogb_evaluator = OGBEvaluator(metric)
self._key = self._ogb_evaluator.eval_metric
self.eval_fn = self._ogb
else:
raise NotImplementedError('Metric {} is not yet supported.'.format(metric))
def eval(self, input_dict):
return self.eval_fn(input_dict)
def _isomorphism(self, input_dict):
# NB: here we return the failure percentage... the smaller the better!
preds = input_dict['y_pred']
assert preds is not None
assert preds.dtype == np.float64
preds = torch.tensor(preds, dtype=torch.float64)
mm = torch.pdist(preds, p=self.p_norm)
wrong = (mm < self.eps).sum().item()
metric = wrong / mm.shape[0]
return metric
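# Worked reading of the metric above (added note): for N graph embeddings,
# torch.pdist yields N*(N-1)/2 pairwise distances; every pair closer than
# self.eps counts as a failure (the two graphs could not be told apart), so the
# returned value is the fraction of indistinguishable pairs -- 0 is a perfect
# score.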
def _accuracy(self, input_dict, **kwargs):
y_true = input_dict['y_true']
y_pred = np.argmax(input_dict['y_pred'], axis=1)
assert y_true is not None
assert y_pred is not None
metric = met.accuracy_score(y_true, y_pred)
return metric
def _ap(self, input_dict, **kwargs):
y_true = input_dict['y_true']
y_pred = input_dict['y_pred']
assert y_true is not None
assert y_pred is not None
metric = met.average_precision_score(y_true, y_pred)
return metric
def _mae(self, input_dict, **kwargs):
y_true = input_dict['y_true']
y_pred = input_dict['y_pred']
assert y_true is not None
assert y_pred is not None
metric = met.mean_absolute_error(y_true, y_pred)
return metric
def _ogb(self, input_dict, **kwargs):
assert 'y_true' in input_dict
assert input_dict['y_true'] is not None
assert 'y_pred' in input_dict
assert input_dict['y_pred'] is not None
return self._ogb_evaluator.eval(input_dict)[self._key]
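# Minimal usage sketch (added; not part of the original module). The metric
# name and the input_dict keys follow the class above; the arrays are
# hypothetical:
#
#   import numpy as np
#   ev = Evaluator('accuracy')
#   acc = ev.eval({'y_true': np.array([0, 1, 1]),
#                  'y_pred': np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])})
#   # _accuracy argmaxes y_pred over axis 1, so this compares [0, 1, 0]
#   # against [0, 1, 1] and returns 2/3.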
cwn | cwn-main/exp/test_run_exp.py
from exp.parser import get_parser
from exp.run_exp import main
def get_args_for_dummym():
args = list()
args += ['--use_coboundaries', 'True']
args += ['--graph_norm', 'id']
args += ['--lr_scheduler', 'None']
args += ['--num_layers', '3']
args += ['--emb_dim', '8']
args += ['--batch_size', '3']
args += ['--epochs', '1']
args += ['--dataset', 'DUMMYM']
args += ['--max_ring_size', '5']
args += ['--exp_name', 'dummym_test']
args += ['--readout_dims', '0', '2']
return args
def test_run_exp_on_dummym():
parser = get_parser()
args = get_args_for_dummym()
parsed_args = parser.parse_args(args)
curves = main(parsed_args)
# On this dataset the splits all coincide; we assert
# that the final performance is the same on all of them.
assert curves['last_train'] == curves['last_val']
assert curves['last_train'] == curves['last_test']
cwn | cwn-main/exp/run_ring_exp.py
import os
import sys
import copy
import subprocess
import numpy as np
from exp.parser import get_parser
from exp.run_exp import main
RING_SIZES = list(range(10, 32, 2))
def exp_main(passed_args):
# Extract the commit sha so we can check the code that was used for each experiment
sha = subprocess.check_output(["git", "describe", "--always"]).strip().decode()
parser = get_parser()
args = parser.parse_args(copy.copy(passed_args))
assert args.max_ring_size is None
# run each experiment separately and gather results
train_results = {fold: [] for fold in range(len(RING_SIZES))}
val_results = {fold: [] for fold in range(len(RING_SIZES))}
for seed in range(args.start_seed, args.stop_seed + 1):
# We use the ring_size as a "fold" for the dataset.
# This is just a hack to save the results properly using our usual infrastructure.
for fold in range(len(RING_SIZES)):
max_ring_size = RING_SIZES[fold]
num_layers = 3 if args.model == 'ring_sparse_cin' else max_ring_size // 2
current_args = (copy.copy(passed_args) + ['--fold', str(fold)] +
['--max_ring_size', str(max_ring_size)] +
['--num_layers', str(num_layers)] +
['--seed', str(seed)])
parsed_args = parser.parse_args(current_args)
# Check that the default parameter value (5) was overwritten
assert parsed_args.num_layers == num_layers
curves = main(parsed_args)
# Extract results
train_results[fold].append(curves['last_train'])
val_results[fold].append(curves['last_val'])
msg = (
f"========= Final result ==========\n"
f'Dataset: {args.dataset}\n'
f'SHA: {sha}\n'
f'----------- Train ----------\n')
for fold, results in train_results.items():
mean = np.mean(results)
std = np.std(results)
msg += f'Ring size: {RING_SIZES[fold]} {mean}+-{std}\n'
msg += f'----------- Test ----------\n'
for fold, results in val_results.items():
mean = np.mean(results)
std = np.std(results)
msg += f'Ring size: {RING_SIZES[fold]} {mean}+-{std}\n'
print(msg)
# additionally write msg and configuration on file
msg += str(args)
filename = os.path.join(args.result_folder, f'{args.dataset}-{args.exp_name}/result.txt')
print('Writing results at: {}'.format(filename))
with open(filename, 'w') as handle:
handle.write(msg)
if __name__ == "__main__":
passed_args = sys.argv[1:]
assert '--fold' not in passed_args
assert '--seed' not in passed_args
exp_main(passed_args)
cwn | cwn-main/exp/run_tu_exp.py
import sys
import os
import copy
import time
import numpy as np
from exp.parser import get_parser
from exp.run_exp import main
# python3 -m exp.run_tu_exp --dataset IMDBBINARY --model cin --drop_rate 0.0 --lr 0.0001 --max_dim 2 --emb_dim 32 --dump_curves --epochs 30 --num_layers 1 --lr_scheduler StepLR --lr_scheduler_decay_steps 5
__num_folds__ = 10
def print_summary(summary):
msg = ''
for k, v in summary.items():
msg += f'Fold {k:1d}: {v:.3f}\n'
print(msg)
def exp_main(passed_args):
parser = get_parser()
args = parser.parse_args(copy.copy(passed_args))
# run each experiment separately and gather results
results = list()
for i in range(__num_folds__):
current_args = copy.copy(passed_args) + ['--fold', str(i)]
parsed_args = parser.parse_args(current_args)
curves = main(parsed_args)
results.append(curves)
# aggregate results
val_curves = np.asarray([curves['val'] for curves in results])
avg_val_curve = val_curves.mean(axis=0)
best_index = np.argmax(avg_val_curve)
mean_perf = avg_val_curve[best_index]
std_perf = val_curves.std(axis=0)[best_index]
print(" ===== Mean performance per fold ======")
perf_per_fold = val_curves.mean(1)
perf_per_fold = {i: perf_per_fold[i] for i in range(len(perf_per_fold))}
print_summary(perf_per_fold)
print(" ===== Max performance per fold ======")
perf_per_fold = np.max(val_curves, axis=1)
perf_per_fold = {i: perf_per_fold[i] for i in range(len(perf_per_fold))}
print_summary(perf_per_fold)
print(" ===== Median performance per fold ======")
perf_per_fold = np.median(val_curves, axis=1)
perf_per_fold = {i: perf_per_fold[i] for i in range(len(perf_per_fold))}
print_summary(perf_per_fold)
print(" ===== Performance on best epoch ======")
perf_per_fold = val_curves[:, best_index]
perf_per_fold = {i: perf_per_fold[i] for i in range(len(perf_per_fold))}
print_summary(perf_per_fold)
print(" ===== Final result ======")
msg = (
f'Dataset: {args.dataset}\n'
f'Accuracy: {mean_perf} ± {std_perf}\n'
f'Best epoch: {best_index}\n'
'-------------------------------\n')
print(msg)
# additionally write msg and configuration on file
msg += str(args)
filename = os.path.join(args.result_folder, f'{args.dataset}-{args.exp_name}/result.txt')
print('Writing results at: {}'.format(filename))
with open(filename, 'w') as handle:
handle.write(msg)
if __name__ == "__main__":
# standard args
passed_args = sys.argv[1:]
assert 'fold' not in passed_args
exp_main(passed_args)
cwn | cwn-main/exp/__init__.py
|
cwn | cwn-main/exp/plot_sr_cwn_results.py
import os
import sys
import matplotlib
matplotlib.use('Agg')
import numpy as np
import seaborn as sns
sns.set_style("whitegrid", {'legend.frameon': False})
from matplotlib import cm
from matplotlib import pyplot as plt
from definitions import ROOT_DIR
def run(exps, codenames, plot_name):
# Meta
family_names = [
'SR(16,6,2,2)',
'SR(25,12,5,6)',
'SR(26,10,3,4)',
'SR(28,12,6,4)',
'SR(29,14,6,7)',
'SR(35,16,6,8)',
'SR(35,18,9,9)',
'SR(36,14,4,6)',
'SR(40,12,2,4)']
# Retrieve results
base_path = os.path.join(ROOT_DIR, 'exp', 'results')
results = list()
for e, exp_path in enumerate(exps):
path = os.path.join(base_path, exp_path, 'result.txt')
results.append(dict())
with open(path, 'r') as handle:
found = False
f = 0
for line in handle:
if not found:
if line.strip().startswith('Mean'):
mean = float(line.strip().split(':')[1].strip())
found = True
else:
continue
else:
std = float(line.strip().split(':')[1].strip())
results[-1][family_names[f]] = (mean, std)
f += 1
found = False
assert f == len(family_names)
# Set colours
colors = cm.get_cmap('tab20c').colors[1:4] + cm.get_cmap('tab20c').colors[5:9]
matplotlib.rc('axes', edgecolor='black', lw=0.25)
a = np.asarray([83, 115, 171])/255.0 +0.0
b = np.asarray([209, 135, 92])/255.0 +0.0
colors = [a, a +0.13, a +0.2, b, b +0.065, b +0.135]
# Set plotting
num_families = len(family_names)
num_experiments = len(results)
sep = 1.75
width = 0.7
disp = num_experiments * width + sep
xa = np.asarray([i*disp for i in range(num_families)])
xs = [xa + i*width for i in range(num_experiments//2)] + [xa + i*width + sep*0.25 for i in range(num_experiments//2, num_experiments)]
plt.rcParams['ytick.right'] = plt.rcParams['ytick.labelright'] = True
plt.rcParams['ytick.left'] = plt.rcParams['ytick.labelleft'] = False
print(sns.axes_style())
matplotlib.rc('axes', edgecolor='#c4c4c4', linewidth=0.9)
# Plot
plt.figure(dpi=300, figsize=(9,6.6))
plt.grid(axis='x', alpha=0.0)
for r, res in enumerate(results):
x = xs[r]
y = [10+res[family][0] for family in sorted(res)]
yerr = [res[family][1] for family in sorted(res)]
plt.bar(x, y, yerr=yerr, bottom=-10, color=colors[r], width=width,
label=codenames[r], ecolor='grey', error_kw={'lw': 0.75, 'capsize':0.7},
edgecolor='white')
# hatch=('//' if r<3 else '\\\\'))
plt.axhline(y=1.0, color='indianred', lw=1.5, label='3WL')
plt.ylim([-0.000005, 2])
plt.yscale(matplotlib.scale.SymmetricalLogScale(axis='y', linthresh=0.00001))
plt.xticks(xa+3*width, family_names, fontsize=12, rotation=315, ha='left')
plt.yticks([0.0, 0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0], fontsize=12)
handles, labels = plt.gca().get_legend_handles_labels()
order = [1, 4, 2, 5, 3, 6] + [0]
plt.legend([handles[idx] for idx in order],[labels[idx] for idx in order], fontsize=10, loc='upper center', ncol=4, bbox_to_anchor=(0.5, 1.15))
plt.xlabel('Family', fontsize=15)
plt.ylabel('Failure rate', fontsize=15, labelpad=-580, rotation=270)
plt.tight_layout()
plt.savefig(f'./sr_exp_{plot_name}.pdf', bbox_inches='tight', pad_inches=0.1)
plt.close()
if __name__ == '__main__':
# Standard args
passed_args = sys.argv[1:]
codenames = list()
exps = list()
plot_name = passed_args[0]
for a, arg in enumerate(passed_args[1:]):
if a % 2 == 0:
exps.append(arg)
else:
codenames.append(arg)
assert len(codenames) == len(exps) == 6
run(exps, codenames, plot_name)
cwn | cwn-main/exp/evaluate_sr_cwn_emb_mag.py
import os
import sys
import torch
import numpy as np
import random
from definitions import ROOT_DIR
from exp.prepare_sr_tests import prepare
from mp.models import MessagePassingAgnostic, SparseCIN
from data.data_loading import DataLoader, load_dataset
__families__ = [
'sr16622',
'sr251256',
'sr261034',
'sr281264',
'sr291467',
'sr351668',
'sr351899',
'sr361446',
'sr401224'
]
def compute_embeddings(family, baseline, seed):
# Set the seed for everything
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
# Perform the check in double precision
torch.set_default_dtype(torch.float64)
# Please set the parameters below to the ones used in SR experiments.
hidden = 16
num_layers = 3
max_ring_size = 6
use_coboundaries = True
nonlinearity = 'elu'
graph_norm = 'id'
readout = 'sum'
final_readout = 'sum'
readout_dims = (0,1,2)
init = 'sum'
jobs = 64
device = torch.device("cuda:" + str(0)) if torch.cuda.is_available() else torch.device("cpu")
# Build and dump dataset if needed
prepare(family, jobs, max_ring_size, False, init, None)
# Load reference dataset
complexes = load_dataset(family, max_dim=2, max_ring_size=max_ring_size, init_method=init)
data_loader = DataLoader(complexes, batch_size=8, shuffle=False, num_workers=16, max_dim=2)
# Instantiate model
if not baseline:
model = SparseCIN(num_input_features=1, num_classes=complexes.num_classes, num_layers=num_layers, hidden=hidden,
use_coboundaries=use_coboundaries, nonlinearity=nonlinearity, graph_norm=graph_norm,
readout=readout, final_readout=final_readout, readout_dims=readout_dims)
else:
hidden = 256
model = MessagePassingAgnostic(num_input_features=1, num_classes=complexes.num_classes, hidden=hidden,
nonlinearity=nonlinearity, readout=readout)
model = model.to(device)
model.eval()
# Compute complex embeddings
with torch.no_grad():
embeddings = list()
for batch in data_loader:
batch.nodes.x = batch.nodes.x.double()
batch.edges.x = batch.edges.x.double()
batch.two_cells.x = batch.two_cells.x.double()
out = model.forward(batch.to(device))
embeddings.append(out)
embeddings = torch.cat(embeddings, 0) # n x d
assert embeddings.size(1) == complexes.num_classes
return embeddings
if __name__ == "__main__":
# Standard args
passed_args = sys.argv[1:]
baseline = (passed_args[0].lower() == 'true')
max_ring_size = int(passed_args[1])
assert max_ring_size > 3
# Execute
msg = f'Model: {"CIN" if not baseline else "MLP-sum"}({max_ring_size})'
print(msg)
for family in __families__:
text = f'\n======================== {family}'
msg += text+'\n'
print(text)
for seed in range(5):
embeddings = compute_embeddings(family, baseline, seed)
text = f'seed {seed}: {torch.max(torch.abs(embeddings)):.2f}'
msg += text+'\n'
print(text)
path = os.path.join(ROOT_DIR, 'exp', 'results')
if baseline:
path = os.path.join(path, f'sr-base-{max_ring_size}.txt')
else:
path = os.path.join(path, f'sr-{max_ring_size}.txt')
with open(path, 'w') as handle:
handle.write(msg)
cwn | cwn-main/exp/run_tu_tuning.py
import itertools
import os
import copy
import yaml
import argparse
from definitions import ROOT_DIR
from exp.parser import get_parser
from exp.run_tu_exp import exp_main
__max_devices__ = 8
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='CWN tuning.')
parser.add_argument('--conf', type=str, help='path to yaml configuration')
parser.add_argument('--code', type=str, help='tuning name')
parser.add_argument('--idx', type=int, help='selection index')
t_args = parser.parse_args()
# parse grid from yaml
with open(t_args.conf, 'r') as handle:
conf = yaml.safe_load(handle)
dataset = conf['dataset']
hyper_list = list()
hyper_values = list()
for key in conf:
if key == 'dataset':
continue
hyper_list.append(key)
hyper_values.append(conf[key])
grid = itertools.product(*hyper_values)
exp_queue = list()
for h, hypers in enumerate(grid):
if h % __max_devices__ == (t_args.idx % __max_devices__):
exp_queue.append((h, hypers))
# form args
base_args = [
'--device', str(t_args.idx),
'--task_type', 'classification',
'--eval_metric', 'accuracy',
'--dataset', dataset,
'--result_folder', os.path.join(ROOT_DIR, 'exp', 'results', '{}_tuning_{}'.format(dataset, t_args.code))]
for exp in exp_queue:
args = copy.copy(base_args)
addendum = ['--exp_name', str(exp[0])]
hypers = exp[1]
for name, value in zip(hyper_list, hypers):
addendum.append('--{}'.format(name))
addendum.append('{}'.format(value))
args += addendum
exp_main(args)
cwn | cwn-main/exp/test_sr.py
import torch
import numpy as np
import random
import pytest
from data.data_loading import DataLoader, load_dataset
from exp.prepare_sr_tests import prepare
from mp.models import MessagePassingAgnostic, SparseCIN
def _get_cwn_sr_embeddings(family, seed, baseline=False):
# Set the seed for everything
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
# Please set the parameters below to the ones used in SR experiments.
# If so, if tests pass then the experiments are deemed sound.
hidden = 16
num_layers = 3
max_ring_size = 6
use_coboundaries = True
nonlinearity = 'elu'
graph_norm = 'id'
readout = 'sum'
final_readout = 'sum'
readout_dims = (0,1,2)
init = 'sum'
jobs = 64
prepare_seed = 43
device = torch.device("cuda:" + str(0)) if torch.cuda.is_available() else torch.device("cpu")
# Build and dump dataset if needed
prepare(family, jobs, max_ring_size, True, init, prepare_seed)
# Load reference dataset
complexes = load_dataset(family, max_dim=2, max_ring_size=max_ring_size, init_method=init)
permuted_complexes = load_dataset(f'{family}p{prepare_seed}', max_dim=2, max_ring_size=max_ring_size, init_method=init)
# Instantiate model
if not baseline:
model = SparseCIN(num_input_features=1, num_classes=complexes.num_classes, num_layers=num_layers, hidden=hidden,
use_coboundaries=use_coboundaries, nonlinearity=nonlinearity, graph_norm=graph_norm,
readout=readout, final_readout=final_readout, readout_dims=readout_dims)
else:
hidden = 256
model = MessagePassingAgnostic(num_input_features=1, num_classes=complexes.num_classes, hidden=hidden,
nonlinearity=nonlinearity, readout=readout)
model = model.to(device)
model.eval()
# Compute reference complex embeddings
data_loader = DataLoader(complexes, batch_size=8, shuffle=False, num_workers=16, max_dim=2)
data_loader_perm = DataLoader(permuted_complexes, batch_size=8, shuffle=False, num_workers=16, max_dim=2)
with torch.no_grad():
embeddings = list()
perm_embeddings = list()
for batch in data_loader:
batch.nodes.x = batch.nodes.x.double()
batch.edges.x = batch.edges.x.double()
batch.two_cells.x = batch.two_cells.x.double()
out = model.forward(batch.to(device))
embeddings.append(out)
for batch in data_loader_perm:
batch.nodes.x = batch.nodes.x.double()
batch.edges.x = batch.edges.x.double()
batch.two_cells.x = batch.two_cells.x.double()
out = model.forward(batch.to(device))
perm_embeddings.append(out)
embeddings = torch.cat(embeddings, 0) # n x d
perm_embeddings = torch.cat(perm_embeddings, 0) # n x d
assert embeddings.size(0) == perm_embeddings.size(0)
assert embeddings.size(1) == perm_embeddings.size(1) == complexes.num_classes
return embeddings, perm_embeddings
def _validate_self_iso_on_sr(embeddings, perm_embeddings):
eps = 0.01
for i in range(embeddings.size(0)):
preds = torch.stack((embeddings[i], perm_embeddings[i]), 0)
assert preds.size(0) == 2
assert preds.size(1) == embeddings.size(1)
dist = torch.pdist(preds, p=2).item()
assert dist <= eps
def _validate_magnitude_embeddings(embeddings):
# At ~5e8, the fp64 granularity is still (2**29 - 2**28) / (2**52) ≈ 0.000000059604645
# The fact that we work in such a (safe) range can also be verified by running the following:
# a = torch.DoubleTensor([2.5e8])
# d = torch.DoubleTensor([5.0e8])
# b = torch.nextafter(a, d)
# print(b - a)
# >>> tensor([2.9802e-08], dtype=torch.float64)
thresh = torch.DoubleTensor([5.0*1e8])
apex = torch.max(torch.abs(embeddings)).cpu()
print(apex)
assert apex.dtype == torch.float64
assert torch.all(apex < thresh)
@pytest.mark.slow
@pytest.mark.parametrize("family", ['sr16622', 'sr251256', 'sr261034', 'sr281264', 'sr291467', 'sr351668', 'sr351899', 'sr361446', 'sr401224'])
def test_sparse_cin0_self_isomorphism(family):
# Perform the check in double precision
torch.set_default_dtype(torch.float64)
for seed in range(5):
embeddings, perm_embeddings = _get_cwn_sr_embeddings(family, seed)
_validate_magnitude_embeddings(embeddings)
_validate_magnitude_embeddings(perm_embeddings)
_validate_self_iso_on_sr(embeddings, perm_embeddings)
# Revert back to float32 for other tests
torch.set_default_dtype(torch.float32)
@pytest.mark.slow
@pytest.mark.parametrize("family", ['sr16622', 'sr251256', 'sr261034', 'sr281264', 'sr291467', 'sr351668', 'sr351899', 'sr361446', 'sr401224'])
def test_cwn_baseline_self_isomorphism(family):
# Perform the check in double precision
torch.set_default_dtype(torch.float64)
for seed in range(5):
embeddings, perm_embeddings = _get_cwn_sr_embeddings(family, seed, baseline=True)
_validate_magnitude_embeddings(embeddings)
_validate_magnitude_embeddings(perm_embeddings)
_validate_self_iso_on_sr(embeddings, perm_embeddings)
# Revert back to float32 for other tests
torch.set_default_dtype(torch.float32)
ECG-Heartbeat-Classification-seq2seq-model | ECG-Heartbeat-Classification-seq2seq-model-master/seq_seq_annot_aami.py
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as spio
from sklearn.preprocessing import MinMaxScaler
import random
import time
import os
from datetime import datetime
from sklearn.metrics import confusion_matrix
import tensorflow as tf
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
import argparse
random.seed(654)
def read_mitbih(filename, max_time=100, classes= ['F', 'N', 'S', 'V', 'Q'], max_nlabel=100):
def normalize(data):
data = np.nan_to_num(data) # removing NaNs and Infs
data = data - np.mean(data)
data = data / np.std(data)
return data
# read data
data = []
samples = spio.loadmat(filename + ".mat")
samples = samples['s2s_mitbih']
values = samples[0]['seg_values']
labels = samples[0]['seg_labels']
num_annots = sum([item.shape[0] for item in values])
n_seqs = num_annots // max_time  # integer number of max_time-long sequences
# add all segments(beats) together
l_data = 0
for i, item in enumerate(values):
l = item.shape[0]
for itm in item:
if l_data == n_seqs * max_time:
break
data.append(itm[0])
l_data = l_data + 1
# add all labels together
l_lables = 0
t_lables = []
for i, item in enumerate(labels):
if len(t_lables)==n_seqs*max_time:
break
item= item[0]
for lebel in item:
if l_lables == n_seqs * max_time:
break
t_lables.append(str(lebel))
l_lables = l_lables + 1
del values
data = np.asarray(data)
shape_v = data.shape
data = np.reshape(data, [shape_v[0], -1])
t_lables = np.array(t_lables)
_data = np.asarray([],dtype=np.float64).reshape(0,shape_v[1])
_labels = np.asarray([],dtype=np.dtype('|S1')).reshape(0,)
for cl in classes:
_label = np.where(t_lables == cl)
permute = np.random.permutation(len(_label[0]))
_label = _label[0][permute[:max_nlabel]]
# _label = _label[0][:max_nlabel]
# permute = np.random.permutation(len(_label))
# _label = _label[permute]
_data = np.concatenate((_data, data[_label]))
_labels = np.concatenate((_labels, t_lables[_label]))
    data = _data[:(len(_data) // max_time) * max_time, :]
    _labels = _labels[:(len(_data) // max_time) * max_time]
# data = _data
# split data into sublist of 100=se_len values
data = [data[i:i + max_time] for i in range(0, len(data), max_time)]
labels = [_labels[i:i + max_time] for i in range(0, len(_labels), max_time)]
# shuffle
permute = np.random.permutation(len(labels))
data = np.asarray(data)
labels = np.asarray(labels)
data= data[permute]
labels = labels[permute]
print('Records processed!')
return data, labels
def evaluate_metrics(confusion_matrix):
# https://stackoverflow.com/questions/31324218/scikit-learn-how-to-obtain-true-positive-true-negative-false-positive-and-fal
FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)
FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
TP = np.diag(confusion_matrix)
TN = confusion_matrix.sum() - (FP + FN + TP)
# Sensitivity, hit rate, recall, or true positive rate
TPR = TP / (TP + FN)
# Specificity or true negative rate
TNR = TN / (TN + FP)
# Precision or positive predictive value
PPV = TP / (TP + FP)
# Negative predictive value
NPV = TN / (TN + FN)
# Fall out or false positive rate
FPR = FP / (FP + TN)
# False negative rate
FNR = FN / (TP + FN)
# False discovery rate
FDR = FP / (TP + FP)
# Overall accuracy
ACC = (TP + TN) / (TP + FP + FN + TN)
# ACC_micro = (sum(TP) + sum(TN)) / (sum(TP) + sum(FP) + sum(FN) + sum(TN))
ACC_macro = np.mean(ACC) # to get a sense of effectiveness of our method on the small classes we computed this average (macro-average)
return ACC_macro, ACC, TPR, TNR, PPV
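# Illustrative example (hypothetical numbers): for the 2-class confusion matrix
#   [[8., 2.],
#    [1., 9.]]
# evaluate_metrics yields TP=[8, 9], FP=[1, 2], FN=[2, 1], TN=[9, 8], so
# sensitivity (TPR) = [0.8, 0.9], specificity (TNR) = [0.9, 0.8],
# PPV ≈ [0.889, 0.818], per-class ACC = [0.85, 0.85] and ACC_macro = 0.85.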
def batch_data(x, y, batch_size):
shuffle = np.random.permutation(len(x))
start = 0
# from IPython.core.debugger import Tracer; Tracer()()
x = x[shuffle]
y = y[shuffle]
while start + batch_size <= len(x):
yield x[start:start + batch_size], y[start:start + batch_size]
start += batch_size
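# Illustrative usage (hypothetical shapes): with x of shape (205, 10, 280) and batch_size=20,
# batch_data shuffles the sequences once, yields 10 minibatches of 20 (x, y) pairs, and
# silently drops the trailing 5 sequences that do not fill a complete batch.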
def build_network(inputs, dec_inputs,char2numY,n_channels=10,input_depth=280,num_units=128,max_time=10,bidirectional=False):
    _inputs = tf.reshape(inputs, [-1, n_channels, input_depth // n_channels])
# _inputs = tf.reshape(inputs, [-1,input_depth,n_channels])
# #(batch*max_time, 280, 1) --> (N, 280, 18)
conv1 = tf.layers.conv1d(inputs=_inputs, filters=32, kernel_size=2, strides=1,
padding='same', activation=tf.nn.relu)
max_pool_1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2, strides=2, padding='same')
conv2 = tf.layers.conv1d(inputs=max_pool_1, filters=64, kernel_size=2, strides=1,
padding='same', activation=tf.nn.relu)
max_pool_2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=2, strides=2, padding='same')
conv3 = tf.layers.conv1d(inputs=max_pool_2, filters=128, kernel_size=2, strides=1,
padding='same', activation=tf.nn.relu)
shape = conv3.get_shape().as_list()
data_input_embed = tf.reshape(conv3, (-1, max_time, shape[1] * shape[2]))
# timesteps = max_time
#
# lstm_in = tf.unstack(data_input_embed, timesteps, 1)
# lstm_size = 128
# # Get lstm cell output
# # Add LSTM layers
# lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# data_input_embed, states = tf.contrib.rnn.static_rnn(lstm_cell, lstm_in, dtype=tf.float32)
# data_input_embed = tf.stack(data_input_embed, 1)
# shape = data_input_embed.get_shape().as_list()
embed_size = 10 # 128 lstm_size # shape[1]*shape[2]
# Embedding layers
output_embedding = tf.Variable(tf.random_uniform((len(char2numY), embed_size), -1.0, 1.0), name='dec_embedding')
data_output_embed = tf.nn.embedding_lookup(output_embedding, dec_inputs)
with tf.variable_scope("encoding") as encoding_scope:
if not bidirectional:
# Regular approach with LSTM units
lstm_enc = tf.contrib.rnn.LSTMCell(num_units)
_, last_state = tf.nn.dynamic_rnn(lstm_enc, inputs=data_input_embed, dtype=tf.float32)
else:
# Using a bidirectional LSTM architecture instead
enc_fw_cell = tf.contrib.rnn.LSTMCell(num_units)
enc_bw_cell = tf.contrib.rnn.LSTMCell(num_units)
((enc_fw_out, enc_bw_out), (enc_fw_final, enc_bw_final)) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=enc_fw_cell,
cell_bw=enc_bw_cell,
inputs=data_input_embed,
dtype=tf.float32)
enc_fin_c = tf.concat((enc_fw_final.c, enc_bw_final.c), 1)
enc_fin_h = tf.concat((enc_fw_final.h, enc_bw_final.h), 1)
last_state = tf.contrib.rnn.LSTMStateTuple(c=enc_fin_c, h=enc_fin_h)
with tf.variable_scope("decoding") as decoding_scope:
if not bidirectional:
lstm_dec = tf.contrib.rnn.LSTMCell(num_units)
else:
lstm_dec = tf.contrib.rnn.LSTMCell(2 * num_units)
dec_outputs, _ = tf.nn.dynamic_rnn(lstm_dec, inputs=data_output_embed, initial_state=last_state)
logits = tf.layers.dense(dec_outputs, units=len(char2numY), use_bias=True)
return logits
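# Rough shape trace (assuming the defaults input_depth=280, n_channels=10 and 'same' padding):
#   inputs (batch, max_time, 280) -> reshape -> (batch*max_time, 10, 28)
#   conv1/pool1 -> (batch*max_time, 5, 32); conv2/pool2 -> (batch*max_time, 3, 64)
#   conv3 -> (batch*max_time, 3, 128) -> reshape -> (batch, max_time, 384) fed to the encoder;
#   the decoder LSTM runs over the embedded target tokens starting from the encoder's final
#   state, and a dense layer maps every step to len(char2numY) logits.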
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--max_time', type=int, default=10)
parser.add_argument('--test_steps', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--data_dir', type=str, default='data/s2s_mitbih_aami')
parser.add_argument('--bidirectional', type=str2bool, default=str2bool('False'))
# parser.add_argument('--lstm_layers', type=int, default=2)
parser.add_argument('--num_units', type=int, default=128)
parser.add_argument('--n_oversampling', type=int, default=10000)
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints-seq2seq')
parser.add_argument('--ckpt_name', type=str, default='seq2seq_mitbih.ckpt')
    parser.add_argument('--classes', nargs='+', type=str,
                        default=['F', 'N', 'S', 'V'])
args = parser.parse_args()
run_program(args)
def run_program(args):
print(args)
max_time = args.max_time # 5 3 second best 10# 40 # 100
epochs = args.epochs # 300
batch_size = args.batch_size # 10
num_units = args.num_units
bidirectional = args.bidirectional
# lstm_layers = args.lstm_layers
n_oversampling = args.n_oversampling
checkpoint_dir = args.checkpoint_dir
ckpt_name = args.ckpt_name
test_steps = args.test_steps
classes= args.classes
filename = args.data_dir
X, Y = read_mitbih(filename,max_time,classes=classes,max_nlabel=100000) #11000
print ("# of sequences: ", len(X))
input_depth = X.shape[2]
n_channels = 10
classes = np.unique(Y)
char2numY = dict(zip(classes, range(len(classes))))
n_classes = len(classes)
print ('Classes: ', classes)
for cl in classes:
ind = np.where(classes == cl)[0][0]
print (cl, len(np.where(Y.flatten()==cl)[0]))
# char2numX['<PAD>'] = len(char2numX)
# num2charX = dict(zip(char2numX.values(), char2numX.keys()))
# max_len = max([len(date) for date in x])
#
# x = [[char2numX['<PAD>']]*(max_len - len(date)) +[char2numX[x_] for x_ in date] for date in x]
# print(''.join([num2charX[x_] for x_ in x[4]]))
# x = np.array(x)
char2numY['<GO>'] = len(char2numY)
num2charY = dict(zip(char2numY.values(), char2numY.keys()))
Y = [[char2numY['<GO>']] + [char2numY[y_] for y_ in date] for date in Y]
Y = np.array(Y)
x_seq_length = len(X[0])
y_seq_length = len(Y[0])- 1
# Placeholders
inputs = tf.placeholder(tf.float32, [None, max_time, input_depth], name = 'inputs')
targets = tf.placeholder(tf.int32, (None, None), 'targets')
dec_inputs = tf.placeholder(tf.int32, (None, None), 'output')
# logits = build_network(inputs,dec_inputs=dec_inputs)
logits = build_network(inputs, dec_inputs, char2numY, n_channels=n_channels, input_depth=input_depth, num_units=num_units, max_time=max_time,
bidirectional=bidirectional)
# decoder_prediction = tf.argmax(logits, 2)
# confusion = tf.confusion_matrix(labels=tf.argmax(targets, 1), predictions=tf.argmax(logits, 2), num_classes=len(char2numY) - 1)# it is wrong
# mean_accuracy,update_mean_accuracy = tf.metrics.mean_per_class_accuracy(labels=targets, predictions=decoder_prediction, num_classes=len(char2numY) - 1)
with tf.name_scope("optimization"):
# Loss function
vars = tf.trainable_variables()
beta = 0.001
lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars
if 'bias' not in v.name]) * beta
loss = tf.contrib.seq2seq.sequence_loss(logits, targets, tf.ones([batch_size, y_seq_length]))
# Optimizer
loss = tf.reduce_mean(loss + lossL2)
optimizer = tf.train.RMSPropOptimizer(1e-3).minimize(loss)
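    # Note: tf.contrib.seq2seq.sequence_loss expects logits of shape (batch, y_seq_length,
    # n_symbols) and integer targets of shape (batch, y_seq_length); the tf.ones(...) weights
    # give every decoding step equal importance, and the L2 penalty above skips bias terms.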
# split the dataset into the training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
# over-sampling: SMOTE
X_train = np.reshape(X_train,[X_train.shape[0]*X_train.shape[1],-1])
y_train= y_train[:,1:].flatten()
nums = []
for cl in classes:
ind = np.where(classes == cl)[0][0]
nums.append(len(np.where(y_train.flatten()==ind)[0]))
# ratio={0:nums[3],1:nums[1],2:nums[3],3:nums[3]} # the best with 11000 for N
ratio={0:n_oversampling,1:nums[1],2:n_oversampling,3:n_oversampling}
sm = SMOTE(random_state=12,ratio=ratio)
X_train, y_train = sm.fit_sample(X_train, y_train)
    X_train = X_train[:(X_train.shape[0] // max_time) * max_time, :]
    y_train = y_train[:(X_train.shape[0] // max_time) * max_time]
X_train = np.reshape(X_train,[-1,X_test.shape[1],X_test.shape[2]])
y_train = np.reshape(y_train,[-1,y_test.shape[1]-1,])
y_train= [[char2numY['<GO>']] + [y_ for y_ in date] for date in y_train]
y_train = np.array(y_train)
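    # Note on the over-sampling above (older imbalanced-learn API): a dict passed as `ratio`
    # requests a target sample count per class index, so {0: n_oversampling, 1: nums[1], ...}
    # leaves the majority class N at its original count and synthesizes the minority beats up
    # to n_oversampling; newer imbalanced-learn releases expose the same idea as
    # `sampling_strategy`.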
print ('Classes in the training set: ', classes)
for cl in classes:
ind = np.where(classes == cl)[0][0]
print (cl, len(np.where(y_train.flatten()==ind)[0]))
print ("------------------y_train samples--------------------")
for ii in range(2):
print(''.join([num2charY[y_] for y_ in list(y_train[ii+5])]))
print ("------------------y_test samples--------------------")
for ii in range(2):
print(''.join([num2charY[y_] for y_ in list(y_test[ii+5])]))
def test_model():
# source_batch, target_batch = next(batch_data(X_test, y_test, batch_size))
acc_track = []
sum_test_conf = []
for batch_i, (source_batch, target_batch) in enumerate(batch_data(X_test, y_test, batch_size)):
dec_input = np.zeros((len(source_batch), 1)) + char2numY['<GO>']
for i in range(y_seq_length):
batch_logits = sess.run(logits,
feed_dict={inputs: source_batch, dec_inputs: dec_input})
prediction = batch_logits[:, -1].argmax(axis=-1)
dec_input = np.hstack([dec_input, prediction[:, None]])
# acc_track.append(np.mean(dec_input == target_batch))
acc_track.append(dec_input[:, 1:] == target_batch[:, 1:])
y_true= target_batch[:, 1:].flatten()
y_pred = dec_input[:, 1:].flatten()
sum_test_conf.append(confusion_matrix(y_true, y_pred,labels=range(len(char2numY)-1)))
sum_test_conf= np.mean(np.array(sum_test_conf, dtype=np.float32), axis=0)
# print('Accuracy on test set is: {:>6.4f}'.format(np.mean(acc_track)))
# mean_p_class, accuracy_classes = sess.run([mean_accuracy, update_mean_accuracy],
# feed_dict={inputs: source_batch,
# dec_inputs: dec_input[:, :-1],
# targets: target_batch[:, 1:]})
# print (mean_p_class)
# print (accuracy_classes)
acc_avg, acc, sensitivity, specificity, PPV = evaluate_metrics(sum_test_conf)
print('Average Accuracy is: {:>6.4f} on test set'.format(acc_avg))
for index_ in range(n_classes):
print("\t{} rhythm -> Sensitivity: {:1.4f}, Specificity : {:1.4f}, Precision (PPV) : {:1.4f}, Accuracy : {:1.4f}".format(classes[index_],
sensitivity[
index_],
specificity[
index_],PPV[index_],
acc[index_]))
print("\t Average -> Sensitivity: {:1.4f}, Specificity : {:1.4f}, Precision (PPV) : {:1.4f}, Accuracy : {:1.4f}".format(np.mean(sensitivity),np.mean(specificity),np.mean(PPV),np.mean(acc)))
return acc_avg, acc, sensitivity, specificity, PPV
loss_track = []
    def count_parameters():
        print ('# of Params: ', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
    count_parameters()
if (os.path.exists(checkpoint_dir) == False):
os.mkdir(checkpoint_dir)
# train the graph
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver()
print(str(datetime.now()))
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
pre_acc_avg = 0.0
if ckpt and ckpt.model_checkpoint_path:
# # Restore
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
# saver.restore(session, os.path.join(checkpoint_dir, ckpt_name))
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
# or 'load meta graph' and restore weights
# saver = tf.train.import_meta_graph(ckpt_name+".meta")
# saver.restore(session,tf.train.latest_checkpoint(checkpoint_dir))
test_model()
else:
for epoch_i in range(epochs):
start_time = time.time()
train_acc = []
for batch_i, (source_batch, target_batch) in enumerate(batch_data(X_train, y_train, batch_size)):
_, batch_loss, batch_logits = sess.run([optimizer, loss, logits],
feed_dict = {inputs: source_batch,
dec_inputs: target_batch[:, :-1],
targets: target_batch[:, 1:]})
loss_track.append(batch_loss)
train_acc.append(batch_logits.argmax(axis=-1) == target_batch[:,1:])
# mean_p_class,accuracy_classes = sess.run([mean_accuracy,update_mean_accuracy],
# feed_dict={inputs: source_batch,
# dec_inputs: target_batch[:, :-1],
# targets: target_batch[:, 1:]})
# accuracy = np.mean(batch_logits.argmax(axis=-1) == target_batch[:,1:])
accuracy = np.mean(train_acc)
print('Epoch {:3} Loss: {:>6.3f} Accuracy: {:>6.4f} Epoch duration: {:>6.3f}s'.format(epoch_i, batch_loss,
accuracy, time.time() - start_time))
if epoch_i%test_steps==0:
acc_avg, acc, sensitivity, specificity, PPV= test_model()
print('loss {:.4f} after {} epochs (batch_size={})'.format(loss_track[-1], epoch_i + 1, batch_size))
save_path = os.path.join(checkpoint_dir, ckpt_name)
saver.save(sess, save_path)
print("Model saved in path: %s" % save_path)
# if np.nan_to_num(acc_avg) > pre_acc_avg: # save the better model based on the f1 score
# print('loss {:.4f} after {} epochs (batch_size={})'.format(loss_track[-1], epoch_i + 1, batch_size))
# pre_acc_avg = acc_avg
# save_path =os.path.join(checkpoint_dir, ckpt_name)
# saver.save(sess, save_path)
# print("The best model (till now) saved in path: %s" % save_path)
plt.plot(loss_track)
plt.show()
print(str(datetime.now()))
# test_model()
if __name__ == '__main__':
main()
| 19,548 | 44.043779 | 197 | py |
ECG-Heartbeat-Classification-seq2seq-model | ECG-Heartbeat-Classification-seq2seq-model-master/seq_seq_annot_DS1DS2.py | import numpy as np
import matplotlib.pyplot as plt
import scipy.io as spio
from sklearn.preprocessing import MinMaxScaler
import random
import time
import os
from datetime import datetime
from sklearn.metrics import confusion_matrix
import tensorflow as tf
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
import argparse
random.seed(654)
def read_mitbih(filename, max_time=100, classes= ['F', 'N', 'S', 'V', 'Q'], max_nlabel=100, trainset=1):
def normalize(data):
data = np.nan_to_num(data) # removing NaNs and Infs
data = data - np.mean(data)
data = data / np.std(data)
return data
# read data
data = []
samples = spio.loadmat(filename + ".mat")
if trainset == 1: #DS1
samples = samples['s2s_mitbih_DS1']
else: # DS2
samples = samples['s2s_mitbih_DS2']
values = samples[0]['seg_values']
labels = samples[0]['seg_labels']
items_len = len(labels)
num_annots = sum([item.shape[0] for item in values])
    n_seqs = num_annots // max_time  # integer number of full-length sequences
# add all segments(beats) together
l_data = 0
for i, item in enumerate(values):
l = item.shape[0]
for itm in item:
if l_data == n_seqs * max_time:
break
data.append(itm[0])
l_data = l_data + 1
# add all labels together
l_lables = 0
t_lables = []
for i, item in enumerate(labels):
if len(t_lables)==n_seqs*max_time:
break
item= item[0]
for lebel in item:
if l_lables == n_seqs * max_time:
break
t_lables.append(str(lebel))
l_lables = l_lables + 1
del values
data = np.asarray(data)
shape_v = data.shape
data = np.reshape(data, [shape_v[0], -1])
t_lables = np.array(t_lables)
_data = np.asarray([],dtype=np.float64).reshape(0,shape_v[1])
_labels = np.asarray([],dtype=np.dtype('|S1')).reshape(0,)
for cl in classes:
_label = np.where(t_lables == cl)
permute = np.random.permutation(len(_label[0]))
_label = _label[0][permute[:max_nlabel]]
# _label = _label[0][:max_nlabel]
# permute = np.random.permutation(len(_label))
# _label = _label[permute]
_data = np.concatenate((_data, data[_label]))
_labels = np.concatenate((_labels, t_lables[_label]))
    data = _data[:(len(_data) // max_time) * max_time, :]
    _labels = _labels[:(len(_data) // max_time) * max_time]
# data = _data
# split data into sublist of 100=se_len values
data = [data[i:i + max_time] for i in range(0, len(data), max_time)]
labels = [_labels[i:i + max_time] for i in range(0, len(_labels), max_time)]
# shuffle
permute = np.random.permutation(len(labels))
data = np.asarray(data)
labels = np.asarray(labels)
data= data[permute]
labels = labels[permute]
print('Records processed!')
return data, labels
def evaluate_metrics(confusion_matrix):
# https://stackoverflow.com/questions/31324218/scikit-learn-how-to-obtain-true-positive-true-negative-false-positive-and-fal
    FP = confusion_matrix.sum(axis=0) - np.diag(confusion_matrix)
    FN = confusion_matrix.sum(axis=1) - np.diag(confusion_matrix)
    TP = np.diag(confusion_matrix)
    TN = confusion_matrix.sum() - (FP + FN + TP)
    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP / (TP + FN)
# Specificity or true negative rate
TNR = TN / (TN + FP)
# Precision or positive predictive value
PPV = TP / (TP + FP)
# Negative predictive value
NPV = TN / (TN + FN)
# Fall out or false positive rate
FPR = FP / (FP + TN)
# False negative rate
FNR = FN / (TP + FN)
# False discovery rate
FDR = FP / (TP + FP)
# Overall accuracy
ACC = (TP + TN) / (TP + FP + FN + TN)
# ACC_micro = (sum(TP) + sum(TN)) / (sum(TP) + sum(FP) + sum(FN) + sum(TN))
ACC_macro = np.mean(
ACC) # to get a sense of effectiveness of our method on the small classes we computed this average (macro-average)
return ACC_macro, ACC, TPR, TNR, PPV
def batch_data(x, y, batch_size):
shuffle = np.random.permutation(len(x))
start = 0
# from IPython.core.debugger import Tracer; Tracer()()
x = x[shuffle]
y = y[shuffle]
while start + batch_size <= len(x):
yield x[start:start+batch_size], y[start:start+batch_size]
start += batch_size
def build_network(inputs, dec_inputs,char2numY,n_channels=10,input_depth=280,num_units=128,max_time=10,bidirectional=False):
    _inputs = tf.reshape(inputs, [-1, n_channels, input_depth // n_channels])
# _inputs = tf.reshape(inputs, [-1,input_depth,n_channels])
# #(batch*max_time, 280, 1) --> (N, 280, 18)
conv1 = tf.layers.conv1d(inputs=_inputs, filters=32, kernel_size=2, strides=1,
padding='same', activation=tf.nn.relu)
max_pool_1 = tf.layers.max_pooling1d(inputs=conv1, pool_size=2, strides=2, padding='same')
conv2 = tf.layers.conv1d(inputs=max_pool_1, filters=64, kernel_size=2, strides=1,
padding='same', activation=tf.nn.relu)
max_pool_2 = tf.layers.max_pooling1d(inputs=conv2, pool_size=2, strides=2, padding='same')
conv3 = tf.layers.conv1d(inputs=max_pool_2, filters=128, kernel_size=2, strides=1,
padding='same', activation=tf.nn.relu)
shape = conv3.get_shape().as_list()
data_input_embed = tf.reshape(conv3, (-1, max_time,shape[1]*shape[2]))
# timesteps = max_time
#
# lstm_in = tf.unstack(data_input_embed, timesteps, 1)
# lstm_size = 128
# # Get lstm cell output
# # Add LSTM layers
# lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# data_input_embed, states = tf.contrib.rnn.static_rnn(lstm_cell, lstm_in, dtype=tf.float32)
# data_input_embed = tf.stack(data_input_embed, 1)
# shape = data_input_embed.get_shape().as_list()
embed_size = 10 #128 lstm_size # shape[1]*shape[2]
# Embedding layers
output_embedding = tf.Variable(tf.random_uniform((len(char2numY), embed_size), -1.0, 1.0), name='dec_embedding')
data_output_embed = tf.nn.embedding_lookup(output_embedding, dec_inputs)
with tf.variable_scope("encoding") as encoding_scope:
if not bidirectional:
# Regular approach with LSTM units
lstm_enc = tf.contrib.rnn.LSTMCell(num_units)
_, last_state = tf.nn.dynamic_rnn(lstm_enc, inputs=data_input_embed, dtype=tf.float32)
else:
# Using a bidirectional LSTM architecture instead
enc_fw_cell = tf.contrib.rnn.LSTMCell(num_units)
enc_bw_cell = tf.contrib.rnn.LSTMCell(num_units)
((enc_fw_out, enc_bw_out), (enc_fw_final, enc_bw_final)) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=enc_fw_cell,
cell_bw=enc_bw_cell,
inputs=data_input_embed,
dtype=tf.float32)
enc_fin_c = tf.concat((enc_fw_final.c, enc_bw_final.c), 1)
enc_fin_h = tf.concat((enc_fw_final.h, enc_bw_final.h), 1)
last_state = tf.contrib.rnn.LSTMStateTuple(c=enc_fin_c, h=enc_fin_h)
with tf.variable_scope("decoding") as decoding_scope:
if not bidirectional:
lstm_dec = tf.contrib.rnn.LSTMCell(num_units)
else:
lstm_dec = tf.contrib.rnn.LSTMCell(2 * num_units)
dec_outputs, _ = tf.nn.dynamic_rnn(lstm_dec, inputs=data_output_embed, initial_state=last_state)
logits = tf.layers.dense(dec_outputs, units=len(char2numY), use_bias=True)
return logits
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=500)
parser.add_argument('--max_time', type=int, default=10)
parser.add_argument('--test_steps', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--data_dir', type=str, default='data/s2s_mitbih_aami_DS1DS2')
parser.add_argument('--bidirectional', type=str2bool, default=str2bool('False'))
# parser.add_argument('--lstm_layers', type=int, default=2)
parser.add_argument('--num_units', type=int, default=128)
parser.add_argument('--n_oversampling', type=int, default=6000)
parser.add_argument('--checkpoint_dir', type=str, default='checkpoints-seq2seq_DS1DS2')
parser.add_argument('--ckpt_name', type=str, default='seq2seq_mitbih_DS1DS2.ckpt')
    parser.add_argument('--classes', nargs='+', type=str,
                        default=['N', 'S', 'V'])
args = parser.parse_args()
run_program(args)
def run_program(args):
print(args)
max_time = args.max_time # 5 3 second best 10# 40 # 100
epochs = args.epochs # 300
batch_size = args.batch_size # 10
num_units = args.num_units
bidirectional = args.bidirectional
# lstm_layers = args.lstm_layers
n_oversampling = args.n_oversampling
checkpoint_dir = args.checkpoint_dir
ckpt_name = args.ckpt_name
test_steps = args.test_steps
classes= args.classes # ['N', 'S','V']
filename = args.data_dir
X_train, y_train = read_mitbih(filename, max_time, classes=classes, max_nlabel=50000,trainset=1)
X_test, y_test = read_mitbih(filename, max_time, classes=classes, max_nlabel=50000,trainset=0)
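    # DS1 is used for fitting and DS2 only for evaluation, i.e. the commonly used inter-patient
    # division of the MIT-BIH records: no beats from the test patients are seen during training
    # (unlike the intra-patient random split in the companion script).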
input_depth = X_train.shape[2]
n_channels = 10
print ("# of sequences: ", len(X_train))
classes = np.unique(y_train)
char2numY = dict(zip(classes, range(len(classes))))
n_classes = len(classes)
print ('Classes (training): ', classes)
for cl in classes:
ind = np.where(classes == cl)[0][0]
print (cl, len(np.where(y_train.flatten() == cl)[0]))
print ('Classes (test): ', classes)
for cl in classes:
ind = np.where(classes == cl)[0][0]
print (cl, len(np.where(y_test.flatten() == cl)[0]))
char2numY['<GO>'] = len(char2numY)
num2charY = dict(zip(char2numY.values(), char2numY.keys()))
y_train = [[char2numY['<GO>']] + [char2numY[y_] for y_ in date] for date in y_train]
y_test = [[char2numY['<GO>']] + [char2numY[y_] for y_ in date] for date in y_test]
y_test = np.asarray(y_test)
y_train = np.array(y_train)
x_seq_length = len(X_train[0])
y_seq_length = len(y_train[0]) - 1
# Placeholders
inputs = tf.placeholder(tf.float32, [None, max_time, input_depth], name='inputs')
targets = tf.placeholder(tf.int32, (None, None), 'targets')
dec_inputs = tf.placeholder(tf.int32, (None, None), 'output')
logits = build_network(inputs, dec_inputs, char2numY, n_channels=n_channels, input_depth=input_depth, num_units=num_units, max_time=max_time,
bidirectional=bidirectional)
# decoder_prediction = tf.argmax(logits, 2)
# confusion = tf.confusion_matrix(labels=tf.argmax(targets, 1), predictions=tf.argmax(logits, 2), num_classes=len(char2numY) - 1)# it is wrong
# mean_accuracy,update_mean_accuracy = tf.metrics.mean_per_class_accuracy(labels=targets, predictions=decoder_prediction, num_classes=len(char2numY) - 1)
with tf.name_scope("optimization"):
# Loss function
vars = tf.trainable_variables()
beta = 0.001
lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vars
if 'bias' not in v.name]) * beta
loss = tf.contrib.seq2seq.sequence_loss(logits, targets, tf.ones([batch_size, y_seq_length]))
# Optimizer
loss = tf.reduce_mean(loss + lossL2)
optimizer = tf.train.RMSPropOptimizer(1e-3).minimize(loss)
# train the graph
# over-sampling: SMOTE
X_train = np.reshape(X_train,[X_train.shape[0]*X_train.shape[1],-1])
y_train= y_train[:,1:].flatten()
nums = []
for cl in classes:
ind = np.where(classes == cl)[0][0]
nums.append(len(np.where(y_train.flatten()==ind)[0]))
# ratio={0:nums[0],1:nums[0],2:nums[0]}
# ratio={0:7000,1:nums[1],2:7000,3:7000}
ratio={0:nums[0],1:n_oversampling+1000,2:n_oversampling}
sm = SMOTE(random_state=12,ratio=ratio)
X_train, y_train = sm.fit_sample(X_train, y_train)
    X_train = X_train[:(X_train.shape[0] // max_time) * max_time, :]
    y_train = y_train[:(X_train.shape[0] // max_time) * max_time]
X_train = np.reshape(X_train,[-1,X_test.shape[1],X_test.shape[2]])
y_train = np.reshape(y_train,[-1,y_test.shape[1]-1,])
y_train= [[char2numY['<GO>']] + [y_ for y_ in date] for date in y_train]
y_train = np.array(y_train)
print ('Classes in the training set: ', classes)
for cl in classes:
ind = np.where(classes == cl)[0][0]
print (cl, len(np.where(y_train.flatten()==ind)[0]))
print ("------------------y_train samples--------------------")
for ii in range(2):
print(''.join([num2charY[y_] for y_ in list(y_train[ii+5])]))
    print ('Classes in the test set: ', classes)
for cl in classes:
ind = np.where(classes == cl)[0][0]
print (cl, len(np.where(y_test.flatten()==ind)[0]))
print ("------------------y_test samples--------------------")
for ii in range(2):
print(''.join([num2charY[y_] for y_ in list(y_test[ii+5])]))
def test_model():
# source_batch, target_batch = next(batch_data(X_test, y_test, batch_size))
acc_track = []
sum_test_conf = []
for batch_i, (source_batch, target_batch) in enumerate(batch_data(X_test, y_test, batch_size)):
dec_input = np.zeros((len(source_batch), 1)) + char2numY['<GO>']
for i in range(y_seq_length):
batch_logits = sess.run(logits,
feed_dict={inputs: source_batch, dec_inputs: dec_input})
prediction = batch_logits[:, -1].argmax(axis=-1)
dec_input = np.hstack([dec_input, prediction[:, None]])
# acc_track.append(np.mean(dec_input == target_batch))
acc_track.append(dec_input[:, 1:] == target_batch[:, 1:])
y_true= target_batch[:, 1:].flatten()
y_pred = dec_input[:, 1:].flatten()
sum_test_conf.append(confusion_matrix(y_true, y_pred,labels=range(len(char2numY)-1)))
sum_test_conf= np.mean(np.array(sum_test_conf, dtype=np.float32), axis=0)
# print('Accuracy on test set is: {:>6.4f}'.format(np.mean(acc_track)))
# mean_p_class, accuracy_classes = sess.run([mean_accuracy, update_mean_accuracy],
# feed_dict={inputs: source_batch,
# dec_inputs: dec_input[:, :-1],
# targets: target_batch[:, 1:]})
# print (mean_p_class)
# print (accuracy_classes)
acc_avg, acc, sensitivity, specificity, PPV = evaluate_metrics(sum_test_conf)
print('Average Accuracy is: {:>6.4f} on test set'.format(acc_avg))
for index_ in range(n_classes):
print("\t{} rhythm -> Sensitivity: {:1.4f}, Specificity : {:1.4f}, Precision (PPV) : {:1.4f}, Accuracy : {:1.4f}".format(
classes[index_],
sensitivity[
index_],
specificity[
index_], PPV[index_],
acc[index_]))
print("\t Average -> Sensitivity: {:1.4f}, Specificity : {:1.4f}, Precision (PPV) : {:1.4f}, Accuracy : {:1.4f}".format(
np.mean(sensitivity), np.mean(specificity), np.mean(PPV), np.mean(acc)))
return acc_avg, acc, sensitivity, specificity, PPV
loss_track = []
    def count_parameters():
        print ('# of Params: ', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
    count_parameters()
if (os.path.exists(checkpoint_dir) == False):
os.mkdir(checkpoint_dir)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver = tf.train.Saver()
print(str(datetime.now()))
pre_acc_avg = 0.0
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# # Restore
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
# saver.restore(session, os.path.join(checkpoint_dir, ckpt_name))
saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir))
# or 'load meta graph' and restore weights
# saver = tf.train.import_meta_graph(ckpt_name+".meta")
# saver.restore(session,tf.train.latest_checkpoint(checkpoint_dir))
test_model()
else:
for epoch_i in range(epochs):
start_time = time.time()
train_acc = []
for batch_i, (source_batch, target_batch) in enumerate(batch_data(X_train, y_train, batch_size)):
_, batch_loss, batch_logits = sess.run([optimizer, loss, logits],
feed_dict = {inputs: source_batch,
dec_inputs: target_batch[:, :-1],
targets: target_batch[:, 1:]})
loss_track.append(batch_loss)
train_acc.append(batch_logits.argmax(axis=-1) == target_batch[:,1:])
accuracy = np.mean(train_acc)
print('Epoch {:3} Loss: {:>6.3f} Accuracy: {:>6.4f} Epoch duration: {:>6.3f}s'.format(epoch_i, batch_loss,
accuracy, time.time() - start_time))
if epoch_i%test_steps==0:
acc_avg, acc, sensitivity, specificity, PPV= test_model()
print('loss {:.4f} after {} epochs (batch_size={})'.format(loss_track[-1], epoch_i + 1, batch_size))
save_path = os.path.join(checkpoint_dir, ckpt_name)
saver.save(sess, save_path)
print("Model saved in path: %s" % save_path)
# if np.nan_to_num(acc_avg) > pre_acc_avg: # save the better model based on the f1 score
# print('loss {:.4f} after {} epochs (batch_size={})'.format(loss_track[-1], epoch_i + 1, batch_size))
# pre_acc_avg = acc_avg
# save_path =os.path.join(checkpoint_dir, ckpt_name)
# saver.save(sess, save_path)
# print("The best model (till now) saved in path: %s" % save_path)
plt.plot(loss_track)
plt.show()
print(str(datetime.now()))
# test_model()
if __name__ == '__main__':
main()
| 18,958 | 41.508969 | 157 | py |
robust-selection | robust-selection-main/setup.py | from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
with open("README.md", "r") as fh:
long_description = fh.read()
# inject numpy headers
class build_ext_robsel(build_ext):
def finalize_options(self):
build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
# `__builtins__` can be a dict
# see https://docs.python.org/2/reference/executionmodel.html
if isinstance(__builtins__, dict):
__builtins__['__NUMPY_SETUP__'] = False
else:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
setup(
name="robust-selection",
version="0.0.8",
author="Chau Tran",
author_email="[email protected]",
cmdclass={'build_ext': build_ext_robsel},
description="Distributionally Robust Formulation and Model Selection for the Graphical Lasso",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dddlab/robust-selection",
packages=['robsel'],
package_dir={'robsel':'robsel'},
python_requires='>=3.6',
install_requires=["numpy >= 1.15",
"scikit-learn >= 0.22.1"],
setup_requires=["numpy >= 1.15",
"scikit-learn >= 0.22.1"],
)
| 1,398 | 32.309524 | 98 | py |
robust-selection | robust-selection-main/robsel/robsel.py | import numpy as np
from sklearn.utils import resample
def RWP(X, orig_cov, with_diag=False):
"""
Robust Wasserstein Profile function.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
        Data from which to compute the covariance estimate of the bootstrap sample.
orig_cov: ndarray of shape (n_features, n_features)
The covariance matrix of the variables from original data.
    with_diag : bool, default=False
Whether or not to include diagonal when compute RWP function.
Returns
-------
    rwp : float
        The RWP statistic: the sup-norm distance between the bootstrap covariance and the
        sample covariance over the (lower-triangular) entries used.
"""
n = X.shape[0]
p = X.shape[1]
X_bootstrap = resample(X, replace=True, n_samples=n)
A_s = np.cov(X_bootstrap,rowvar=False)
if with_diag:
A_s = A_s[np.tril_indices(p)]
else:
A_s = A_s[np.tril_indices(p,-1)]
return np.linalg.norm(A_s - orig_cov, ord=np.inf)
def RobustSelection(X, alpha, B=200, with_diag=False):
"""
Robust Selection algorithm for estimation of the regularization parameter.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Data from which to compute the covariance estimate
alpha : float or array_like
The confidence level: the higher alpha, the lower the order statistics,
the smaller regularization parameter.
Range is (0, 1).
B : int, default=200
Number of bootstrap samples such that (B)(1-alpha) is
also an integer.
    with_diag : bool, default=False
Whether or not to include diagonal when compute RWP function.
Returns
-------
    lambda : float or ndarray
        The estimated regularization parameter (one value per entry of ``alpha``).
"""
p = X.shape[1]
A_n = np.cov(X, rowvar=False)
if with_diag:
A_n = A_n[np.tril_indices(p)]
else:
A_n = A_n[np.tril_indices(p,-1)]
R_vec = np.zeros(B)
for i in range(B):
R_vec[i] = RWP(X, A_n, with_diag)
R_vec = np.sort(R_vec)
index = (B)*(1-alpha) - 1
index = np.array(index)
return R_vec[(index).astype(int)]
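# Illustrative usage (hypothetical data):
#   import numpy as np
#   from robsel import RobustSelection
#   X = np.random.randn(200, 10)
#   lam = RobustSelection(X, alpha=0.9)                         # single lambda at the 90% level
#   lams = RobustSelection(X, alpha=np.array([0.95, 0.9, 0.5])) # one lambda per alpha
# The result can then be passed to a graphical lasso solver, e.g.
# sklearn.covariance.GraphicalLasso(alpha=lam).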
| 2,074 | 28.642857 | 80 | py |
robust-selection | robust-selection-main/robsel/__init__.py | from . import robsel
from .robsel import * | 42 | 20.5 | 21 | py |
LearningSPH | LearningSPH-main/learning_dns_data_Re80/hierarchy_post_process/volume_plots_py.py | import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
X, Y, Z = np.mgrid[0:2*np.pi:16j, 0:2*np.pi:16j, 0:2*np.pi:16j]
values = np.sin(X) * np.cos(Z) * np.sin(Y)
m_phys = ["phys_inf_W2ab_theta_po_liv_Pi", "phys_inf_Wab_theta_po_liv_Pi",
"phys_inf_Wliu_theta_po_liv_Pi", "phys_inf_theta_po_liv_Pi"]
m_nns = ["node_norm_theta_liv_Pi", "nnsum2_norm_theta_liv_Pi", "rot_inv_theta_liv_Pi",
"eos_nn_theta_alpha_beta_liv_Pi", "grad_p_theta_alpha_beta_liv_Pi"]
# Select which model's snapshots to load (exactly one assignment must be active,
# otherwise `method` is undefined below):
method = m_phys[0]
# method = m_nns[4]
# method = "dns"
t = 1
f1 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
t2 = 70
f2 = np.load(f"./field_data_snapshots/vf_u_t{t2}_" + method + ".npy")
t3 = 3*70
f3 = np.load(f"./field_data_snapshots/vf_u_t{t3}_" + method + ".npy")
t4 = 5*70
f4 = np.load(f"./field_data_snapshots/vf_u_t{t4}_" + method + ".npy")
t5 = 7*70
f5 = np.load(f"./field_data_snapshots/vf_u_t{t5}_" + method + ".npy")
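# Snapshot indices 1, 70, 210, 350, 490 line up with the commented subplot titles below
# ("t = 0(s)" ... "t = 19.6(s)"), i.e. roughly 0.04 s of simulated time per saved frame
# (an inference from those titles, not from the data files themselves).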
def plot_volume_over_t(op, f1, f2, f3, f4, f5):
win = 0.2
fig = make_subplots(
rows=1, cols=5,
column_widths=[0.4, 0.4, 0.4, 0.4, 0.4],
specs=[[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}]])
# subplot_titles=("t = 0(s)", "t = 2.8(s)", "t = 8.4(s)", "t = 14.0(s)", "t = 19.6(s)"))
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f1.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f2.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f3.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f3),
# isomax=np.max(f3),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f4.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=1, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f5.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=1, col=5)
h = 500
fig.update_layout(
# template="plotly_dark",
autosize=False,
width=5*h,
height=h,
margin=dict(
l=20,
r=20,
b=15,
t=15,
pad=26
),
font=dict(
size=14,
color="Black"
)
)
fig.update_layout(scene = dict(
xaxis_title=r'x',
yaxis_title=r'y',
zaxis_title=r'DNS: Truth'))
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(scene2_xaxis_showticklabels=False,
scene2_yaxis_showticklabels=False,
scene2_zaxis_showticklabels=False)
fig.update_layout(scene3_xaxis_showticklabels=False,
scene3_yaxis_showticklabels=False,
scene3_zaxis_showticklabels=False)
fig.update_layout(scene4_xaxis_showticklabels=False,
scene4_yaxis_showticklabels=False,
scene4_zaxis_showticklabels=False)
fig.update_layout(scene5_xaxis_showticklabels=False,
scene5_yaxis_showticklabels=False,
scene5_zaxis_showticklabels=False)
fig.update_annotations(font_size=28)
fig.write_image(f"volume_figures/vf_u_" + method + ".png")
plot_volume_over_t(0.2, f1, f2, f3, f4, f5)
def plot_volume_field(field_data):
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=field_data.flatten(),
# isomin=np.min(field_data),
# isomax=np.max(field_data),
isomin=-0.18,
isomax=0.18,
opacity=0.3, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='RdBu'
))
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(width=550, margin=dict(r=10, l=10, b=10, t=10))
fig.write_image(f"volume_figures/vf_u_t{t}_" + method + ".png")
# fig.show()
# plot_volume_field(vf_u1)
# def plot_volume_over_t(field1, field2):
# fig = make_subplots(
# rows=2, cols=2,
# specs=[[{'type': 'volume'}, {'type': 'volume'}],
# [{'type': 'volume'}, {'type': 'volume'}]])
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field1.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# showscale=False,
# colorscale='RdBu'
# )), row=1, col=1)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=1, col=2)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=2, col=1)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=2, col=2)
# # fig.add_trace(go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=field1.flatten(),
# # isomin=-0.18,
# # isomax=0.18,
# # opacity=0.3, # needs to be small to see through all surfaces
# # surface_count=60, # needs to be a large number for good volume rendering
# # showscale=False,
# # colorscale='RdBu')),
# # row=1, col=1)
# # fig.add_trace(go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=field2.flatten(),
# # isomin=-0.18,
# # isomax=0.18,
# # opacity=0.3, # needs to be small to see through all surfaces
# # surface_count=60, # needs to be a large number for good volume rendering
# # colorscale='RdBu')),
# # row=1, col=2)
# fig.update_layout(height=600, width=1200, title_text="Side By Side Subplots")
# fig.write_image(f"volume_figures/vf_u_" + method + ".png")
# #this does not work
# # plot_volume_over_t(vf_u1, vf_u2)
# def plot_iso_surface(field_data):
# fig = go.Figure(data=go.Isosurface(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field_data.flatten(),
# isomin=-0.17,
# isomax=0.17,
# colorscale='RdBu',
# caps=dict(x_show=False, y_show=False)
# ))
# fig.show()
# # plot_iso_surface(vf_u)
# # fig = go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=values.flatten(),
# # isomin=-0.1,
# # isomax=0.9,
# # opacity=1.0, # needs to be small to see through all surfaces
# # surface_count=30, # needs to be a large number for good volume rendering
# # colorscale='RdBu'
# # ))
# # fig.update_layout(scene_xaxis_showticklabels=False,
# # scene_yaxis_showticklabels=False,
# # scene_zaxis_showticklabels=False)
# # fig.show()
| 9,569 | 29.477707 | 113 | py |
LearningSPH | LearningSPH-main/learning_dns_data_Re80/hierarchy_post_process/volume_plots_t20_lf_sequence_pngs.py | import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
X, Y, Z = np.mgrid[0:2*np.pi:16j, 0:2*np.pi:16j, 0:2*np.pi:16j]
#methods and file names
m_phys = ["phys_inf_W2ab_theta_po_liv_Pi", "phys_inf_Wab_theta_po_liv_Pi",
"phys_inf_Wliu_theta_po_liv_Pi", "phys_inf_theta_po_liv_Pi"]
m_nns = ["node_norm_theta_liv_Pi", "nnsum2_norm_theta_liv_Pi", "rot_inv_theta_liv_Pi",
"eos_nn_theta_alpha_beta_liv_Pi", "grad_p_theta_alpha_beta_liv_Pi"]
def plot_volume_field_at_t(m_, f1, t):
op = 0.2
fig = make_subplots(
rows=1, cols=1,
column_widths=[1],
specs=[[{'type': 'volume'}]])
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f1.flatten(),
isomin=-0.2,
isomax=0.2,
opacity=op, # needs to be small to see through all surfaces
surface_count=80, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=1)
h = 400
fig.update_layout(
autosize=False,
width=h,
height=h,
margin=dict(
l=1,
r=1,
b=1,
t=1,
pad=1
),
font=dict(
size=1,
color="Black"
)
)
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
# camera = dict(eye=dict(x=6, y=5, z=4))
camera = dict(eye=dict(x=1.32, y=1.32, z=1.1)) #looks best
fig.update_layout(scene_camera=camera)
fig.update_annotations(font_size=32)
    fig.write_image(f"volume_figures_seq/vf_u_" + m_ + "_" + str(t) + ".png")
def plot_volume_field_t(field_data, method, t):
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=field_data.flatten(),
isomin=-0.12,
isomax=0.12,
opacity=0.2, # needs to be small to see through all surfaces
surface_count=100, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet'
))
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
h=400
fig.update_layout(
autosize=False,
width=h,
height=h,
margin=dict(
l=0,
r=0,
b=0,
t=0,
pad=0
),
font=dict(
size=1,
color="Black"
)
)
camera = dict(eye=dict(x=1.32, y=1.32, z=1.1)) #looks best
fig.update_layout(scene_camera=camera)
fig.write_image(f"volume_figures_seq/avgx_V/"+str(t).zfill(3) + "avgx_V_" + method + ".png")
def obtain_plots_over_all_t(method_in):
t_frame = np.arange(2, 500, 2)
for t in t_frame:
print("saving fig at t = ", t)
t_in = str(t)
f = np.load(f"./field_data_snapshots/avgx_V_t"+t_in+"_"+method_in+".npy")
print("max f = ", np.max(f))
plot_volume_field_t(f, method_in, t)
# obtain_plots_over_all_t("dns")
#======================================================
#======================================================
#======================================================
#======================================================
#======================================================
#======================================================
#======================================================
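# NOTE: plot_volume_over_mphys_t below is shared by the SPH-, phys- and NN-model drivers;
# the active subplot_titles tuple and the write_image output directory are switched by hand
# (see the commented alternatives) so they match whichever obtain_plots_over_* function is run.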
def plot_volume_over_mphys_t(t, op, f1, f2, f3, f4):
win = 0.2
sc = 90
fig = make_subplots(
rows=1, cols=4,
column_widths=[0.25, 0.25, 0.25, 0.25],
specs=[[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}]],
# subplot_titles=("DNS", "SPH-W2", "EoS", "Grad P"))
# subplot_titles=("DNS", "Rot-Inv", "NN Sum", "NODE"))
subplot_titles=("DNS", "SPH-W2", "SPH-W1", "SPH-Wq"))
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f1.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=sc, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f2.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=sc, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f3.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=sc, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f4.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=sc, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=1, col=4)
h = 350
fig.update_layout(
# template="plotly_dark",
autosize=False,
width=4*h,
height=h,
margin=dict(
l=1,
r=1,
b=1,
t=34,
# t = 34,
pad=1
),
font=dict(
size=1,
color="Black"
)
)
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(scene2_xaxis_showticklabels=False,
scene2_yaxis_showticklabels=False,
scene2_zaxis_showticklabels=False)
fig.update_layout(scene3_xaxis_showticklabels=False,
scene3_yaxis_showticklabels=False,
scene3_zaxis_showticklabels=False)
fig.update_layout(scene4_xaxis_showticklabels=False,
scene4_yaxis_showticklabels=False,
scene4_zaxis_showticklabels=False)
# camera = dict(eye=dict(x=5, y=4, z=3.5))
camera = dict(eye=dict(x=1.32, y=1.22, z=1.1)) #looks best
fig.update_layout(scene_camera=camera)
fig.update_layout(scene2_camera=camera)
fig.update_layout(scene3_camera=camera)
fig.update_layout(scene4_camera=camera)
fig.update_annotations(font_size=22)
# fig.write_image(f"volume_figures_seq/all_m/phys/"+str(t).zfill(3) +"vf_u_physm_t.png")
fig.write_image(f"volume_figures_seq/all_m/sph/"+str(t).zfill(3) +"vf_u_physm_t.png")
def obtain_plots_over_mphys_and_t():
t_frame = np.arange(422, 500, 2)
for t in t_frame:
print("saving fig at t = ", t)
t_in = str(t)
f1 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_dns.npy")
f2 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_phys_inf_W2ab_theta_po_liv_Pi.npy")
f3 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_eos_nn_theta_alpha_beta_liv_Pi.npy")
f4 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_grad_p_theta_alpha_beta_liv_Pi.npy")
# f5 = np.load(f"./field_data_snapshots/avgx_V_t"+t_in+"_rot_inv_theta_liv_Pi.npy")
# f6 = np.load(f"./field_data_snapshots/avgx_V_t"+t_in+"_node_norm_theta_liv_Pi.npy")
plot_volume_over_mphys_t(t, 0.2, f1, f2, f3, f4)
# obtain_plots_over_mphys_and_t()
def obtain_plots_over_msph_and_t():
t_frame = np.arange(2, 502, 2)
for t in t_frame:
print("saving fig at t = ", t)
t_in = str(t)
f1 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_dns.npy")
f2 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_phys_inf_W2ab_theta_po_liv_Pi.npy")
f3 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_phys_inf_Wab_theta_po_liv_Pi.npy")
f4 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_phys_inf_Wliu_theta_po_liv_Pi.npy")
# f5 = np.load(f"./field_data_snapshots/avgx_V_t"+t_in+"_rot_inv_theta_liv_Pi.npy")
# f6 = np.load(f"./field_data_snapshots/avgx_V_t"+t_in+"_node_norm_theta_liv_Pi.npy")
plot_volume_over_mphys_t(t, 0.2, f1, f2, f3, f4)
obtain_plots_over_msph_and_t()
def obtain_plots_over_mnns_and_t():
t_frame = np.arange(446, 500, 2)
for t in t_frame:
print("saving fig at t = ", t)
t_in = str(t)
f1 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_dns.npy")
# f2 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_phys_inf_W2ab_theta_po_liv_Pi.npy")
# f3 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_eos_nn_theta_alpha_beta_liv_Pi.npy")
# f4 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_grad_p_theta_alpha_beta_liv_Pi.npy")
f2 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_rot_inv_theta_liv_Pi.npy")
f3 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_nnsum2_norm_theta_liv_Pi.npy")
f4 = np.load(f"./field_data_snapshots/vf_u_t"+t_in+"_node_norm_theta_liv_Pi.npy")
plot_volume_over_mphys_t(t, 0.2, f1, f2, f3, f4)
# obtain_plots_over_mnns_and_t() | 9,697 | 31.763514 | 99 | py |
LearningSPH | LearningSPH-main/learning_dns_data_Re80/hierarchy_post_process/volume_plots_t20_convergence.py | import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
X, Y, Z = np.mgrid[0:2*np.pi:16j, 0:2*np.pi:16j, 0:2*np.pi:16j]
#methods and file names
m_phys = ["phys_inf_W2ab_theta_po_liv_Pi", "phys_inf_Wab_theta_po_liv_Pi",
"phys_inf_Wliu_theta_po_liv_Pi", "phys_inf_theta_po_liv_Pi"]
m_nns = ["node_norm_theta_liv_Pi", "nnsum2_norm_theta_liv_Pi", "rot_inv_theta_liv_Pi",
"eos_nn_theta_alpha_beta_liv_Pi", "grad_p_theta_alpha_beta_liv_Pi"]
#======================================================
#======================================================
#======================================================
#======================================================
#======================================================
#======================================================
#======================================================
def plot_volume_over_mphys_t(t, op, f1, f2):
win = 0.2
sc = 90
fig = make_subplots(
rows=1, cols=2,
column_widths=[0.5, 0.5],
specs=[[{'type': 'volume'}, {'type': 'volume'}]],
subplot_titles=("DNS", "SPH-W2"))
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f1.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=sc, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f2.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=sc, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=2)
h = 350
fig.update_layout(
# template="plotly_dark",
autosize=False,
width=2*h,
height=h,
margin=dict(
l=1,
r=1,
b=1,
t=34,
pad=1
),
font=dict(
size=1,
color="Black"
)
)
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(scene2_xaxis_showticklabels=False,
scene2_yaxis_showticklabels=False,
scene2_zaxis_showticklabels=False)
# camera = dict(eye=dict(x=5, y=4, z=3.5))
camera = dict(eye=dict(x=1.32, y=1.22, z=1.1)) #looks best
fig.update_layout(scene_camera=camera)
fig.update_layout(scene2_camera=camera)
fig.update_annotations(font_size=22)
fig.write_image(f"volume_figures_conv_70/"+str(t).zfill(3) +"vf_u_physm_conv.png")
def obtain_plots_over_mphys_and_t():
idx_n = np.arange(20, 41, 1)
for i in idx_n:
print("saving fig at i = ", i)
i_in = str(i)
f1 = np.load(f"./vf_u_t70_dns.npy")
f2 = np.load(f"./3d_conv_data/vf_u_t70_phys_inf_W2ab_theta_po_liv_Pi_itr"+i_in+".npy")
plot_volume_over_mphys_t(i, 0.2, f1, f2)
obtain_plots_over_mphys_and_t() | 3,315 | 28.345133 | 94 | py |
LearningSPH | LearningSPH-main/learning_dns_data_Re80/hierarchy_post_process/animate.py | import os
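# Stitches the per-frame PNGs in the working directory into an H.264 movie:
#   -framerate 16       input rate of 16 frames per second
#   -pattern_type glob  pick up '*.png' in sorted order (hence the zero-padded frame names
#                       written by the plotting scripts)
#   -pix_fmt yuv420p    pixel format that most players can decode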
def save():
os.system("ffmpeg -framerate 16 -pattern_type glob -i '*.png' -c:v libx264 -pix_fmt yuv420p u_over_t.mp4")
save() | 142 | 19.428571 | 110 | py |
LearningSPH | LearningSPH-main/learning_dns_data_Re80/hierarchy_post_process/volume_plots_py_t50_lf.py | import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
X, Y, Z = np.mgrid[0:2*np.pi:16j, 0:2*np.pi:16j, 0:2*np.pi:16j]
values = np.sin(X) * np.cos(Z) * np.sin(Y)
m_phys = ["phys_inf_W2ab_theta_po_liv_Pi", "phys_inf_Wab_theta_po_liv_Pi",
"phys_inf_Wliu_theta_po_liv_Pi", "phys_inf_theta_po_liv_Pi"]
m_nns = ["node_norm_theta_liv_Pi", "nnsum2_norm_theta_liv_Pi", "rot_inv_theta_liv_Pi",
"eos_nn_theta_alpha_beta_liv_Pi", "grad_p_theta_alpha_beta_liv_Pi"]
# method=m_phys[0]
method=m_nns[2]
# method="dns"
t = 1
f1 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
t2 = 70
f2 = np.load(f"./field_data_snapshots_t50/vf_u_t{t2}_" + method + ".npy")
t3 = 2*70
f3 = np.load(f"./field_data_snapshots_t50/vf_u_t{t3}_" + method + ".npy")
t4 = 4*70
f4 = np.load(f"./field_data_snapshots_t50/vf_u_t{t4}_" + method + ".npy")
t5 = 6*70
f5 = np.load(f"./field_data_snapshots_t50/vf_u_t{t5}_" + method + ".npy")
def plot_volume_over_t(op, f1, f2, f3, f4, f5):
win = 0.2
fig = make_subplots(
rows=1, cols=5,
column_widths=[0.4, 0.4, 0.4, 0.4, 0.4],
specs=[[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}]])
# subplot_titles=("t = 0(s)", "t = 2.8(s)", "t = 8.4(s)", "t = 14.0(s)", "t = 19.6(s)"))
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f1.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f2.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f3.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f3),
# isomax=np.max(f3),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f4.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=1, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f5.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=1, col=5)
h = 500
fig.update_layout(
# template="plotly_dark",
autosize=False,
width=5*h,
height=h,
margin=dict(
l=20,
r=20,
b=15,
t=15,
pad=26
),
font=dict(
size=14,
color="Black"
)
)
fig.update_layout(scene = dict(
xaxis_title=r'x',
yaxis_title=r'y',
zaxis_title=r'DNS: Truth'))
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(scene2_xaxis_showticklabels=False,
scene2_yaxis_showticklabels=False,
scene2_zaxis_showticklabels=False)
fig.update_layout(scene3_xaxis_showticklabels=False,
scene3_yaxis_showticklabels=False,
scene3_zaxis_showticklabels=False)
fig.update_layout(scene4_xaxis_showticklabels=False,
scene4_yaxis_showticklabels=False,
scene4_zaxis_showticklabels=False)
fig.update_layout(scene5_xaxis_showticklabels=False,
scene5_yaxis_showticklabels=False,
scene5_zaxis_showticklabels=False)
fig.update_annotations(font_size=28)
fig.write_image(f"volume_figures_t50/vf_u_" + method + ".png")
plot_volume_over_t(0.2, f1, f2, f3, f4, f5)
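# To render another model, set `method` above to one of the m_phys / m_nns entries (or to "dns")
# and re-run; both the snapshot paths and the output file name are derived from `method`.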
def plot_volume_field(field_data):
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=field_data.flatten(),
# isomin=np.min(field_data),
# isomax=np.max(field_data),
isomin=-0.18,
isomax=0.18,
opacity=0.3, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='RdBu'
))
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(width=550, margin=dict(r=10, l=10, b=10, t=10))
fig.write_image(f"volume_figures/vf_u_t{t}_" + method + ".png")
# fig.show()
# plot_volume_field(vf_u1)
# def plot_volume_over_t(field1, field2):
# fig = make_subplots(
# rows=2, cols=2,
# specs=[[{'type': 'volume'}, {'type': 'volume'}],
# [{'type': 'volume'}, {'type': 'volume'}]])
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field1.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# showscale=False,
# colorscale='RdBu'
# )), row=1, col=1)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=1, col=2)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=2, col=1)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=2, col=2)
# # fig.add_trace(go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=field1.flatten(),
# # isomin=-0.18,
# # isomax=0.18,
# # opacity=0.3, # needs to be small to see through all surfaces
# # surface_count=60, # needs to be a large number for good volume rendering
# # showscale=False,
# # colorscale='RdBu')),
# # row=1, col=1)
# # fig.add_trace(go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=field2.flatten(),
# # isomin=-0.18,
# # isomax=0.18,
# # opacity=0.3, # needs to be small to see through all surfaces
# # surface_count=60, # needs to be a large number for good volume rendering
# # colorscale='RdBu')),
# # row=1, col=2)
# fig.update_layout(height=600, width=1200, title_text="Side By Side Subplots")
# fig.write_image(f"volume_figures/vf_u_" + method + ".png")
# #this does not work
# # plot_volume_over_t(vf_u1, vf_u2)
# def plot_iso_surface(field_data):
# fig = go.Figure(data=go.Isosurface(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field_data.flatten(),
# isomin=-0.17,
# isomax=0.17,
# colorscale='RdBu',
# caps=dict(x_show=False, y_show=False)
# ))
# fig.show()
# # plot_iso_surface(vf_u)
# # fig = go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=values.flatten(),
# # isomin=-0.1,
# # isomax=0.9,
# # opacity=1.0, # needs to be small to see through all surfaces
# # surface_count=30, # needs to be a large number for good volume rendering
# # colorscale='RdBu'
# # ))
# # fig.update_layout(scene_xaxis_showticklabels=False,
# # scene_yaxis_showticklabels=False,
# # scene_zaxis_showticklabels=False)
# # fig.show()
| 9,587 | 29.535032 | 113 | py |
LearningSPH | LearningSPH-main/learning_dns_data_Re80/hierarchy_post_process/volume_plots_all.py | import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
X, Y, Z = np.mgrid[0:2*np.pi:16j, 0:2*np.pi:16j, 0:2*np.pi:16j]
values = np.sin(X) * np.cos(Z) * np.sin(Y)
m_phys = ["phys_inf_W2ab_theta_po_liv_Pi", "phys_inf_Wab_theta_po_liv_Pi",
"phys_inf_Wliu_theta_po_liv_Pi", "phys_inf_theta_po_liv_Pi"]
m_nns = ["node_norm_theta_liv_Pi", "nnsum2_norm_theta_liv_Pi", "rot_inv_theta_liv_Pi",
"eos_nn_theta_alpha_beta_liv_Pi", "grad_p_theta_alpha_beta_liv_Pi"]
# method=m_phys[0]
# method=m_nns[4]
t = 1
method="dns"
f11 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
# method=m_phys[1]
# f21 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
# method=m_nns[4]
# f31 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
# method=m_nns[5]
# f41 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
# method=m_nns[3]
# f51 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
# method=m_nns[2]
# f61 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
# method=m_nns[1]
# f71 = np.load(f"./field_data_snapshots/vf_u_t{t}_" + method + ".npy")
t2 = 70
method="dns"
f12 = np.load(f"./field_data_snapshots/vf_u_t{t2}_" + method + ".npy")
method=m_phys[0]
f22 = np.load(f"./field_data_snapshots/vf_u_t{t2}_" + method + ".npy")
method=m_nns[4-1]
f32 = np.load(f"./field_data_snapshots/vf_u_t{t2}_" + method + ".npy")
method=m_nns[5-1]
f42 = np.load(f"./field_data_snapshots/vf_u_t{t2}_" + method + ".npy")
method=m_nns[3-1]
f52 = np.load(f"./field_data_snapshots/vf_u_t{t2}_" + method + ".npy")
method=m_nns[2-1]
f62 = np.load(f"./field_data_snapshots/vf_u_t{t2}_" + method + ".npy")
method=m_nns[1-1]
f72 = np.load(f"./field_data_snapshots/vf_u_t{t2}_" + method + ".npy")
t3 = 2*70
method="dns"
f13 = np.load(f"./field_data_snapshots/vf_u_t{t3}_" + method + ".npy")
method=m_phys[1-1]
f23 = np.load(f"./field_data_snapshots/vf_u_t{t3}_" + method + ".npy")
method=m_nns[4-1]
f33 = np.load(f"./field_data_snapshots/vf_u_t{t3}_" + method + ".npy")
method=m_nns[5-1]
f43 = np.load(f"./field_data_snapshots/vf_u_t{t3}_" + method + ".npy")
method=m_nns[3-1]
f53 = np.load(f"./field_data_snapshots/vf_u_t{t3}_" + method + ".npy")
method=m_nns[2-1]
f63 = np.load(f"./field_data_snapshots/vf_u_t{t3}_" + method + ".npy")
method=m_nns[1-1]
f73 = np.load(f"./field_data_snapshots/vf_u_t{t3}_" + method + ".npy")
t4 = 4*70
method="dns"
f14 = np.load(f"./field_data_snapshots/vf_u_t{t4}_" + method + ".npy")
method=m_phys[1-1]
f24 = np.load(f"./field_data_snapshots/vf_u_t{t4}_" + method + ".npy")
method=m_nns[4-1]
f34 = np.load(f"./field_data_snapshots/vf_u_t{t4}_" + method + ".npy")
method=m_nns[5-1]
f44 = np.load(f"./field_data_snapshots/vf_u_t{t4}_" + method + ".npy")
method=m_nns[3-1]
f54 = np.load(f"./field_data_snapshots/vf_u_t{t4}_" + method + ".npy")
method=m_nns[2-1]
f64 = np.load(f"./field_data_snapshots/vf_u_t{t4}_" + method + ".npy")
method=m_nns[1-1]
f74 = np.load(f"./field_data_snapshots/vf_u_t{t4}_" + method + ".npy")
t5 = 6*70
method="dns"
f15 = np.load(f"./field_data_snapshots/vf_u_t{t5}_" + method + ".npy")
method=m_phys[1-1]
f25 = np.load(f"./field_data_snapshots/vf_u_t{t5}_" + method + ".npy")
method=m_nns[4-1]
f35 = np.load(f"./field_data_snapshots/vf_u_t{t5}_" + method + ".npy")
method=m_nns[5-1]
f45 = np.load(f"./field_data_snapshots/vf_u_t{t5}_" + method + ".npy")
method=m_nns[3-1]
f55 = np.load(f"./field_data_snapshots/vf_u_t{t5}_" + method + ".npy")
method=m_nns[2-1]
f65 = np.load(f"./field_data_snapshots/vf_u_t{t5}_" + method + ".npy")
method=m_nns[1-1]
f75 = np.load(f"./field_data_snapshots/vf_u_t{t5}_" + method + ".npy")
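# Snapshots for the 7x5 comparison figure: the DNS reference at t = 1 and, for t = 70, 140, 280
# and 420, the DNS field plus six learned models (the W2ab physics-informed model, eos_nn, grad_p,
# rot_inv, nnsum2 and node_norm). In the figure, each row is one model and each column one time.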
def plot_volume_over_t(op, f1, f2, f3, f4, f5):
win = 0.2
fig = make_subplots(
rows=1, cols=5,
column_widths=[0.4, 0.4, 0.4, 0.4, 0.4],
specs=[[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}]])#,
# subplot_titles=("t = 0(s)", "t = 2.8(s)", "t = 5.6(s)", "t = 11.2(s)", "t = 16.8(s)"))
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f1.flatten(),
isomin=np.min(f1),
isomax=np.max(f1),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f2.flatten(),
# isomin=-win,
# isomax=win,
isomin=np.min(f2),
isomax=np.max(f2),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f3.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f3),
# isomax=np.max(f3),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f4.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=1, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f5.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=1, col=5)
h = 450
fig.update_layout(
# template="plotly_dark",
autosize=False,
width=5*h,
height=h,
margin=dict(
l=20,
r=20,
b=20,
t=28,
pad=26
),
font=dict(
size=14,
color="Black"
)
)
fig.update_layout(scene = dict(
xaxis_title=r'x',
yaxis_title=r'y',
zaxis_title=r'DNS: Truth'))
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(scene2_xaxis_showticklabels=False,
scene2_yaxis_showticklabels=False,
scene2_zaxis_showticklabels=False)
fig.update_layout(scene3_xaxis_showticklabels=False,
scene3_yaxis_showticklabels=False,
scene3_zaxis_showticklabels=False)
fig.update_layout(scene4_xaxis_showticklabels=False,
scene4_yaxis_showticklabels=False,
scene4_zaxis_showticklabels=False)
fig.update_layout(scene5_xaxis_showticklabels=False,
scene5_yaxis_showticklabels=False,
scene5_zaxis_showticklabels=False)
fig.update_annotations(font_size=28)
fig.write_image(f"volume_figures/vf_u_" + method + ".png")
# plot_volume_over_t(0.2, vf_u1, vf_u2, vf_u3, vf_u4, vf_u5)
def plot_volume_over_all_t(op, f11, f12, f13, f14, f15,
f21, f22, f23, f24, f25,
f31, f32, f33, f34, f35,
f41, f42, f43, f44, f45,
f51, f52, f53, f54, f55,
f61, f62, f63, f64, f65,
f71, f72, f73, f74, f75):
win = 0.2
fig = make_subplots(
rows=7, cols=5,
column_widths=[0.4, 0.4, 0.4, 0.4, 0.4],
specs=[[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}],
[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}],
[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}],
[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}],
[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}],
[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}],
[{'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}, {'type': 'volume'}],])#,
# subplot_titles=("t = 0(s)", "t = 2.8(s)", "t = 5.6(s)", "t = 11.2(s)", "t = 16.8(s)"))
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f11.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f12.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f13.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f3),
# isomax=np.max(f3),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=1, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f14.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=1, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f15.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=1, col=5)
#======================f2*====================================
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f21.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=2, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f22.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=2, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f23.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=2, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f24.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=2, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f25.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=2, col=5)
#======================f3*====================================
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f31.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=3, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f32.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=3, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f33.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=3, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f34.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=3, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f35.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=3, col=5)
#======================f4*====================================
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f41.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=4, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f42.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=4, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f43.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=4, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f44.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=4, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f45.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=4, col=5)
#======================f5*====================================
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f51.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=5, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f52.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=5, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f53.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=5, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f54.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=5, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f55.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=5, col=5)
#======================f6*====================================
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f61.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=6, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f62.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=6, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f63.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=6, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f64.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=6, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f65.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=6, col=5)
#======================f7*====================================
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f71.flatten(),
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=7, col=1)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f72.flatten(),
# isomin=-win,
# isomax=win,
isomin=-0.18,
isomax=0.18,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=7, col=2)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f73.flatten(),
isomin=-win,
isomax=win,
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='jet',
), row=7, col=3)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f74.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f4),
# isomax=np.max(f4),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
# colorscale='RdBu',
colorscale='jet'
), row=7, col=4)
fig.add_trace(go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=f75.flatten(),
isomin=-win,
isomax=win,
# isomin=np.min(f5),
# isomax=np.max(f5),
opacity=op, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
colorscale='jet',
), row=7, col=5)
h = 500
fig.update_layout(
# template="plotly_dark",
autosize=False,
width=5*h,
height=7*h,
margin=dict(
l=20,
r=20,
b=20,
t=28,
pad=26
),
font=dict(
size=14,
color="Black"
)
)
fig.update_layout(scene = dict(
xaxis_title=r'x',
yaxis_title=r'y',
zaxis_title=r'Method'))
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(scene2_xaxis_showticklabels=False,
scene2_yaxis_showticklabels=False,
scene2_zaxis_showticklabels=False)
fig.update_layout(scene3_xaxis_showticklabels=False,
scene3_yaxis_showticklabels=False,
scene3_zaxis_showticklabels=False)
fig.update_layout(scene4_xaxis_showticklabels=False,
scene4_yaxis_showticklabels=False,
scene4_zaxis_showticklabels=False)
fig.update_layout(scene5_xaxis_showticklabels=False,
scene5_yaxis_showticklabels=False,
scene5_zaxis_showticklabels=False)
fig.update_annotations(font_size=28)
fig.write_image(f"volume_figures/vf_u_all.png")
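# Note: the call below reuses f11 (the DNS snapshot at t = 1) for the first column of every row,
# since the t = 1 snapshots of the other models are not loaded above (those np.load calls are
# commented out).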
plot_volume_over_all_t(0.2, f11, f12, f13, f14, f15,
f11, f22, f23, f24, f25,
f11, f32, f33, f34, f35,
f11, f42, f43, f44, f45,
f11, f52, f53, f54, f55,
f11, f62, f63, f64, f65,
f11, f72, f73, f74, f75)
def plot_volume_field(field_data):
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=field_data.flatten(),
# isomin=np.min(field_data),
# isomax=np.max(field_data),
isomin=-0.18,
isomax=0.18,
opacity=0.3, # needs to be small to see through all surfaces
surface_count=60, # needs to be a large number for good volume rendering
showscale=False,
colorscale='RdBu'
))
fig.update_layout(scene_xaxis_showticklabels=False,
scene_yaxis_showticklabels=False,
scene_zaxis_showticklabels=False)
fig.update_layout(width=550, margin=dict(r=10, l=10, b=10, t=10))
fig.write_image(f"volume_figures/vf_u_t{t}_" + method + ".png")
# fig.show()
# plot_volume_field(vf_u1)
# def plot_volume_over_t(field1, field2):
# fig = make_subplots(
# rows=2, cols=2,
# specs=[[{'type': 'volume'}, {'type': 'volume'}],
# [{'type': 'volume'}, {'type': 'volume'}]])
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field1.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# showscale=False,
# colorscale='RdBu'
# )), row=1, col=1)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=1, col=2)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=2, col=1)
# fig.append_trace(go.Figure(data=go.Volume(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field2.flatten(),
# isomin=-0.18,
# isomax=0.18,
# opacity=0.3, # needs to be small to see through all surfaces
# surface_count=60, # needs to be a large number for good volume rendering
# colorscale='RdBu'
# )), row=2, col=2)
# # fig.add_trace(go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=field1.flatten(),
# # isomin=-0.18,
# # isomax=0.18,
# # opacity=0.3, # needs to be small to see through all surfaces
# # surface_count=60, # needs to be a large number for good volume rendering
# # showscale=False,
# # colorscale='RdBu')),
# # row=1, col=1)
# # fig.add_trace(go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=field2.flatten(),
# # isomin=-0.18,
# # isomax=0.18,
# # opacity=0.3, # needs to be small to see through all surfaces
# # surface_count=60, # needs to be a large number for good volume rendering
# # colorscale='RdBu')),
# # row=1, col=2)
# fig.update_layout(height=600, width=1200, title_text="Side By Side Subplots")
# fig.write_image(f"volume_figures/vf_u_" + method + ".png")
# #this does not work
# # plot_volume_over_t(vf_u1, vf_u2)
# def plot_iso_surface(field_data):
# fig = go.Figure(data=go.Isosurface(
# x=X.flatten(),
# y=Y.flatten(),
# z=Z.flatten(),
# value=field_data.flatten(),
# isomin=-0.17,
# isomax=0.17,
# colorscale='RdBu',
# caps=dict(x_show=False, y_show=False)
# ))
# fig.show()
# # plot_iso_surface(vf_u)
# # fig = go.Figure(data=go.Volume(
# # x=X.flatten(),
# # y=Y.flatten(),
# # z=Z.flatten(),
# # value=values.flatten(),
# # isomin=-0.1,
# # isomax=0.9,
# # opacity=1.0, # needs to be small to see through all surfaces
# # surface_count=30, # needs to be a large number for good volume rendering
# # colorscale='RdBu'
# # ))
# # fig.update_layout(scene_xaxis_showticklabels=False,
# # scene_yaxis_showticklabels=False,
# # scene_zaxis_showticklabels=False)
# # fig.show()
| 31,012 | 30.840862 | 117 | py |
imsat | imsat-master/calculate_distance.py | import argparse
import sys
import cPickle as pickle
import datetime, math, sys, time
from sklearn.datasets import fetch_mldata
import numpy as np
import cupy as cp
import chainer
import chainer.functions as F
import chainer.links as L
from chainer import FunctionSet, Variable, optimizers, cuda, serializers
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, help = 'which gpu device to use', default = 1)
parser.add_argument('--dataset', type=str, default = 'mnist')
args = parser.parse_args()
chainer.cuda.get_device(args.gpu).use()
if args.dataset == 'mnist':
sys.path.append('mnist')
from load_mnist import *
whole = load_mnist_whole(PATH = 'mnist/', scale=1.0/128.0, shift=-1.0)
else:
print 'The dataset is not supported.'
exit(-1)
data = cuda.to_gpu(whole.data)
num_data = [10]
print num_data
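# For every sample this script computes the Euclidean distance to its 10th nearest neighbour
# (num_data = [10]) and stores the result in <dataset>/10th_neighbor.txt; imsat_cluster.py and
# imsat_hash.py load that file and use it (scaled by --prop_eps) as the per-sample perturbation
# radius for VAT.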
dist_accum = 0
dist_list = [[] for i in range(len(num_data))]
for i in range(len(data)):
if i % 1000 == 0:
print i
dist = cp.sqrt(cp.sum((data - data[i])**2, axis = 1))
dist[i] = 1000
sorted_dist = np.sort(cuda.to_cpu(dist))
for j in range(len(num_data)):
dist_list[j].append(sorted_dist[num_data[j]])
for i in range(len(num_data)):
np.savetxt(args.dataset + '/' + str(num_data[i]) + 'th_neighbor.txt', np.array(dist_list[i]))
| 1,293 | 23.415094 | 94 | py |
imsat | imsat-master/imsat_hash.py | import argparse, sys
import numpy as np
import chainer
import chainer.functions as F
from chainer import FunctionSet, Variable, optimizers, cuda, serializers
from sklearn import metrics
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, help='which gpu device to use', default=0)
parser.add_argument('--lam', type=float, help='trade-off parameter for mutual information and smooth regularization',
default=0.1)
parser.add_argument('--prop_eps', type=float, help='epsilon', default=0.25)
parser.add_argument('--n_bit', type=int, help='number of bits', default=16)
parser.add_argument('--hidden_list', type=str, help='hidden size list', default='400-400')
parser.add_argument('--seed', type=int, help='seed for random variable', default=0)
parser.add_argument('--dataset', type=str, default='mnist')
args = parser.parse_args()
lam = args.lam
n_bit = args.n_bit
N_query = 1000
args = parser.parse_args()
if args.dataset == 'mnist':
sys.path.append('mnist')
from load_mnist import *
whole = load_mnist_whole(PATH='mnist/', scale=1.0 / 128.0, shift=-1.0)
else:
print 'The dataset is not supported.'
raise NotImplementedError
n_class = np.max(whole.label) + 1
print n_class
dim = whole.data.shape[1]
data = whole.data
target = whole.label
np.random.seed(args.seed)
perm = np.random.permutation(len(target))
cnt_query = [0] * 10
ind_query = []
ind_gallary = []
for i in range(len(target)):
l = target[perm[i]]
if cnt_query[l] < 100:
ind_query.append(perm[i])
cnt_query[l] += 1
else:
ind_gallary.append(perm[i])
x_query = data[ind_query]
x_gallary = data[ind_gallary]
y_query = target[ind_query]
y_gallary = target[ind_gallary]
print x_query.shape
print x_gallary.shape
query = Data(x_query, y_query)
gallary = Data(x_gallary, y_gallary)
print 'use gpu'
chainer.cuda.get_device(args.gpu).use()
print 'query data: ' + str(N_query)
xp = cuda.cupy
hidden_list = map(int, args.hidden_list.split('-'))
def call_bn(bn, x, test=False, update_batch_stats=True):
if not update_batch_stats:
return F.batch_normalization(x, bn.gamma, bn.beta, use_cudnn=False)
if test:
return F.fixed_batch_normalization(x, bn.gamma, bn.beta, bn.avg_mean, bn.avg_var, use_cudnn=False)
else:
return bn(x)
def distance(y0, y1):
p0 = F.sigmoid(y0)
p1 = F.sigmoid(y1)
return F.sum(p0 * F.log((p0 + 1e-8) / (p1 + 1e-8)) + (1 - p0) * F.log((1 - p0 + 1e-8) / (1 - p1 + 1e-8))) / \
p0.data.shape[0]
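# distance() above is the KL divergence between the per-bit Bernoulli distributions defined by the
# sigmoid outputs, summed over bits and averaged over the batch. vat() below implements the virtual
# adversarial training (VAT) penalty: starting from a random unit vector d, one power-iteration
# step (Ip=1) through that KL objective approximates the input direction that perturbs the
# predictions most; the returned loss is the KL between the clean prediction and the prediction at
# x + eps*d, where eps is prop_eps times the sample's 10th-nearest-neighbour distance (eps_list).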
def vat(forward, distance, x, eps_list, xi=10, Ip=1):
y = forward(Variable(x))
y.unchain_backward()
# calc adversarial direction
d = xp.random.normal(size=x.shape, dtype=np.float32)
d = d / xp.sqrt(xp.sum(d ** 2, axis=1)).reshape((x.shape[0], 1))
for ip in range(Ip):
d_var = Variable(d.astype(np.float32))
y2 = forward(x + xi * d_var)
kl_loss = distance(y, y2)
kl_loss.backward()
d = d_var.grad
d = d / xp.sqrt(xp.sum(d ** 2, axis=1)).reshape((x.shape[0], 1))
d_var = Variable(d.astype(np.float32))
eps = args.prop_eps * eps_list
y2 = forward(x + F.transpose(eps * F.transpose(d_var)))
return distance(y, y2)
class Encoder(chainer.Chain):
def __init__(self):
super(Encoder, self).__init__(
l1=F.Linear(dim, hidden_list[0], wscale=0.1),
l2=F.Linear(hidden_list[0], hidden_list[1], wscale=0.1),
l3=F.Linear(hidden_list[1], n_bit, wscale=0.0001),
bn1=F.BatchNormalization(hidden_list[0]),
bn2=F.BatchNormalization(hidden_list[1])
)
def __call__(self, x, test=False, update_batch_stats=True):
h = F.relu(call_bn(self.bn1, self.l1(x), test=test, update_batch_stats=update_batch_stats))
h = F.relu(call_bn(self.bn2, self.l2(h), test=test, update_batch_stats=update_batch_stats))
y = self.l3(h)
return y
def enc_aux_noubs(x):
return enc(x, test=False, update_batch_stats=False)
def enc_test(x):
return enc(x, test=True)
def loss_unlabeled(x, eps_list):
L = vat(enc_aux_noubs, distance, x.data, eps_list)
return L
def loss_test(x_query, t_query, x_gallary, t_gallary):
query_hash = F.sigmoid(enc(x_query, test=True)).data > 0.5
gallary_hash = F.sigmoid(enc(x_gallary, test=True)).data > 0.5
t_query = cuda.to_cpu(t_query.data)
t_gallary = cuda.to_cpu(t_gallary.data)
withinN_precision_label = 0
withinR_precision_label = 0
mAP = 0
for i in range(N_query):
hamming_distance = cuda.to_cpu(xp.sum((1 - query_hash[i]) == gallary_hash, axis=1))
mAP += metrics.average_precision_score(t_gallary == t_query[i], 1. / (1 + hamming_distance))
nearestN_index = np.argsort(hamming_distance)[:500]
withinN_precision_label += float(np.sum(t_gallary[nearestN_index] == t_query[i])) / 500
withinR_label = t_gallary[hamming_distance < 3]
num_withinR = len(withinR_label)
if not num_withinR == 0:
withinR_precision_label += np.sum(withinR_label == t_query[i]) / float(num_withinR)
return mAP / N_query, withinN_precision_label / N_query, withinR_precision_label / N_query
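# Information term for the binary codes: p holds the bit activation probabilities. cond_ent is the
# sum of per-bit conditional entropies H(b|x) averaged over the batch, marg_ent the sum of marginal
# bit entropies H(b), and pairwise_mi accumulates the mutual information of every bit pair, computed
# from the 2x2 joint distribution assembled by the concatenated products below. The training loop
# minimises cond_ent - marg_ent + pairwise_mi, encouraging confident, balanced and mutually
# independent bits.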
def loss_information(enc, x):
p_logit = enc(x)
p = F.sigmoid(p_logit)
p_ave = F.sum(p, axis=0) / x.data.shape[0]
cond_ent = F.sum(- p * F.log(p + 1e-8) - (1 - p) * F.log(1 - p + 1e-8)) / p.data.shape[0]
marg_ent = F.sum(- p_ave * F.log(p_ave + 1e-8) - (1 - p_ave) * F.log(1 - p_ave + 1e-8))
p_ave = F.reshape(p_ave, (1, len(p_ave.data)))
p_ave_separated = F.separate(p_ave, axis=1)
p_separated = F.separate(F.expand_dims(p, axis=2), axis=1)
p_ave_list_i = []
p_ave_list_j = []
p_list_i = []
p_list_j = []
for i in range(n_bit - 1):
p_ave_list_i.extend(list(p_ave_separated[i + 1:]))
p_list_i.extend(list(p_separated[i + 1:]))
p_ave_list_j.extend([p_ave_separated[i] for n in range(n_bit - i - 1)])
p_list_j.extend([p_separated[i] for n in range(n_bit - i - 1)])
p_ave_pair_i = F.expand_dims(F.concat(tuple(p_ave_list_i), axis=0), axis=1)
p_ave_pair_j = F.expand_dims(F.concat(tuple(p_ave_list_j), axis=0), axis=1)
p_pair_i = F.expand_dims(F.concat(tuple(p_list_i), axis=1), axis=2)
p_pair_j = F.expand_dims(F.concat(tuple(p_list_j), axis=1), axis=2)
p_pair_stacked_i = F.concat((p_pair_i, 1 - p_pair_i, p_pair_i, 1 - p_pair_i), axis=2)
p_pair_stacked_j = F.concat((p_pair_j, p_pair_j, 1 - p_pair_j, 1 - p_pair_j), axis=2)
p_ave_pair_stacked_i = F.concat((p_ave_pair_i, 1 - p_ave_pair_i, p_ave_pair_i, 1 - p_ave_pair_i), axis=1)
p_ave_pair_stacked_j = F.concat((p_ave_pair_j, p_ave_pair_j, 1 - p_ave_pair_j, 1 - p_ave_pair_j), axis=1)
p_product = F.sum(p_pair_stacked_i * p_pair_stacked_j, axis=0) / len(p.data)
p_ave_product = p_ave_pair_stacked_i * p_ave_pair_stacked_j
pairwise_mi = 2 * F.sum(p_product * F.log((p_product + 1e-8) / (p_ave_product + 1e-8)))
return cond_ent, marg_ent, pairwise_mi
enc = Encoder()
enc.to_gpu()
o_enc = optimizers.Adam(alpha=0.002, beta1=0.9)
o_enc.setup(enc)
batchsize = 250
N_gallary = len(gallary.data)
nearest_dist = np.loadtxt(args.dataset + '/10th_neighbor.txt').astype(np.float32)
x_query, t_query = cuda.to_gpu(query.data), cuda.to_gpu(query.label)
x_gallary, t_gallary = cuda.to_gpu(gallary.data), cuda.to_gpu(gallary.label)
n_epoch = 50
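# Training loop: each iteration draws a random batch from the whole (unlabelled) set, evaluates the
# information term and the VAT loss (with per-sample radii nearest_dist[ind]) and minimises
# loss_ul + lam * loss_info. loss_test reports retrieval quality on the query/gallery split:
# mAP, precision of the 500 nearest codes, and precision within Hamming radius 2.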
for epoch in range(n_epoch):
print epoch
sum_cond_ent = 0
sum_marg_ent = 0
sum_pairwise_mi = 0
sum_vat = 0
for it in range(N_gallary / batchsize):
x, _, ind = whole.get(batchsize, need_index=True)
cond_ent, marg_ent, pairwise_mi = loss_information(enc, Variable(x))
sum_cond_ent += cond_ent.data
sum_marg_ent += marg_ent.data
sum_pairwise_mi += pairwise_mi.data
loss_info = cond_ent - marg_ent + pairwise_mi
loss_ul = loss_unlabeled(Variable(x), cuda.to_gpu(nearest_dist[ind]))
sum_vat += loss_ul.data
o_enc.zero_grads()
(loss_ul + lam * loss_info).backward()
o_enc.update()
loss_ul.unchain_backward()
loss_info.unchain_backward()
condent = sum_cond_ent / (N_gallary / batchsize)
margent = sum_marg_ent / (N_gallary / batchsize)
pairwise = sum_pairwise_mi / (N_gallary / batchsize)
print 'conditional entropy: ' + str(condent)
print 'marginal entropy: ' + str(margent)
print 'pairwise mi: ' + str(pairwise)
print 'vat loss: ' + str(sum_vat / (N_gallary / batchsize))
sys.stdout.flush()
mAP, withNpreclabel, withRpreclabel = loss_test(Variable(x_query, volatile=True), Variable(t_query, volatile=True),
Variable(x_gallary, volatile=True), Variable(t_gallary, volatile=True))
print 'mAP: ', mAP
print 'withNpreclabel: ', withNpreclabel
print 'withRpreclabel: ', withRpreclabel
| 8,948 | 32.267658 | 119 | py |
imsat | imsat-master/imsat_cluster.py | import argparse, sys
import numpy as np
import chainer
import chainer.functions as F
from chainer import FunctionSet, Variable, optimizers, cuda, serializers
from munkres import Munkres, print_matrix
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, help='which gpu device to use', default=1)
parser.add_argument('--lam', type=float, help='trade-off parameter for mutual information and smooth regularization',
default=0.1)
parser.add_argument('--mu', type=float, help='trade-off parameter for entropy minimization and entropy maximization',
default=4)
parser.add_argument('--prop_eps', type=float, help='epsilon', default=0.25)
parser.add_argument('--dataset', type=str, help='which dataset to use', default='mnist')
parser.add_argument('--hidden_list', type=str, help='hidden size list', default='1200-1200')
args = parser.parse_args()
if args.dataset == 'mnist':
sys.path.append('mnist')
from load_mnist import *
whole = load_mnist_whole(PATH='mnist/', scale=1.0 / 128.0, shift=-1.0)
else:
print 'The dataset is not supported.'
raise NotImplementedError
n_data = len(whole.data)
n_class = np.max(whole.label) + 1
print n_class
dim = whole.data.shape[1]
print 'use gpu'
chainer.cuda.get_device(args.gpu).use()
xp = cuda.cupy
hidden_list = map(int, args.hidden_list.split('-'))
def call_bn(bn, x, test=False, update_batch_stats=True):
if not update_batch_stats:
return F.batch_normalization(x, bn.gamma, bn.beta, use_cudnn=False)
if test:
return F.fixed_batch_normalization(x, bn.gamma, bn.beta, bn.avg_mean, bn.avg_var, use_cudnn=False)
else:
return bn(x)
def kl(p, q):
return F.sum(p * F.log((p + 1e-8) / (q + 1e-8))) / float(len(p.data))
def distance(y0, y1):
return kl(F.softmax(y0), F.softmax(y1))
def entropy(p):
if p.data.ndim == 2:
return - F.sum(p * F.log(p + 1e-8)) / float(len(p.data))
elif p.data.ndim == 1:
return - F.sum(p * F.log(p + 1e-8))
else:
raise NotImplementedError
def vat(forward, distance, x, eps_list, xi=10, Ip=1):
y = forward(Variable(x))
y.unchain_backward()
d = xp.random.normal(size=x.shape, dtype=np.float32)
d = d / xp.sqrt(xp.sum(d ** 2, axis=1)).reshape((x.shape[0], 1))
for ip in range(Ip):
d_var = Variable(d.astype(np.float32))
y2 = forward(x + xi * d_var)
kl_loss = distance(y, y2)
kl_loss.backward()
d = d_var.grad
d = d / xp.sqrt(xp.sum(d ** 2, axis=1)).reshape((x.shape[0], 1))
d_var = Variable(d.astype(np.float32))
eps = args.prop_eps * eps_list
y2 = forward(x + F.transpose(eps * F.transpose(d_var)))
return distance(y, y2)
class Encoder(chainer.Chain):
def __init__(self):
super(Encoder, self).__init__(
l1=F.Linear(dim, hidden_list[0], wscale=0.1),
l2=F.Linear(hidden_list[0], hidden_list[1], wscale=0.1),
l3=F.Linear(hidden_list[1], n_class, wscale=0.0001),
bn1=F.BatchNormalization(hidden_list[0]),
bn2=F.BatchNormalization(hidden_list[1])
)
def __call__(self, x, test=False, update_batch_stats=True):
h = F.relu(call_bn(self.bn1, self.l1(x), test=test, update_batch_stats=update_batch_stats))
h = F.relu(call_bn(self.bn2, self.l2(h), test=test, update_batch_stats=update_batch_stats))
y = self.l3(h)
return y
def enc_aux_noubs(x):
return enc(x, test=False, update_batch_stats=False)
def loss_unlabeled(x, eps_list):
L = vat(enc_aux_noubs, distance, x.data, eps_list)
return L
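# loss_test computes the unsupervised clustering accuracy: predicted cluster indices are matched to
# the ground-truth labels with the Hungarian algorithm (Munkres) on the negated contingency matrix,
# and accuracy is taken under the best one-to-one assignment. The entropy of the predicted cluster
# marginal is returned as well.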
def loss_test(x, t):
prob = F.softmax(enc(x, test=True)).data
pmarg = cuda.to_cpu(xp.sum(prob, axis=0) / len(prob))
ent = np.sum(-pmarg * np.log(pmarg + 1e-8))
pred = cuda.to_cpu(np.argmax(prob, axis=1))
tt = cuda.to_cpu(t.data)
m = Munkres()
mat = np.zeros((n_class, n_class))
for i in range(n_class):
for j in range(n_class):
mat[i][j] = np.sum(np.logical_and(pred == i, tt == j))
indexes = m.compute(-mat)
corresp = []
for i in range(n_class):
corresp.append(indexes[i][1])
pred_corresp = [corresp[int(predicted)] for predicted in pred]
acc = np.sum(pred_corresp == tt) / float(len(tt))
return acc, ent
def loss_equal(enc, x):
p_logit = enc(x)
p = F.softmax(p_logit)
p_ave = F.sum(p, axis=0) / x.data.shape[0]
ent = entropy(p)
return ent, -F.sum(p_ave * F.log(p_ave + 1e-8))
enc = Encoder()
enc.to_gpu()
o_enc = optimizers.Adam(alpha=0.002, beta1=0.9)
o_enc.setup(enc)
batchsize_ul = 250
n_epoch = 50
nearest_dist = np.loadtxt(args.dataset + '/10th_neighbor.txt').astype(np.float32)
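# RIM-style IMSAT objective used in the loop below: loss_eq1 is the conditional entropy of the
# cluster posterior, loss_eq2 the marginal (class-balance) entropy, and the minimised total is
# loss_ul + lam * (loss_eq1 - mu * loss_eq2): confident per-sample predictions, an approximately
# uniform cluster marginal, and VAT smoothness with per-sample radii read from
# <dataset>/10th_neighbor.txt.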
for epoch in range(n_epoch):
print epoch
sum_loss_entmax = 0
sum_loss_entmin = 0
vatt = 0
for it in range(n_data / batchsize_ul):
x_u, _, ind = whole.get(batchsize_ul, need_index=True)
loss_eq1, loss_eq2 = loss_equal(enc, Variable(x_u))
loss_eq = loss_eq1 - args.mu * loss_eq2
sum_loss_entmin += loss_eq1.data
sum_loss_entmax += loss_eq2.data
loss_ul = loss_unlabeled(Variable(x_u), cuda.to_gpu(nearest_dist[ind]))
o_enc.zero_grads()
(loss_ul + args.lam * loss_eq).backward()
o_enc.update()
vatt += loss_ul.data
loss_ul.unchain_backward()
print 'entmax ', sum_loss_entmax / (n_data / batchsize_ul)
print 'entmin ', sum_loss_entmin / (n_data / batchsize_ul)
print 'vatt ', vatt / (n_data / batchsize_ul)
x_ul, t_ul = cuda.to_gpu(whole.data), cuda.to_gpu(whole.label)
acc, ment = loss_test(Variable(x_ul, volatile=True), Variable(t_ul, volatile=True))
print "ment: ", ment
print "accuracy: ", acc
sys.stdout.flush()
| 5,794 | 29.824468 | 117 | py |
imsat | imsat-master/mnist/load_mnist.py | import sys
import cPickle as pickle
import datetime, math, sys, time
from sklearn.datasets import fetch_mldata
import numpy as np
from chainer import cuda
class Data:
def __init__(self, data, label):
self.data = data
self.label = label
self.index = np.arange(len(data))
def get_index_data(self, index_list):
return cuda.to_gpu(self.data[index_list])
def get(self, n, need_index = False):
ind = np.random.permutation(self.data.shape[0])
if need_index:
return cuda.to_gpu(self.data[ind[:n],:].astype(np.float32)), cuda.to_gpu(self.label[ind[:n]].astype(np.int32)), self.index[ind[:n]].astype(np.int32)
else:
return cuda.to_gpu(self.data[ind[:n],:].astype(np.float32)), cuda.to_gpu(self.label[ind[:n]].astype(np.int32))
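# Data.get draws a random batch on the GPU and can also return the sample indices, which the IMSAT
# scripts use to look up the per-sample VAT radii.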
def load_mnist_whole(scale, shift, PATH = '.'):
print 'fetch MNIST dataset'
mnist = fetch_mldata('MNIST original', data_home=PATH)
mnist.data = mnist.data.astype(np.float32)*scale + shift
mnist.target = mnist.target.astype(np.int32)
whole = Data(mnist.data, mnist.target)
print "load mnist done", whole.data.shape
return whole
| 1,173 | 30.72973 | 160 | py |
TCDF | TCDF-master/runTCDF.py | import TCDF
import argparse
import torch
import pandas as pd
import numpy as np
import networkx as nx
import pylab
import copy
import matplotlib.pyplot as plt
import os
import sys
# os.chdir(os.path.dirname(sys.argv[0])) #uncomment this line to run in VSCode
def check_positive(value):
"""Checks if argument is positive integer (larger than zero)."""
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s should be positive" % value)
return ivalue
def check_zero_or_positive(value):
"""Checks if argument is positive integer (larger than or equal to zero)."""
ivalue = int(value)
if ivalue < 0:
raise argparse.ArgumentTypeError("%s should be positive" % value)
return ivalue
class StoreDictKeyPair(argparse.Action):
"""Creates dictionary containing datasets as keys and ground truth files as values."""
def __call__(self, parser, namespace, values, option_string=None):
my_dict = {}
for kv in values.split(","):
k,v = kv.split("=")
my_dict[k] = v
setattr(namespace, self.dest, my_dict)
def getextendeddelays(gtfile, columns):
"""Collects the total delay of indirect causal relationships."""
gtdata = pd.read_csv(gtfile, header=None)
readgt=dict()
effects = gtdata[1]
causes = gtdata[0]
delays = gtdata[2]
gtnrrelations = 0
pairdelays = dict()
for k in range(len(columns)):
readgt[k]=[]
for i in range(len(effects)):
key=effects[i]
value=causes[i]
readgt[key].append(value)
pairdelays[(key, value)]=delays[i]
gtnrrelations+=1
g = nx.DiGraph()
g.add_nodes_from(readgt.keys())
for e in readgt:
cs = readgt[e]
for c in cs:
g.add_edge(c, e)
extendedreadgt = copy.deepcopy(readgt)
for c1 in range(len(columns)):
for c2 in range(len(columns)):
paths = list(nx.all_simple_paths(g, c1, c2, cutoff=2)) #indirect path max length 3, no cycles
if len(paths)>0:
for path in paths:
for p in path[:-1]:
if p not in extendedreadgt[path[-1]]:
extendedreadgt[path[-1]].append(p)
extendedgtdelays = dict()
for effect in extendedreadgt:
causes = extendedreadgt[effect]
for cause in causes:
if (effect, cause) in pairdelays:
delay = pairdelays[(effect, cause)]
extendedgtdelays[(effect, cause)]=[delay]
else:
#find extended delay
paths = list(nx.all_simple_paths(g, cause, effect, cutoff=2)) #indirect path max length 3, no cycles
extendedgtdelays[(effect, cause)]=[]
for p in paths:
delay=0
for i in range(len(p)-1):
delay+=pairdelays[(p[i+1], p[i])]
extendedgtdelays[(effect, cause)].append(delay)
return extendedgtdelays, readgt, extendedreadgt
def evaluate(gtfile, validatedcauses, columns):
"""Evaluates the results of TCDF by comparing it to the ground truth graph, and calculating precision, recall and F1-score. F1'-score, precision' and recall' include indirect causal relationships."""
extendedgtdelays, readgt, extendedreadgt = getextendeddelays(gtfile, columns)
FP=0
FPdirect=0
TPdirect=0
TP=0
FN=0
FPs = []
FPsdirect = []
TPsdirect = []
TPs = []
FNs = []
for key in readgt:
for v in validatedcauses[key]:
if v not in extendedreadgt[key]:
FP+=1
FPs.append((key,v))
else:
TP+=1
TPs.append((key,v))
if v not in readgt[key]:
FPdirect+=1
FPsdirect.append((key,v))
else:
TPdirect+=1
TPsdirect.append((key,v))
for v in readgt[key]:
if v not in validatedcauses[key]:
FN+=1
FNs.append((key, v))
print("Total False Positives': ", FP)
print("Total True Positives': ", TP)
print("Total False Negatives: ", FN)
print("Total Direct False Positives: ", FPdirect)
print("Total Direct True Positives: ", TPdirect)
print("TPs': ", TPs)
print("FPs': ", FPs)
print("TPs direct: ", TPsdirect)
print("FPs direct: ", FPsdirect)
print("FNs: ", FNs)
precision = recall = 0.
if float(TP+FP)>0:
precision = TP / float(TP+FP)
print("Precision': ", precision)
if float(TP + FN)>0:
recall = TP / float(TP + FN)
print("Recall': ", recall)
if (precision + recall) > 0:
F1 = 2 * (precision * recall) / (precision + recall)
else:
F1 = 0.
print("F1' score: ", F1,"(includes direct and indirect causal relationships)")
precision = recall = 0.
if float(TPdirect+FPdirect)>0:
precision = TPdirect / float(TPdirect+FPdirect)
print("Precision: ", precision)
if float(TPdirect + FN)>0:
recall = TPdirect / float(TPdirect + FN)
print("Recall: ", recall)
if (precision + recall) > 0:
F1direct = 2 * (precision * recall) / (precision + recall)
else:
F1direct = 0.
print("F1 score: ", F1direct,"(includes only direct causal relationships)")
return FP, TP, FPdirect, TPdirect, FN, FPs, FPsdirect, TPs, TPsdirect, FNs, F1, F1direct
def evaluatedelay(extendedgtdelays, alldelays, TPs, receptivefield):
"""Evaluates the delay discovery of TCDF by comparing the discovered time delays with the ground truth."""
zeros = 0
total = 0.
for i in range(len(TPs)):
tp=TPs[i]
discovereddelay = alldelays[tp]
gtdelays = extendedgtdelays[tp]
for d in gtdelays:
if d <= receptivefield:
total+=1.
error = d - discovereddelay
if error == 0:
zeros+=1
else:
                    pass
if zeros==0:
return 0.
else:
return zeros/float(total)
def runTCDF(datafile):
"""Loops through all variables in a dataset and return the discovered causes, time delays, losses, attention scores and variable names."""
df_data = pd.read_csv(datafile)
allcauses = dict()
alldelays = dict()
allreallosses=dict()
allscores=dict()
columns = list(df_data)
for c in columns:
idx = df_data.columns.get_loc(c)
causes, causeswithdelay, realloss, scores = TCDF.findcauses(c, cuda=cuda, epochs=nrepochs,
kernel_size=kernel_size, layers=levels, log_interval=loginterval,
lr=learningrate, optimizername=optimizername,
seed=seed, dilation_c=dilation_c, significance=significance, file=datafile)
allscores[idx]=scores
allcauses[idx]=causes
alldelays.update(causeswithdelay)
allreallosses[idx]=realloss
return allcauses, alldelays, allreallosses, allscores, columns
def plotgraph(stringdatafile,alldelays,columns):
"""Plots a temporal causal graph showing all discovered causal relationships annotated with the time delay between cause and effect."""
G = nx.DiGraph()
for c in columns:
G.add_node(c)
for pair in alldelays:
p1,p2 = pair
nodepair = (columns[p2], columns[p1])
G.add_edges_from([nodepair],weight=alldelays[pair])
edge_labels=dict([((u,v,),d['weight'])
for u,v,d in G.edges(data=True)])
pos=nx.circular_layout(G)
nx.draw_networkx_edge_labels(G,pos,edge_labels=edge_labels)
nx.draw(G,pos, node_color = 'white', edge_color='black',node_size=1000,with_labels = True)
ax = plt.gca()
ax.collections[0].set_edgecolor("#000000")
pylab.show()
def main(datafiles, evaluation):
if evaluation:
totalF1direct = [] #contains F1-scores of all datasets
totalF1 = [] #contains F1'-scores of all datasets
receptivefield=1
for l in range(0, levels):
receptivefield+=(kernel_size-1) * dilation_c**(l)
for datafile in datafiles.keys():
stringdatafile = str(datafile)
if '/' in stringdatafile:
stringdatafile = str(datafile).rsplit('/', 1)[1]
print("\n Dataset: ", stringdatafile)
# run TCDF
allcauses, alldelays, allreallosses, allscores, columns = runTCDF(datafile) #results of TCDF containing indices of causes and effects
print("\n===================Results for", stringdatafile,"==================================")
for pair in alldelays:
print(columns[pair[1]], "causes", columns[pair[0]],"with a delay of",alldelays[pair],"time steps.")
if evaluation:
# evaluate TCDF by comparing discovered causes with ground truth
print("\n===================Evaluation for", stringdatafile,"===============================")
FP, TP, FPdirect, TPdirect, FN, FPs, FPsdirect, TPs, TPsdirect, FNs, F1, F1direct = evaluate(datafiles[datafile], allcauses, columns)
totalF1.append(F1)
totalF1direct.append(F1direct)
# evaluate delay discovery
extendeddelays, readgt, extendedreadgt = getextendeddelays(datafiles[datafile], columns)
percentagecorrect = evaluatedelay(extendeddelays, alldelays, TPs, receptivefield)*100
print("Percentage of delays that are correctly discovered: ", percentagecorrect,"%")
print("==================================================================================")
if args.plot:
plotgraph(stringdatafile, alldelays, columns)
# In case of multiple datasets, calculate average F1-score over all datasets and standard deviation
if len(datafiles.keys())>1 and evaluation:
print("\nOverall Evaluation: \n")
print("F1' scores: ")
for f in totalF1:
print(f)
print("Average F1': ", np.mean(totalF1))
print("Standard Deviation F1': ", np.std(totalF1),"\n")
print("F1 scores: ")
for f in totalF1direct:
print(f)
print("Average F1: ", np.mean(totalF1direct))
print("Standard Deviation F1: ", np.std(totalF1direct))
parser = argparse.ArgumentParser(description='TCDF: Temporal Causal Discovery Framework')
parser.add_argument('--cuda', action="store_true", default=False, help='Use CUDA (GPU) (default: False)')
parser.add_argument('--epochs', type=check_positive, default=1000, help='Number of epochs (default: 1000)')
parser.add_argument('--kernel_size', type=check_positive, default=4, help='Size of kernel, i.e. window size. Maximum delay to be found is kernel size - 1. Recommended to be equal to dilation coefficient (default: 4)')
parser.add_argument('--hidden_layers', type=check_zero_or_positive, default=0, help='Number of hidden layers in the depthwise convolution (default: 0)')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate (default: 0.01)')
parser.add_argument('--optimizer', type=str, default='Adam', choices=['Adam', 'RMSprop'], help='Optimizer to use (default: Adam)')
parser.add_argument('--log_interval', type=check_positive, default=500, help='Epoch interval to report loss (default: 500)')
parser.add_argument('--seed', type=check_positive, default=1111, help='Random seed (default: 1111)')
parser.add_argument('--dilation_coefficient', type=check_positive, default=4, help='Dilation coefficient, recommended to be equal to kernel size (default: 4)')
parser.add_argument('--significance', type=float, default=0.8, help="Significance number stating when an increase in loss is significant enough to label a potential cause as true (validated) cause. See paper for more details (default: 0.8)")
parser.add_argument('--plot', action="store_true", default=False, help='Show causal graph (default: False)')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--ground_truth',action=StoreDictKeyPair, help='Provide dataset(s) and the ground truth(s) to evaluate the results of TCDF. Argument format: DataFile1=GroundtruthFile1,Key2=Value2,... with a key for each dataset containing multivariate time series (required file format: csv, a column with header for each time series) and a value for the corresponding ground truth (required file format: csv, no header, index of cause in first column, index of effect in second column, time delay between cause and effect in third column)')
group.add_argument('--data', nargs='+', help='(Path to) one or more datasets to analyse by TCDF containing multiple time series. Required file format: csv with a column (incl. header) for each time series')
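# Example invocations (the data file names below are placeholders, not files shipped with the repo):
# python runTCDF.py --data timeseries1.csv timeseries2.csv --plot
# python runTCDF.py --ground_truth timeseries1.csv=groundtruth1.csv --hidden_layers 1 --cuda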
args = parser.parse_args()
print("Arguments:", args)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, you should probably run with --cuda to speed up training.")
if args.kernel_size != args.dilation_coefficient:
    print("WARNING: The dilation coefficient is not equal to the kernel size. Multiple paths can lead to the same delays. Set kernel_size equal to dilation_c to have exactly one path for each delay.")
kernel_size = args.kernel_size
levels = args.hidden_layers+1
nrepochs = args.epochs
learningrate = args.learning_rate
optimizername = args.optimizer
dilation_c = args.dilation_coefficient
loginterval = args.log_interval
seed=args.seed
cuda=args.cuda
significance=args.significance
if args.ground_truth is not None:
datafiles = args.ground_truth
main(datafiles, evaluation=True)
else:
datafiles = dict()
for dataset in args.data:
datafiles[dataset]=""
main(datafiles, evaluation=False)
| 13,848 | 39.612903 | 544 | py |
TCDF | TCDF-master/TCDF.py | import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from model import ADDSTCN
import random
import pandas as pd
import numpy as np
import heapq
import copy
import os
import sys
def preparedata(file, target):
"""Reads data from csv file and transforms it to two PyTorch tensors: dataset x and target time series y that has to be predicted."""
df_data = pd.read_csv(file)
df_y = df_data.copy(deep=True)[[target]]
df_x = df_data.copy(deep=True)
df_yshift = df_y.copy(deep=True).shift(periods=1, axis=0)
df_yshift[target]=df_yshift[target].fillna(0.)
df_x[target] = df_yshift
data_x = df_x.values.astype('float32').transpose()
data_y = df_y.values.astype('float32').transpose()
data_x = torch.from_numpy(data_x)
data_y = torch.from_numpy(data_y)
x, y = Variable(data_x), Variable(data_y)
return x, y
def train(epoch, traindata, traintarget, modelname, optimizer,log_interval,epochs):
"""Trains model by performing one epoch and returns attention scores and loss."""
modelname.train()
x, y = traindata[0:1], traintarget[0:1]
optimizer.zero_grad()
epochpercentage = (epoch/float(epochs))*100
output = modelname(x)
attentionscores = modelname.fs_attention
loss = F.mse_loss(output, y)
loss.backward()
optimizer.step()
if epoch % log_interval ==0 or epoch % epochs == 0 or epoch==1:
print('Epoch: {:2d} [{:.0f}%] \tLoss: {:.6f}'.format(epoch, epochpercentage, loss))
return attentionscores.data, loss
def findcauses(target, cuda, epochs, kernel_size, layers,
log_interval, lr, optimizername, seed, dilation_c, significance, file):
"""Discovers potential causes of one target time series, validates these potential causes with PIVM and discovers the corresponding time delays"""
print("\n", "Analysis started for target: ", target)
torch.manual_seed(seed)
X_train, Y_train = preparedata(file, target)
X_train = X_train.unsqueeze(0).contiguous()
Y_train = Y_train.unsqueeze(2).contiguous()
input_channels = X_train.size()[1]
targetidx = pd.read_csv(file).columns.get_loc(target)
model = ADDSTCN(targetidx, input_channels, layers, kernel_size=kernel_size, cuda=cuda, dilation_c=dilation_c)
if cuda:
model.cuda()
X_train = X_train.cuda()
Y_train = Y_train.cuda()
optimizer = getattr(optim, optimizername)(model.parameters(), lr=lr)
scores, firstloss = train(1, X_train, Y_train, model, optimizer,log_interval,epochs)
firstloss = firstloss.cpu().data.item()
for ep in range(2, epochs+1):
scores, realloss = train(ep, X_train, Y_train, model, optimizer,log_interval,epochs)
realloss = realloss.cpu().data.item()
s = sorted(scores.view(-1).cpu().detach().numpy(), reverse=True)
indices = np.argsort(-1 *scores.view(-1).cpu().detach().numpy())
#attention interpretation to find tau: the threshold that distinguishes potential causes from non-causal time series
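    # Heuristic sketch: with the attention scores sorted in descending order, find the
    # largest gap between consecutive scores among those still >= 1 (their initial value);
    # every series above that gap becomes a potential cause. Illustration: sorted scores
    # [1.6, 1.4, 0.2, 0.1] have their largest admissible gap after the second score, so the
    # two highest-scoring series are kept. With at most 5 series, all series whose score
    # exceeds 1 are kept instead.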
if len(s)<=5:
potentials = []
for i in indices:
if scores[i]>1.:
potentials.append(i)
else:
potentials = []
gaps = []
for i in range(len(s)-1):
if s[i]<1.: #tau should be greater or equal to 1, so only consider scores >= 1
break
gap = s[i]-s[i+1]
gaps.append(gap)
sortgaps = sorted(gaps, reverse=True)
for i in range(0, len(gaps)):
largestgap = sortgaps[i]
index = gaps.index(largestgap)
ind = -1
if index<((len(s)-1)/2): #gap should be in first half
if index>0:
ind=index #gap should have index > 0, except if second score <1
break
if ind<0:
ind = 0
potentials = indices[:ind+1].tolist()
print("Potential causes: ", potentials)
validated = copy.deepcopy(potentials)
#Apply PIVM (permutes the values) to check if potential cause is true cause
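    # A potential cause is kept only if permuting its values noticeably hurts the trained
    # model: diff = firstloss - realloss is the improvement gained during training, and
    # testdiff = firstloss - testloss is what remains of that improvement once the
    # candidate's values are shuffled. If testdiff > diff * significance (the permutation
    # barely increased the loss), the candidate is removed from the validated causes.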
for idx in potentials:
random.seed(seed)
X_test2 = X_train.clone().cpu().numpy()
random.shuffle(X_test2[:,idx,:][0])
shuffled = torch.from_numpy(X_test2)
if cuda:
shuffled=shuffled.cuda()
model.eval()
output = model(shuffled)
testloss = F.mse_loss(output, Y_train)
testloss = testloss.cpu().data.item()
diff = firstloss-realloss
testdiff = firstloss-testloss
if testdiff>(diff*significance):
validated.remove(idx)
weights = []
#Discover time delay between cause and effect by interpreting kernel weights
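    # Each layer contributes (offset of its largest kernel weight, counted from the most
    # recent tap) * dilation_c**layer, and the per-layer contributions are summed into the
    # total delay. If the two largest weights in a row are equal, that layer contributes 0.
    # A self-cause (cause == target) gets +1 because the target's own history enters the
    # input shifted by one step in preparedata().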
for layer in range(layers):
weight = model.dwn.network[layer].net[0].weight.abs().view(model.dwn.network[layer].net[0].weight.size()[0], model.dwn.network[layer].net[0].weight.size()[2])
weights.append(weight)
causeswithdelay = dict()
for v in validated:
totaldelay=0
for k in range(len(weights)):
w=weights[k]
row = w[v]
twolargest = heapq.nlargest(2, row)
m = twolargest[0]
m2 = twolargest[1]
if m > m2:
index_max = len(row) - 1 - max(range(len(row)), key=row.__getitem__)
else:
#take first filter
index_max=0
delay = index_max *(dilation_c**k)
totaldelay+=delay
if targetidx != v:
causeswithdelay[(targetidx, v)]=totaldelay
else:
causeswithdelay[(targetidx, v)]=totaldelay+1
print("Validated causes: ", validated)
return validated, causeswithdelay, realloss, scores.view(-1).cpu().detach().numpy().tolist()
| 5,903 | 33.729412 | 166 | py |
TCDF | TCDF-master/depthwise.py | import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from torch.autograd import Variable
class Chomp1d(nn.Module):
"""PyTorch does not offer native support for causal convolutions, so it is implemented (with some inefficiency) by simply using a standard convolution with zero padding on both sides, and chopping off the end of the sequence."""
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class FirstBlock(nn.Module):
def __init__(self, target, n_inputs, n_outputs, kernel_size, stride, dilation, padding):
super(FirstBlock, self).__init__()
self.target = target
self.conv1 = nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=n_outputs)
self.chomp1 = Chomp1d(padding)
self.net = nn.Sequential(self.conv1, self.chomp1)
self.relu = nn.PReLU(n_inputs)
self.init_weights()
def init_weights(self):
"""Initialize weights"""
self.conv1.weight.data.normal_(0, 0.1)
def forward(self, x):
out = self.net(x)
return self.relu(out)
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding):
super(TemporalBlock, self).__init__()
self.conv1 = nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=n_outputs)
self.chomp1 = Chomp1d(padding)
self.net = nn.Sequential(self.conv1, self.chomp1)
self.relu = nn.PReLU(n_inputs)
self.init_weights()
def init_weights(self):
"""Initialize weights"""
self.conv1.weight.data.normal_(0, 0.1)
def forward(self, x):
out = self.net(x)
return self.relu(out+x) #residual connection
class LastBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding):
super(LastBlock, self).__init__()
self.conv1 = nn.Conv1d(n_inputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation, groups=n_outputs)
self.chomp1 = Chomp1d(padding)
self.net = nn.Sequential(self.conv1, self.chomp1)
self.linear = nn.Linear(n_inputs, n_inputs)
self.init_weights()
def init_weights(self):
"""Initialize weights"""
self.linear.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
return self.linear(out.transpose(1,2)+x.transpose(1,2)).transpose(1,2) #residual connection
class DepthwiseNet(nn.Module):
def __init__(self, target, num_inputs, num_levels, kernel_size=2, dilation_c=2):
super(DepthwiseNet, self).__init__()
layers = []
in_channels = num_inputs
out_channels = num_inputs
for l in range(num_levels):
dilation_size = dilation_c ** l
if l==0:
layers += [FirstBlock(target, in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size)]
elif l==num_levels-1:
layers+=[LastBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size)]
else:
layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size)]
self.network = nn.Sequential(*layers)
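        # Receptive field of the stacked causal convolutions:
        # 1 + (kernel_size - 1) * sum(dilation_c ** l for l in range(num_levels)).
        # For example, DepthwiseNet(target=0, num_inputs=3, num_levels=1, kernel_size=4,
        # dilation_c=4) maps a (batch, 3, T) tensor to a (batch, 3, T) tensor whose value at
        # time t depends only on inputs at times t-3 .. t, matching the "maximum delay to be
        # found is kernel size - 1" note in the command-line help.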
def forward(self, x):
return self.network(x)
| 3,952 | 39.752577 | 232 | py |
TCDF | TCDF-master/model.py | import torch as th
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from depthwise import DepthwiseNet
from torch.nn.utils import weight_norm
import numpy as np
class ADDSTCN(nn.Module):
def __init__(self, target, input_size, num_levels, kernel_size, cuda, dilation_c):
super(ADDSTCN, self).__init__()
self.target=target
self.dwn = DepthwiseNet(self.target, input_size, num_levels, kernel_size=kernel_size, dilation_c=dilation_c)
self.pointwise = nn.Conv1d(input_size, 1, 1)
self._attention = th.ones(input_size,1)
self._attention = Variable(self._attention, requires_grad=False)
self.fs_attention = th.nn.Parameter(self._attention.data)
if cuda:
self.dwn = self.dwn.cuda()
self.pointwise = self.pointwise.cuda()
self._attention = self._attention.cuda()
def init_weights(self):
self.pointwise.weight.data.normal_(0, 0.1)
def forward(self, x):
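        # Each input channel is scaled by its softmax-normalised attention score before the
        # depthwise causal convolutions; TCDF later reads self.fs_attention to rank candidate
        # causes. The pointwise 1x1 convolution then mixes the channels into a single
        # predicted series, returned as (batch, time, 1) to match the target tensor.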
y1=self.dwn(x*F.softmax(self.fs_attention, dim=0))
y1 = self.pointwise(y1)
return y1.transpose(1,2) | 1,175 | 34.636364 | 116 | py |
TCDF | TCDF-master/evaluate_predictions_TCDF.py | import TCDF
import argparse
import torch
import torch.optim as optim
from model import ADDSTCN
import pandas as pd
import numpy as np
import networkx as nx
import pylab
import copy
import matplotlib.pyplot as plt
import os
import sys
# os.chdir(os.path.dirname(sys.argv[0])) #uncomment this line to run in VSCode
def check_positive(value):
"""Checks if argument is positive integer (larger than zero)."""
ivalue = int(value)
if ivalue <= 0:
raise argparse.ArgumentTypeError("%s should be positive" % value)
return ivalue
def check_zero_or_positive(value):
"""Checks if argument is positive integer (larger than or equal to zero)."""
ivalue = int(value)
if ivalue < 0:
raise argparse.ArgumentTypeError("%s should be positive" % value)
return ivalue
class StoreDictKeyPair(argparse.Action):
"""Creates dictionary containing datasets as keys and ground truth files as values."""
def __call__(self, parser, namespace, values, option_string=None):
my_dict = {}
for kv in values.split(","):
k,v = kv.split("=")
my_dict[k] = v
setattr(namespace, self.dest, my_dict)
def check_between_zero_and_one(value):
"""Checks if argument is float between zero and 1."""
fvalue = float(value)
if fvalue < 0.0 or fvalue > 1.0:
raise argparse.ArgumentTypeError("%s should be a float between 0 and 1" % value)
return fvalue
def evaluate_prediction(target, cuda, epochs, kernel_size, layers,
loginterval, lr, optimizername, seed, dilation_c, split, file):
"""Runs first part of TCDF to predict one time series and evaluate its accuracy (MASE)."""
print("\n", "Analysis started for target: ", target)
torch.manual_seed(seed)
X, Y = TCDF.preparedata(file, target)
X = X.unsqueeze(0).contiguous()
Y = Y.unsqueeze(2).contiguous()
timesteps = X.size()[2]
if timesteps!=Y.size()[1]:
print("WARNING: Time series do not have the same length.")
X_train = X[:,:,:int(split*timesteps)]
Y_train = Y[:,:int(split*timesteps),:]
X_test = X[:,:,int(split*timesteps):]
Y_test = Y[:,int(split*timesteps):,:]
input_channels = X_train.size()[1]
targetidx = pd.read_csv(file).columns.get_loc(target)
    model = ADDSTCN(targetidx, input_channels, layers, kernel_size=kernel_size, cuda=cuda, dilation_c=dilation_c)  # use the layers argument instead of the module-level levels
if cuda:
model.cuda()
X_train = X_train.cuda()
Y_train = Y_train.cuda()
X_test = X_test.cuda()
Y_test = Y_test.cuda()
optimizer = getattr(optim, optimizername)(model.parameters(), lr=lr)
for ep in range(1, epochs+1):
scores, realloss = TCDF.train(ep, X_train, Y_train, model, optimizer,loginterval,epochs)
realloss = realloss.cpu().data.item()
model.eval()
output = model(X_test)
prediction=output.cpu().detach().numpy()[0,:,0]
T = output.size()[1]
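    # Mean Absolute Scaled Error over the test split:
    # MASE = sum_t |y_t - yhat_t| / ((T / (T - 1)) * sum_t |y_t - y_{t-1}|),
    # i.e. the model's absolute error scaled by that of a naive
    # "repeat the previous value" forecast.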
total_e = 0.
for t in range(T):
real = Y_test[:,t,:]
predicted = output[:,t,:]
e = abs(real - predicted)
total_e+=e
total_e = total_e.cpu().data.item()
total = 0.
for t in range(1,T):
temp = abs(Y_test[:,t,:] - Y_test[:,t-1,:])
total+=temp
denom = (T/float(T-1))*total
denom = denom.cpu().data.item()
if denom!=0.:
MASE = total_e/float(denom)
else:
MASE = 0.
return MASE, prediction
def plot_predictions(predictions, file):
"""Plots the predicted values of all time series in the dataset"""
for c in predictions:
p = predictions[c]
plt.plot(p,label=c)
plt.xlabel('Time')
plt.ylabel('Predicted value')
plt.title('Dataset %s'%file)
plt.legend()
plt.show()
def evaluate(datafile):
"""Collects the predictions of all time series in a dataset and returns overall results."""
stringdatafile = str(datafile)
if '/' in stringdatafile:
stringdatafile = str(datafile).rsplit('/', 1)[1]
df_data = pd.read_csv(datafile)
columns = list(df_data)
MASEs = []
predictions = dict()
for c in columns:
MASE, prediction = evaluate_prediction(c, cuda=cuda, epochs=nrepochs,
kernel_size=kernel_size, layers=levels, loginterval=loginterval,
lr=learningrate, optimizername=optimizername,
seed=seed, dilation_c=dilation_c, split=split, file=datafile)
predictions[c]= prediction
MASEs.append(MASE)
allres.append(MASE)
avg = np.mean(MASEs)
std = np.std(MASEs)
return allres, avg, std, predictions
parser = argparse.ArgumentParser(description='TCDF: Temporal Causal Discovery Framework')
parser.add_argument('--cuda', action="store_true", default=False, help='Use CUDA (GPU) (default: False)')
parser.add_argument('--epochs', type=check_positive, default=1000, help='Number of epochs (default: 1000)')
parser.add_argument('--kernel_size', type=check_positive, default=4, help='Size of sliding kernel (default: 4)')
parser.add_argument('--hidden_layers', type=check_zero_or_positive, default=0, help='Number of hidden layers in the depthwise convolution (default: 0)')
parser.add_argument('--learning_rate', type=float, default=0.01, help='Learning rate (default: 0.01)')
parser.add_argument('--optimizer', type=str, default='Adam', choices=['Adam', 'RMSprop'], help='Optimizer to use: Adam or RMSprop (default: Adam)')
parser.add_argument('--log_interval', type=check_positive, default=500, help='Epoch interval to report loss (default: 500)')
parser.add_argument('--seed', type=check_positive, default=1111, help='Random seed (default: 1111)')
parser.add_argument('--dilation_coefficient', type=check_positive, default=4, help='Dilation coefficient, recommended to be equal to kernel size (default: 4)')
parser.add_argument('--plot', action="store_true", default=False, help='Plot predicted time series (default: False)')
parser.add_argument('--train_test_split', type=check_between_zero_and_one, default=0.8, help="Portion of dataset to use for training (default 0.8)")
parser.add_argument('--data', nargs='+', required=True, help='(Path to) Dataset(s) to predict by TCDF containing multiple time series. Required file format: csv with a column (incl. header) for each time series')
args = parser.parse_args()
print("Arguments:", args)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, you should probably run with --cuda to speed up training.")
if args.kernel_size != args.dilation_coefficient:
print("WARNING: The dilation coefficient is not equal to the kernel size. Multiple paths can lead to the same delays. Set kernel_size equal to dilation_c to have exaxtly one path for each delay.")
kernel_size = args.kernel_size
levels = args.hidden_layers+1
nrepochs = args.epochs
learningrate = args.learning_rate
optimizername = args.optimizer
dilation_c = args.dilation_coefficient
loginterval = args.log_interval
seed=args.seed
cuda=args.cuda
split=args.train_test_split
plot = args.plot
datasets = args.data
evalresults = dict()
allres = []
for datafile in datasets:
allres,avg,std,predictions = evaluate(datafile)
evalresults[datafile]=(avg, std)
print("\nMean Absolute Scaled Error (MASE) averaged over all time series in", datafile,":",evalresults[datafile][0],"with standard deviation",evalresults[datafile][1])
if plot:
plot_predictions(predictions,datafile)
if len(datasets)>1:
overallavg = np.mean(allres)
overallstd = np.std(allres)
print("=========================Overall Evaluation====================================")
print("Average MASE over all datasets: ", overallavg)
print("Standard Deviation MASE over all datasets: ", overallstd)
| 7,764 | 38.820513 | 212 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/run.py | #!/usr/bin/env python3
import argparse
import random
import os
import numpy as np
import torch
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
import habitat_extensions # noqa: F401
import vlnce_baselines # noqa: F401
from vlnce_baselines.config.default import get_config
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--exp_name",
type=str,
default="test",
required=True,
help="experiment id that matches to exp-id in Notion log",
)
parser.add_argument(
"--run-type",
choices=["train", "eval", "inference"],
required=True,
help="run type of the experiment (train, eval, inference)",
)
parser.add_argument(
"--exp-config",
type=str,
required=True,
help="path to config yaml containing info about experiment",
)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options from command line",
)
parser.add_argument('--local_rank', type=int, default=0, help="local gpu id")
args = parser.parse_args()
run_exp(**vars(args))
def run_exp(exp_name: str, exp_config: str,
run_type: str, opts=None, local_rank=None) -> None:
r"""Runs experiment given mode and config
Args:
exp_config: path to config file.
run_type: "train" or "eval.
opts: list of strings of additional config options.
Returns:
None.
"""
config = get_config(exp_config, opts)
config.defrost()
config.TENSORBOARD_DIR += exp_name
config.CHECKPOINT_FOLDER += exp_name
if os.path.isdir(config.EVAL_CKPT_PATH_DIR):
config.EVAL_CKPT_PATH_DIR += exp_name
config.RESULTS_DIR += exp_name
config.LOG_FILE = exp_name + '_' + config.LOG_FILE
config.TASK_CONFIG.SEED = 0
config.local_rank = local_rank
config.freeze()
# logger.info(f"config: {config}") # print all configs
logger.add_filehandler('logs/running_log/'+config.LOG_FILE)
random.seed(config.TASK_CONFIG.SEED)
np.random.seed(config.TASK_CONFIG.SEED)
torch.manual_seed(config.TASK_CONFIG.SEED)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = False
if torch.cuda.is_available():
torch.set_num_threads(1)
trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
trainer = trainer_init(config)
if run_type == "train":
trainer.train()
elif run_type == "eval":
trainer.eval()
elif run_type == "inference":
trainer.inference()
if __name__ == "__main__":
main()
| 2,787 | 27.742268 | 81 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/ss_trainer_CMA.py | import gc
import os
import random
import warnings
from collections import defaultdict
import lmdb
import msgpack_numpy
import numpy as np
import math
import time
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import tqdm
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.base_il_trainer import BaseVLNCETrainer
from vlnce_baselines.common.env_utils import construct_envs, construct_envs_for_rl, is_slurm_batch_job
from vlnce_baselines.common.utils import extract_instruction_tokens
from vlnce_baselines.utils import reduce_loss
from .utils import get_camera_orientations
from .models.utils import (
length2mask, dir_angle_feature,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
import torch.distributed as distr
import gzip
import json
from copy import deepcopy
@baseline_registry.register_trainer(name="schedulesampler-CMA")
class SSTrainer(BaseVLNCETrainer):
def __init__(self, config=None):
super().__init__(config)
self.max_len = int(config.IL.max_traj_len)
def _make_dirs(self) -> None:
self._make_ckpt_dir()
# os.makedirs(self.lmdb_features_dir, exist_ok=True)
if self.config.EVAL.SAVE_RESULTS:
self._make_results_dir()
def save_checkpoint(self, epoch: int, step_id: int) -> None:
torch.save(
obj={
"state_dict": self.policy.state_dict(),
"config": self.config,
"optim_state": self.optimizer.state_dict(),
"epoch": epoch,
"step_id": step_id,
},
f=os.path.join(self.config.CHECKPOINT_FOLDER, f"ckpt.{epoch}.pth"),
)
def allocate_allowed_episode_by_scene(self):
        ''' discrete waypoint coordinates directly projected from MP3D '''
with gzip.open(
self.config.TASK_CONFIG.DATASET.DATA_PATH.format(
split=self.split)
) as f:
data = json.load(f) # dict_keys(['episodes', 'instruction_vocab'])
        ''' continuous waypoint coordinates from shortest paths in Habitat '''
with gzip.open(
self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(
split=self.split)
) as f:
gt_data = json.load(f)
data = data['episodes']
# long_episode_ids = [int(k) for k,v in gt_data.items() if len(v['actions']) > self.config.IL.max_traj_len]
long_episode_ids = []
average_length = (len(data) - len(long_episode_ids))//self.world_size
episodes_by_scene = {}
for ep in data:
scan = ep['scene_id'].split('/')[1]
if scan not in episodes_by_scene.keys():
episodes_by_scene[scan] = []
if ep['episode_id'] not in long_episode_ids:
episodes_by_scene[scan].append(ep['episode_id'])
else:
continue
        ''' split the episodes of each scene evenly across the different GPUs '''
values_to_scenes = {}
values = []
for k,v in episodes_by_scene.items():
values.append(len(v))
if len(v) not in values_to_scenes.keys():
values_to_scenes[len(v)] = []
values_to_scenes[len(v)].append(k)
groups = self.world_size
values.sort(reverse=True)
last_scene_episodes = episodes_by_scene[values_to_scenes[values[0]].pop()]
values = values[1:]
load_balance_groups = [[] for grp in range(groups)]
scenes_groups = [[] for grp in range(groups)]
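        # Greedy balancing: each remaining scene (largest episode count first) is assigned to
        # the GPU group with the fewest episodes so far; the largest scene, popped above into
        # last_scene_episodes, then pads every group up to average_length so all ranks receive
        # the same number of episodes.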
for v in values:
current_total = [sum(grp) for grp in load_balance_groups]
min_index = np.argmin(current_total)
load_balance_groups[min_index].append(v)
scenes_groups[min_index] += episodes_by_scene[values_to_scenes[v].pop()]
for grp in scenes_groups:
add_number = average_length - len(grp)
grp += last_scene_episodes[:add_number]
last_scene_episodes = last_scene_episodes[add_number:]
return scenes_groups[self.local_rank]
def train_ml(self, in_train=True, train_tf=False):
self.envs.resume_all()
observations = self.envs.reset()
shift_index = 0
for i, ep in enumerate(self.envs.current_episodes()):
if ep.episode_id in self.trained_episodes:
i = i - shift_index
observations.pop(i)
self.envs.pause_at(i)
shift_index += 1
if self.envs.num_envs == 0:
break
else:
self.trained_episodes.append(ep.episode_id)
if self.envs.num_envs == 0:
return -1
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
ml_loss = 0.
total_weight = 0.
losses = []
not_done_index = list(range(self.envs.num_envs))
not_done_masks = torch.zeros(
self.envs.num_envs, 1, dtype=torch.bool, device=self.device
)
# encoding instructions
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
self.envs.num_envs,
self.num_recurrent_layers,
self.config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
init_num_envs = self.envs.num_envs
il_loss = 0.0
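        # Rollout sketch: at every step the waypoint predictor proposes candidate waypoints
        # from the panoramic observations, the CMA policy scores them against the instruction,
        # and the chosen candidate is executed as a low-level HIGHTOLOW action with an
        # (angle, distance) pair -- or STOP when the last ("stop") candidate is selected.
        # Environments that finish are paused and dropped from the batch.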
for stepk in range(self.max_len):
language_features = instruction_embedding[not_done_index]
lang_masks = all_lang_masks[not_done_index]
# agent's current position and heading
positions = []; headings = []
for ob_i in range(len(observations)):
agent_state_i = self.envs.call_at(ob_i,
"get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
if 'CMA' in self.config.MODEL.policy_name:
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = in_train,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = language_features,
text_mask = lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
total_weight += len(candidate_lengths)
            # get resulting distance by executing candidate actions
# the last value in each list is the current distance
if train_tf:
cand_dists_to_goal = [[] for _ in range(len(batch_angles))]
oracle_cand_idx = []
oracle_stop = []
for j in range(len(batch_angles)):
for k in range(len(batch_angles[j])):
angle_k = batch_angles[j][k]
forward_k = batch_distances[j][k]
dist_k = self.envs.call_at(j,
"cand_dist_to_goal", {
"angle": angle_k, "forward": forward_k,
})
cand_dists_to_goal[j].append(dist_k)
curr_dist_to_goal = self.envs.call_at(
j, "current_dist_to_goal")
                    # oracle stops once within 1.5 m of the goal (success is measured at 3.0 m)
if curr_dist_to_goal < 1.5:
oracle_cand_idx.append(candidate_lengths[j] - 1)
oracle_stop.append(True)
else:
oracle_cand_idx.append(np.argmin(cand_dists_to_goal[j]))
oracle_stop.append(False)
if train_tf:
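                # Scheduled sampling: follow the oracle (shortest-path) action with
                # probability self.ratio and the model's greedy action otherwise; the
                # cross-entropy loss always targets the oracle action. self.ratio decays
                # geometrically over training (set in train()).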
oracle_actions = torch.tensor(oracle_cand_idx, device=self.device).unsqueeze(1)
actions = logits.argmax(dim=-1, keepdim=True)
actions = torch.where(
torch.rand_like(actions, dtype=torch.float) <= self.ratio,
oracle_actions, actions)
current_loss = F.cross_entropy(logits, oracle_actions.squeeze(1), reduction="none")
ml_loss += torch.sum(current_loss)
else:
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = self.envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in
zip(*outputs)]
if sum(dones) > 0:
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = rnn_states[np.array(dones)==False]
shift_index = 0
for i in range(self.envs.num_envs):
if dones[i]:
i = i - shift_index
not_done_index.pop(i)
self.envs.pause_at(i)
if self.envs.num_envs == 0:
break
observations.pop(i)
infos.pop(i)
shift_index += 1
if self.envs.num_envs == 0:
break
not_done_masks = torch.ones(
self.envs.num_envs, 1, dtype=torch.bool, device=self.device
)
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
if train_tf:
il_loss = ml_loss / total_weight
return il_loss
def train(self) -> None:
split = self.config.TASK_CONFIG.DATASET.SPLIT
self.config.defrost()
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = self.config.IL.max_traj_len
if (
self.config.IL.DAGGER.expert_policy_sensor
not in self.config.TASK_CONFIG.TASK.SENSORS
):
self.config.TASK_CONFIG.TASK.SENSORS.append(
self.config.IL.DAGGER.expert_policy_sensor
)
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
self.config.NUM_ENVIRONMENTS = self.config.IL.batch_size // len(
self.config.SIMULATOR_GPU_IDS)
self.config.use_pbar = not is_slurm_batch_job()
''' *** if choosing image '''
if self.config.MODEL.policy_name == 'PolicyViewSelectionCMA':
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations(12)
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
self.config.freeze()
self.world_size = self.config.GPU_NUMBERS
self.local_rank = self.config.local_rank
self.batch_size = self.config.IL.batch_size
torch.cuda.set_device(self.device)
if self.world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
torch.cuda.set_device(self.device)
self.split = split
episode_ids = self.allocate_allowed_episode_by_scene()
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME),
episodes_allowed=episode_ids,
auto_reset_done=False
)
num_epoches_per_ratio = int(np.ceil(self.config.IL.epochs/self.config.IL.decay_time))
print('\nFinished constructing environments')
dataset_length = sum(self.envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
observation_space = self.envs.observation_spaces[0]
action_space = self.envs.action_spaces[0]
self.obs_transforms = get_active_obs_transforms(self.config)
observation_space = apply_obs_transforms_obs_space(
observation_space, self.obs_transforms
)
print('\nInitializing policy network ...')
self._initialize_policy(
self.config,
self.config.IL.load_from_ckpt,
observation_space=observation_space,
action_space=action_space,
)
print('\nTraining starts ...')
with TensorboardWriter(
self.config.TENSORBOARD_DIR,
flush_secs=self.flush_secs,
purge_step=0,
) as writer:
AuxLosses.activate()
batches_per_epoch = int(np.ceil(dataset_length/self.batch_size))
for epoch in range(self.start_epoch, self.config.IL.epochs):
epoch_str = f"{epoch + 1}/{self.config.IL.epochs}"
t_ = (
tqdm.trange(
batches_per_epoch, leave=False, dynamic_ncols=True
)
if self.config.use_pbar & (self.local_rank < 1)
else range(batches_per_epoch)
)
self.ratio = np.power(self.config.IL.schedule_ratio, epoch//num_epoches_per_ratio + 1)
self.trained_episodes = []
# reconstruct env for every epoch to ensure load same data
if epoch != self.start_epoch:
self.envs = None
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME),
episodes_allowed=episode_ids,
auto_reset_done=False
)
for batch_idx in t_:
loss = self.train_ml(
in_train=True, train_tf=True)
if loss == -1:
break
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
losses = [loss]
if self.world_size > 1:
for i in range(len(losses)):
reduce_loss(losses[i], self.local_rank, self.world_size)
losses[i] = losses[i].item()
else:
for i in range(len(losses)):
losses[i] = losses[i].item()
loss = losses[0]
if self.config.use_pbar:
if self.local_rank < 1: # seems can be removed
t_.set_postfix(
{
"epoch": epoch_str,
"loss": round(loss, 4),
}
)
writer.add_scalar("loss", loss, self.step_id)
self.step_id += 1 # noqa: SIM113
if self.local_rank < 1: # and epoch % 3 == 0:
self.save_checkpoint(epoch, self.step_id)
AuxLosses.deactivate()
| 18,334 | 39.474614 | 115 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/utils.py | import torch
import torch.distributed as dist
import numpy as np
import math
import copy
class ARGS():
def __init__(self):
self.local_rank = 0
def reduce_loss(tensor, rank, world_size):
with torch.no_grad():
dist.reduce(tensor, dst=0)
if rank == 0:
tensor /= world_size
def gather_list_and_concat(list_of_nums,world_size):
if not torch.is_tensor(list_of_nums):
tensor = torch.Tensor(list_of_nums).cuda()
else:
if list_of_nums.is_cuda == False:
tensor = list_of_nums.cuda()
else:
tensor = list_of_nums
gather_t = [torch.ones_like(tensor) for _ in
range(world_size)]
dist.all_gather(gather_t, tensor)
return gather_t
def repeat_allocation(allocations, max_number):
if torch.is_tensor(max_number):
max_number = max_number.long().item()
else:
max_number = max_number.long()
allocation_number = len(allocations)
repeat_time, res = max_number // allocation_number, max_number % allocation_number
allocations_ = []
for i in range(repeat_time):
allocations_ += copy.deepcopy(allocations)
allocations_ += copy.deepcopy(allocations)[:res]
return allocations_
def allocate(number, ep_length, size_per_time):
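    # Greedy longest-first balancing: every episode index i appears number[i] times with
    # weight ep_length[i]; the weighted items are packed into roughly ceil(total / size_per_time)
    # groups, always adding the next-heaviest item to the currently lightest group, and the
    # matching episode indices are returned for each group.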
length_to_indexes = {ep_length[i]: [] for i in
range(len(ep_length))}
for i in range(len(ep_length)):
length_to_indexes[ep_length[i]] += [i]*number[i]
values = []
for i in range(len(number)):
values += [ep_length[i]] * number[i]
groups = int((len(values) - 0.01) // size_per_time + 1)
values.sort(reverse=True)
load_balance_groups = [[] for grp in range(groups)]
for v in values:
load_balance_groups.sort(key=lambda x: sum(x))
load_balance_groups[0].append(v)
indexes = []
set_length = list(set(ep_length))
for i in range(groups):
index = np.zeros(len(load_balance_groups[i]),dtype=int)
for j in range(len(set_length)):
length_indexes = length_to_indexes[set_length[j]]
position = np.where(np.array(load_balance_groups[i]) ==
set_length[j])[0]
position_length = len(position)
index[position] = length_indexes[:position_length]
length_to_indexes[set_length[j]] = length_indexes[position_length:]
indexes.append((index).tolist())
return indexes
def allocate_instructions(instruction_lengths, allocations,ep_length, instruction_ids):
instruction_ids_copy = copy.deepcopy(instruction_ids)
allocations_copy = copy.deepcopy(allocations)
instruction_lengths_copy = copy.deepcopy(instruction_lengths)
values = []
value_indexes = []
weights = []
for i in range(len(instruction_lengths)):
instruction_length = instruction_lengths[i]
values += instruction_length
value_indexes += len(instruction_length)*[i]
weights += [ep_length[i]] * len(instruction_length)
values = np.array(values)
weights = np.array(weights)
value_indexes = np.array(value_indexes)
sorted_index = np.argsort(values*weights)[::-1]
values = values[sorted_index]
value_indexes = value_indexes[sorted_index]
weights = weights[sorted_index]
groups = len(allocations)
load_balance_groups = [[] for grp in range(groups)]
group_weights = [[] for grp in range(groups)]
instruction_allocations = [[] for grp in range(groups)]
for j in range(len(values)):
summation = np.array([np.sum(np.array(load_balance_groups[i])*np.array(group_weights[i])) for i in range(groups)])
sorted_index = np.argsort(summation)
for i in sorted_index:
index = value_indexes[j]
value = values[j]
if index in allocations_copy[i]:
allocations_copy[i].remove(index)
load_balance_groups[i].append(value)
group_weights[i].append(weights[j])
index_in_length = np.where(np.array(instruction_lengths_copy[index]) == value)[0][0]
instruction_lengths_copy[index].pop(index_in_length)
instruction_allocations[i].append(instruction_ids_copy[index].pop(index_in_length))
break
return instruction_allocations
def allocate_by_scene_for_ddp(number, ep_length, size_per_time):
length_to_indexes = {ep_length[i]: [] for i in
range(len(ep_length))}
for i in range(len(ep_length)):
length_to_indexes[ep_length[i]] += [i]*number[i]
values = []
for i in range(len(number)):
values += [ep_length[i]] * number[i]
groups = int((len(values) - 0.01) // size_per_time + 1)
values.sort(reverse=True)
load_balance_groups = [[] for grp in range(groups)]
for v in values:
load_balance_groups.sort(key=lambda x: sum(x))
load_balance_groups[0].append(v)
indexes = []
set_length = list(set(ep_length))
for i in range(groups):
index = np.zeros(len(load_balance_groups[i]),dtype=int)
for j in range(len(set_length)):
length_indexes = length_to_indexes[set_length[j]]
position = np.where(np.array(load_balance_groups[i]) ==
set_length[j])[0]
position_length = len(position)
index[position] = length_indexes[:position_length]
length_to_indexes[set_length[j]] = length_indexes[position_length:]
indexes.append((index).tolist())
return indexes
def get_camera_orientations(num_views):
assert isinstance(num_views, int)
    base_angle_deg = 360 / num_views
    # pi/6 (30 degrees) only matches base_angle_deg when num_views == 12, which is how the
    # trainers call this function; derive the radian step from the degree step so the
    # dictionary keys (degrees) and values (radians) stay consistent for any num_views.
    base_angle_rad = math.radians(base_angle_deg)
orient_dict = {}
for k in range(1,num_views):
orient_dict[str(base_angle_deg*k)] = [0.0, base_angle_rad*k, 0.0]
return orient_dict
| 5,848 | 34.883436 | 122 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/ss_trainer_VLNBERT.py | import gc
import os
import random
import warnings
from collections import defaultdict
import lmdb
import msgpack_numpy
import numpy as np
import math
import time
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import tqdm
from habitat import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.base_il_trainer import BaseVLNCETrainer
from vlnce_baselines.common.env_utils import construct_envs, construct_envs_for_rl, is_slurm_batch_job
from vlnce_baselines.common.utils import extract_instruction_tokens
from vlnce_baselines.utils import reduce_loss
from .utils import get_camera_orientations
from .models.utils import (
length2mask, dir_angle_feature_with_ele,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
import torch.distributed as distr
import gzip
import json
from copy import deepcopy
@baseline_registry.register_trainer(name="schedulesampler-VLNBERT")
class SSTrainer(BaseVLNCETrainer):
def __init__(self, config=None):
super().__init__(config)
        self.max_len = int(config.IL.max_traj_len) # * 0.97 transferred gt path got 0.96 spl
def _make_dirs(self) -> None:
self._make_ckpt_dir()
# os.makedirs(self.lmdb_features_dir, exist_ok=True)
if self.config.EVAL.SAVE_RESULTS:
self._make_results_dir()
def save_checkpoint(self, epoch: int, step_id: int) -> None:
torch.save(
obj={
"state_dict": self.policy.state_dict(),
"config": self.config,
"optim_state": self.optimizer.state_dict(),
"epoch": epoch,
"step_id": step_id,
},
f=os.path.join(self.config.CHECKPOINT_FOLDER, f"ckpt.{epoch}.pth"),
)
def allocate_allowed_episode_by_scene(self):
        ''' discrete waypoint coordinates directly projected from MP3D '''
with gzip.open(
self.config.TASK_CONFIG.DATASET.DATA_PATH.format(
split=self.split)
) as f:
data = json.load(f) # dict_keys(['episodes', 'instruction_vocab'])
        ''' continuous waypoint coordinates from shortest paths in Habitat '''
with gzip.open(
self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(
split=self.split)
) as f:
gt_data = json.load(f)
data = data['episodes']
# long_episode_ids = [int(k) for k,v in gt_data.items() if len(v['actions']) > self.config.IL.max_traj_len]
long_episode_ids = []
average_length = (len(data) - len(long_episode_ids))//self.world_size
episodes_by_scene = {}
for ep in data:
scan = ep['scene_id'].split('/')[1]
if scan not in episodes_by_scene.keys():
episodes_by_scene[scan] = []
if ep['episode_id'] not in long_episode_ids:
episodes_by_scene[scan].append(ep['episode_id'])
else:
continue
        ''' split the episodes of each scene evenly across the different GPUs ''' # averaging number set problem
values_to_scenes = {}
values = []
for k,v in episodes_by_scene.items():
values.append(len(v))
if len(v) not in values_to_scenes.keys():
values_to_scenes[len(v)] = []
values_to_scenes[len(v)].append(k)
groups = self.world_size
values.sort(reverse=True)
last_scene_episodes = episodes_by_scene[values_to_scenes[values[0]].pop()]
values = values[1:]
load_balance_groups = [[] for grp in range(groups)]
scenes_groups = [[] for grp in range(groups)]
for v in values:
current_total = [sum(grp) for grp in load_balance_groups]
min_index = np.argmin(current_total)
load_balance_groups[min_index].append(v)
scenes_groups[min_index] += episodes_by_scene[values_to_scenes[v].pop()]
for grp in scenes_groups:
add_number = average_length - len(grp)
grp += last_scene_episodes[:add_number]
last_scene_episodes = last_scene_episodes[add_number:]
# episode_ids = [ep['episode_id'] for ep in data if
# ep['episode_id'] not in long_episode_ids]
# scenes_groups[self.local_rank] = episode_ids[
# self.local_rank:self.world_size * average_length:self.world_size]
return scenes_groups[self.local_rank]
def train_ml(self, in_train=True, train_tf=False, train_rl=False):
self.envs.resume_all()
observations = self.envs.reset()
shift_index = 0
for i, ep in enumerate(self.envs.current_episodes()):
if ep.episode_id in self.trained_episodes:
i = i - shift_index
observations.pop(i)
self.envs.pause_at(i)
shift_index += 1
if self.envs.num_envs == 0:
break
else:
self.trained_episodes.append(ep.episode_id)
if self.envs.num_envs == 0:
return -1
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
# expert_uuid = self.config.IL.DAGGER.expert_policy_sensor_uuid
not_done_masks = torch.zeros(
self.envs.num_envs, 1, dtype=torch.bool, device=self.device)
ml_loss = 0.
total_weight = 0.
losses = []
not_done_index = list(range(self.envs.num_envs))
# encoding instructions
if 'VLNBERT' in self.config.MODEL.policy_name:
lang_idx_tokens = batch['instruction']
padding_idx = 0
all_lang_masks = (lang_idx_tokens != padding_idx)
lang_lengths = all_lang_masks.sum(1)
lang_token_type_ids = torch.zeros_like(all_lang_masks,
dtype=torch.long, device=self.device)
h_t, all_language_features = self.policy.net(
mode='language',
lang_idx_tokens=lang_idx_tokens,
lang_masks=all_lang_masks,
)
init_num_envs = self.envs.num_envs
# Init the reward shaping
# last_dist = np.zeros(len(observations), np.float32)
# last_ndtw = np.zeros(len(observations), np.float32)
# for i in range(len(observations)):
# info = self.envs.call_at(i, "get_metrics", {})
# last_dist[i] = info['distance_to_goal']
# last_ndtw[i] = info['ndtw']
init_bs = len(observations)
state_not_dones = np.array([True] * init_bs)
# rewards = []
# hidden_states = []
# policy_log_probs = []
# critic_masks = []
# entropys = []
# # RL waypoint predictor
# way_log_probs = []
# way_rewards = []
# way_rl_masks = []
il_loss = 0.0
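        # Rollout sketch: at every step the recurrent VLN-BERT state h_t overwrites the first
        # token of the instruction features, the waypoint predictor proposes candidate
        # waypoints from the panoramic observations, the policy scores them, and the chosen
        # candidate is executed as a low-level HIGHTOLOW (angle, distance) action -- or STOP
        # when the last candidate is selected. Finished environments are paused and dropped.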
for stepk in range(self.max_len):
language_features = all_language_features[not_done_index]
lang_masks = all_lang_masks[not_done_index]
# instruction_embedding = all_instr_embed[not_done_index]
if 'VLNBERT' in self.config.MODEL.policy_name:
language_features = torch.cat(
(h_t.unsqueeze(1), language_features[:,1:,:]), dim=1)
# agent's current position and heading
positions = []; headings = []
for ob_i in range(len(observations)):
agent_state_i = self.envs.call_at(ob_i,
"get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
if 'VLNBERT' in self.config.MODEL.policy_name:
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = in_train,
)
# navigation action logits
logits, h_t = self.policy.net(
mode = 'navigation',
observations=batch,
lang_masks=lang_masks,
lang_feats=language_features,
lang_token_type_ids=lang_token_type_ids,
headings=headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
# step_rnn_states = torch.zeros(init_bs, 768, device=self.device)
# step_rnn_states[state_not_dones] = h_t
# hidden_states.append(step_rnn_states)
logits = logits.masked_fill_(cand_mask, -float('inf'))
total_weight += len(candidate_lengths)
            # get resulting distance by executing candidate actions
# the last value in each list is the current distance
if train_tf:
cand_dists_to_goal = [[] for _ in range(len(batch_angles))]
oracle_cand_idx = []
oracle_stop = []
for j in range(len(batch_angles)):
for k in range(len(batch_angles[j])):
angle_k = batch_angles[j][k]
forward_k = batch_distances[j][k]
dist_k = self.envs.call_at(j,
"cand_dist_to_goal", {
"angle": angle_k, "forward": forward_k,
})
cand_dists_to_goal[j].append(dist_k)
curr_dist_to_goal = self.envs.call_at(
j, "current_dist_to_goal")
                    # oracle stops once within 1.5 m of the goal (success is measured at 3.0 m)
if curr_dist_to_goal < 1.5:
oracle_cand_idx.append(candidate_lengths[j] - 1)
oracle_stop.append(True)
else:
oracle_cand_idx.append(np.argmin(cand_dists_to_goal[j]))
oracle_stop.append(False)
if train_rl:
probs = F.softmax(logits, 1) # sampling an action from model
c = torch.distributions.Categorical(probs)
actions = c.sample().detach()
rl_entropy = torch.zeros(init_bs, device=self.device)
rl_entropy[state_not_dones] = c.entropy()
entropys.append(rl_entropy)
rl_policy_log_probs = torch.zeros(init_bs, device=self.device)
rl_policy_log_probs[state_not_dones] = c.log_prob(actions)
policy_log_probs.append(rl_policy_log_probs)
elif train_tf:
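                # Scheduled sampling (as in the CMA trainer): the oracle action is followed
                # with probability self.ratio and the model's greedy action otherwise, while
                # the loss always targets the oracle action.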
oracle_actions = torch.tensor(oracle_cand_idx, device=self.device).unsqueeze(1)
actions = logits.argmax(dim=-1, keepdim=True)
actions = torch.where(
torch.rand_like(actions, dtype=torch.float) <= self.ratio,
oracle_actions, actions)
current_loss = F.cross_entropy(logits, oracle_actions.squeeze(1), reduction="none")
ml_loss += torch.sum(current_loss)
else:
actions = logits.argmax(dim=-1, keepdim=True)
# # REINFORCE waypoint predictor action
# way_step_mask = np.zeros(init_num_envs, np.float32)
# way_step_reward = np.zeros(init_num_envs, np.float32)
# way_step_logp = torch.zeros(init_num_envs, requires_grad=True).cuda()
# for j in range(logits.size(0)):
# perm_index = not_done_index[j]
# way_step_mask[perm_index] = 1.0
# if ( # for all the non-stopping cases
# actions[j].item() != candidate_lengths[j]-1
# ):
# way_step_logp[perm_index] = \
# batch_way_log_prob[j][actions[j].item()]
# # time penalty
# way_step_reward[perm_index] = -1.0
# else:
# if oracle_stop[j]:
# # nav success reward
# way_step_reward[perm_index] = 3.0
# else:
# way_step_reward[perm_index] = -3.0
# way_rl_masks.append(way_step_mask)
# way_rewards.append(way_step_reward)
# way_log_probs.append(way_step_logp)
# action_angles = []
# action_distances = []
env_actions = []
# rl_actions = np.array([-100] * init_bs)
for j in range(logits.size(0)):
if train_rl and (actions[j].item() == candidate_lengths[j]-1 or stepk == self.max_len-1):
# if RL, force stop at the max step
# action_angles.append(0)
# action_distances.append(0)
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
elif actions[j].item() == candidate_lengths[j]-1:
# action_angles.append(0)
# action_distances.append(0)
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
# action_angles.append(batch_angles[j][actions[j].item()])
# action_distances.append(batch_distances[j][actions[j].item()])
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
# self.envs.step(env_actions)
outputs = self.envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in
zip(*outputs)]
h_t = h_t[np.array(dones)==False]
# print('infos', infos)
# import pdb; pdb.set_trace()
if train_rl:
rl_actions[state_not_dones] = np.array([sk['action']['action'] for sk in env_actions])
# Calculate the mask and reward
current_dist = np.zeros(init_bs, np.float32)
# ndtw_score = np.zeros(init_bs, np.float32)
reward = np.zeros(init_bs, np.float32)
ct_mask = np.ones(init_bs, np.float32)
sbi = 0
for si in range(init_bs):
if state_not_dones[si]:
info = self.envs.call_at(sbi, "get_metrics", {})
current_dist[si] = info['distance_to_goal']
# ndtw_score[si] = info['ndtw']
sbi += 1
if not state_not_dones[si]:
reward[si] = 0.0
ct_mask[si] = 0.0
else:
action_idx = rl_actions[si]
# Target reward
if action_idx == 0: # If the action now is end
if current_dist[si] < 3.0: # Correct
reward[si] = 2.0 # + ndtw_score[si] * 2.0
else: # Incorrect
reward[si] = -2.0
elif action_idx != -100: # The action is not end
# Path fidelity rewards (distance & nDTW)
reward[si] = - (current_dist[si] - last_dist[si])
# ndtw_reward = ndtw_score[si] - last_ndtw[si]
if reward[si] > 0.0: # Quantification
reward[si] = 1.0 # + ndtw_reward
else:
reward[si] = -1.0 # + ndtw_reward
# # Miss the target penalty
# if (last_dist[i] <= 1.0) and (current_dist[i]-last_dist[i] > 0.0):
# reward[i] -= (1.0 - last_dist[i]) * 2.0
rewards.append(reward)
critic_masks.append(ct_mask)
last_dist[:] = current_dist
# last_ndtw[:] = ndtw_score
state_not_dones[state_not_dones] = np.array(dones) == False
if sum(dones) > 0:
shift_index = 0
for i in range(self.envs.num_envs):
if dones[i]:
# print(k, self.local_rank)
i = i - shift_index
not_done_index.pop(i)
self.envs.pause_at(i)
if self.envs.num_envs == 0:
break
# def pop_helper(data, index):
# dim = list(data.shape)
# data = data.tolist()
# data.pop(index)
# dim[0] -= 1
# return torch.tensor(data).view(dim).cuda()
# # prev_actions = pop_helper(prev_actions, i)
# # prev_oracle_actions = pop_helper(prev_oracle_actions, i)
# if 'CMA' in self.config.MODEL.policy_name:
# rnn_states = pop_helper(rnn_states, i)
observations.pop(i)
shift_index += 1
if self.envs.num_envs == 0:
break
not_done_masks = torch.ones(
self.envs.num_envs, 1, dtype=torch.bool, device=self.device
)
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, self.obs_transforms)
# # REINFORCE waypoint prediction
# way_rl_loss = 0.0
# way_rl_total = 0.0
# way_rl_length = len(way_rewards)
# way_discount_reward = np.zeros(init_num_envs, np.float32)
# for t in range(way_rl_length-1, -1, -1):
# way_discount_reward = way_discount_reward * 0.90 + way_rewards[t]
# way_r_ = Variable(torch.from_numpy(way_discount_reward.copy()),
# requires_grad=False).cuda()
# way_mask_ = Variable(torch.from_numpy(way_rl_masks[t]),
# requires_grad=False).cuda()
# way_rl_loss += (-way_log_probs[t] * way_r_ * way_mask_).sum()
# way_rl_total = way_rl_total + np.sum(way_rl_masks[t])
# way_rl_loss /= way_rl_total
# A2C
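        # Advantage actor-critic (only reached when train_rl=True): returns are accumulated
        # backwards with a 0.9 discount, the advantage (R - V) weights the policy-gradient
        # term, a 0.5 * (R - V)^2 critic loss and an entropy bonus (weight 0.01) are added,
        # all terms are masked to still-active environments, and the sum is normalised by the
        # number of active environment steps. Note that enabling train_rl also requires
        # restoring the commented-out reward / hidden-state bookkeeping earlier in this method.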
if train_rl:
rl_loss = 0.
length = len(rewards)
discount_reward = np.zeros(init_bs, np.float32)
rl_total = 0
for t in range(length-1, -1, -1):
discount_reward = discount_reward * 0.90 + rewards[t] # If it ended, the reward will be 0
mask_ = Variable(torch.from_numpy(critic_masks[t]), requires_grad=False).to(self.device)
clip_reward = discount_reward.copy()
r_ = Variable(torch.from_numpy(clip_reward), requires_grad=False).to(self.device)
v_ = self.policy.net(
mode = 'critic',
post_states = hidden_states[t])
a_ = (r_ - v_).detach()
rl_loss += (-policy_log_probs[t] * a_ * mask_).sum()
rl_loss += (((r_ - v_) ** 2) * mask_).sum() * 0.5 # 1/2 L2 loss
rl_loss += (- 0.01 * entropys[t] * mask_).sum()
rl_total = rl_total + np.sum(critic_masks[t])
rl_loss = rl_loss / rl_total
il_loss += rl_loss
elif train_tf:
il_loss = ml_loss / total_weight # 0.20 factor
return il_loss #, way_rl_loss
def train(self) -> None:
split = self.config.TASK_CONFIG.DATASET.SPLIT
self.config.defrost()
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = self.config.IL.max_traj_len
if (
self.config.IL.DAGGER.expert_policy_sensor
not in self.config.TASK_CONFIG.TASK.SENSORS
):
self.config.TASK_CONFIG.TASK.SENSORS.append(
self.config.IL.DAGGER.expert_policy_sensor
)
self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
self.config.NUM_ENVIRONMENTS = self.config.IL.batch_size // len(
self.config.SIMULATOR_GPU_IDS)
self.config.use_pbar = not is_slurm_batch_job()
''' *** if choosing image '''
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations(12)
# sensor_uuids = []
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
# sensor_uuids.append(camera_config.UUID)
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
# print('deal with choosing images')
# import pdb; pdb.set_trace()
self.config.freeze()
self.world_size = self.config.GPU_NUMBERS
self.local_rank = self.config.local_rank
self.batch_size = self.config.IL.batch_size
torch.cuda.set_device(self.device)
if self.world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.defrost()
self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.freeze()
torch.cuda.set_device(self.device)
# print(self.local_rank,self.device)
self.split = split
episode_ids = self.allocate_allowed_episode_by_scene()
# self.temp_envs = get_env_class(self.config.ENV_NAME)(self.config)
# self.temp_envs.episodes contains all 10819 GT samples
# episodes_allowed is slightly smaller -- 10783 valid episodes
# check the usage of self.temp_envs._env.sim.is_navigable([0,0,0])
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME),
episodes_allowed=episode_ids,
auto_reset_done=False
)
num_epoches_per_ratio = int(np.ceil(self.config.IL.epochs/self.config.IL.decay_time))
print('\nFinished constructing environments')
dataset_length = sum(self.envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
observation_space = self.envs.observation_spaces[0]
action_space = self.envs.action_spaces[0]
self.obs_transforms = get_active_obs_transforms(self.config)
observation_space = apply_obs_transforms_obs_space(
observation_space, self.obs_transforms
)
# self.inflection_weight = torch.tensor([1.0,
# self.config.IL.inflection_weight_coef], device=self.device)
# import pdb; pdb.set_trace()
print('\nInitializing policy network ...')
self._initialize_policy(
self.config,
self.config.IL.load_from_ckpt,
observation_space=observation_space,
action_space=action_space,
)
# import pdb; pdb.set_trace()
print('\nTraining starts ...')
with TensorboardWriter(
self.config.TENSORBOARD_DIR,
flush_secs=self.flush_secs,
purge_step=0,
) as writer:
AuxLosses.activate()
batches_per_epoch = int(np.ceil(dataset_length/self.batch_size))
for epoch in range(self.start_epoch, self.config.IL.epochs):
epoch_str = f"{epoch + 1}/{self.config.IL.epochs}"
t_ = (
tqdm.trange(
batches_per_epoch, leave=False, dynamic_ncols=True
)
if self.config.use_pbar & (self.local_rank < 1)
else range(batches_per_epoch)
)
self.ratio = np.power(self.config.IL.schedule_ratio, epoch//num_epoches_per_ratio + 1)
self.trained_episodes = []
# reconstruct env for every epoch to ensure load same data
if epoch != self.start_epoch:
self.envs = None
self.envs = construct_envs(
self.config, get_env_class(self.config.ENV_NAME),
episodes_allowed=episode_ids,
auto_reset_done=False
)
for batch_idx in t_:
# if batch_idx % 2 == 0:
# loss = self.train_ml(train_rl=False)
# if batch_idx != len(t_)-1:
# continue
# else:
loss = self.train_ml( # way_rl_loss
in_train=True,
train_tf=True, train_rl=False)
# loss += self.train_ml(train_rl=False)
if loss == -1:
break
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
losses = [loss]
# self.way_rl_optimizer.zero_grad()
# way_rl_loss.backward()
# self.way_rl_optimizer.step()
if self.world_size > 1:
for i in range(len(losses)):
reduce_loss(losses[i], self.local_rank, self.world_size)
losses[i] = losses[i].item()
else:
for i in range(len(losses)):
losses[i] = losses[i].item()
loss = losses[0]
if self.config.use_pbar:
if self.local_rank < 1: # seems can be removed
t_.set_postfix(
{
"epoch": epoch_str,
"loss": round(loss, 4),
}
)
writer.add_scalar("loss", loss, self.step_id)
self.step_id += 1 # noqa: SIM113
if self.local_rank < 1: # and epoch % 3 == 0:
self.save_checkpoint(epoch, self.step_id)
AuxLosses.deactivate()
| 28,887 | 42.310345 | 116 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/__init__.py | from vlnce_baselines import ss_trainer_CMA, ss_trainer_VLNBERT
from vlnce_baselines.common import environments
from vlnce_baselines.models import (
Policy_ViewSelection_CMA,
Policy_ViewSelection_VLNBERT,
)
| 215 | 26 | 62 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/config/__init__.py | 0 | 0 | 0 | py |
|
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/config/default.py | from typing import List, Optional, Union
import habitat_baselines.config.default
from habitat.config.default import CONFIG_FILE_SEPARATOR
from habitat.config.default import Config as CN
from habitat_extensions.config.default import (
get_extended_config as get_task_config,
)
# -----------------------------------------------------------------------------
# EXPERIMENT CONFIG
# -----------------------------------------------------------------------------
_C = CN()
_C.BASE_TASK_CONFIG_PATH = "habitat_extensions/config/vlnce_task.yaml"
_C.TASK_CONFIG = CN() # task_config will be stored as a config node
_C.TRAINER_NAME = "dagger"
_C.ENV_NAME = "VLNCEDaggerEnv"
_C.SIMULATOR_GPU_IDS = [0]
_C.VIDEO_OPTION = [] # options: "disk", "tensorboard"
_C.VIDEO_DIR = "videos/debug"
_C.TENSORBOARD_DIR = "data/tensorboard_dirs/debug"
_C.RESULTS_DIR = "data/checkpoints/pretrained/evals"
# -----------------------------------------------------------------------------
# EVAL CONFIG
# -----------------------------------------------------------------------------
_C.EVAL = CN()
# The split to evaluate on
_C.EVAL.SPLIT = "val_seen"
_C.EVAL.EPISODE_COUNT = -1
_C.EVAL.LANGUAGES = ["en-US", "en-IN"]
_C.EVAL.SAMPLE = False
_C.EVAL.SAVE_RESULTS = True
_C.EVAL.EVAL_NONLEARNING = False
_C.EVAL.NONLEARNING = CN()
_C.EVAL.NONLEARNING.AGENT = "RandomAgent"
# -----------------------------------------------------------------------------
# INFERENCE CONFIG
# -----------------------------------------------------------------------------
_C.INFERENCE = CN()
_C.INFERENCE.SPLIT = "test"
_C.INFERENCE.LANGUAGES = ["en-US", "en-IN"]
_C.INFERENCE.SAMPLE = False
_C.INFERENCE.USE_CKPT_CONFIG = True
_C.INFERENCE.CKPT_PATH = "data/checkpoints/CMA_PM_DA_Aug.pth"
_C.INFERENCE.PREDICTIONS_FILE = "predictions.json"
_C.INFERENCE.INFERENCE_NONLEARNING = False
_C.INFERENCE.NONLEARNING = CN()
_C.INFERENCE.NONLEARNING.AGENT = "RandomAgent"
_C.INFERENCE.FORMAT = "r2r" # either 'rxr' or 'r2r'
# -----------------------------------------------------------------------------
# IMITATION LEARNING CONFIG
# -----------------------------------------------------------------------------
_C.IL = CN()
_C.IL.lr = 2.5e-4
_C.IL.batch_size = 5
_C.IL.epochs = 4
_C.IL.use_iw = True
# inflection coefficient for RxR training set GT trajectories (guide): 1.9
# inflection coefficient for R2R training set GT trajectories: 3.2
_C.IL.inflection_weight_coef = 3.2
# load an already trained model for fine tuning
_C.IL.load_from_ckpt = False
_C.IL.ckpt_to_load = "data/checkpoints/ckpt.0.pth"
# if True, loads the optimizer state, epoch, and step_id from the ckpt dict.
_C.IL.is_requeue = False
# if True, start training from the saved epoch
# -----------------------------------------------------------------------------
# IL: RXR TRAINER CONFIG
# -----------------------------------------------------------------------------
_C.IL.RECOLLECT_TRAINER = CN()
_C.IL.RECOLLECT_TRAINER.preload_trajectories_file = True
_C.IL.RECOLLECT_TRAINER.trajectories_file = (
"data/trajectories_dirs/debug/trajectories.json.gz"
)
# if set to a positive int, episodes with longer paths are ignored in training
_C.IL.RECOLLECT_TRAINER.max_traj_len = -1
# if set to a positive int, effective_batch_size must be some multiple of
# IL.batch_size. Gradient accumulation enables an arbitrarily high "effective"
# batch size.
_C.IL.RECOLLECT_TRAINER.effective_batch_size = -1
_C.IL.RECOLLECT_TRAINER.preload_size = 30
_C.IL.RECOLLECT_TRAINER.use_iw = True
_C.IL.RECOLLECT_TRAINER.gt_file = (
"data/datasets/RxR_VLNCE_v0/{split}/{split}_{role}_gt.json.gz"
)
# -----------------------------------------------------------------------------
# IL: DAGGER CONFIG
# -----------------------------------------------------------------------------
_C.IL.DAGGER = CN()
_C.IL.DAGGER.iterations = 10
_C.IL.DAGGER.update_size = 5000
_C.IL.DAGGER.p = 0.75
_C.IL.DAGGER.expert_policy_sensor = "SHORTEST_PATH_SENSOR"
_C.IL.DAGGER.expert_policy_sensor_uuid = "shortest_path_sensor"
_C.IL.DAGGER.load_space = False
# if True, load saved observation space and action space
_C.IL.DAGGER.lmdb_map_size = 1.0e12
# if True, saves data to disk in fp16 and converts back to fp32 when loading.
_C.IL.DAGGER.lmdb_fp16 = False
# How often to commit the writes to the DB. Fewer commits are better,
# but everything must be kept in memory until a commit happens.
_C.IL.DAGGER.lmdb_commit_frequency = 500
# If True, load precomputed features directly from lmdb_features_dir.
_C.IL.DAGGER.preload_lmdb_features = False
_C.IL.DAGGER.lmdb_features_dir = (
"data/trajectories_dirs/debug/trajectories.lmdb"
)
# -----------------------------------------------------------------------------
# RL CONFIG
# -----------------------------------------------------------------------------
_C.RL = CN()
_C.RL.POLICY = CN()
_C.RL.POLICY.OBS_TRANSFORMS = CN()
_C.RL.POLICY.OBS_TRANSFORMS.ENABLED_TRANSFORMS = [
"CenterCropperPerSensor",
]
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR = CN()
_C.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR.SENSOR_CROPS = [
("rgb", (224, 224)),
("depth", (256, 256)),
]
_C.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR = CN()
_C.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = [
("rgb", (224, 224)),
("depth", (256, 256)),
]
# -----------------------------------------------------------------------------
# MODELING CONFIG
# -----------------------------------------------------------------------------
_C.MODEL = CN()
_C.MODEL.policy_name = "CMAPolicy" # or "Seq2SeqPolicy"
_C.MODEL.ablate_depth = False
_C.MODEL.ablate_rgb = False
_C.MODEL.ablate_instruction = False
_C.MODEL.INSTRUCTION_ENCODER = CN()
_C.MODEL.INSTRUCTION_ENCODER.sensor_uuid = "instruction"
_C.MODEL.INSTRUCTION_ENCODER.vocab_size = 2504
_C.MODEL.INSTRUCTION_ENCODER.use_pretrained_embeddings = True
_C.MODEL.INSTRUCTION_ENCODER.embedding_file = (
"data/datasets/R2R_VLNCE_v1-2_preprocessed/embeddings.json.gz"
)
_C.MODEL.INSTRUCTION_ENCODER.dataset_vocab = (
"data/datasets/R2R_VLNCE_v1-2_preprocessed/train/train.json.gz"
)
_C.MODEL.INSTRUCTION_ENCODER.fine_tune_embeddings = False
_C.MODEL.INSTRUCTION_ENCODER.embedding_size = 50
_C.MODEL.INSTRUCTION_ENCODER.hidden_size = 128
_C.MODEL.INSTRUCTION_ENCODER.rnn_type = "LSTM"
_C.MODEL.INSTRUCTION_ENCODER.final_state_only = True
_C.MODEL.INSTRUCTION_ENCODER.bidirectional = False
_C.MODEL.spatial_output = True
_C.MODEL.RGB_ENCODER = CN()
_C.MODEL.RGB_ENCODER.cnn_type = "TorchVisionResNet50"
_C.MODEL.RGB_ENCODER.output_size = 256
_C.MODEL.DEPTH_ENCODER = CN()
_C.MODEL.DEPTH_ENCODER.cnn_type = "VlnResnetDepthEncoder"
_C.MODEL.DEPTH_ENCODER.output_size = 128
# type of resnet to use
_C.MODEL.DEPTH_ENCODER.backbone = "resnet50"
# path to DDPPO resnet weights
_C.MODEL.DEPTH_ENCODER.ddppo_checkpoint = (
"data/ddppo-models/gibson-2plus-resnet50.pth"
)
_C.MODEL.STATE_ENCODER = CN()
_C.MODEL.STATE_ENCODER.hidden_size = 512
_C.MODEL.STATE_ENCODER.rnn_type = "GRU"
_C.MODEL.SEQ2SEQ = CN()
_C.MODEL.SEQ2SEQ.use_prev_action = False
_C.MODEL.PROGRESS_MONITOR = CN()
_C.MODEL.PROGRESS_MONITOR.use = False
_C.MODEL.PROGRESS_MONITOR.alpha = 1.0 # loss multiplier
def purge_keys(config: CN, keys: List[str]) -> None:
for k in keys:
del config[k]
config.register_deprecated_key(k)
def get_config(
config_paths: Optional[Union[List[str], str]] = None,
opts: Optional[list] = None,
) -> CN:
r"""Create a unified config with default values. Initialized from the
habitat_baselines default config. Overwritten by values from
`config_paths` and overwritten by options from `opts`.
Args:
config_paths: List of config paths or string that contains comma
separated list of config paths.
opts: Config options (keys, values) in a list (e.g., passed from
command line into the config. For example, `opts = ['FOO.BAR',
0.5]`. Argument can be used for parameter sweeping or quick tests.
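    Example (path and option values here are illustrative only):
        config = get_config("path/to/experiment.yaml", ["IL.batch_size", 4])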
"""
config = CN()
config.merge_from_other_cfg(habitat_baselines.config.default._C)
purge_keys(config, ["SIMULATOR_GPU_ID", "TEST_EPISODE_COUNT"])
config.merge_from_other_cfg(_C.clone())
if config_paths:
if isinstance(config_paths, str):
if CONFIG_FILE_SEPARATOR in config_paths:
config_paths = config_paths.split(CONFIG_FILE_SEPARATOR)
else:
config_paths = [config_paths]
prev_task_config = ""
for config_path in config_paths:
config.merge_from_file(config_path)
if config.BASE_TASK_CONFIG_PATH != prev_task_config:
config.TASK_CONFIG = get_task_config(
config.BASE_TASK_CONFIG_PATH
)
prev_task_config = config.BASE_TASK_CONFIG_PATH
if opts:
config.CMD_TRAILING_OPTS = opts
config.merge_from_list(opts)
config.freeze()
return config
| 8,920 | 37.786957 | 79 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/aux_losses.py | import torch
class _AuxLosses:
def __init__(self):
self._losses = {}
self._loss_alphas = {}
self._is_active = False
def clear(self):
self._losses.clear()
self._loss_alphas.clear()
def register_loss(self, name, loss, alpha=1.0):
assert self.is_active()
assert name not in self._losses
self._losses[name] = loss
self._loss_alphas[name] = alpha
def get_loss(self, name):
return self._losses[name]
def reduce(self, mask):
assert self.is_active()
total = torch.tensor(0.0).cuda()
for k in self._losses.keys():
k_loss = torch.masked_select(self._losses[k], mask).mean()
total = total + self._loss_alphas[k] * k_loss
return total
def is_active(self):
return self._is_active
def activate(self):
self._is_active = True
def deactivate(self):
self._is_active = False
AuxLosses = _AuxLosses()
| 987 | 20.955556 | 70 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/recollection_dataset.py | import gzip
import json
from collections import defaultdict, deque
import numpy as np
import torch
import tqdm
from gym import Space
from habitat.config.default import Config
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1
from vlnce_baselines.common.env_utils import construct_envs
from vlnce_baselines.common.utils import extract_instruction_tokens
class TeacherRecollectionDataset(torch.utils.data.IterableDataset):
def __init__(self, config: Config):
super().__init__()
self.config = config
# self._preload = []
self._preload = deque()
self.world_size = self.config.GPU_NUMBERS
self.rank = self.config.local_rank
assert (
config.IL.RECOLLECT_TRAINER.preload_size >= config.IL.batch_size
), "preload size must be greater than batch size."
self.envs = None
self._env_observations = None
if config.IL.use_iw:
self.inflec_weights = torch.tensor(
[1.0, config.IL.inflection_weight_coef]
)
else:
self.inflec_weights = torch.tensor([1.0, 1.0])
if self.config.IL.RECOLLECT_TRAINER.preload_trajectories_file:
self.config.defrost()
self.config.IL.RECOLLECT_TRAINER.trajectories_file = \
self.config.IL.RECOLLECT_TRAINER.trajectories_file[
:-8] + '_w' + \
str(self.world_size) + '_r' + str(self.rank) + '.json.gz'
self.config.freeze()
with gzip.open(
config.IL.RECOLLECT_TRAINER.trajectories_file, "rt"
) as f:
self.trajectories = json.load(f)
else:
self.trajectories = self.collect_dataset()
self.initialize_sims()
def initialize_sims(self):
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.MEASUREMENTS = []
config.freeze()
self.envs = construct_envs(
config,
get_env_class(config.ENV_NAME),
episodes_allowed=list(self.trajectories.keys()),
)
self.length = sum(self.envs.number_of_episodes)
self.obs_transforms = get_active_obs_transforms(self.config)
self._observation_space = apply_obs_transforms_obs_space(
self.envs.observation_spaces[0], self.obs_transforms
)
self.env_step = [0 for _ in range(self.envs.num_envs)]
self._env_observations = [[] for _ in range(self.envs.num_envs)]
observations = self.envs.reset()
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
for i, ep in enumerate(self.envs.current_episodes()):
path_step = self.trajectories[str(ep.episode_id)][0]
self._env_observations[i].append(
(
observations[i],
path_step[0], # prev_action
path_step[2], # oracle_action
)
)
@property
def batch_size(self):
return self.config.IL.batch_size
@property
def observation_space(self) -> Space:
assert self.envs is not None, "Simulator must first be loaded."
assert self._observation_space is not None
return self._observation_space
@property
def action_space(self) -> Space:
assert self.envs is not None, "Simulator must first be loaded."
return self.envs.action_spaces[0]
def close_sims(self):
self.envs.close()
del self.envs
del self._env_observations
self.envs = None
self._env_observations = None
def collect_dataset(self):
r"""Uses the ground truth trajectories to create a teacher forcing
datset for a given split. Loads both guide and follower episodes.
"""
trajectories = defaultdict(list)
split = self.config.TASK_CONFIG.DATASET.SPLIT
if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file:
gt_data = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
if (
ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES
and role not in self.config.TASK_CONFIG.DATASET.ROLES
):
continue
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_file.format(
split=split, role=role
),
"rt",
) as f:
gt_data.update(json.load(f))
else:
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_path.format(split=split)
) as f:
gt_data = json.load(f)
t = (
tqdm.tqdm(gt_data.items(), "GT Collection")
if self.config.use_pbar
else gt_data.items()
)
for episode_id, trajectory in t:
if (
self.config.IL.RECOLLECT_TRAINER.max_traj_len != -1
and len(trajectory["actions"])
> self.config.IL.RECOLLECT_TRAINER.max_traj_len
) or (
self.config.IL.RECOLLECT_TRAINER.min_traj_len != -1
and len(trajectory["actions"])
< self.config.IL.RECOLLECT_TRAINER.min_traj_len
):
continue
for i, action in enumerate(trajectory["actions"]):
prev_action = (
trajectories[episode_id][i - 1][1]
if i
else HabitatSimActions.STOP
)
# [prev_action, action, oracle_action]
trajectories[episode_id].append([prev_action, action, action])
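        # Shard the GT trajectories across distributed workers: rank r keeps every
        # world_size-th episode, so workers collect disjoint subsets.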
trajectories = dict(list(trajectories.items())[self.rank::self.world_size])
self.config.defrost()
self.config.IL.RECOLLECT_TRAINER.trajectories_file = \
self.config.IL.RECOLLECT_TRAINER.trajectories_file[:-8]+'_w'+ \
str(self.world_size)+'_r'+str(self.rank) + '.json.gz'
self.config.freeze()
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.trajectories_file, "wt"
) as f:
f.write(json.dumps(trajectories))
return trajectories
def _load_next(self):
"""
Episode length is currently not considered. We were previously batching episodes
together with similar lengths. Not sure if we need to bring that back.
"""
# self.rank = 0
if len(self._preload):
# out = self._preload[self.rank]
# self._preload = self._preload[self.world_size:]
# return out
return self._preload.popleft()
while (
len(self._preload) < self.config.IL.RECOLLECT_TRAINER.preload_size
):
current_episodes = self.envs.current_episodes()
prev_eps = current_episodes
# get the next action for each env
actions = [
self.trajectories[str(ep.episode_id)][self.env_step[i]][1]
for i, ep in enumerate(current_episodes)
]
outputs = self.envs.step(actions)
observations, _, dones, _ = [list(x) for x in zip(*outputs)]
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
current_episodes = self.envs.current_episodes()
for i in range(self.envs.num_envs):
self.env_step[i] += 1
if dones[i]:
assert len(self._env_observations[i]) == len(
self.trajectories[str(prev_eps[i].episode_id)]
), "Collected episode does not match the step count of trajectory"
self._preload.append(
(
[o[0] for o in self._env_observations[i]],
[o[1] for o in self._env_observations[i]],
[o[2] for o in self._env_observations[i]],
)
)
self._env_observations[i] = []
self.env_step[i] = 0
path_step = self.trajectories[
str(current_episodes[i].episode_id)
][self.env_step[i]]
self._env_observations[i].append(
(
observations[i],
path_step[0], # prev_action
path_step[2], # oracle_action
)
)
assert (
len(self._env_observations[i])
<= self.config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS
), "Trajectories should be no more than the maximum episode steps."
# out = self._preload[self.rank]
# self._preload = self._preload[self.world_size:]
# return out
return self._preload.popleft()
def __next__(self):
"""Takes about 1s to once self._load_next() has finished with a batch
size of 5. For this reason, we probably don't need to use extra workers.
"""
x = self._load_next()
obs, prev_actions, oracle_actions = x
# transpose obs
obs_t = defaultdict(list)
for k in obs[0]:
for i in range(len(obs)):
obs_t[k].append(obs[i][k])
obs_t[k] = np.array(obs_t[k])
for k, v in obs_t.items():
obs_t[k] = torch.from_numpy(np.copy(v))
prev_actions = torch.from_numpy(np.copy(prev_actions))
oracle_actions = torch.from_numpy(np.copy(oracle_actions))
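        # An inflection is a timestep where the oracle action changes; those steps are
        # upweighted by IL.inflection_weight_coef (see self.inflec_weights) in the loss.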
inflections = torch.cat(
[
torch.tensor([1], dtype=torch.long),
(oracle_actions[1:] != oracle_actions[:-1]).long(),
]
)
return (
obs_t,
prev_actions,
oracle_actions,
self.inflec_weights[inflections],
)
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
assert (
worker_info.num_workers == 1
), "multiple workers not supported."
return self
| 10,692 | 34.88255 | 88 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/utils.py | from typing import Any, Dict, List
import torch
import torch.distributed as dist
import numpy as np
import copy
def extract_instruction_tokens(
observations: List[Dict],
instruction_sensor_uuid: str,
tokens_uuid: str = "tokens",
) -> List[Dict]:
r"""Extracts instruction tokens from an instruction sensor if the tokens
exist and are in a dict structure.
"""
for i in range(len(observations)):
if (
isinstance(observations[i][instruction_sensor_uuid], dict)
and tokens_uuid in observations[i][instruction_sensor_uuid]
):
            observations[i][instruction_sensor_uuid] = observations[i][
                instruction_sensor_uuid
            ][tokens_uuid]
else:
break
return observations
def gather_list_and_concat(list_of_nums,world_size):
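    """All-gather `list_of_nums` (a tensor or a plain list of numbers) from every
    rank and return the list of per-rank tensors; the input is moved to the GPU
    first if necessary."""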
if not torch.is_tensor(list_of_nums):
tensor = torch.Tensor(list_of_nums).cuda()
else:
        if not list_of_nums.is_cuda:
tensor = list_of_nums.cuda()
else:
tensor = list_of_nums
gather_t = [torch.ones_like(tensor) for _ in
range(world_size)]
dist.all_gather(gather_t, tensor)
return gather_t
def dis_to_con(path, amount=0.25):
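    """Convert a discrete waypoint path into a continuous one by inserting
    interpolated points roughly every `amount` meters (measured in the
    horizontal x-z plane) between consecutive waypoints."""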
starts = path[:-1]
ends = path[1:]
new_path = [path[0]]
for s, e in zip(starts,ends):
vec = np.array(e) - np.array(s)
ratio = amount/np.linalg.norm(vec[[0,2]])
unit = vec*ratio
times = int(1/ratio)
for i in range(times):
if i != times - 1:
location = np.array(new_path[-1])+unit
new_path.append(location.tolist())
new_path.append(e)
return new_path | 1,716 | 30.218182 | 76 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/environments.py | from typing import Any, Dict, Optional, Tuple, List, Union
import habitat
import numpy as np
from habitat import Config, Dataset
from habitat.core.simulator import Observations
from habitat.tasks.utils import cartesian_to_polar
from habitat.utils.geometry_utils import quaternion_rotate_vector
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat.sims.habitat_simulator.actions import HabitatSimActions
@baseline_registry.register_env(name="VLNCEDaggerEnv")
class VLNCEDaggerEnv(habitat.RLEnv):
def __init__(self, config: Config, dataset: Optional[Dataset] = None):
super().__init__(config.TASK_CONFIG, dataset)
def get_reward_range(self) -> Tuple[float, float]:
# We don't use a reward for DAgger, but the baseline_registry requires
# we inherit from habitat.RLEnv.
return (0.0, 0.0)
def get_reward(self, observations: Observations) -> float:
return 0.0
def get_done(self, observations: Observations) -> bool:
return self._env.episode_over
def get_info(self, observations: Observations) -> Dict[Any, Any]:
return self.habitat_env.get_metrics()
def get_metrics(self):
return self.habitat_env.get_metrics()
def get_geodesic_dist(self,
node_a: List[float], node_b: List[float]):
return self._env.sim.geodesic_distance(node_a, node_b)
def check_navigability(self, node: List[float]):
return self._env.sim.is_navigable(node)
def get_agent_info(self):
agent_state = self._env.sim.get_agent_state()
heading_vector = quaternion_rotate_vector(
agent_state.rotation.inverse(), np.array([0, 0, -1])
)
heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
return {
"position": agent_state.position.tolist(),
"heading": heading,
"stop": self._env.task.is_stop_called,
}
def get_observation_at(self,
source_position: List[float],
source_rotation: List[Union[int, np.float64]],
keep_agent_at_new_pose: bool = False):
return self._env.sim.get_observations_at(
source_position,
source_rotation,
keep_agent_at_new_pose)
def observations_by_angles(self, angle_list: List[float]):
        r'''Get observations from the desired angles.
        Angles are in radians; positive values rotate anticlockwise.'''
obs = []
sim = self._env.sim
init_state = sim.get_agent_state()
prev_angle = 0
left_action = HabitatSimActions.TURN_LEFT
init_amount = sim.get_agent(0).agent_config.action_space[left_action].actuation.amount # turn left
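        # Temporarily override TURN_LEFT's actuation amount so the agent can be rotated
        # by arbitrary angles; the original amount and agent state are restored below.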
for angle in angle_list:
sim.get_agent(0).agent_config.action_space[left_action].actuation.amount = (angle-prev_angle)*180/np.pi
obs.append(sim.step(left_action))
prev_angle = angle
sim.set_agent_state(init_state.position, init_state.rotation)
sim.get_agent(0).agent_config.action_space[left_action].actuation.amount = init_amount
return obs
def current_dist_to_goal(self):
sim = self._env.sim
init_state = sim.get_agent_state()
init_distance = self._env.sim.geodesic_distance(
init_state.position, self._env.current_episode.goals[0].position,
)
return init_distance
def cand_dist_to_goal(self, angle: float, forward: float):
r'''get resulting distance to goal by executing
a candidate action'''
sim = self._env.sim
init_state = sim.get_agent_state()
forward_action = HabitatSimActions.MOVE_FORWARD
init_forward = sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
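        # A heading h about the +Y (up) axis is the quaternion (cos(h/2), 0, sin(h/2), 0),
        # so arctan2(q.y, q.w) recovers h/2; adding angle/2 gives the half-angle of the
        # candidate heading used to build the rotation below.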
theta = np.arctan2(init_state.rotation.imag[1],
init_state.rotation.real) + angle / 2
rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
sim.set_agent_state(init_state.position, rotation)
ksteps = int(forward//init_forward)
for k in range(ksteps):
sim.step_without_obs(forward_action)
post_state = sim.get_agent_state()
post_distance = self._env.sim.geodesic_distance(
post_state.position, self._env.current_episode.goals[0].position,
)
# reset agent state
sim.set_agent_state(init_state.position, init_state.rotation)
return post_distance
def change_current_path(self, new_path: Any, collisions: Any):
        '''Record the current path produced by high-to-low actions (for logging only).'''
if 'current_path' not in self._env.current_episode.info.keys():
self._env.current_episode.info['current_path'] = [np.array(self._env.current_episode.start_position)]
self._env.current_episode.info['current_path'] += new_path
if 'collisions' not in self._env.current_episode.info.keys():
self._env.current_episode.info['collisions'] = []
self._env.current_episode.info['collisions'] += collisions
@baseline_registry.register_env(name="VLNCEInferenceEnv")
class VLNCEInferenceEnv(habitat.RLEnv):
def __init__(self, config: Config, dataset: Optional[Dataset] = None):
super().__init__(config.TASK_CONFIG, dataset)
def get_reward_range(self):
return (0.0, 0.0)
def get_reward(self, observations: Observations):
return 0.0
def get_done(self, observations: Observations):
return self._env.episode_over
def get_info(self, observations: Observations):
agent_state = self._env.sim.get_agent_state()
heading_vector = quaternion_rotate_vector(
agent_state.rotation.inverse(), np.array([0, 0, -1])
)
heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
return {
"position": agent_state.position.tolist(),
"heading": heading,
"stop": self._env.task.is_stop_called,
}
| 5,996 | 38.453947 | 115 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/env_utils.py | import os
import random
from typing import List, Optional, Type, Union
import habitat
from habitat import Config, Env, RLEnv, VectorEnv, make_dataset
from habitat_baselines.utils.env_utils import make_env_fn
random.seed(0)
SLURM_JOBID = os.environ.get("SLURM_JOB_ID", None)
def is_slurm_job() -> bool:
return SLURM_JOBID is not None
def is_slurm_batch_job() -> bool:
r"""Heuristic to determine if a slurm job is a batch job or not. Batch jobs
will have a job name that is not a shell unless the user specifically set the job
name to that of a shell. Interactive jobs have a shell name as their job name.
"""
return is_slurm_job() and os.environ.get("SLURM_JOB_NAME", None) not in (
None,
"bash",
"zsh",
"fish",
"tcsh",
"sh",
)
def construct_envs(
config: Config,
env_class: Type[Union[Env, RLEnv]],
workers_ignore_signals: bool = False,
auto_reset_done: bool = True,
episodes_allowed: Optional[List[str]] = None,
) -> VectorEnv:
r"""Create VectorEnv object with specified config and env class type.
    To allow better performance, the dataset is split into smaller per-env
    datasets, grouped by scene.
    :param config: configs that contain num_environments as well as information
        necessary to create individual environments.
:param env_class: class type of the envs to be created.
:param workers_ignore_signals: Passed to :ref:`habitat.VectorEnv`'s constructor
:param auto_reset_done: Whether or not to automatically reset the env on done
:return: VectorEnv object created according to specification.
"""
num_envs_per_gpu = config.NUM_ENVIRONMENTS
if isinstance(config.SIMULATOR_GPU_IDS, list):
gpus = config.SIMULATOR_GPU_IDS
else:
gpus = [config.SIMULATOR_GPU_IDS]
num_gpus = len(gpus)
num_envs = num_gpus * num_envs_per_gpu
if episodes_allowed is not None:
config.defrost()
config.TASK_CONFIG.DATASET.EPISODES_ALLOWED = episodes_allowed
config.freeze()
configs = []
env_classes = [env_class for _ in range(num_envs)]
dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE)
scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES
if "*" in config.TASK_CONFIG.DATASET.CONTENT_SCENES:
scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)
if num_envs > 1:
if len(scenes) == 0:
raise RuntimeError(
"No scenes to load, multi-process logic relies on being able"
" to split scenes uniquely between processes"
)
if len(scenes) < num_envs and len(scenes) != 1:
raise RuntimeError(
"reduce the number of GPUs or envs as there"
" aren't enough number of scenes"
)
random.shuffle(scenes)
if len(scenes) == 1:
scene_splits = [[scenes[0]] for _ in range(num_envs)]
else:
scene_splits = [[] for _ in range(num_envs)]
for idx, scene in enumerate(scenes):
scene_splits[idx % len(scene_splits)].append(scene)
assert sum(map(len, scene_splits)) == len(scenes)
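    # Build one process config per env: each gets its own seed, its scene subset,
    # and the simulator GPU it should render on.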
for i in range(num_gpus):
for j in range(num_envs_per_gpu):
proc_config = config.clone()
proc_config.defrost()
proc_id = (i * num_envs_per_gpu) + j
task_config = proc_config.TASK_CONFIG
task_config.SEED += proc_id
if len(scenes) > 0:
task_config.DATASET.CONTENT_SCENES = scene_splits[proc_id]
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpus[i]
task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS
proc_config.freeze()
configs.append(proc_config)
envs = habitat.VectorEnv(
make_env_fn=make_env_fn,
env_fn_args=tuple(zip(configs, env_classes)),
auto_reset_done=auto_reset_done,
workers_ignore_signals=workers_ignore_signals,
)
return envs
def construct_envs_auto_reset_false(
config: Config, env_class: Type[Union[Env, RLEnv]]
) -> VectorEnv:
return construct_envs(config, env_class, auto_reset_done=False)
def construct_envs_for_rl(
config: Config,
env_class: Type[Union[Env, RLEnv]],
workers_ignore_signals: bool = False,
auto_reset_done: bool = True,
episodes_allowed: Optional[List[str]] = None,
) -> VectorEnv:
r"""Create VectorEnv object with specified config and env class type.
    To allow better performance, the dataset is split into smaller per-env
    datasets, grouped by scene.
    :param config: configs that contain num_environments as well as information
        necessary to create individual environments.
:param env_class: class type of the envs to be created.
:param workers_ignore_signals: Passed to :ref:`habitat.VectorEnv`'s constructor
:param auto_reset_done: Whether or not to automatically reset the env on done
:return: VectorEnv object created according to specification.
"""
num_envs_per_gpu = config.NUM_ENVIRONMENTS
if isinstance(config.SIMULATOR_GPU_IDS, list):
gpus = config.SIMULATOR_GPU_IDS
else:
gpus = [config.SIMULATOR_GPU_IDS]
num_gpus = len(gpus)
num_envs = num_gpus * num_envs_per_gpu
if episodes_allowed is not None:
config.defrost()
config.TASK_CONFIG.DATASET.EPISODES_ALLOWED = episodes_allowed
config.freeze()
configs = []
env_classes = [env_class for _ in range(num_envs)]
dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE)
scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES
if "*" in config.TASK_CONFIG.DATASET.CONTENT_SCENES:
scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)
if num_envs > 1:
if len(scenes) == 0:
raise RuntimeError(
"No scenes to load, multi-process logic relies on being able"
" to split scenes uniquely between processes"
)
if len(scenes) < num_envs and len(scenes) != 1:
raise RuntimeError(
"reduce the number of GPUs or envs as there"
" aren't enough number of scenes"
)
if len(scenes) == 1:
scene_splits = [[scenes[0]] for _ in range(num_envs)]
else:
scene_splits = [[] for _ in range(num_envs)]
for idx, scene in enumerate(scenes):
scene_splits[idx % len(scene_splits)].append(scene)
assert sum(map(len, scene_splits)) == len(scenes)
for i in range(num_gpus):
for j in range(num_envs_per_gpu):
proc_config = config.clone()
proc_config.defrost()
proc_id = (i * num_envs_per_gpu) + j
task_config = proc_config.TASK_CONFIG
task_config.SEED += proc_id
if len(scenes) > 0:
task_config.DATASET.CONTENT_SCENES = scene_splits[proc_id]
task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpus[i]
task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS
proc_config.freeze()
configs.append(proc_config)
envs = habitat.VectorEnv(
make_env_fn=make_env_fn,
env_fn_args=tuple(zip(configs, env_classes)),
auto_reset_done=auto_reset_done,
workers_ignore_signals=workers_ignore_signals,
)
return envs
| 7,426 | 34.033019 | 85 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/common/base_il_trainer.py | import json
import jsonlines
import os
import time
import warnings
from collections import defaultdict
from typing import Dict, List
import torch
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as distr
import torch.multiprocessing as mp
import gzip
import math
from copy import deepcopy
import tqdm
from gym import Space
from habitat import Config, logger
from habitat.utils.visualizations.utils import append_text_to_image
from habitat_baselines.common.base_il_trainer import BaseILTrainer
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.environments import get_env_class
from habitat_baselines.common.obs_transformers import (
apply_obs_transforms_batch,
apply_obs_transforms_obs_space,
get_active_obs_transforms,
)
from habitat_extensions.measures import Position
from habitat_baselines.common.tensorboard_utils import TensorboardWriter
from habitat_baselines.utils.common import batch_obs, generate_video
from habitat_baselines.utils.common import (
get_checkpoint_id,
poll_checkpoint_folder,
)
from habitat_extensions.utils import observations_to_image
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.common.env_utils import (
construct_envs_auto_reset_false,
construct_envs,
is_slurm_batch_job,
)
from vlnce_baselines.common.utils import *
from habitat_extensions.measures import NDTW
from fastdtw import fastdtw
from ..utils import get_camera_orientations
from ..models.utils import (
length2mask, dir_angle_feature, dir_angle_feature_with_ele,
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf # noqa: F401
class BaseVLNCETrainer(BaseILTrainer):
r"""A base trainer for VLN-CE imitation learning."""
supported_tasks: List[str] = ["VLN-v0"]
def __init__(self, config=None):
super().__init__(config)
self.policy = None
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.obs_transforms = []
self.start_epoch = 0
self.step_id = 0
def _initialize_policy(
self,
config: Config,
load_from_ckpt: bool,
observation_space: Space,
action_space: Space,
) -> None:
policy = baseline_registry.get_policy(self.config.MODEL.policy_name)
self.policy = policy.from_config(
config=config,
observation_space=observation_space,
action_space=action_space,
)
''' initialize the waypoint predictor here '''
from waypoint_prediction.TRM_net import BinaryDistPredictor_TRM
self.waypoint_predictor = BinaryDistPredictor_TRM(device=self.device)
self.waypoint_predictor.load_state_dict(
torch.load(
'./waypoint_prediction/checkpoints/check_val_best_avg_wayscore',
map_location = torch.device('cpu'),
)['predictor']['state_dict']
)
for param in self.waypoint_predictor.parameters():
param.requires_grad = False
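        # The waypoint predictor stays frozen; only the policy parameters are optimized
        # by the AdamW optimizer below.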
self.policy.to(self.device)
self.waypoint_predictor.to(self.device)
self.num_recurrent_layers = self.policy.net.num_recurrent_layers
if self.config.GPU_NUMBERS > 1:
print('Using', self.config.GPU_NUMBERS,'GPU!')
self.policy.net = DDP(self.policy.net.to(self.device), device_ids=[self.device],
output_device=self.device, find_unused_parameters=True, broadcast_buffers=False)
# self.waypoint_predictor = DDP(self.waypoint_predictor.to(self.device), device_ids=[self.device],
# output_device=self.device, find_unused_parameters=True, broadcast_buffers=False)
self.optimizer = torch.optim.AdamW(
self.policy.parameters(), lr=self.config.IL.lr,
)
if load_from_ckpt:
ckpt_path = config.IL.ckpt_to_load
ckpt_dict = self.load_checkpoint(ckpt_path, map_location="cpu")
if 'module' in list(ckpt_dict['state_dict'].keys())[0] and self.config.GPU_NUMBERS == 1:
self.policy.net = torch.nn.DataParallel(self.policy.net.to(self.device),
device_ids=[self.device], output_device=self.device)
self.policy.load_state_dict(ckpt_dict["state_dict"])
self.policy.net = self.policy.net.module
# self.waypoint_predictor = torch.nn.DataParallel(self.waypoint_predictor.to(self.device),
# device_ids=[self.device], output_device=self.device)
else:
self.policy.load_state_dict(ckpt_dict["state_dict"])
if config.IL.is_requeue:
self.optimizer.load_state_dict(ckpt_dict["optim_state"])
self.start_epoch = ckpt_dict["epoch"] + 1
self.step_id = ckpt_dict["step_id"]
logger.info(f"Loaded weights from checkpoint: {ckpt_path}")
params = sum(param.numel() for param in self.policy.parameters())
params_t = sum(
p.numel() for p in self.policy.parameters() if p.requires_grad
)
logger.info(f"Agent parameters: {params}. Trainable: {params_t}")
logger.info("Finished setting up policy.")
def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
return torch.load(checkpoint_path, *args, **kwargs)
@staticmethod
def _pause_envs(
envs_to_pause,
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
rgb_frames=None,
):
# pausing envs with no new episode
if len(envs_to_pause) > 0:
state_index = list(range(envs.num_envs))
for idx in reversed(envs_to_pause):
state_index.pop(idx)
envs.pause_at(idx)
# indexing along the batch dimensions
recurrent_hidden_states = recurrent_hidden_states[state_index]
not_done_masks = not_done_masks[state_index]
prev_actions = prev_actions[state_index]
for k, v in batch.items():
batch[k] = v[state_index]
if rgb_frames is not None:
rgb_frames = [rgb_frames[i] for i in state_index]
return (
envs,
recurrent_hidden_states,
not_done_masks,
prev_actions,
batch,
rgb_frames,
)
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
r"""Evaluates a single checkpoint.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object
checkpoint_index: index of the current checkpoint
Returns:
None
"""
if self.local_rank < 1:
logger.info(f"checkpoint_path: {checkpoint_path}")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")[
"config"
]
)
else:
config = self.config.clone()
config.defrost()
# config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
# config.TASK_CONFIG.DATASET.ROLES = ["guide"]
# config.TASK_CONFIG.DATASET.LANGUAGES = config.EVAL.LANGUAGES
# config.TASK_CONFIG.TASK.NDTW.SPLIT = config.EVAL.SPLIT
# config.TASK_CONFIG.TASK.SDTW.SPLIT = config.EVAL.SPLIT
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
config.IL.ckpt_to_load = checkpoint_path
if len(config.VIDEO_OPTION) > 0:
config.defrost()
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP_VLNCE")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.freeze()
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{config.TASK_CONFIG.DATASET.SPLIT}.json",
)
if os.path.exists(fname):
print("skipping -- evaluation exists.")
return
envs = construct_envs(
config, get_env_class(config.ENV_NAME),
auto_reset_done=False,
episodes_allowed=self.traj
)
dataset_length = sum(envs.number_of_episodes)
print('local rank:', self.local_rank, '|', 'dataset length:', dataset_length)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
envs.num_envs,
self.num_recurrent_layers,
config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t = torch.zeros(
envs.num_envs, 768,
device=self.device,
)
language_features = torch.zeros(
envs.num_envs, 80, 768,
device=self.device,
)
# prev_actions = torch.zeros(
# envs.num_envs, 1, device=self.device, dtype=torch.long
# )
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
stats_episodes = {}
rgb_frames = [[] for _ in range(envs.num_envs)]
if len(config.VIDEO_OPTION) > 0:
os.makedirs(config.VIDEO_DIR, exist_ok=True)
if config.EVAL.EPISODE_COUNT == -1:
episodes_to_eval = sum(envs.number_of_episodes)
else:
episodes_to_eval = min(
config.EVAL.EPISODE_COUNT, sum(envs.number_of_episodes)
)
pbar = tqdm.tqdm(total=episodes_to_eval) if config.use_pbar else None
log_str = (
f"[Ckpt: {checkpoint_index}]"
" [Episodes evaluated: {evaluated}/{total}]"
" [Time elapsed (s): {time}]"
)
start_time = time.time()
total_weight = 0.
ml_loss = 0.
while envs.num_envs > 0 and len(stats_episodes) < episodes_to_eval:
current_episodes = envs.current_episodes()
positions = []; headings = []
for ob_i in range(len(current_episodes)):
agent_state_i = envs.call_at(ob_i,
"get_agent_info", {})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
with torch.no_grad():
if 'CMA' in self.config.MODEL.policy_name:
# instructions
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = instruction_embedding,
text_mask = all_lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
elif 'VLNBERT' in self.config.MODEL.policy_name:
# instruction
lang_idx_tokens = batch['instruction']
padding_idx = 0
lang_masks = (lang_idx_tokens != padding_idx)
lang_token_type_ids = torch.zeros_like(lang_masks,
dtype=torch.long, device=self.device)
h_t_flag = h_t.sum(1)==0.0
h_t_init, language_features = self.policy.net(
mode='language',
lang_idx_tokens=lang_idx_tokens,
lang_masks=lang_masks)
h_t[h_t_flag] = h_t_init[h_t_flag]
language_features = torch.cat(
(h_t.unsqueeze(1), language_features[:,1:,:]), dim=1)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, h_t = self.policy.net(
mode = 'navigation',
observations=batch,
lang_masks=lang_masks,
lang_feats=language_features,
lang_token_type_ids=lang_token_type_ids,
headings=headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
# high-to-low actions in environments
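            # The last candidate slot corresponds to STOP (action 0); any other index issues
            # the high-to-low action (4) parameterized by the predicted waypoint's angle and
            # distance.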
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
for j, ob in enumerate(observations):
if env_actions[j]['action']['action'] == 0:
continue
else:
envs.call_at(j,
'change_current_path',
{'new_path': ob.pop('positions'),
'collisions': ob.pop('collisions')}
)
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8, device=self.device)
# reset envs and observations if necessary
for i in range(envs.num_envs):
if len(config.VIDEO_OPTION) > 0:
frame = observations_to_image(observations[i], infos[i])
frame = append_text_to_image(
frame, current_episodes[i].instruction.instruction_text
)
rgb_frames[i].append(frame)
if not dones[i]:
continue
info = infos[i]
metric = {}
metric['steps_taken'] = info['steps_taken']
ep_id = str(envs.current_episodes()[i].episode_id)
                gt_path = np.array(self.gt_data[ep_id]['locations']).astype(np.float64)
if 'current_path' in envs.current_episodes()[i].info.keys():
                    positions_ = np.array(envs.current_episodes()[i].info['current_path']).astype(np.float64)
collisions_ = np.array(envs.current_episodes()[i].info['collisions'])
assert collisions_.shape[0] == positions_.shape[0] - 1
else:
                    positions_ = np.array(dis_to_con(np.array(info['position']['position']))).astype(np.float64)
                distance = np.array(info['position']['distance']).astype(np.float64)
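                # Episode metrics: success = stopped (action 0) within 3 m of the goal;
                # oracle_success = came within 3 m at any point; SPL rescales success by
                # shortest-path length / max(shortest-path length, actual path length).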
metric['distance_to_goal'] = distance[-1]
metric['success'] = 1. if distance[-1] <= 3. and env_actions[i]['action']['action'] == 0 else 0.
metric['oracle_success'] = 1. if (distance <= 3.).any() else 0.
metric['path_length'] = np.linalg.norm(positions_[1:] - positions_[:-1],axis=1).sum()
metric['collisions'] = collisions_.mean()
gt_length = distance[0]
metric['spl'] = metric['success']*gt_length/max(gt_length,metric['path_length'])
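                # nDTW = exp(-DTW(agent_path, gt_path) / (len(gt_path) * SUCCESS_DISTANCE)),
                # computed on the continuous paths with fastdtw.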
act_con_path = positions_
                gt_con_path = np.array(gt_path).astype(np.float64)
dtw_distance = fastdtw(act_con_path, gt_con_path, dist=NDTW.euclidean_distance)[0]
nDTW = np.exp(-dtw_distance / (len(gt_con_path) * config.TASK_CONFIG.TASK.SUCCESS_DISTANCE))
metric['ndtw'] = nDTW
stats_episodes[current_episodes[i].episode_id] = metric
observations[i] = envs.reset_at(i)[0]
if 'CMA' in self.config.MODEL.policy_name:
rnn_states[i] *= 0.
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t[i] *= 0.
if config.use_pbar:
pbar.update()
else:
logger.info(
log_str.format(
evaluated=len(stats_episodes),
total=episodes_to_eval,
time=round(time.time() - start_time),
)
)
if len(config.VIDEO_OPTION) > 0:
generate_video(
video_option=config.VIDEO_OPTION,
video_dir=config.VIDEO_DIR,
images=rgb_frames[i],
episode_id=current_episodes[i].episode_id,
checkpoint_idx=checkpoint_index,
metrics={
"spl": stats_episodes[
current_episodes[i].episode_id
]["spl"]
},
tb_writer=writer,
)
del stats_episodes[current_episodes[i].episode_id][
"top_down_map_vlnce"
]
del stats_episodes[current_episodes[i].episode_id][
"collisions"
]
rgb_frames[i] = []
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
envs_to_pause = []
next_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if next_episodes[i].episode_id in stats_episodes:
envs_to_pause.append(i)
if 'VLNBERT' in self.config.MODEL.policy_name:
rnn_states = h_t
headings = torch.tensor(headings)
(
envs,
rnn_states,
not_done_masks,
headings, # prev_actions
batch,
rgb_frames,
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
headings,
batch,
rgb_frames,
)
headings = headings.tolist()
if 'VLNBERT' in self.config.MODEL.policy_name:
h_t = rnn_states
envs.close()
if config.use_pbar:
pbar.close()
if self.world_size > 1:
distr.barrier()
aggregated_stats = {}
num_episodes = len(stats_episodes)
for stat_key in next(iter(stats_episodes.values())).keys():
aggregated_stats[stat_key] = (
sum(v[stat_key] for v in stats_episodes.values())
/ num_episodes
)
total = torch.tensor(num_episodes).cuda()
if self.world_size > 1:
            distr.reduce(total, dst=0)
total = total.item()
if self.world_size > 1:
logger.info(
f"rank {self.local_rank}'s {num_episodes}-episode results: {aggregated_stats}")
for k,v in aggregated_stats.items():
v = torch.tensor(v*num_episodes).cuda()
cat_v = gather_list_and_concat(v,self.world_size)
v = (sum(cat_v)/total).item()
aggregated_stats[k] = v
split = config.TASK_CONFIG.DATASET.SPLIT
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ep_ckpt_{checkpoint_index}_{split}_r{self.local_rank}_w{self.world_size}.json",
)
with open(fname, "w") as f:
json.dump(stats_episodes, f, indent=4)
if self.local_rank < 1:
if config.EVAL.SAVE_RESULTS:
fname = os.path.join(
config.RESULTS_DIR,
f"stats_ckpt_{checkpoint_index}_{split}.json",
)
with open(fname, "w") as f:
json.dump(aggregated_stats, f, indent=4)
logger.info(f"Episodes evaluated: {total}")
checkpoint_num = checkpoint_index + 1
for k, v in aggregated_stats.items():
logger.info(f"Average episode {k}: {v:.6f}")
writer.add_scalar(f"eval_{split}_{k}", v, checkpoint_num)
def collect_val_traj(self):
from habitat_extensions.task import ALL_ROLES_MASK, RxRVLNCEDatasetV1
trajectories = defaultdict(list)
split = self.config.TASK_CONFIG.DATASET.SPLIT
if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
if "{role}" in self.config.IL.RECOLLECT_TRAINER.gt_file:
gt_data = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
if (
ALL_ROLES_MASK not in self.config.TASK_CONFIG.DATASET.ROLES
and role not in self.config.TASK_CONFIG.DATASET.ROLES
):
continue
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_file.format(
split=split, role=role
),
"rt",
) as f:
gt_data.update(json.load(f))
else:
with gzip.open(
self.config.IL.RECOLLECT_TRAINER.gt_path.format(
split=split)
) as f:
gt_data = json.load(f)
else:
with gzip.open(
self.config.TASK_CONFIG.TASK.NDTW.GT_PATH.format(
split=split)
) as f:
gt_data = json.load(f)
self.gt_data = gt_data
trajectories = gt_data
self.trajectories = gt_data
trajectories = list(trajectories.keys())[self.config.local_rank::self.config.GPU_NUMBERS]
return trajectories
def eval(self) -> None:
r"""Main method of trainer evaluation. Calls _eval_checkpoint() that
is specified in Trainer class that inherits from BaseRLTrainer
or BaseILTrainer
Returns:
None
"""
self.device = (
torch.device("cuda", self.config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
if "tensorboard" in self.config.VIDEO_OPTION:
assert (
len(self.config.TENSORBOARD_DIR) > 0
), "Must specify a tensorboard directory for video display"
os.makedirs(self.config.TENSORBOARD_DIR, exist_ok=True)
if "disk" in self.config.VIDEO_OPTION:
assert (
len(self.config.VIDEO_DIR) > 0
), "Must specify a directory for storing videos on disk"
world_size = self.config.GPU_NUMBERS
self.world_size = world_size
self.local_rank = self.config.local_rank
self.config.defrost()
# split = self.config.TASK_CONFIG.DATASET.SPLIT
# self.config.TASK_CONFIG.TASK.NDTW.SPLIT = split
# self.config.TASK_CONFIG.TASK.SDTW.SPLIT = split
self.config.TASK_CONFIG.DATASET.ROLES = ["guide"]
self.config.TASK_CONFIG.TASK.MEASUREMENTS = ['POSITION',
'STEPS_TAKEN',
]
if 'HIGHTOLOW' in self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
self.config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWEVAL'
self.config.TASK_CONFIG.DATASET.LANGUAGES = self.config.EVAL.LANGUAGES
self.config.TASK_CONFIG.DATASET.SPLIT = self.config.EVAL.SPLIT
self.config.TASK_CONFIG.TASK.NDTW.SPLIT = self.config.EVAL.SPLIT
self.config.TASK_CONFIG.TASK.SDTW.SPLIT = self.config.EVAL.SPLIT
self.config.use_pbar = not is_slurm_batch_job()
if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
self.config.EVAL.trajectories_file = \
self.config.EVAL.trajectories_file[:-8] + '_w' + \
str(self.world_size) + '_r' + str(self.local_rank) + '.json.gz'
# if choosing image
resize_config = self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
config = self.config.TASK_CONFIG
camera_orientations = get_camera_orientations(12)
# sensor_uuids = []
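        # Register a panoramic camera rig: for each of the 12 headings returned by
        # get_camera_orientations (assumed to be evenly spaced around the agent), clone the
        # RGB and DEPTH sensors with that orientation and add a matching resize entry.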
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
sensor = getattr(config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
# sensor_uuids.append(camera_config.UUID)
setattr(config.SIMULATOR, camera_template, camera_config)
config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
self.config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
self.config.TASK_CONFIG = config
self.config.SENSORS = config.SIMULATOR.AGENT_0.SENSORS
self.config.freeze()
# self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
# self.config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
# -1
# )
torch.cuda.set_device(self.device)
if world_size > 1:
distr.init_process_group(backend='nccl', init_method='env://')
self.device = self.config.TORCH_GPU_IDS[self.local_rank]
torch.cuda.set_device(self.device)
self.config.defrost()
self.config.TORCH_GPU_ID = self.config.TORCH_GPU_IDS[self.local_rank]
self.config.freeze()
#
# if 'rxr' in self.config.BASE_TASK_CONFIG_PATH:
self.traj = self.collect_val_traj()
with TensorboardWriter(
self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
) as writer:
if os.path.isfile(self.config.EVAL_CKPT_PATH_DIR):
# evaluate singe checkpoint
proposed_index = get_checkpoint_id(
self.config.EVAL_CKPT_PATH_DIR
)
if proposed_index is not None:
ckpt_idx = proposed_index
else:
ckpt_idx = 0
self._eval_checkpoint(
self.config.EVAL_CKPT_PATH_DIR,
writer,
checkpoint_index=ckpt_idx,
)
else:
# evaluate multiple checkpoints in order
prev_ckpt_ind = -1
while True:
current_ckpt = None
while current_ckpt is None:
current_ckpt = poll_checkpoint_folder(
self.config.EVAL_CKPT_PATH_DIR, prev_ckpt_ind
)
time.sleep(2) # sleep for 2 secs before polling again
if self.local_rank < 1:
logger.info(f"=======current_ckpt: {current_ckpt}=======")
prev_ckpt_ind += 1
self._eval_checkpoint(
checkpoint_path=current_ckpt,
writer=writer,
checkpoint_index=prev_ckpt_ind,
)
def inference(self) -> None:
r"""Runs inference on a single checkpoint, creating a path predictions file."""
checkpoint_path = self.config.INFERENCE.CKPT_PATH
logger.info(f"checkpoint_path: {checkpoint_path}")
if self.config.INFERENCE.USE_CKPT_CONFIG:
config = self._setup_eval_config(
self.load_checkpoint(checkpoint_path, map_location="cpu")[
"config"
]
)
else:
config = self.config.clone()
config.defrost()
config.TASK_CONFIG.DATASET.SPLIT = self.config.INFERENCE.SPLIT
# config.TASK_CONFIG.DATASET.SPLIT = 'val_unseen'
config.TASK_CONFIG.DATASET.ROLES = ["guide"]
config.TASK_CONFIG.DATASET.LANGUAGES = config.INFERENCE.LANGUAGES
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = (
-1
)
config.IL.ckpt_to_load = config.INFERENCE.CKPT_PATH
config.TASK_CONFIG.TASK.MEASUREMENTS = []
config.TASK_CONFIG.TASK.SENSORS = [
s for s in config.TASK_CONFIG.TASK.SENSORS if "INSTRUCTION" in s
]
if 'HIGHTOLOW' in config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS:
idx = config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS.index('HIGHTOLOW')
config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS[idx] = 'HIGHTOLOWINFER'
# if choosing image
resize_config = config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES
task_config = config.TASK_CONFIG
camera_orientations = get_camera_orientations(12)
# sensor_uuids = []
for sensor_type in ["RGB", "DEPTH"]:
resizer_size = dict(resize_config)[sensor_type.lower()]
sensor = getattr(task_config.SIMULATOR, f"{sensor_type}_SENSOR")
for action, orient in camera_orientations.items():
camera_template = f"{sensor_type}_{action}"
camera_config = deepcopy(sensor)
camera_config.ORIENTATION = camera_orientations[action]
camera_config.UUID = camera_template.lower()
# sensor_uuids.append(camera_config.UUID)
setattr(task_config.SIMULATOR, camera_template, camera_config)
task_config.SIMULATOR.AGENT_0.SENSORS.append(camera_template)
resize_config.append((camera_template.lower(), resizer_size))
config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR.SIZES = resize_config
config.TASK_CONFIG = task_config
config.SENSORS = task_config.SIMULATOR.AGENT_0.SENSORS
config.ENV_NAME = "VLNCEInferenceEnv"
config.freeze()
# envs = construct_envs_auto_reset_false(
# config, get_env_class(config.ENV_NAME),
# self.traj
# )
envs = construct_envs(
config, get_env_class(config.ENV_NAME),
auto_reset_done=False,
episodes_allowed=None,
)
obs_transforms = get_active_obs_transforms(config)
observation_space = apply_obs_transforms_obs_space(
envs.observation_spaces[0], obs_transforms
)
self._initialize_policy(
config,
load_from_ckpt=True,
observation_space=observation_space,
action_space=envs.action_spaces[0],
)
self.policy.eval()
self.waypoint_predictor.eval()
observations = envs.reset()
observations = extract_instruction_tokens(
observations, self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
if 'CMA' in self.config.MODEL.policy_name:
rnn_states = torch.zeros(
envs.num_envs,
self.num_recurrent_layers,
config.MODEL.STATE_ENCODER.hidden_size,
device=self.device,
)
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t = torch.zeros(
envs.num_envs, 768,
device=self.device,
)
language_features = torch.zeros(
envs.num_envs, 80, 768,
device=self.device,
)
not_done_masks = torch.zeros(
envs.num_envs, 1, dtype=torch.uint8, device=self.device
)
episode_predictions = defaultdict(list)
# episode ID --> instruction ID for rxr predictions format
instruction_ids: Dict[str, int] = {}
# populate episode_predictions with the starting state
current_episodes = envs.current_episodes()
for i in range(envs.num_envs):
episode_predictions[current_episodes[i].episode_id].append(
envs.call_at(i, "get_info", {"observations": {}})
)
if config.INFERENCE.FORMAT == "rxr":
ep_id = current_episodes[i].episode_id
k = current_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
with tqdm.tqdm(
total=sum(envs.count_episodes()),
desc=f"[inference:{self.config.INFERENCE.SPLIT}]",
) as pbar:
while envs.num_envs > 0:
current_episodes = envs.current_episodes()
positions = []; headings = []
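                # query each environment for the agent's current position and heading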
for ob_i in range(len(current_episodes)):
agent_state_i = envs.call_at(ob_i,
"get_info", {"observations": {}})
positions.append(agent_state_i['position'])
headings.append(agent_state_i['heading'])
with torch.no_grad():
if 'CMA' in self.config.MODEL.policy_name:
# instructions
instruction_embedding, all_lang_masks = self.policy.net(
mode = "language",
observations = batch,
)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, rnn_states = self.policy.net(
mode = 'navigation',
observations = batch,
instruction = instruction_embedding,
text_mask = all_lang_masks,
rnn_states = rnn_states,
headings = headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
elif 'VLNBERT' in self.config.MODEL.policy_name:
# instruction
lang_idx_tokens = batch['instruction']
padding_idx = 0
lang_masks = (lang_idx_tokens != padding_idx)
lang_token_type_ids = torch.zeros_like(lang_masks,
dtype=torch.long, device=self.device)
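                        # h_t rows are zeroed when an episode resets; re-initialise the
                        # state token only for those episodes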
h_t_flag = h_t.sum(1)==0.0
h_t_init, language_features = self.policy.net(
mode='language',
lang_idx_tokens=lang_idx_tokens,
lang_masks=lang_masks)
h_t[h_t_flag] = h_t_init[h_t_flag]
language_features = torch.cat(
(h_t.unsqueeze(1), language_features[:,1:,:]), dim=1)
# candidate waypoints prediction
cand_rgb, cand_depth, \
cand_direction, cand_mask, candidate_lengths, \
batch_angles, batch_distances = self.policy.net(
mode = "waypoint",
waypoint_predictor = self.waypoint_predictor,
observations = batch,
in_train = False,
)
# navigation action logits
logits, h_t = self.policy.net(
mode = 'navigation',
observations=batch,
lang_masks=lang_masks,
lang_feats=language_features,
lang_token_type_ids=lang_token_type_ids,
headings=headings,
cand_rgb = cand_rgb,
cand_depth = cand_depth,
cand_direction = cand_direction,
cand_mask = cand_mask,
masks = not_done_masks,
)
logits = logits.masked_fill_(cand_mask, -float('inf'))
# high-to-low actions in environments
actions = logits.argmax(dim=-1, keepdim=True)
env_actions = []
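                # the last candidate slot corresponds to stopping (env action 0); any other
                # choice becomes a waypoint-stepping action (env action 4) with the
                # selected angle and distance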
for j in range(logits.size(0)):
if actions[j].item() == candidate_lengths[j]-1:
env_actions.append({'action':
{'action': 0, 'action_args':{}}})
else:
env_actions.append({'action':
{'action': 4, # HIGHTOLOW
'action_args':{
'angle': batch_angles[j][actions[j].item()],
'distance': batch_distances[j][actions[j].item()],
}}})
outputs = envs.step(env_actions)
observations, _, dones, infos = [list(x) for x in zip(*outputs)]
not_done_masks = torch.tensor(
[[0] if done else [1] for done in dones],
dtype=torch.uint8,
device=self.device,
)
# reset envs and observations if necessary
for i in range(envs.num_envs):
if 'infos' in observations[i].keys():
episode_predictions[current_episodes[i].episode_id] += observations[i].pop('infos')
else:
episode_predictions[current_episodes[i].episode_id].append(
envs.call_at(i, "get_info", {"observations": {}}))
if not dones[i]:
continue
if 'CMA' in self.config.MODEL.policy_name:
rnn_states[i] *= 0.
elif 'VLNBERT' in self.config.MODEL.policy_name:
h_t[i] *= 0.
observations[i] = envs.reset_at(i)[0]
pbar.update()
observations = extract_instruction_tokens(
observations,
self.config.TASK_CONFIG.TASK.INSTRUCTION_SENSOR_UUID,
)
batch = batch_obs(observations, self.device)
batch = apply_obs_transforms_batch(batch, obs_transforms)
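                # pause any finished environment whose next episode was already
                # predicted; otherwise record the new episode's starting state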
envs_to_pause = []
next_episodes = envs.current_episodes()
for i in range(envs.num_envs):
if not dones[i]:
continue
if next_episodes[i].episode_id in episode_predictions:
envs_to_pause.append(i)
else:
episode_predictions[next_episodes[i].episode_id].append(
envs.call_at(i, "get_info", {"observations": {}}))
if config.INFERENCE.FORMAT == "rxr":
ep_id = next_episodes[i].episode_id
k = next_episodes[i].instruction.instruction_id
instruction_ids[ep_id] = int(k)
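                # _pause_envs slices the recurrent states, so pass h_t in place of
                # rnn_states for the VLN-BERT policy (restored below)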
if 'VLNBERT' in self.config.MODEL.policy_name:
rnn_states = h_t
headings = torch.tensor(headings)
(
envs,
rnn_states,
not_done_masks,
headings,
batch,
rgb_frames,
) = self._pause_envs(
envs_to_pause,
envs,
rnn_states,
not_done_masks,
headings,
batch,
)
headings = headings.tolist()
if 'VLNBERT' in self.config.MODEL.policy_name:
h_t = rnn_states
envs.close()
if config.INFERENCE.FORMAT == "r2r":
with open(config.INFERENCE.PREDICTIONS_FILE, "w") as f:
json.dump(episode_predictions, f, indent=2)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
)
else: # use 'rxr' format for rxr-habitat leaderboard
predictions_out = []
for k,v in episode_predictions.items():
# save only positions that changed
path = [v[0]["position"]]
for p in v[1:]:
if path[-1] != p["position"]:
path.append(p["position"])
predictions_out.append(
{
"instruction_id": instruction_ids[k],
"path": path,
}
)
predictions_out.sort(key=lambda x: x["instruction_id"])
with jsonlines.open(
config.INFERENCE.PREDICTIONS_FILE, mode="w"
) as writer:
writer.write_all(predictions_out)
logger.info(
f"Predictions saved to: {config.INFERENCE.PREDICTIONS_FILE}"
)
| 45,832 | 40.971612 | 112 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/Policy_ViewSelection_CMA.py | import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym import Space
from habitat import Config
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.rl.models.rnn_state_encoder import (
build_rnn_state_encoder,
)
from habitat_baselines.rl.ppo.policy import Net
from habitat_baselines.utils.common import CustomFixedCategorical
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.models.encoders.instruction_encoder import (
InstructionEncoder,
)
from vlnce_baselines.models.encoders.resnet_encoders import (
TorchVisionResNet50,
VlnResnetDepthEncoder
)
from vlnce_baselines.models.policy import ILPolicy
from waypoint_prediction.utils import nms
from vlnce_baselines.models.utils import (
length2mask, angle_feature, dir_angle_feature)
import math
@baseline_registry.register_policy
class PolicyViewSelectionCMA(ILPolicy):
def __init__(
self,
observation_space: Space,
action_space: Space,
model_config: Config,
):
super().__init__(
CMANet(
observation_space=observation_space,
model_config=model_config,
num_actions=action_space.n,
),
action_space.n,
)
@classmethod
def from_config(
cls, config: Config, observation_space: Space, action_space: Space
):
config.defrost()
config.MODEL.TORCH_GPU_ID = config.TORCH_GPU_IDS[config.local_rank]
config.freeze()
return cls(
observation_space=observation_space,
action_space=action_space,
model_config=config.MODEL,
)
class CMANet(Net):
r"""A cross-modal attention (CMA) network that contains:
Instruction encoder
Depth encoder
RGB encoder
CMA state encoder
"""
def __init__(
self, observation_space: Space, model_config: Config, num_actions
):
super().__init__()
self.model_config = model_config
model_config.defrost()
model_config.INSTRUCTION_ENCODER.final_state_only = False
model_config.freeze()
device = (
torch.device("cuda", model_config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.device = device
# Init the instruction encoder
self.instruction_encoder = InstructionEncoder(
model_config.INSTRUCTION_ENCODER
)
# Init the depth encoder
assert model_config.DEPTH_ENCODER.cnn_type in [
"VlnResnetDepthEncoder"
], "DEPTH_ENCODER.cnn_type must be VlnResnetDepthEncoder"
self.depth_encoder = VlnResnetDepthEncoder(
observation_space,
output_size=model_config.DEPTH_ENCODER.output_size,
checkpoint=model_config.DEPTH_ENCODER.ddppo_checkpoint,
backbone=model_config.DEPTH_ENCODER.backbone,
spatial_output=model_config.spatial_output,
)
# Init the RGB encoder
assert model_config.RGB_ENCODER.cnn_type in [
"TorchVisionResNet152", "TorchVisionResNet50"
], "RGB_ENCODER.cnn_type must be TorchVisionResNet152 or TorchVisionResNet50"
if model_config.RGB_ENCODER.cnn_type == "TorchVisionResNet50":
self.rgb_encoder = TorchVisionResNet50(
observation_space,
model_config.RGB_ENCODER.output_size,
device,
spatial_output=model_config.spatial_output,
)
hidden_size = model_config.STATE_ENCODER.hidden_size
self._hidden_size = hidden_size
# merging visual inputs
self.rgb_linear = nn.Sequential(
nn.Linear(
model_config.RGB_ENCODER.encode_size,
model_config.RGB_ENCODER.output_size,
),
nn.ReLU(True),
)
if self.depth_encoder.spatial_output:
            pass
else:
self.depth_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.encode_size,
model_config.DEPTH_ENCODER.output_size,
),
nn.ReLU(True),
)
self.vismerge_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.output_size + model_config.RGB_ENCODER.output_size + model_config.VISUAL_DIM.directional,
model_config.VISUAL_DIM.vis_hidden,
),
nn.ReLU(True),
)
self.enc_prev_act = nn.Sequential(
nn.Linear(model_config.VISUAL_DIM.directional, model_config.VISUAL_DIM.directional),
nn.Tanh(),
)
# Init the RNN state decoder
self.state_encoder = build_rnn_state_encoder(
input_size=model_config.VISUAL_DIM.vis_hidden + model_config.VISUAL_DIM.directional,
hidden_size=model_config.STATE_ENCODER.hidden_size,
rnn_type=model_config.STATE_ENCODER.rnn_type,
num_layers=1,
)
self.prev_state_vis_attn = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.VISUAL_DIM.vis_hidden,
output_tilde=False
)
self.text_vis_attn = SoftDotAttention(
self.instruction_encoder.output_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.VISUAL_DIM.vis_hidden,
output_tilde=False
)
self.state_text_attn = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size,
self.instruction_encoder.output_size,
self.instruction_encoder.output_size,
output_tilde=False
)
self.state_vis_logits = SoftDotAttention(
model_config.STATE_ENCODER.hidden_size+model_config.VISUAL_DIM.vis_hidden+self.instruction_encoder.output_size,
model_config.VISUAL_DIM.vis_hidden,
model_config.STATE_ENCODER.hidden_size,
output_tilde=False
)
self.register_buffer(
"_scale", torch.tensor(1.0 / ((hidden_size // 2) ** 0.5))
)
self.space_pool = nn.Sequential(
nn.AdaptiveAvgPool2d((1,1)),
nn.Flatten(start_dim=2),
)
self.train()
@property
def is_blind(self):
return self.rgb_encoder.is_blind or self.depth_encoder.is_blind
@property # trivial argument, just for init with habitat
def output_size(self):
return 1
@property
def num_recurrent_layers(self):
return self.state_encoder.num_recurrent_layers
def forward(self, mode=None,
waypoint_predictor=None,
observations=None,
instruction=None, text_mask=None,
rnn_states=None,
cand_rgb=None, cand_depth=None,
cand_direction=None, cand_mask=None,
headings=None, masks=None,
post_states=None, in_train=True):
r"""
instruction_embedding: [batch_size x INSTRUCTION_ENCODER.output_size]
depth_embedding: [batch_size x DEPTH_ENCODER.output_size]
rgb_embedding: [batch_size x RGB_ENCODER.output_size]
"""
if mode == 'language':
ctx, all_lang_masks = self.instruction_encoder(observations)
return ctx, all_lang_masks
elif mode == 'waypoint':
batch_size = observations['instruction'].size(0)
''' encoding rgb/depth at all directions ----------------------------- '''
NUM_ANGLES = 120 # 120 angles 3 degrees each
NUM_IMGS = 12
NUM_CLASSES = 12 # 12 distances at each sector
depth_batch = torch.zeros_like(observations['depth']).repeat(NUM_IMGS, 1, 1, 1)
rgb_batch = torch.zeros_like(observations['rgb']).repeat(NUM_IMGS, 1, 1, 1)
# reverse the order of input images to clockwise
            # single-view images in clockwise order agree with the panoramic image
a_count = 0
for i, (k, v) in enumerate(observations.items()):
if 'depth' in k:
for bi in range(v.size(0)):
ra_count = (NUM_IMGS - a_count)%NUM_IMGS
depth_batch[ra_count+bi*NUM_IMGS] = v[bi]
rgb_batch[ra_count+bi*NUM_IMGS] = observations[k.replace('depth','rgb')][bi]
a_count += 1
obs_view12 = {}
obs_view12['depth'] = depth_batch
obs_view12['rgb'] = rgb_batch
depth_embedding = self.depth_encoder(obs_view12)
rgb_embedding = self.rgb_encoder(obs_view12)
''' waypoint prediction ----------------------------- '''
waypoint_heatmap_logits = waypoint_predictor(
rgb_embedding, depth_embedding)
# reverse the order of images back to counter-clockwise
rgb_embed_reshape = rgb_embedding.reshape(
batch_size, NUM_IMGS, 2048, 7, 7)
depth_embed_reshape = depth_embedding.reshape(
batch_size, NUM_IMGS, 128, 4, 4)
rgb_feats = torch.cat((
rgb_embed_reshape[:,0:1,:],
torch.flip(rgb_embed_reshape[:,1:,:], [1]),
), dim=1)
depth_feats = torch.cat((
depth_embed_reshape[:,0:1,:],
torch.flip(depth_embed_reshape[:,1:,:], [1]),
), dim=1)
# from heatmap to points
batch_x_norm = torch.softmax(
waypoint_heatmap_logits.reshape(
batch_size, NUM_ANGLES*NUM_CLASSES,
), dim=1
)
batch_x_norm = batch_x_norm.reshape(
batch_size, NUM_ANGLES, NUM_CLASSES,
)
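            # pad the angle dimension circularly so NMS can select peaks near 0/360 degrees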
batch_x_norm_wrap = torch.cat((
batch_x_norm[:,-1:,:],
batch_x_norm,
batch_x_norm[:,:1,:]),
dim=1)
batch_output_map = nms(
batch_x_norm_wrap.unsqueeze(1),
max_predictions=5,
sigma=(7.0,5.0))
# predicted waypoints before sampling
batch_output_map = batch_output_map.squeeze(1)[:,1:-1,:]
candidate_lengths = ((batch_output_map!=0).sum(-1).sum(-1) + 1).tolist()
if isinstance(candidate_lengths, int):
candidate_lengths = [candidate_lengths]
max_candidate = max(candidate_lengths) # including stop
cand_mask = length2mask(candidate_lengths, device=self.device)
if in_train:
# Augment waypoint prediction
# parts of heatmap for sampling (fix offset first)
batch_way_heats_regional = torch.cat(
(waypoint_heatmap_logits[:,-waypoint_predictor.HEATMAP_OFFSET:,:],
waypoint_heatmap_logits[:,:-waypoint_predictor.HEATMAP_OFFSET,:],
), dim=1)
batch_way_heats_regional = batch_way_heats_regional.reshape(batch_size, 12, 10, 12)
batch_sample_angle_idxes = []
batch_sample_distance_idxes = []
batch_way_log_prob = []
for j in range(batch_size):
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# clockwise image indexes (same as batch_x_norm)
img_idxes = ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
# heatmap regions for sampling
way_heats_regional = batch_way_heats_regional[j][img_idxes].view(img_idxes.size, -1)
way_heats_probs = F.softmax(way_heats_regional, 1)
probs_c = torch.distributions.Categorical(way_heats_probs)
way_heats_act = probs_c.sample().detach()
sample_angle_idxes = []
sample_distance_idxes = []
for k, way_act in enumerate(way_heats_act):
if img_idxes[k] != 0:
angle_pointer = (img_idxes[k] - 1) * 10 + 5
else:
angle_pointer = 0
sample_angle_idxes.append(way_act//12+angle_pointer)
sample_distance_idxes.append(way_act%12)
batch_sample_angle_idxes.append(sample_angle_idxes)
batch_sample_distance_idxes.append(sample_distance_idxes)
batch_way_log_prob.append(
probs_c.log_prob(way_heats_act))
cand_rgb = torch.zeros(
(batch_size, max_candidate, 2048, 7, 7),
dtype=torch.float32, device=self.device)
cand_depth = torch.zeros(
(batch_size, max_candidate, 128, 4, 4),
dtype=torch.float32, device=self.device)
batch_angles = []
batch_distances = []
batch_img_idxes = []
for j in range(batch_size):
if in_train:
angle_idxes = torch.tensor(batch_sample_angle_idxes[j])
distance_idxes = torch.tensor(batch_sample_distance_idxes[j])
else:
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# distance indexes for candidates
distance_idxes = batch_output_map[j].nonzero()[:, 1]
                # 2*pi minus the angle because counter-clockwise is the positive direction
angle_rad = 2*math.pi-angle_idxes.float()/120*2*math.pi
batch_angles.append(angle_rad.tolist())
batch_distances.append(
((distance_idxes + 1)*0.25).tolist())
# counter-clockwise image indexes
img_idxes = 12 - ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
batch_img_idxes.append(img_idxes)
for k in range(len(img_idxes)):
cand_rgb[j][k] = rgb_feats[j][img_idxes[k]]
cand_depth[j][k] = depth_feats[j][img_idxes[k]]
cand_direction = dir_angle_feature(batch_angles).to(self.device)
if in_train:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances #, batch_way_log_prob
else:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances
elif mode == 'navigation':
cand_rgb_feats_pool = self.space_pool(cand_rgb)
rgb_in = self.rgb_linear(cand_rgb_feats_pool)
cand_depth_feats_pool = self.space_pool(cand_depth)
depth_in = self.depth_linear(cand_depth_feats_pool)
vis_in = self.vismerge_linear(
torch.cat((rgb_in, depth_in, cand_direction), dim=2),)
''' aggregate visual features by agent's previous state -------------- '''
prev_state = rnn_states[:, 0:self.state_encoder.num_recurrent_layers].squeeze(1)
vis_prev_state, _ = self.prev_state_vis_attn(
prev_state, vis_in, cand_mask)
''' first state encoder for new visual features '''
prev_actions = angle_feature(headings, device=self.device)
prev_actions = self.enc_prev_act(prev_actions)
state_in = torch.cat([vis_prev_state, prev_actions], dim=1)
rnn_states_out = rnn_states.detach().clone()
(
state,
rnn_states_out[:, 0 : self.state_encoder.num_recurrent_layers],
) = self.state_encoder(
state_in,
rnn_states[:, 0 : self.state_encoder.num_recurrent_layers],
masks,
)
''' language attention using state '''
text_state, _ = self.state_text_attn(
state, instruction, text_mask)
''' visual attention using attended language '''
vis_text_feats, _ = self.text_vis_attn(
text_state, vis_in, cand_mask)
x = torch.cat((state, vis_text_feats, text_state), dim=1)
_, logits = self.state_vis_logits(
x, vis_in, cand_mask, output_prob=False)
return logits, rnn_states_out
class SoftDotAttention(nn.Module):
def __init__(self, q_dim, kv_dim, hidden_dim, output_tilde=False):
'''Initialize layer.'''
super(SoftDotAttention, self).__init__()
self.linear_q = nn.Linear(q_dim, hidden_dim, bias=True)
self.linear_kv = nn.Linear(kv_dim, hidden_dim, bias=True)
self.sm = nn.Softmax(dim=1)
self.output_tilde = output_tilde
if output_tilde:
self.linear_out = nn.Linear(q_dim + hidden_dim, hidden_dim, bias=False)
self.tanh = nn.Tanh()
def forward(self, q, kv, mask=None, output_prob=True):
        '''Propagate the query through the attention layer.
q: (query) batch x dim
kv: (keys and values) batch x seq_len x dim
mask: batch x seq_len indices to be masked
'''
x_q = self.linear_q(q).unsqueeze(2)
x_kv = self.linear_kv(kv)
attn = torch.bmm(x_kv, x_q).squeeze(2)
logit = attn
if mask is not None:
attn.masked_fill_(mask, -float('inf'))
attn = self.sm(attn)
attn3 = attn.view(attn.size(0), 1, attn.size(1))
weighted_x_kv = torch.bmm(attn3, x_kv).squeeze(1)
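        # optionally return the raw (pre-softmax) scores instead of attention probabilities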
if not output_prob:
attn = logit
if self.output_tilde:
h_tilde = torch.cat((weighted_x_kv, q), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
return h_tilde, attn
else:
return weighted_x_kv, attn
| 18,135 | 38.598253 | 142 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/Policy_ViewSelection_VLNBERT.py | import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from gym import Space
from habitat import Config
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.rl.models.rnn_state_encoder import (
build_rnn_state_encoder,
)
from habitat_baselines.rl.ppo.policy import Net
from vlnce_baselines.models.vlnbert.vlnbert_init import get_vlnbert_models
from vlnce_baselines.common.aux_losses import AuxLosses
from vlnce_baselines.models.encoders.instruction_encoder import (
InstructionEncoder,
)
from vlnce_baselines.models.encoders.resnet_encoders import (
TorchVisionResNet50,
VlnResnetDepthEncoder,
)
from vlnce_baselines.models.policy import ILPolicy
from waypoint_prediction.utils import nms
from vlnce_baselines.models.utils import (
angle_feature_with_ele, dir_angle_feature_with_ele, length2mask)
import math
@baseline_registry.register_policy
class PolicyViewSelectionVLNBERT(ILPolicy):
def __init__(
self,
observation_space: Space,
action_space: Space,
model_config: Config,
):
super().__init__(
VLNBERT(
observation_space=observation_space,
model_config=model_config,
num_actions=action_space.n,
),
action_space.n,
)
@classmethod
def from_config(
cls, config: Config, observation_space: Space, action_space: Space
):
config.defrost()
config.MODEL.TORCH_GPU_ID = config.TORCH_GPU_IDS[config.local_rank]
config.freeze()
return cls(
observation_space=observation_space,
action_space=action_space,
model_config=config.MODEL,
)
class VLNBERT(Net):
def __init__(
self, observation_space: Space, model_config: Config, num_actions,
):
super().__init__()
device = (
torch.device("cuda", model_config.TORCH_GPU_ID)
if torch.cuda.is_available()
else torch.device("cpu")
)
self.device = device
        print('\nInitializing the VLN-BERT model ...')
self.vln_bert = get_vlnbert_models(config=None)
self.vln_bert.config.directions = 1 # a trivial number, change during nav
layer_norm_eps = self.vln_bert.config.layer_norm_eps
# Init the depth encoder
assert model_config.DEPTH_ENCODER.cnn_type in [
"VlnResnetDepthEncoder"
], "DEPTH_ENCODER.cnn_type must be VlnResnetDepthEncoder"
self.depth_encoder = VlnResnetDepthEncoder(
observation_space,
output_size=model_config.DEPTH_ENCODER.output_size,
checkpoint=model_config.DEPTH_ENCODER.ddppo_checkpoint,
backbone=model_config.DEPTH_ENCODER.backbone,
spatial_output=model_config.spatial_output,
)
# Init the RGB encoder
assert model_config.RGB_ENCODER.cnn_type in [
"TorchVisionResNet152", "TorchVisionResNet50"
], "RGB_ENCODER.cnn_type must be TorchVisionResNet152 or TorchVisionResNet50"
if model_config.RGB_ENCODER.cnn_type == "TorchVisionResNet50":
self.rgb_encoder = TorchVisionResNet50(
observation_space,
model_config.RGB_ENCODER.output_size,
device,
spatial_output=model_config.spatial_output,
)
# merging visual inputs
self.space_pool = nn.Sequential(
nn.AdaptiveAvgPool2d((1,1)),
nn.Flatten(start_dim=2),)
self.rgb_linear = nn.Sequential(
nn.Linear(
model_config.RGB_ENCODER.encode_size,
model_config.RGB_ENCODER.output_size,
),
nn.ReLU(True),
)
self.depth_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.encode_size,
model_config.DEPTH_ENCODER.output_size,
),
nn.ReLU(True),
)
self.vismerge_linear = nn.Sequential(
nn.Linear(
model_config.DEPTH_ENCODER.output_size + model_config.RGB_ENCODER.output_size + model_config.VISUAL_DIM.directional,
model_config.VISUAL_DIM.vis_hidden,
),
nn.ReLU(True),
)
self.action_state_project = nn.Sequential(
nn.Linear(model_config.VISUAL_DIM.vis_hidden+model_config.VISUAL_DIM.directional,
model_config.VISUAL_DIM.vis_hidden),
nn.Tanh())
self.action_LayerNorm = BertLayerNorm(
model_config.VISUAL_DIM.vis_hidden, eps=layer_norm_eps)
self.drop_env = nn.Dropout(p=0.4)
self.train()
@property # trivial argument, just for init with habitat
def output_size(self):
return 1
@property
def is_blind(self):
return self.rgb_encoder.is_blind or self.depth_encoder.is_blind
@property
def num_recurrent_layers(self):
return 1
def forward(self, mode=None,
waypoint_predictor=None,
observations=None,
lang_idx_tokens=None, lang_masks=None,
lang_feats=None, lang_token_type_ids=None,
headings=None,
cand_rgb=None, cand_depth=None,
cand_direction=None, cand_mask=None,
masks=None,
post_states=None, in_train=True):
if mode == 'language':
h_t, language_features = self.vln_bert(
'language', lang_idx_tokens,
attention_mask=lang_masks, lang_mask=lang_masks,)
return h_t, language_features
elif mode == 'waypoint':
batch_size = observations['instruction'].size(0)
''' encoding rgb/depth at all directions ----------------------------- '''
NUM_ANGLES = 120 # 120 angles 3 degrees each
NUM_IMGS = 12
NUM_CLASSES = 12 # 12 distances at each sector
depth_batch = torch.zeros_like(observations['depth']).repeat(NUM_IMGS, 1, 1, 1)
rgb_batch = torch.zeros_like(observations['rgb']).repeat(NUM_IMGS, 1, 1, 1)
# reverse the order of input images to clockwise
            # single-view images in clockwise order agree with the panoramic image
a_count = 0
for i, (k, v) in enumerate(observations.items()):
if 'depth' in k:
for bi in range(v.size(0)):
ra_count = (NUM_IMGS - a_count)%NUM_IMGS
depth_batch[ra_count+bi*NUM_IMGS] = v[bi]
rgb_batch[ra_count+bi*NUM_IMGS] = observations[k.replace('depth','rgb')][bi]
a_count += 1
obs_view12 = {}
obs_view12['depth'] = depth_batch
obs_view12['rgb'] = rgb_batch
depth_embedding = self.depth_encoder(obs_view12)
rgb_embedding = self.rgb_encoder(obs_view12)
''' waypoint prediction ----------------------------- '''
waypoint_heatmap_logits = waypoint_predictor(
rgb_embedding, depth_embedding)
# reverse the order of images back to counter-clockwise
rgb_embed_reshape = rgb_embedding.reshape(
batch_size, NUM_IMGS, 2048, 7, 7)
depth_embed_reshape = depth_embedding.reshape(
batch_size, NUM_IMGS, 128, 4, 4)
rgb_feats = torch.cat((
rgb_embed_reshape[:,0:1,:],
torch.flip(rgb_embed_reshape[:,1:,:], [1]),
), dim=1)
depth_feats = torch.cat((
depth_embed_reshape[:,0:1,:],
torch.flip(depth_embed_reshape[:,1:,:], [1]),
), dim=1)
# from heatmap to points
batch_x_norm = torch.softmax(
waypoint_heatmap_logits.reshape(
batch_size, NUM_ANGLES*NUM_CLASSES,
), dim=1
)
batch_x_norm = batch_x_norm.reshape(
batch_size, NUM_ANGLES, NUM_CLASSES,
)
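            # pad the angle dimension circularly so NMS can select peaks near 0/360 degrees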
batch_x_norm_wrap = torch.cat((
batch_x_norm[:,-1:,:],
batch_x_norm,
batch_x_norm[:,:1,:]),
dim=1)
batch_output_map = nms(
batch_x_norm_wrap.unsqueeze(1),
max_predictions=5,
sigma=(7.0,5.0))
# predicted waypoints before sampling
batch_output_map = batch_output_map.squeeze(1)[:,1:-1,:]
candidate_lengths = ((batch_output_map!=0).sum(-1).sum(-1) + 1).tolist()
if isinstance(candidate_lengths, int):
candidate_lengths = [candidate_lengths]
max_candidate = max(candidate_lengths) # including stop
cand_mask = length2mask(candidate_lengths, device=self.device)
if in_train:
# Augment waypoint prediction
# parts of heatmap for sampling (fix offset first)
HEATMAP_OFFSET = 5
batch_way_heats_regional = torch.cat(
(waypoint_heatmap_logits[:,-HEATMAP_OFFSET:,:],
waypoint_heatmap_logits[:,:-HEATMAP_OFFSET,:],
), dim=1)
batch_way_heats_regional = batch_way_heats_regional.reshape(batch_size, 12, 10, 12)
batch_sample_angle_idxes = []
batch_sample_distance_idxes = []
for j in range(batch_size):
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# clockwise image indexes (same as batch_x_norm)
img_idxes = ((angle_idxes.cpu().numpy()+5) // 10)
img_idxes[img_idxes==12] = 0
# heatmap regions for sampling
way_heats_regional = batch_way_heats_regional[j][img_idxes].view(img_idxes.size, -1)
way_heats_probs = F.softmax(way_heats_regional, 1)
probs_c = torch.distributions.Categorical(way_heats_probs)
way_heats_act = probs_c.sample().detach()
sample_angle_idxes = []
sample_distance_idxes = []
for k, way_act in enumerate(way_heats_act):
if img_idxes[k] != 0:
angle_pointer = (img_idxes[k] - 1) * 10 + 5
else:
angle_pointer = 0
sample_angle_idxes.append(way_act//12+angle_pointer)
sample_distance_idxes.append(way_act%12)
batch_sample_angle_idxes.append(sample_angle_idxes)
batch_sample_distance_idxes.append(sample_distance_idxes)
cand_rgb = torch.zeros(
(batch_size, max_candidate, 2048, 7, 7),
dtype=torch.float32, device=self.device)
cand_depth = torch.zeros(
(batch_size, max_candidate, 128, 4, 4),
dtype=torch.float32, device=self.device)
batch_angles = []; batch_angles_c = []
batch_distances = []
batch_img_idxes = []
for j in range(batch_size):
if in_train:
angle_idxes = torch.tensor(batch_sample_angle_idxes[j])
distance_idxes = torch.tensor(batch_sample_distance_idxes[j])
else:
# angle indexes with candidates
angle_idxes = batch_output_map[j].nonzero()[:, 0]
# distance indexes for candidates
distance_idxes = batch_output_map[j].nonzero()[:, 1]
                # 2*pi minus the angle because counter-clockwise is the positive direction
angle_rad_cc = 2*math.pi-angle_idxes.float()/120*2*math.pi
batch_angles.append(angle_rad_cc.tolist())
angle_rad_c = angle_idxes.float()/120*2*math.pi
batch_angles_c.append(angle_rad_c.tolist())
batch_distances.append(
((distance_idxes + 1)*0.25).tolist())
# counter-clockwise image indexes
img_idxes = 12 - (angle_idxes.cpu().numpy()+5) // 10
img_idxes[img_idxes==12] = 0
batch_img_idxes.append(img_idxes)
for k in range(len(img_idxes)):
cand_rgb[j][k] = rgb_feats[j][img_idxes[k]]
cand_depth[j][k] = depth_feats[j][img_idxes[k]]
# use clockwise angles because of vlnbert pretraining
cand_direction = dir_angle_feature_with_ele(batch_angles_c).to(self.device)
if in_train:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances #, batch_way_log_prob
else:
return cand_rgb, cand_depth, cand_direction, cand_mask, candidate_lengths, batch_angles, batch_distances
elif mode == 'navigation':
# use clockwise angles because of vlnbert pretraining
headings = [2*np.pi - k for k in headings]
prev_actions = angle_feature_with_ele(headings, device=self.device)
cand_rgb_feats_pool = self.space_pool(cand_rgb)
cand_rgb_feats_pool = self.drop_env(cand_rgb_feats_pool)
cand_depth_feats_pool = self.space_pool(cand_depth)
rgb_in = self.rgb_linear(cand_rgb_feats_pool)
depth_in = self.depth_linear(cand_depth_feats_pool)
vis_in = self.vismerge_linear(
torch.cat((rgb_in, depth_in, cand_direction), dim=2),
)
''' vln-bert processing ------------------------------------- '''
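            # fuse the recurrent state token (first language feature) with the
            # previous-action heading embedding before the cross-modal layers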
state_action_embed = torch.cat(
(lang_feats[:,0,:], prev_actions), dim=1)
state_with_action = self.action_state_project(state_action_embed)
state_with_action = self.action_LayerNorm(state_with_action)
self.vln_bert.config.directions = cand_rgb.size(1)
state_feats = torch.cat((
state_with_action.unsqueeze(1), lang_feats[:,1:,:]), dim=1)
bert_candidate_mask = (cand_mask == 0)
attention_mask = torch.cat((
lang_masks, bert_candidate_mask), dim=-1)
h_t, logits = self.vln_bert('visual',
state_feats,
attention_mask=attention_mask,
lang_mask=lang_masks, vis_mask=bert_candidate_mask,
img_feats=vis_in)
return logits, h_t
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
| 15,286 | 40.204852 | 142 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/utils.py | import math
import torch
def angle_feature(headings, device=None):
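    # encode each heading as a 64-d vector by tiling [sin(heading), cos(heading)]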
heading_enc = torch.zeros(len(headings), 64, dtype=torch.float32)
for i, head in enumerate(headings):
heading_enc[i] = torch.tensor(
[math.sin(head), math.cos(head)] * (64 // 2))
return heading_enc.to(device)
def dir_angle_feature(angle_list, device=None):
feature_dim = 64
batch_size = len(angle_list)
max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop
heading_enc = torch.zeros(
batch_size, max_leng, feature_dim, dtype=torch.float32)
for i in range(batch_size):
for j, angle_rad in enumerate(angle_list[i]):
heading_enc[i][j] = torch.tensor(
[math.sin(angle_rad),
math.cos(angle_rad)] * (feature_dim // 2))
return heading_enc
def angle_feature_with_ele(headings, device=None):
heading_enc = torch.zeros(len(headings), 128, dtype=torch.float32)
for i, head in enumerate(headings):
heading_enc[i] = torch.tensor(
[
math.sin(head), math.cos(head),
math.sin(0.0), math.cos(0.0), # elevation
] * (128 // 4))
return heading_enc.to(device)
def dir_angle_feature_with_ele(angle_list, device=None):
feature_dim = 128
batch_size = len(angle_list)
max_leng = max([len(k) for k in angle_list]) + 1 # +1 for stop
heading_enc = torch.zeros(
batch_size, max_leng, feature_dim, dtype=torch.float32)
for i in range(batch_size):
for j, angle_rad in enumerate(angle_list[i]):
heading_enc[i][j] = torch.tensor(
[
math.sin(angle_rad), math.cos(angle_rad),
math.sin(0.0), math.cos(0.0), # elevation
] * (128 // 4))
return heading_enc
def length2mask(length, size=None, device=None):
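    # build a boolean padding mask: True marks positions beyond each sequence length,
    # e.g. length2mask([1, 3]) -> [[False, True, True], [False, False, False]]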
batch_size = len(length)
size = int(max(length)) if size is None else size
mask = (torch.arange(size, dtype=torch.int64).unsqueeze(0).repeat(batch_size, 1)
> (torch.LongTensor(length) - 1).unsqueeze(1)).to(device)
return mask | 2,129 | 31.769231 | 84 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/policy.py | import abc
from typing import Any
from habitat_baselines.rl.ppo.policy import Policy
from habitat_baselines.utils.common import (
CategoricalNet,
CustomFixedCategorical,
)
from torch.distributions import Categorical
class ILPolicy(Policy, metaclass=abc.ABCMeta):
def __init__(self, net, dim_actions):
r"""Defines an imitation learning policy as having functions act() and
build_distribution().
"""
super(Policy, self).__init__()
self.net = net
self.dim_actions = dim_actions
# self.action_distribution = CategoricalNet(
# self.net.output_size, self.dim_actions
# )
def forward(self, *x):
raise NotImplementedError
def act(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
deterministic=False,
):
print('need to revise for CMA and VLNBERT')
import pdb; pdb.set_trace()
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution = self.action_distribution(features)
# if distribution.logit
if deterministic:
action = distribution.mode()
else:
action = distribution.sample()
return action, rnn_hidden_states
def get_value(self, *args: Any, **kwargs: Any):
raise NotImplementedError
def evaluate_actions(self, *args: Any, **kwargs: Any):
raise NotImplementedError
def build_distribution(
self, observations, rnn_hidden_states, prev_actions, masks
) -> CustomFixedCategorical:
features, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
return self.action_distribution(features)
def act2(
self,
observations,
rnn_hidden_states,
prev_actions,
masks,
deterministic=False,
):
print('need to revise for CMA and VLNBERT')
import pdb; pdb.set_trace()
feature_rgb, feature_depth, rnn_hidden_states = self.net(
observations, rnn_hidden_states, prev_actions, masks
)
distribution_rgb = self.action_distribution(feature_rgb)
distribution_depth = self.action_distribution(feature_depth)
probs = (distribution_rgb.probs + distribution_depth.probs)/2
# if distribution.logit
if deterministic:
action = probs.argmax(dim=-1, keepdim=True)
else:
action = Categorical(probs).sample().unsqueeze(-1)
return action, rnn_hidden_states
| 2,642 | 27.419355 | 78 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/__init__.py | 0 | 0 | 0 | py |
|
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/encoders/resnet_encoders.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from gym import spaces
from habitat import logger
from habitat_baselines.rl.ddppo.policy import resnet
from habitat_baselines.rl.ddppo.policy.resnet_policy import ResNetEncoder
import torchvision
class VlnResnetDepthEncoder(nn.Module):
def __init__(
self,
observation_space,
output_size=128,
checkpoint="NONE",
backbone="resnet50",
resnet_baseplanes=32,
normalize_visual_inputs=False,
trainable=False,
spatial_output: bool = False,
):
super().__init__()
self.visual_encoder = ResNetEncoder(
spaces.Dict({"depth": observation_space.spaces["depth"]}),
baseplanes=resnet_baseplanes,
ngroups=resnet_baseplanes // 2,
make_backbone=getattr(resnet, backbone),
normalize_visual_inputs=normalize_visual_inputs,
)
for param in self.visual_encoder.parameters():
param.requires_grad_(trainable)
if checkpoint != "NONE":
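            # load pretrained DD-PPO weights, keeping only the visual-encoder
            # parameters and stripping their module-name prefix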
ddppo_weights = torch.load(checkpoint)
weights_dict = {}
for k, v in ddppo_weights["state_dict"].items():
split_layer_name = k.split(".")[2:]
if split_layer_name[0] != "visual_encoder":
continue
layer_name = ".".join(split_layer_name[1:])
weights_dict[layer_name] = v
del ddppo_weights
self.visual_encoder.load_state_dict(weights_dict, strict=True)
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
# self.visual_fc = nn.Sequential(
# nn.Flatten(),
# nn.Linear(
# np.prod(self.visual_encoder.output_shape), output_size
# ),
# nn.ReLU(True),
# )
            pass
else:
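            # learned embedding per spatial cell, concatenated to the visual features
            # along the channel dimension in forward()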
self.spatial_embeddings = nn.Embedding(
self.visual_encoder.output_shape[1]
* self.visual_encoder.output_shape[2],
64,
)
self.output_shape = list(self.visual_encoder.output_shape)
self.output_shape[0] += self.spatial_embeddings.embedding_dim
self.output_shape = tuple(self.output_shape)
def forward(self, observations):
"""
Args:
observations: [BATCH, HEIGHT, WIDTH, CHANNEL]
Returns:
            the ResNet feature map (spatial embeddings are appended along the
            channel dimension when spatial_output is True)
"""
if "depth_features" in observations:
x = observations["depth_features"]
else:
x = self.visual_encoder(observations)
if self.spatial_output:
b, c, h, w = x.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=x.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([x, spatial_features], dim=1)
else:
# return self.visual_fc(x)
return x
class TorchVisionResNet50(nn.Module):
r"""
Takes in observations and produces an embedding of the rgb component.
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
device: torch.device
"""
def __init__(
self,
observation_space,
output_size,
device,
spatial_output: bool = False,
):
super().__init__()
self.device = device
self.resnet_layer_size = 2048
linear_layer_input_size = 0
if "rgb" in observation_space.spaces:
self._n_input_rgb = observation_space.spaces["rgb"].shape[2]
obs_size_0 = observation_space.spaces["rgb"].shape[0]
obs_size_1 = observation_space.spaces["rgb"].shape[1]
if obs_size_0 != 224 or obs_size_1 != 224:
logger.warn(
"TorchVisionResNet50: observation size is not conformant to expected ResNet input size [3x224x224]"
)
linear_layer_input_size += self.resnet_layer_size
else:
self._n_input_rgb = 0
if self.is_blind:
self.cnn = nn.Sequential()
return
rgb_resnet = models.resnet50(pretrained=True)
rgb_modules = list(rgb_resnet.children())[:-2]
self.cnn = torch.nn.Sequential(*rgb_modules)
# disable gradients for resnet, params frozen
for param in self.cnn.parameters():
param.requires_grad = False
self.cnn.eval()
self.spatial_output = spatial_output
if not self.spatial_output:
self.output_shape = (output_size,)
# self.fc = nn.Linear(linear_layer_input_size, output_size)
# self.activation = nn.ReLU()
            pass
else:
class SpatialAvgPool(nn.Module):
def forward(self, x):
x = F.adaptive_avg_pool2d(x, (4, 4))
return x
self.cnn.avgpool = SpatialAvgPool()
self.cnn.fc = nn.Sequential()
self.spatial_embeddings = nn.Embedding(4 * 4, 64)
self.output_shape = (
self.resnet_layer_size + self.spatial_embeddings.embedding_dim,
4,
4,
)
# self.layer_extract = self.cnn._modules.get("avgpool")
from torchvision import transforms
self.rgb_transform = torch.nn.Sequential(
transforms.ConvertImageDtype(torch.float),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
)
@property
def is_blind(self):
return self._n_input_rgb == 0
def forward(self, observations):
r"""Sends RGB observation through the TorchVision ResNet50 pre-trained
        on ImageNet and returns the resulting feature map (the original fully
        connected projection layer is disabled in this implementation).
"""
def resnet_forward(observation):
# resnet_output = torch.zeros(
# 1, dtype=torch.float32, device=observation.device
# )
# def hook(m, i, o):
# resnet_output.set_(o)
# output: [BATCH x RESNET_DIM]
# h = self.layer_extract.register_forward_hook(hook)
resnet_output = self.cnn(observation)
# h.remove()
return resnet_output
if "rgb_features" in observations:
resnet_output = observations["rgb_features"]
else:
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT x WIDTH]
rgb_observations = observations["rgb"].permute(0, 3, 1, 2)
rgb_observations = self.rgb_transform(rgb_observations)
# rgb_observations = rgb_observations / 255.0 # normalize RGB
resnet_output = resnet_forward(rgb_observations.contiguous())
if self.spatial_output:
b, c, h, w = resnet_output.size()
spatial_features = (
self.spatial_embeddings(
torch.arange(
0,
self.spatial_embeddings.num_embeddings,
device=resnet_output.device,
dtype=torch.long,
)
)
.view(1, -1, h, w)
.expand(b, self.spatial_embeddings.embedding_dim, h, w)
)
return torch.cat([resnet_output, spatial_features], dim=1)#.to(self.device)
else:
# return self.activation(
# self.fc(torch.flatten(resnet_output, 1))
# ) # [BATCH x OUTPUT_DIM]
return resnet_output
| 8,103 | 32.626556 | 119 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/encoders/instruction_encoder.py | import gzip
import json
import torch
import torch.nn as nn
from habitat import Config
class InstructionEncoder(nn.Module):
def __init__(self, config: Config):
r"""An encoder that uses RNN to encode an instruction. Returns
the final hidden state after processing the instruction sequence.
Args:
config: must have
embedding_size: The dimension of each embedding vector
hidden_size: The hidden (output) size
rnn_type: The RNN cell type. Must be GRU or LSTM
final_state_only: Whether or not to return just the final state
"""
super().__init__()
self.config = config
# lang_drop_ratio = 0.50
# self.drop = nn.Dropout(p=lang_drop_ratio)
rnn = nn.GRU if self.config.rnn_type == "GRU" else nn.LSTM
self.encoder_rnn = rnn(
input_size=config.embedding_size,
hidden_size=config.hidden_size,
bidirectional=config.bidirectional,
)
if config.sensor_uuid == "instruction":
if self.config.use_pretrained_embeddings:
self.embedding_layer = nn.Embedding.from_pretrained(
embeddings=self._load_embeddings(),
freeze=not self.config.fine_tune_embeddings,
)
else: # each embedding initialized to sampled Gaussian
self.embedding_layer = nn.Embedding(
num_embeddings=config.vocab_size,
embedding_dim=config.embedding_size,
padding_idx=0,
)
@property
def output_size(self):
return self.config.hidden_size * (1 + int(self.config.bidirectional))
def _load_embeddings(self):
"""Loads word embeddings from a pretrained embeddings file.
PAD: index 0. [0.0, ... 0.0]
UNK: index 1. mean of all R2R word embeddings: [mean_0, ..., mean_n]
why UNK is averaged: https://bit.ly/3u3hkYg
Returns:
embeddings tensor of size [num_words x embedding_dim]
"""
with gzip.open(self.config.embedding_file, "rt") as f:
embeddings = torch.tensor(json.load(f))
return embeddings
def forward(self, observations):
"""
Tensor sizes after computation:
instruction: [batch_size x seq_length]
lengths: [batch_size]
hidden_state: [batch_size x hidden_size]
"""
if self.config.sensor_uuid == "instruction":
instruction = observations["instruction"].long()
lengths = (instruction != 0.0).long().sum(dim=1)
instruction = self.embedding_layer(instruction)
# instruction = self.drop(instruction)
else:
instruction = observations["rxr_instruction"]
lengths = (instruction != 0.0).long().sum(dim=2)
lengths = (lengths != 0.0).long().sum(dim=1)
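        # pack the variable-length instructions so the RNN ignores padding tokens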
packed_seq = nn.utils.rnn.pack_padded_sequence(
instruction, lengths.cpu(), batch_first=True, enforce_sorted=False
)
output, final_state = self.encoder_rnn(packed_seq)
if self.config.rnn_type == "LSTM":
final_state = final_state[0]
if self.config.final_state_only: # default False
return final_state.squeeze(0)
else:
ctx = nn.utils.rnn.pad_packed_sequence(output,
batch_first=True)[0].permute(0, 2, 1)
all_lang_masks = (ctx == 0.0).all(dim=1)
ctx = ctx.permute(0, 2, 1)
# ctx = self.drop(ctx)
return ctx, all_lang_masks
| 3,647 | 34.764706 | 79 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/vlnbert/vlnbert_PREVALENT.py | # PREVALENT, 2020, [email protected]
# Modified in Recurrent VLN-BERT, 2020, [email protected]
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import BertPreTrainedModel, BertConfig
import pdb
logger = logging.getLogger(__name__)
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except (ImportError, AttributeError) as e:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
BertLayerNorm = torch.nn.LayerNorm
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = True
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_scores) if self.output_attentions else (context_layer,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertXAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
self.att = BertOutAttention(config, ctx_dim=ctx_dim)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, ctx_tensor, ctx_att_mask=None):
output, attention_scores = self.att(input_tensor, ctx_tensor, ctx_att_mask)
attention_output = self.output(output, input_tensor)
return attention_output, attention_scores
class BertOutAttention(nn.Module):
def __init__(self, config, ctx_dim=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
# visual_dim = 2048
if ctx_dim is None:
ctx_dim =config.hidden_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(ctx_dim, self.all_head_size)
self.value = nn.Linear(ctx_dim, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, context, attention_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(context)
mixed_value_layer = self.value(context)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_scores
class LXRTXLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# Lang self-att and FFN layer
self.lang_self_att = BertAttention(config)
self.lang_inter = BertIntermediate(config)
self.lang_output = BertOutput(config)
# Visn self-att and FFN layer
self.visn_self_att = BertAttention(config)
self.visn_inter = BertIntermediate(config)
self.visn_output = BertOutput(config)
# The cross attention layer
self.visual_attention = BertXAttention(config)
def cross_att(self, lang_input, lang_attention_mask, visn_input, visn_attention_mask):
''' Cross Attention -- cross for vision not for language '''
visn_att_output, attention_scores = self.visual_attention(visn_input, lang_input, ctx_att_mask=lang_attention_mask)
return visn_att_output, attention_scores
def self_att(self, visn_input, visn_attention_mask):
''' Self Attention -- on visual features with language clues '''
visn_att_output = self.visn_self_att(visn_input, visn_attention_mask)
return visn_att_output
def output_fc(self, visn_input):
''' Feed forward '''
visn_inter_output = self.visn_inter(visn_input)
visn_output = self.visn_output(visn_inter_output, visn_input)
return visn_output
def forward(self, lang_feats, lang_attention_mask,
visn_feats, visn_attention_mask, tdx):
''' visual self-attention with state '''
visn_att_output = torch.cat((lang_feats[:, 0:1, :], visn_feats), dim=1)
state_vis_mask = torch.cat((lang_attention_mask[:,:,:,0:1], visn_attention_mask), dim=-1)
''' state and vision attend to language '''
visn_att_output, cross_attention_scores = self.cross_att(lang_feats[:, 1:, :], lang_attention_mask[:, :, :, 1:], visn_att_output, state_vis_mask)
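        # index 0 of the visual stream is the prepended state token; its cross-attention
        # over the language tokens gives the language attention scores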
language_attention_scores = cross_attention_scores[:, :, 0, :]
state_visn_att_output = self.self_att(visn_att_output, state_vis_mask)
state_visn_output = self.output_fc(state_visn_att_output[0])
visn_att_output = state_visn_output[:, 1:, :]
lang_att_output = torch.cat((state_visn_output[:, 0:1, :], lang_feats[:,1:,:]), dim=1)
visual_attention_scores = state_visn_att_output[1][:, :, 0, 1:]
return lang_att_output, visn_att_output, language_attention_scores, visual_attention_scores
class VisionEncoder(nn.Module):
def __init__(self, vision_size, config):
super().__init__()
feat_dim = vision_size
# Object feature encoding
self.visn_fc = nn.Linear(feat_dim, config.hidden_size)
self.visn_layer_norm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, visn_input):
feats = visn_input
x = self.visn_fc(feats)
x = self.visn_layer_norm(x)
output = self.dropout(x)
return output
class VLNBert(BertPreTrainedModel):
def __init__(self, config):
super(VLNBert, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.pooler = BertPooler(config)
self.img_dim = config.img_feature_dim # 2176
logger.info('VLNBert Image Dimension: {}'.format(self.img_dim))
self.img_feature_type = config.img_feature_type # ''
self.vl_layers = config.vl_layers # 4
self.la_layers = config.la_layers # 9
self.lalayer = nn.ModuleList(
[BertLayer(config) for _ in range(self.la_layers)])
self.addlayer = nn.ModuleList(
[LXRTXLayer(config) for _ in range(self.vl_layers)])
# self.vision_encoder = VisionEncoder(self.config.img_feature_dim, self.config)
# self.apply(self.init_weights)
self.init_weights()
# del self.img_dim
# del self.vision_encoder
# del self.embeddings
def forward(self, mode, input_ids, token_type_ids=None,
attention_mask=None, lang_mask=None, vis_mask=None,
position_ids=None, head_mask=None, img_feats=None):
attention_mask = lang_mask
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
head_mask = [None] * self.config.num_hidden_layers
if mode == 'language':
''' LXMERT language branch (in VLN only perform this at initialization) '''
embedding_output = self.embeddings(input_ids,
position_ids=position_ids, token_type_ids=token_type_ids)
text_embeds = embedding_output
for layer_module in self.lalayer:
temp_output = layer_module(text_embeds, extended_attention_mask)
text_embeds = temp_output[0]
sequence_output = text_embeds
pooled_output = self.pooler(sequence_output)
return pooled_output, sequence_output
elif mode == 'visual':
''' LXMERT visual branch (no language processing during navigation) '''
text_embeds = input_ids
text_mask = extended_attention_mask
# img_embedding_output = self.vision_encoder(img_feats)
img_embedding_output = img_feats
img_seq_len = img_feats.shape[1]
batch_size = text_embeds.size(0)
img_seq_mask = vis_mask
extended_img_mask = img_seq_mask.unsqueeze(1).unsqueeze(2)
extended_img_mask = extended_img_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_img_mask = (1.0 - extended_img_mask) * -10000.0
img_mask = extended_img_mask
lang_output = text_embeds
visn_output = img_embedding_output
for tdx, layer_module in enumerate(self.addlayer):
lang_output, visn_output, language_attention_scores, visual_attention_scores = layer_module(lang_output, text_mask, visn_output, img_mask, tdx)
sequence_output = lang_output
pooled_output = self.pooler(sequence_output)
visual_action_scores = visual_attention_scores.mean(dim=1)
return pooled_output, visual_action_scores
| 19,050 | 41.811236 | 159 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/vlnce_baselines/models/vlnbert/vlnbert_init.py | # Recurrent VLN-BERT, 2020, by [email protected]
from transformers import (BertConfig, BertTokenizer)
def get_vlnbert_models(config=None):
config_class = BertConfig
from vlnce_baselines.models.vlnbert.vlnbert_PREVALENT import VLNBert
model_class = VLNBert
model_name_or_path = 'data/pretrained_models/rec_vln_bert-models/vlnbert_prevalent_model.bin'
vis_config = config_class.from_pretrained('bert-base-uncased')
vis_config.img_feature_dim = 2176
vis_config.img_feature_type = ""
vis_config.vl_layers = 4
vis_config.la_layers = 9
visual_model = model_class.from_pretrained(model_name_or_path, config=vis_config)
return visual_model
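# Hedged usage sketch (not part of the original file): `input_ids` and `lang_mask`
# are assumed to be batched tensors produced by the instruction tokenizer. The
# 'language' branch is run once at the start of an episode; its pooled output
# seeds the recurrent state token that later 'visual' passes update.
def _example_language_pass(vln_bert, input_ids, lang_mask):
    pooled_output, sequence_output = vln_bert('language', input_ids, lang_mask=lang_mask)
    return pooled_output, sequence_output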
| 685 | 35.105263 | 97 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/shortest_path_follower.py | # Copied from https://github.com/facebookresearch/habitat-lab/blob/v0.1.4/habitat/tasks/nav/shortest_path_follower.py
# Use the Habitat v0.1.4 ShortestPathFollower for compatibility with
# the dataset generation oracle.
from typing import Optional, Union
import habitat_sim
import numpy as np
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.utils.geometry_utils import (
angle_between_quaternions,
quaternion_from_two_vectors,
)
EPSILON = 1e-6
def action_to_one_hot(action: int) -> np.array:
one_hot = np.zeros(len(HabitatSimActions), dtype=np.float32)
one_hot[action] = 1
return one_hot
class ShortestPathFollowerCompat:
r"""Utility class for extracting the action on the shortest path to the
goal.
Args:
sim: HabitatSim instance.
goal_radius: Distance between the agent and the goal for it to be
considered successful.
return_one_hot: If true, returns a one-hot encoding of the action
(useful for training ML agents). If false, returns the
SimulatorAction.
"""
def __init__(
self, sim: HabitatSim, goal_radius: float, return_one_hot: bool = True
):
assert (
getattr(sim, "geodesic_distance", None) is not None
), "{} must have a method called geodesic_distance".format(
type(sim).__name__
)
self._sim = sim
self._max_delta = sim.habitat_config.FORWARD_STEP_SIZE - EPSILON
self._goal_radius = goal_radius
self._step_size = sim.habitat_config.FORWARD_STEP_SIZE
self._mode = (
"geodesic_path"
if getattr(sim, "get_straight_shortest_path_points", None)
is not None
else "greedy"
)
self._return_one_hot = return_one_hot
def _get_return_value(self, action) -> Union[int, np.array]:
if self._return_one_hot:
return action_to_one_hot(action)
else:
return action
def get_next_action(
self, goal_pos: np.array
) -> Optional[Union[int, np.array]]:
"""Returns the next action along the shortest path."""
if (
self._sim.geodesic_distance(
self._sim.get_agent_state().position, goal_pos
)
<= self._goal_radius
):
return None
max_grad_dir = self._est_max_grad_dir(goal_pos)
if max_grad_dir is None:
return self._get_return_value(HabitatSimActions.MOVE_FORWARD)
return self._step_along_grad(max_grad_dir)
def _step_along_grad(
self, grad_dir: np.quaternion
) -> Union[int, np.array]:
current_state = self._sim.get_agent_state()
alpha = angle_between_quaternions(grad_dir, current_state.rotation)
if alpha <= np.deg2rad(self._sim.habitat_config.TURN_ANGLE) + EPSILON:
return self._get_return_value(HabitatSimActions.MOVE_FORWARD)
else:
sim_action = HabitatSimActions.TURN_LEFT
self._sim.step(sim_action)
best_turn = (
HabitatSimActions.TURN_LEFT
if (
angle_between_quaternions(
grad_dir, self._sim.get_agent_state().rotation
)
< alpha
)
else HabitatSimActions.TURN_RIGHT
)
self._reset_agent_state(current_state)
return self._get_return_value(best_turn)
def _reset_agent_state(self, state: habitat_sim.AgentState) -> None:
self._sim.set_agent_state(
state.position, state.rotation, reset_sensors=False
)
def _geo_dist(self, goal_pos: np.array) -> float:
return self._sim.geodesic_distance(
self._sim.get_agent_state().position, goal_pos
)
def _est_max_grad_dir(self, goal_pos: np.array) -> np.array:
current_state = self._sim.get_agent_state()
current_pos = current_state.position
if self.mode == "geodesic_path":
points = self._sim.get_straight_shortest_path_points(
self._sim.get_agent_state().position, goal_pos
)
# Add a little offset as things get weird if
# points[1] - points[0] is anti-parallel with forward
if len(points) < 2:
return None
max_grad_dir = quaternion_from_two_vectors(
self._sim.forward_vector,
points[1]
- points[0]
+ EPSILON
* np.cross(self._sim.up_vector, self._sim.forward_vector),
)
max_grad_dir.x = 0
max_grad_dir = np.normalized(max_grad_dir)
else:
current_rotation = self._sim.get_agent_state().rotation
current_dist = self._geo_dist(goal_pos)
best_geodesic_delta = -2 * self._max_delta
best_rotation = current_rotation
for _ in range(0, 360, self._sim.habitat_config.TURN_ANGLE):
sim_action = HabitatSimActions.MOVE_FORWARD
self._sim.step(sim_action)
new_delta = current_dist - self._geo_dist(goal_pos)
if new_delta > best_geodesic_delta:
best_rotation = self._sim.get_agent_state().rotation
best_geodesic_delta = new_delta
# If the best delta is within (1 - cos(TURN_ANGLE))% of the
# best delta (the step size), then we almost certainly have
# found the max grad dir and should just exit
if np.isclose(
best_geodesic_delta,
self._max_delta,
rtol=1
- np.cos(np.deg2rad(self._sim.habitat_config.TURN_ANGLE)),
):
break
self._sim.set_agent_state(
current_pos,
self._sim.get_agent_state().rotation,
reset_sensors=False,
)
sim_action = HabitatSimActions.TURN_LEFT
self._sim.step(sim_action)
self._reset_agent_state(current_state)
max_grad_dir = best_rotation
return max_grad_dir
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, new_mode: str):
r"""Sets the mode for how the greedy follower determines the best next
step.
Args:
new_mode: geodesic_path indicates using the simulator's shortest
path algorithm to find points on the map to navigate between.
greedy indicates trying to move forward at all possible
orientations and selecting the one which reduces the geodesic
distance the most.
"""
assert new_mode in {"geodesic_path", "greedy"}
if new_mode == "geodesic_path":
assert (
getattr(self._sim, "get_straight_shortest_path_points", None)
is not None
)
self._mode = new_mode
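# Hedged usage sketch (not part of the original file): `sim` is assumed to be an
# already constructed HabitatSim and `goal_pos` an np.array goal position. The
# follower is queried step by step and signals arrival by returning None.
def _example_follow_to_goal(sim, goal_pos, goal_radius=0.5):
    follower = ShortestPathFollowerCompat(sim, goal_radius, return_one_hot=False)
    while True:
        action = follower.get_next_action(goal_pos)
        if action is None:
            break  # agent is within goal_radius of the goal
        sim.step(action)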
| 7,219 | 35.1 | 117 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/task.py | import gzip
import json
import os
from typing import Dict, List, Optional, Union
import attr
from habitat.config import Config
from habitat.core.dataset import Dataset
from habitat.core.registry import registry
from habitat.core.utils import not_none_validator
from habitat.datasets.pointnav.pointnav_dataset import ALL_SCENES_MASK
from habitat.datasets.utils import VocabDict
from habitat.tasks.nav.nav import NavigationGoal
from habitat.tasks.vln.vln import InstructionData, VLNEpisode
import random
random.seed(0)
DEFAULT_SCENE_PATH_PREFIX = "data/scene_datasets/"
ALL_LANGUAGES_MASK = "*"
ALL_ROLES_MASK = "*"
@attr.s(auto_attribs=True)
class ExtendedInstructionData:
instruction_text: str = attr.ib(default=None, validator=not_none_validator)
instruction_id: Optional[str] = attr.ib(default=None)
language: Optional[str] = attr.ib(default=None)
annotator_id: Optional[str] = attr.ib(default=None)
edit_distance: Optional[float] = attr.ib(default=None)
timed_instruction: Optional[List[Dict[str, Union[float, str]]]] = attr.ib(
default=None
)
instruction_tokens: Optional[List[str]] = attr.ib(default=None)
split: Optional[str] = attr.ib(default=None)
@attr.s(auto_attribs=True, kw_only=True)
class VLNExtendedEpisode(VLNEpisode):
goals: Optional[List[NavigationGoal]] = attr.ib(default=None)
reference_path: Optional[List[List[float]]] = attr.ib(default=None)
instruction: ExtendedInstructionData = attr.ib(
default=None, validator=not_none_validator
)
trajectory_id: Optional[Union[int, str]] = attr.ib(default=None)
@registry.register_dataset(name="VLN-CE-v1")
class VLNCEDatasetV1(Dataset):
r"""Class inherited from Dataset that loads a Vision and Language
Navigation dataset.
"""
episodes: List[VLNEpisode]
instruction_vocab: VocabDict
@staticmethod
def check_config_paths_exist(config: Config) -> bool:
return os.path.exists(
config.DATA_PATH.format(split=config.SPLIT)
) and os.path.exists(config.SCENES_DIR)
@staticmethod
def _scene_from_episode(episode: VLNEpisode) -> str:
r"""Helper method to get the scene name from an episode. Assumes
        the scene_id is formatted as /path/to/<scene_name>.<ext>
"""
return os.path.splitext(os.path.basename(episode.scene_id))[0]
@classmethod
def get_scenes_to_load(cls, config: Config) -> List[str]:
r"""Return a sorted list of scenes"""
assert cls.check_config_paths_exist(config)
dataset = cls(config)
return sorted(
{cls._scene_from_episode(episode) for episode in dataset.episodes}
)
def __init__(self, config: Optional[Config] = None) -> None:
self.episodes = []
if config is None:
return
dataset_filename = config.DATA_PATH.format(split=config.SPLIT)
with gzip.open(dataset_filename, "rt") as f:
self.from_json(f.read(), scenes_dir=config.SCENES_DIR)
if ALL_SCENES_MASK not in config.CONTENT_SCENES:
scenes_to_load = set(config.CONTENT_SCENES)
self.episodes = [
episode
for episode in self.episodes
if self._scene_from_episode(episode) in scenes_to_load
]
if config.EPISODES_ALLOWED is not None:
ep_ids_before = {ep.episode_id for ep in self.episodes}
ep_ids_to_purge = ep_ids_before - set([ int(id) for id in config.EPISODES_ALLOWED])
self.episodes = [
episode
for episode in self.episodes
if episode.episode_id not in ep_ids_to_purge
]
def from_json(
self, json_str: str, scenes_dir: Optional[str] = None
) -> None:
deserialized = json.loads(json_str)
self.instruction_vocab = VocabDict(
word_list=deserialized["instruction_vocab"]["word_list"]
)
for episode in deserialized["episodes"]:
episode = VLNExtendedEpisode(**episode)
if scenes_dir is not None:
if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX):
episode.scene_id = episode.scene_id[
len(DEFAULT_SCENE_PATH_PREFIX) :
]
episode.scene_id = os.path.join(scenes_dir, episode.scene_id)
episode.instruction = InstructionData(**episode.instruction)
if episode.goals is not None:
for g_index, goal in enumerate(episode.goals):
episode.goals[g_index] = NavigationGoal(**goal)
self.episodes.append(episode)
random.shuffle(self.episodes)
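# Hedged usage sketch (not part of the original file): `config` is assumed to be a
# habitat Config whose DATA_PATH, SPLIT and SCENES_DIR point at the VLN-CE episodes;
# get_scenes_to_load is typically called first so that scenes can be sharded across
# parallel environment workers.
def _example_scene_shards(config, n_workers):
    scenes = VLNCEDatasetV1.get_scenes_to_load(config)
    return [scenes[i::n_workers] for i in range(n_workers)]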
@registry.register_dataset(name="RxR-VLN-CE-v1")
class RxRVLNCEDatasetV1(Dataset):
r"""Loads the RxR VLN-CE Dataset."""
episodes: List[VLNEpisode]
instruction_vocab: VocabDict
annotation_roles: List[str] = ["guide", "follower"]
languages: List[str] = ["en-US", "en-IN", "hi-IN", "te-IN"]
@staticmethod
def _scene_from_episode(episode: VLNEpisode) -> str:
r"""Helper method to get the scene name from an episode. Assumes
        the scene_id is formatted as /path/to/<scene_name>.<ext>
"""
return os.path.splitext(os.path.basename(episode.scene_id))[0]
@staticmethod
def _language_from_episode(episode: VLNExtendedEpisode) -> str:
return episode.instruction.language
@classmethod
def get_scenes_to_load(cls, config: Config) -> List[str]:
r"""Return a sorted list of scenes"""
assert cls.check_config_paths_exist(config)
dataset = cls(config)
return sorted(
{cls._scene_from_episode(episode) for episode in dataset.episodes}
)
@classmethod
def extract_roles_from_config(cls, config: Config) -> List[str]:
if ALL_ROLES_MASK in config.ROLES:
return cls.annotation_roles
assert set(config.ROLES).issubset(set(cls.annotation_roles))
return config.ROLES
@classmethod
def check_config_paths_exist(cls, config: Config) -> bool:
return all(
os.path.exists(
config.DATA_PATH.format(split=config.SPLIT, role=role)
)
for role in cls.extract_roles_from_config(config)
) and os.path.exists(config.SCENES_DIR)
def __init__(self, config: Optional[Config] = None) -> None:
self.episodes = []
self.config = config
if config is None:
return
for role in self.extract_roles_from_config(config):
with gzip.open(
config.DATA_PATH.format(split=config.SPLIT, role=role), "rt"
) as f:
self.from_json(f.read(), scenes_dir=config.SCENES_DIR)
if ALL_SCENES_MASK not in config.CONTENT_SCENES:
scenes_to_load = set(config.CONTENT_SCENES)
self.episodes = [
episode
for episode in self.episodes
if self._scene_from_episode(episode) in scenes_to_load
]
if ALL_LANGUAGES_MASK not in config.LANGUAGES:
languages_to_load = set(config.LANGUAGES)
self.episodes = [
episode
for episode in self.episodes
if self._language_from_episode(episode) in languages_to_load
]
if config.EPISODES_ALLOWED is not None:
ep_ids_before = {ep.episode_id for ep in self.episodes}
ep_ids_to_purge = ep_ids_before - set(config.EPISODES_ALLOWED)
self.episodes = [
episode
for episode in self.episodes
if episode.episode_id not in ep_ids_to_purge
]
def from_json(
self, json_str: str, scenes_dir: Optional[str] = None
) -> None:
deserialized = json.loads(json_str)
for episode in deserialized["episodes"]:
episode = VLNExtendedEpisode(**episode)
if scenes_dir is not None:
if episode.scene_id.startswith(DEFAULT_SCENE_PATH_PREFIX):
episode.scene_id = episode.scene_id[
len(DEFAULT_SCENE_PATH_PREFIX) :
]
episode.scene_id = os.path.join(scenes_dir, episode.scene_id)
episode.instruction = ExtendedInstructionData(
**episode.instruction
)
episode.instruction.split = self.config.SPLIT
if episode.goals is not None:
for g_index, goal in enumerate(episode.goals):
episode.goals[g_index] = NavigationGoal(**goal)
self.episodes.append(episode)
| 8,711 | 34.851852 | 95 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/nav.py | from typing import Any, List, Optional, Tuple
import math
import numpy as np
from habitat.core.embodied_task import (
SimulatorTaskAction,
)
from habitat.core.registry import registry
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.utils.geometry_utils import quaternion_rotate_vector
from habitat.tasks.utils import cartesian_to_polar
# @registry.register_task_action
# class MoveForwardByDistanceAction(SimulatorTaskAction):
# def step(self, *args: Any, distance: float,**kwargs: Any):
# r"""Update ``_metric``, this method is called from ``Env`` on each
# ``step``.
# """
# original_amount = self._sim.get_agent(0).agent_config.action_space[1].actuation.amount
# self._sim.get_agent(0).agent_config.action_space[1].actuation.amount = distance
# output = self._sim.step(HabitatSimActions.MOVE_FORWARD)
# self._sim.get_agent(0).agent_config.action_space[1].actuation.amount = original_amount
# return output
@registry.register_task_action
class MoveHighToLowAction(SimulatorTaskAction):
def step(self, *args: Any,
angle: float, distance: float,
**kwargs: Any):
r"""This control method is called from ``Env`` on each ``step``.
"""
init_state = self._sim.get_agent_state()
# left_action = HabitatSimActions.TURN_LEFT
forward_action = HabitatSimActions.MOVE_FORWARD
# init_left = self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount
init_forward = self._sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
# self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount = angle * 180 / math.pi
# output = self._sim.step(left_action)
        # The agent's heading is a rotation about the gravity (Y) axis stored as the
        # quaternion (cos(theta), 0, sin(theta), 0), where theta is half the yaw
        # angle, so adding angle / 2 to the current half-angle turns the agent by
        # `angle` radians before walking forward.
        theta = np.arctan2(init_state.rotation.imag[1],
                           init_state.rotation.real) + angle / 2
        rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
        self._sim.set_agent_state(init_state.position, rotation)
# self._sim.get_agent(0).agent_config.action_space[
# forward_action].actuation.amount = distance
        # Decompose the waypoint distance into unit forward steps; observations are
        # rendered only on the final step (intermediate steps skip sensor rendering).
        # Assumes the distance covers at least one forward step.
        ksteps = int(distance//init_forward)
for k in range(ksteps):
if k == ksteps - 1:
output = self._sim.step(forward_action)
else:
self._sim.step_without_obs(forward_action)
# self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount = init_left
# self._sim.get_agent(0).agent_config.action_space[
# forward_action].actuation.amount = init_forward
return output
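# Hedged illustration (not part of the original file): a high-level waypoint hop is
# decomposed into unit MOVE_FORWARD actions. With the default 0.25 m forward step,
# a 2.3 m waypoint distance gives int(2.3 // 0.25) == 9 low-level steps, of which
# only the last one renders observations.
def _example_num_forward_steps(distance, step_size=0.25):
    return int(distance // step_size)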
@registry.register_task_action
class MoveHighToLowActionEval(SimulatorTaskAction):
def step(self, *args: Any,
angle: float, distance: float,
**kwargs: Any):
r"""This control method is called from ``Env`` on each ``step``.
"""
init_state = self._sim.get_agent_state()
positions = []
collisions = []
# left_action = HabitatSimActions.TURN_LEFT
forward_action = HabitatSimActions.MOVE_FORWARD
# init_left = self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount
init_forward = self._sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
# self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount = angle * 180 / math.pi
# output = self._sim.step(left_action)
theta = np.arctan2(init_state.rotation.imag[1],
init_state.rotation.real) + angle / 2
rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
self._sim.set_agent_state(init_state.position, rotation)
# positions.append(init_state.position)
# self._sim.get_agent(0).agent_config.action_space[
# forward_action].actuation.amount = distance
ksteps = int(distance//init_forward)
for k in range(ksteps):
if k == ksteps - 1:
output = self._sim.step(forward_action)
else:
self._sim.step_without_obs(forward_action)
positions.append(self._sim.get_agent_state().position)
collisions.append(self._sim.previous_step_collided)
# self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount = init_left
# self._sim.get_agent(0).agent_config.action_space[
# forward_action].actuation.amount = init_forward
output['positions'] = positions
output['collisions'] = collisions
return output
@registry.register_task_action
class MoveHighToLowActionInfer(SimulatorTaskAction):
def step(self, *args: Any,
angle: float, distance: float,
**kwargs: Any):
r"""This control method is called from ``Env`` on each ``step``.
"""
init_state = self._sim.get_agent_state()
def get_info(sim):
agent_state = sim.get_agent_state()
heading_vector = quaternion_rotate_vector(
agent_state.rotation.inverse(), np.array([0, 0, -1])
)
heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
return {
"position": agent_state.position.tolist(),
"heading": heading,
"stop": False,
}
infos = []
# left_action = HabitatSimActions.TURN_LEFT
forward_action = HabitatSimActions.MOVE_FORWARD
# init_left = self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount
init_forward = self._sim.get_agent(0).agent_config.action_space[
forward_action].actuation.amount
# self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount = angle * 180 / math.pi
# output = self._sim.step(left_action)
theta = np.arctan2(init_state.rotation.imag[1],
init_state.rotation.real) + angle / 2
rotation = np.quaternion(np.cos(theta), 0, np.sin(theta), 0)
self._sim.set_agent_state(init_state.position, rotation)
# positions.append(init_state.position)
# self._sim.get_agent(0).agent_config.action_space[
# forward_action].actuation.amount = distance
ksteps = int(distance//init_forward)
for k in range(ksteps):
if k == ksteps - 1:
output = self._sim.step(forward_action)
else:
self._sim.step_without_obs(forward_action)
infos.append(get_info(self._sim))
# self._sim.get_agent(0).agent_config.action_space[
# left_action].actuation.amount = init_left
# self._sim.get_agent(0).agent_config.action_space[
# forward_action].actuation.amount = init_forward
output['infos'] = infos
return output | 7,067 | 40.093023 | 96 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/obs_transformers.py | import copy
import numbers
from typing import Dict, List, Tuple, Union
import torch
from gym import spaces
from habitat.config import Config
from habitat.core.logging import logger
from habitat_baselines.common.baseline_registry import baseline_registry
from habitat_baselines.common.obs_transformers import ObservationTransformer
from habitat_baselines.utils.common import (
center_crop,
get_image_height_width,
overwrite_gym_box_shape,
)
from torch import Tensor
@baseline_registry.register_obs_transformer()
class CenterCropperPerSensor(ObservationTransformer):
"""An observation transformer that center crops your input on a per-sensor basis."""
sensor_crops: Dict[str, Union[int, Tuple[int, int]]]
channels_last: bool
def __init__(
self,
sensor_crops: List[Tuple[str, Union[int, Tuple[int, int]]]],
channels_last: bool = True,
):
"""Args:
size: A sequence (h, w) or int of the size you wish to resize/center_crop.
If int, assumes square crop
channels_list: indicates if channels is the last dimension
trans_keys: The list of sensors it will try to centercrop.
"""
super().__init__()
self.sensor_crops = dict(sensor_crops)
        for k in self.sensor_crops:
            size = self.sensor_crops[k]
            if isinstance(size, numbers.Number):
                # Normalize int sizes to an (h, w) tuple before the length check.
                size = (int(size), int(size))
                self.sensor_crops[k] = size
            assert len(size) == 2, "forced input size must be len of 2 (h, w)"
self.channels_last = channels_last
def transform_observation_space(
self,
observation_space: spaces.Dict,
):
observation_space = copy.deepcopy(observation_space)
for key in observation_space.spaces:
if (
key in self.sensor_crops
and observation_space.spaces[key].shape[-3:-1]
!= self.sensor_crops[key]
):
h, w = get_image_height_width(
observation_space.spaces[key], channels_last=True
)
logger.info(
"Center cropping observation size of %s from %s to %s"
% (key, (h, w), self.sensor_crops[key])
)
observation_space.spaces[key] = overwrite_gym_box_shape(
observation_space.spaces[key], self.sensor_crops[key]
)
return observation_space
@torch.no_grad()
def forward(self, observations: Dict[str, Tensor]) -> Dict[str, Tensor]:
observations.update(
{
sensor: center_crop(
observations[sensor],
self.sensor_crops[sensor],
channels_last=self.channels_last,
)
for sensor in self.sensor_crops
if sensor in observations
}
)
return observations
@classmethod
def from_config(cls, config: Config):
cc_config = config.RL.POLICY.OBS_TRANSFORMS.CENTER_CROPPER_PER_SENSOR
return cls(cc_config.SENSOR_CROPS)
@baseline_registry.register_obs_transformer()
class ResizerPerSensor(ObservationTransformer):
r"""An nn module the resizes images to any aspect ratio.
This module assumes that all images in the batch are of the same size.
"""
def __init__(
self,
        sizes: List[Tuple[str, Union[int, Tuple[int, int]]]],
channels_last: bool = True,
trans_keys: Tuple[str] = ("rgb", "depth", "semantic"),
):
super().__init__()
"""Args:
size: The size you want to resize
channels_last: indicates if channels is the last dimension
"""
self.sensor_resizes = dict(sizes)
        for k in self.sensor_resizes:
            size = self.sensor_resizes[k]
            if isinstance(size, numbers.Number):
                # Normalize int sizes to an (h, w) tuple before the length check.
                size = (int(size), int(size))
                self.sensor_resizes[k] = size
            assert len(size) == 2, "forced input size must be len of 2 (h, w)"
self.channels_last = channels_last
def transform_observation_space(
self,
observation_space: spaces.Dict,
):
for key in observation_space.spaces:
if (
key in self.sensor_resizes
and observation_space.spaces[key].shape[-3:-1]
!= self.sensor_resizes[key]
):
h, w = get_image_height_width(
observation_space.spaces[key], channels_last=True
)
logger.info(
"Resizing observation size of %s from %s to %s"
% (key, (h, w), self.sensor_resizes[key])
)
observation_space.spaces[key] = overwrite_gym_box_shape(
observation_space.spaces[key], self.sensor_resizes[key]
)
return observation_space
def _transform_obs(self, obs: torch.Tensor, size) -> torch.Tensor:
img = torch.as_tensor(obs)
no_batch_dim = len(img.shape) == 3
if len(img.shape) < 3 or len(img.shape) > 5:
raise NotImplementedError()
if no_batch_dim:
img = img.unsqueeze(0) # Adds a batch dimension
h, w = get_image_height_width(img, channels_last=self.channels_last)
if self.channels_last:
if len(img.shape) == 4:
# NHWC -> NCHW
img = img.permute(0, 3, 1, 2)
else:
# NDHWC -> NDCHW
img = img.permute(0, 1, 4, 2, 3)
h, w = size
img = torch.nn.functional.interpolate(
img.float(), size=(h, w), mode="area"
).to(dtype=img.dtype)
if self.channels_last:
if len(img.shape) == 4:
# NCHW -> NHWC
img = img.permute(0, 2, 3, 1)
else:
# NDCHW -> NDHWC
img = img.permute(0, 1, 3, 4, 2)
if no_batch_dim:
img = img.squeeze(dim=0) # Removes the batch dimension
return img
@torch.no_grad()
def forward(
self, observations: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
observations.update(
{
sensor: self._transform_obs(
observations[sensor], self.sensor_resizes[sensor])
for sensor in self.sensor_resizes
if sensor in observations
}
)
return observations
@classmethod
def from_config(cls, config: Config):
r_config = config.RL.POLICY.OBS_TRANSFORMS.RESIZER_PER_SENSOR
return cls(r_config.SIZES)
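# Hedged usage sketch (not part of the original file): both transformers take a list
# of (sensor uuid, size) pairs; the sensor names and sizes below are the usual
# Habitat uuids but are assumptions of this sketch rather than values read from a
# config.
def _example_build_transforms():
    resizer = ResizerPerSensor([("rgb", (256, 256)), ("depth", (256, 256))])
    cropper = CenterCropperPerSensor([("rgb", (224, 224)), ("depth", (224, 224))])
    return [resizer, cropper]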
| 6,642 | 33.598958 | 88 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/utils.py | from typing import Dict
import numpy as np
from habitat.core.utils import try_cv2_import
from habitat.utils.visualizations import maps as habitat_maps
from habitat.utils.visualizations.utils import draw_collision
from habitat_extensions import maps
cv2 = try_cv2_import()
def observations_to_image(observation: Dict, info: Dict) -> np.ndarray:
r"""Generate image of single frame from observation and info
returned from a single environment step().
Args:
observation: observation returned from an environment step().
info: info returned from an environment step().
Returns:
generated image of a single frame.
"""
egocentric_view = []
observation_size = -1
if "rgb" in observation:
observation_size = observation["rgb"].shape[0]
rgb = observation["rgb"][:, :, :3]
egocentric_view.append(rgb)
# draw depth map if observation has depth info. resize to rgb size.
if "depth" in observation:
if observation_size == -1:
observation_size = observation["depth"].shape[0]
depth_map = (observation["depth"].squeeze() * 255).astype(np.uint8)
depth_map = np.stack([depth_map for _ in range(3)], axis=2)
depth_map = cv2.resize(
depth_map,
dsize=(observation_size, observation_size),
interpolation=cv2.INTER_CUBIC,
)
egocentric_view.append(depth_map)
assert (
len(egocentric_view) > 0
), "Expected at least one visual sensor enabled."
egocentric_view = np.concatenate(egocentric_view, axis=1)
# draw collision
if "collisions" in info and info["collisions"]["is_collision"]:
egocentric_view = draw_collision(egocentric_view)
frame = egocentric_view
map_k = None
if "top_down_map_vlnce" in info:
map_k = "top_down_map_vlnce"
elif "top_down_map" in info:
map_k = "top_down_map"
if map_k is not None:
td_map = info[map_k]["map"]
td_map = maps.colorize_topdown_map(
td_map,
info[map_k]["fog_of_war_mask"],
fog_of_war_desat_amount=0.75,
)
td_map = habitat_maps.draw_agent(
image=td_map,
agent_center_coord=info[map_k]["agent_map_coord"],
agent_rotation=info[map_k]["agent_angle"],
agent_radius_px=min(td_map.shape[0:2]) // 24,
)
if td_map.shape[1] < td_map.shape[0]:
td_map = np.rot90(td_map, 1)
if td_map.shape[0] > td_map.shape[1]:
td_map = np.rot90(td_map, 1)
# scale top down map to align with rgb view
old_h, old_w, _ = td_map.shape
top_down_height = observation_size
top_down_width = int(float(top_down_height) / old_h * old_w)
# cv2 resize (dsize is width first)
td_map = cv2.resize(
td_map,
(top_down_width, top_down_height),
interpolation=cv2.INTER_CUBIC,
)
frame = np.concatenate((egocentric_view, td_map), axis=1)
return frame
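# Hedged usage sketch (not part of the original file): with only visual sensors in
# the observation and no map info, the function just concatenates the egocentric
# views into a single frame suitable for video generation.
def _example_frame():
    observation = {
        "rgb": np.zeros((256, 256, 3), dtype=np.uint8),
        "depth": np.zeros((256, 256, 1), dtype=np.float32),
    }
    return observations_to_image(observation, info={})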
| 3,056 | 31.870968 | 75 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/habitat_simulator.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import (
TYPE_CHECKING,
Any,
Dict,
List,
Optional,
Sequence,
Set,
Union,
cast,
)
import numpy as np
from gym import spaces
from gym.spaces.box import Box
from numpy import ndarray
if TYPE_CHECKING:
from torch import Tensor
import habitat_sim
from habitat_sim.simulator import MutableMapping, MutableMapping_T
from habitat.sims.habitat_simulator.habitat_simulator import HabitatSim
from habitat.core.dataset import Episode
from habitat.core.registry import registry
from habitat.core.simulator import (
AgentState,
Config,
DepthSensor,
Observations,
RGBSensor,
SemanticSensor,
Sensor,
SensorSuite,
ShortestPathPoint,
Simulator,
VisualObservation,
)
from habitat.core.spaces import Space
# inherit habitat-lab/habitat/sims/habitat_simulator/habitat_simulator.py
@registry.register_simulator(name="Sim-v1")
class Simulator(HabitatSim):
r"""Simulator wrapper over habitat-sim
habitat-sim repo: https://github.com/facebookresearch/habitat-sim
Args:
config: configuration for initializing the simulator.
"""
def __init__(self, config: Config) -> None:
super().__init__(config)
def step_without_obs(self,
action: Union[str, int, MutableMapping_T[int, Union[str, int]]],
dt: float = 1.0 / 60.0,):
self._num_total_frames += 1
if isinstance(action, MutableMapping):
return_single = False
else:
action = cast(Dict[int, Union[str, int]], {self._default_agent_id: action})
return_single = True
collided_dict: Dict[int, bool] = {}
for agent_id, agent_act in action.items():
agent = self.get_agent(agent_id)
collided_dict[agent_id] = agent.act(agent_act)
self.__last_state[agent_id] = agent.get_state()
# # step physics by dt
# step_start_Time = time.time()
# super().step_world(dt)
# self._previous_step_time = time.time() - step_start_Time
multi_observations = {}
for agent_id in action.keys():
agent_observation = {}
agent_observation["collided"] = collided_dict[agent_id]
multi_observations[agent_id] = agent_observation
if return_single:
sim_obs = multi_observations[self._default_agent_id]
else:
sim_obs = multi_observations
self._prev_sim_obs = sim_obs
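# Hedged usage sketch (not part of the original file): step_without_obs advances the
# agent state (with collision checking) while skipping sensor rendering, which is
# what the high-to-low waypoint actions rely on for their intermediate sub-steps.
def _example_substeps(sim, n_substeps, action):
    for k in range(n_substeps):
        if k == n_substeps - 1:
            return sim.step(action)  # render observations only on the last sub-step
        sim.step_without_obs(action)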
| 2,654 | 27.244681 | 87 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/measures.py | import gzip
import json
import pickle
from typing import Any, List, Union
import numpy as np
from dtw import dtw
from fastdtw import fastdtw
from habitat.config import Config
from habitat.core.embodied_task import EmbodiedTask, Measure
from habitat.core.registry import registry
from habitat.core.simulator import Simulator
from habitat.tasks.nav.nav import DistanceToGoal, Success
from habitat.tasks.utils import cartesian_to_polar
from habitat.utils.geometry_utils import quaternion_rotate_vector
from habitat.utils.visualizations import fog_of_war
from habitat.utils.visualizations import maps as habitat_maps
from habitat_extensions import maps
from habitat_extensions.task import RxRVLNCEDatasetV1
@registry.register_measure
class Position(Measure):
r"""Path Length (PL)
PL = sum(geodesic_distance(agent_prev_position, agent_position)
over all agent positions.
"""
cls_uuid: str = "position"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__(**kwargs)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, episode, *args: Any, **kwargs: Any):
self._metric = {'distance':[], 'position':[]}
self.update_metric(episode)
def update_metric(self, episode, *args: Any, **kwargs: Any):
current_position = self._sim.get_agent_state().position
if len(self._metric['position']) > 0:
if (current_position == self._metric['position'][-1]).all():
return
distance = self._sim.geodesic_distance(
current_position,
[goal.position for goal in episode.goals],
episode,
)
self._metric['position'].append(self._sim.get_agent_state().position)
self._metric['distance'].append(distance)
@registry.register_measure
class PathLength(Measure):
r"""Path Length (PL)
    PL = sum(euclidean_distance(agent_prev_position, agent_position))
over all agent positions.
"""
cls_uuid: str = "path_length"
@staticmethod
def euclidean_distance(
position_a: np.ndarray, position_b: np.ndarray
) -> float:
return np.linalg.norm(position_b - position_a, ord=2)
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__(**kwargs)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, *args: Any, **kwargs: Any):
self._previous_position = self._sim.get_agent_state().position
self._metric = 0.0
def update_metric(self, *args: Any, **kwargs: Any):
current_position = self._sim.get_agent_state().position
self._metric += self.euclidean_distance(
current_position, self._previous_position
)
self._previous_position = current_position
@registry.register_measure
class OracleNavigationError(Measure):
r"""Oracle Navigation Error (ONE)
    ONE = min(geodesic_distance(agent_pos, goal))
over all locations in the agent's path.
"""
cls_uuid: str = "oracle_navigation_error"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
task.measurements.check_measure_dependencies(
self.uuid, [DistanceToGoal.cls_uuid]
)
self._metric = float("inf")
self.update_metric(episode, task)
def update_metric(self, episode, task: EmbodiedTask, **kwargs: Any):
distance_to_target = task.measurements.measures[
DistanceToGoal.cls_uuid
].get_metric()
self._metric = min(self._metric, distance_to_target)
@registry.register_measure
class OracleSuccess(Measure):
r"""Oracle Success Rate (OSR)
OSR = I(ONE <= goal_radius),
where ONE is Oracle Navigation Error.
"""
cls_uuid: str = "oracle_success"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
task.measurements.check_measure_dependencies(
self.uuid, [DistanceToGoal.cls_uuid]
)
self._metric = 0
self.update_metric(episode, task)
def update_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
        d = task.measurements.measures[DistanceToGoal.cls_uuid].get_metric()
        # Latches: once the agent has ever been within SUCCESS_DISTANCE of the
        # goal, oracle success stays 1 for the rest of the episode.
        self._metric = float(self._metric or d < self._config.SUCCESS_DISTANCE)
@registry.register_measure
class OracleSPL(Measure):
r"""OracleSPL (Oracle Success weighted by Path Length)
OracleSPL = max(SPL) over all points in the agent path
"""
cls_uuid: str = "oracle_spl"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(
self, *args: Any, episode, task: EmbodiedTask, **kwargs: Any
):
task.measurements.check_measure_dependencies(self.uuid, ["spl"])
self._metric = 0.0
def update_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
spl = task.measurements.measures["spl"].get_metric()
self._metric = max(self._metric, spl)
@registry.register_measure
class StepsTaken(Measure):
r"""Counts the number of times update_metric() is called. This is equal to
the number of times that the agent takes an action. STOP counts as an
action.
"""
cls_uuid: str = "steps_taken"
def __init__(self, *args: Any, **kwargs: Any):
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, *args: Any, **kwargs: Any):
self._metric = 0.0
def update_metric(self, *args: Any, **kwargs: Any):
self._metric += 1.0
@registry.register_measure
class NDTW(Measure):
r"""NDTW (Normalized Dynamic Time Warping)
ref: Effective and General Evaluation for Instruction
Conditioned Navigation using Dynamic Time
Warping - Magalhaes et. al
https://arxiv.org/pdf/1907.05446.pdf
"""
cls_uuid: str = "ndtw"
@staticmethod
def euclidean_distance(
position_a: Union[List[float], np.ndarray],
position_b: Union[List[float], np.ndarray],
) -> float:
return np.linalg.norm(
np.array(position_b) - np.array(position_a), ord=2
)
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
self.dtw_func = fastdtw if config.FDTW else dtw
if "{role}" in config.GT_PATH:
self.gt_json = {}
for role in RxRVLNCEDatasetV1.annotation_roles:
with gzip.open(
config.GT_PATH.format(split=config.SPLIT, role=role), "rt"
) as f:
self.gt_json.update(json.load(f))
else:
with gzip.open(
config.GT_PATH.format(split=config.SPLIT), "rt"
) as f:
self.gt_json = json.load(f)
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, episode, *args: Any, **kwargs: Any):
self.locations = []
self.gt_locations = self.gt_json[str(episode.episode_id)]["locations"]
self.update_metric()
def update_metric(self, *args: Any, **kwargs: Any):
current_position = self._sim.get_agent_state().position.tolist()
if len(self.locations) == 0:
self.locations.append(current_position)
else:
if current_position == self.locations[-1]:
return
self.locations.append(current_position)
dtw_distance = self.dtw_func(
self.locations, self.gt_locations, dist=self.euclidean_distance
)[0]
nDTW = np.exp(
-dtw_distance
/ (len(self.gt_locations) * self._config.SUCCESS_DISTANCE)
)
self._metric = nDTW
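# Hedged illustration (not part of the original file): a toy nDTW computation for
# two short 2D paths, mirroring update_metric above; `success_distance` plays the
# role of self._config.SUCCESS_DISTANCE.
def _example_ndtw(path, gt_path, success_distance=3.0):
    dtw_distance = fastdtw(path, gt_path, dist=NDTW.euclidean_distance)[0]
    return np.exp(-dtw_distance / (len(gt_path) * success_distance))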
@registry.register_measure
class SDTW(Measure):
r"""SDTW (Success Weighted be nDTW)
ref: Effective and General Evaluation for Instruction
Conditioned Navigation using Dynamic Time
Warping - Magalhaes et. al
https://arxiv.org/pdf/1907.05446.pdf
"""
cls_uuid: str = "sdtw"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._config = config
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def reset_metric(self, episode, task, *args: Any, **kwargs: Any):
task.measurements.check_measure_dependencies(
self.uuid, [NDTW.cls_uuid, Success.cls_uuid]
)
self.update_metric(episode, task)
def update_metric(
self, episode, task: EmbodiedTask, *args: Any, **kwargs: Any
):
ep_success = task.measurements.measures[Success.cls_uuid].get_metric()
nDTW = task.measurements.measures[NDTW.cls_uuid].get_metric()
self._metric = ep_success * nDTW
@registry.register_measure
class TopDownMapVLNCE(Measure):
r"""A top down map that optionally shows VLN-related visual information
such as MP3D node locations and MP3D agent traversals.
"""
cls_uuid: str = "top_down_map_vlnce"
def __init__(
self, *args: Any, sim: Simulator, config: Config, **kwargs: Any
):
self._sim = sim
self._config = config
with open(self._config.GRAPHS_FILE, "rb") as f:
self._conn_graphs = pickle.load(f)
super().__init__()
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def get_original_map(self):
top_down_map = maps.get_top_down_map(
self._sim,
self._config.MAP_RESOLUTION,
self._meters_per_pixel,
)
if self._config.FOG_OF_WAR.DRAW:
self._fog_of_war_mask = np.zeros_like(top_down_map)
else:
self._fog_of_war_mask = None
return top_down_map
def reset_metric(self, *args: Any, episode, **kwargs: Any):
self._scene_id = episode.scene_id.split("/")[-2]
self._step_count = 0
self._metric = None
self._meters_per_pixel = habitat_maps.calculate_meters_per_pixel(
self._config.MAP_RESOLUTION, self._sim
)
self._top_down_map = self.get_original_map()
agent_position = self._sim.get_agent_state().position
scene_id = episode.scene_id.split("/")[-1].split(".")[0]
a_x, a_y = habitat_maps.to_grid(
agent_position[2],
agent_position[0],
self._top_down_map.shape[0:2],
sim=self._sim,
)
self._previous_xy_location = (a_y, a_x)
if self._config.FOG_OF_WAR.DRAW:
self._fog_of_war_mask = fog_of_war.reveal_fog_of_war(
self._top_down_map,
self._fog_of_war_mask,
np.array([a_x, a_y]),
self.get_polar_angle(),
fov=self._config.FOG_OF_WAR.FOV,
max_line_len=self._config.FOG_OF_WAR.VISIBILITY_DIST
/ habitat_maps.calculate_meters_per_pixel(
self._config.MAP_RESOLUTION, sim=self._sim
),
)
if self._config.DRAW_FIXED_WAYPOINTS:
maps.draw_mp3d_nodes(
self._top_down_map,
self._sim,
episode,
self._conn_graphs[scene_id],
self._meters_per_pixel,
)
if self._config.DRAW_SHORTEST_PATH:
shortest_path_points = self._sim.get_straight_shortest_path_points(
agent_position, episode.goals[0].position
)
maps.draw_straight_shortest_path_points(
self._top_down_map,
self._sim,
self._config.MAP_RESOLUTION,
shortest_path_points,
)
if self._config.DRAW_REFERENCE_PATH:
maps.draw_reference_path(
self._top_down_map,
self._sim,
episode,
self._config.MAP_RESOLUTION,
self._meters_per_pixel,
)
# draw source and target points last to avoid overlap
if self._config.DRAW_SOURCE_AND_TARGET:
maps.draw_source_and_target(
self._top_down_map,
self._sim,
episode,
self._meters_per_pixel,
)
# MP3D START NODE
self._nearest_node = maps.get_nearest_node(
self._conn_graphs[scene_id], np.take(agent_position, (0, 2))
)
nn_position = self._conn_graphs[self._scene_id].nodes[
self._nearest_node
]["position"]
self.s_x, self.s_y = habitat_maps.to_grid(
nn_position[2],
nn_position[0],
self._top_down_map.shape[0:2],
self._sim,
)
self.update_metric(episode, action=None)
def update_metric(self, *args: Any, **kwargs: Any):
self._step_count += 1
(
house_map,
map_agent_pos,
) = self.update_map(self._sim.get_agent_state().position)
self._metric = {
"map": house_map,
"fog_of_war_mask": self._fog_of_war_mask,
"agent_map_coord": map_agent_pos,
"agent_angle": self.get_polar_angle(),
"bounds": {
k: v
for k, v in zip(
["lower", "upper"],
self._sim.pathfinder.get_bounds(),
)
},
"meters_per_px": self._meters_per_pixel,
}
def get_polar_angle(self):
agent_state = self._sim.get_agent_state()
# quaternion is in x, y, z, w format
ref_rotation = agent_state.rotation
heading_vector = quaternion_rotate_vector(
ref_rotation.inverse(), np.array([0, 0, -1])
)
phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
z_neg_z_flip = np.pi
return np.array(phi) + z_neg_z_flip
def update_map(self, agent_position):
a_x, a_y = habitat_maps.to_grid(
agent_position[2],
agent_position[0],
self._top_down_map.shape[0:2],
self._sim,
)
# Don't draw over the source point
gradient_color = 15 + min(
self._step_count * 245 // self._config.MAX_EPISODE_STEPS, 245
)
if self._top_down_map[a_x, a_y] != maps.MAP_SOURCE_POINT_INDICATOR:
maps.drawline(
self._top_down_map,
self._previous_xy_location,
(a_y, a_x),
gradient_color,
thickness=int(
self._config.MAP_RESOLUTION
* 1.4
/ maps.MAP_THICKNESS_SCALAR
),
style="filled",
)
if self._config.FOG_OF_WAR.DRAW:
self._fog_of_war_mask = fog_of_war.reveal_fog_of_war(
self._top_down_map,
self._fog_of_war_mask,
np.array([a_x, a_y]),
self.get_polar_angle(),
self._config.FOG_OF_WAR.FOV,
max_line_len=self._config.FOG_OF_WAR.VISIBILITY_DIST
/ habitat_maps.calculate_meters_per_pixel(
self._config.MAP_RESOLUTION, sim=self._sim
),
)
point_padding = int(0.2 / self._meters_per_pixel)
prev_nearest_node = self._nearest_node
self._nearest_node = maps.update_nearest_node(
self._conn_graphs[self._scene_id],
self._nearest_node,
np.take(agent_position, (0, 2)),
)
if (
self._nearest_node != prev_nearest_node
and self._config.DRAW_MP3D_AGENT_PATH
):
nn_position = self._conn_graphs[self._scene_id].nodes[
self._nearest_node
]["position"]
(prev_s_x, prev_s_y) = (self.s_x, self.s_y)
self.s_x, self.s_y = habitat_maps.to_grid(
nn_position[2],
nn_position[0],
self._top_down_map.shape[0:2],
self._sim,
)
self._top_down_map[
self.s_x
- int(2.0 / 3.0 * point_padding) : self.s_x
+ int(2.0 / 3.0 * point_padding)
+ 1,
self.s_y
- int(2.0 / 3.0 * point_padding) : self.s_y
+ int(2.0 / 3.0 * point_padding)
+ 1,
] = gradient_color
maps.drawline(
self._top_down_map,
(prev_s_y, prev_s_x),
(self.s_y, self.s_x),
gradient_color,
thickness=int(
1.0
/ 2.0
* np.round(
self._config.MAP_RESOLUTION / maps.MAP_THICKNESS_SCALAR
)
),
)
self._previous_xy_location = (a_y, a_x)
map_agent_pos = (a_x, a_y)
return self._top_down_map, map_agent_pos
| 18,287 | 30.860627 | 79 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/sensors.py | from typing import Any, Dict
import numpy as np
from gym import spaces
from habitat.config import Config
from habitat.core.registry import registry
from habitat.core.simulator import Observations, Sensor, SensorTypes, Simulator
from habitat.sims.habitat_simulator.actions import HabitatSimActions
from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower
from habitat_extensions.shortest_path_follower import (
ShortestPathFollowerCompat,
)
from habitat_extensions.task import VLNExtendedEpisode
@registry.register_sensor(name="GlobalGPSSensor")
class GlobalGPSSensor(Sensor):
r"""The agents current location in the global coordinate frame
Args:
sim: reference to the simulator for calculating task observations.
config: Contains the DIMENSIONALITY field for the number of dimensions
to express the agents position
Attributes:
_dimensionality: number of dimensions used to specify the agents position
"""
cls_uuid: str = "globalgps"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
self._dimensionality = getattr(config, "DIMENSIONALITY", 2)
assert self._dimensionality in [2, 3]
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any):
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.POSITION
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(self._dimensionality,),
dtype=np.float32,
)
def get_observation(self, *args: Any, **kwargs: Any):
return self._sim.get_agent_state().position.astype(np.float32)
@registry.register_sensor
class ShortestPathSensor(Sensor):
r"""Sensor for observing the action to take that follows the shortest path
to the goal.
Args:
sim: reference to the simulator for calculating task observations.
config: config for the sensor.
"""
cls_uuid: str = "shortest_path_sensor"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
super().__init__(config=config)
if config.USE_ORIGINAL_FOLLOWER:
self.follower = ShortestPathFollowerCompat(
sim, config.GOAL_RADIUS, return_one_hot=False
)
self.follower.mode = "geodesic_path"
else:
self.follower = ShortestPathFollower(
sim, config.GOAL_RADIUS, return_one_hot=False
)
# self._sim = sim
def _get_uuid(self, *args: Any, **kwargs: Any):
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.TACTILE
def _get_observation_space(self, *args: Any, **kwargs: Any):
        return spaces.Box(low=0.0, high=100, shape=(1,), dtype=np.float64)
def get_observation(self, *args: Any, episode, **kwargs: Any):
best_action = self.follower.get_next_action(episode.goals[0].position)
return np.array(
[
best_action
if best_action is not None
else HabitatSimActions.STOP
]
)
@registry.register_sensor
class VLNOracleProgressSensor(Sensor):
r"""Sensor for observing how much progress has been made towards the goal.
Args:
sim: reference to the simulator for calculating task observations.
config: config for the sensor.
"""
cls_uuid: str = "progress"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self._sim = sim
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any):
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
# TODO: what is the correct sensor type?
return SensorTypes.MEASUREMENT
def _get_observation_space(self, *args: Any, **kwargs: Any):
        return spaces.Box(low=0.0, high=1.0, shape=(1,), dtype=np.float64)
def get_observation(
self, observations, *args: Any, episode, **kwargs: Any
):
current_position = self._sim.get_agent_state().position.tolist()
distance_to_target = self._sim.geodesic_distance(
current_position, episode.goals[0].position
)
if "geodesic_distance" not in episode.info.keys():
distance_from_start = self._sim.geodesic_distance(
episode.start_position, episode.goals[0].position
)
episode.info["geodesic_distance"] = distance_from_start
distance_from_start = episode.info["geodesic_distance"]
progress = (distance_from_start - distance_to_target) / distance_from_start
        return np.array(progress, dtype=np.float32)
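# Hedged numeric illustration (not part of the original file): if the start is 10 m
# (geodesic) from the goal and the agent is currently 4 m away, progress is
# (10 - 4) / 10 = 0.6; it is 0 at the start and can go negative if the agent ends
# up farther from the goal than where it started.
def _example_progress(distance_from_start=10.0, distance_to_target=4.0):
    return (distance_from_start - distance_to_target) / distance_from_start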
@registry.register_sensor
class RxRInstructionSensor(Sensor):
cls_uuid: str = "rxr_instruction"
def __init__(
self, sim: Simulator, config: Config, *args: Any, **kwargs: Any
):
self.features_path = config.features_path
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.MEASUREMENT
def _get_observation_space(self, *args: Any, **kwargs: Any):
return spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(512, 768),
dtype=np.float32,
)
def get_observation(
self,
observations: Dict[str, "Observations"],
episode: VLNExtendedEpisode,
**kwargs,
):
features = np.load(
self.features_path.format(
split=episode.instruction.split,
id=int(episode.instruction.instruction_id),
lang=episode.instruction.language.split("-")[0],
),
)
feats = np.zeros((512, 768), dtype=np.float32)
s = features["features"].shape
feats[: s[0], : s[1]] = features["features"]
return feats
| 6,291 | 31.266667 | 84 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/maps.py | from typing import Dict, List, Optional, Tuple, Union
import networkx as nx
import numpy as np
from habitat.core.simulator import Simulator
from habitat.core.utils import try_cv2_import
from habitat.tasks.vln.vln import VLNEpisode
from habitat.utils.visualizations import maps as habitat_maps
cv2 = try_cv2_import()
AGENT_SPRITE = habitat_maps.AGENT_SPRITE
MAP_THICKNESS_SCALAR: int = 128
MAP_INVALID_POINT = 0
MAP_VALID_POINT = 1
MAP_BORDER_INDICATOR = 2
MAP_SOURCE_POINT_INDICATOR = 3
MAP_TARGET_POINT_INDICATOR = 4
MAP_MP3D_WAYPOINT = 5
MAP_VIEW_POINT_INDICATOR = 6
MAP_TARGET_BOUNDING_BOX = 7
MAP_REFERENCE_POINT = 8
MAP_MP3D_REFERENCE_PATH = 9
MAP_SHORTEST_PATH_WAYPOINT = 10
TOP_DOWN_MAP_COLORS = np.full((256, 3), 150, dtype=np.uint8)
TOP_DOWN_MAP_COLORS[12:] = cv2.applyColorMap(
np.arange(244, dtype=np.uint8), cv2.COLORMAP_JET
).squeeze(1)[:, ::-1]
TOP_DOWN_MAP_COLORS[MAP_INVALID_POINT] = [255, 255, 255] # White
TOP_DOWN_MAP_COLORS[MAP_VALID_POINT] = [150, 150, 150] # Light Grey
TOP_DOWN_MAP_COLORS[MAP_BORDER_INDICATOR] = [50, 50, 50] # Grey
TOP_DOWN_MAP_COLORS[MAP_SOURCE_POINT_INDICATOR] = [0, 0, 200] # Blue
TOP_DOWN_MAP_COLORS[MAP_TARGET_POINT_INDICATOR] = [200, 0, 0] # Red
TOP_DOWN_MAP_COLORS[MAP_MP3D_WAYPOINT] = [0, 200, 0] # Green
TOP_DOWN_MAP_COLORS[MAP_VIEW_POINT_INDICATOR] = [245, 150, 150] # Light Red
TOP_DOWN_MAP_COLORS[MAP_TARGET_BOUNDING_BOX] = [0, 175, 0] # Dark Green
TOP_DOWN_MAP_COLORS[MAP_REFERENCE_POINT] = [0, 0, 0] # Black
TOP_DOWN_MAP_COLORS[MAP_MP3D_REFERENCE_PATH] = [0, 0, 0] # Black
TOP_DOWN_MAP_COLORS[MAP_SHORTEST_PATH_WAYPOINT] = [0, 150, 0] # Dark Green
def get_top_down_map(sim, map_resolution, meters_per_pixel):
base_height = sim.get_agent(0).state.position[1]
td_map = habitat_maps.get_topdown_map(
sim.pathfinder,
base_height,
map_resolution,
False,
meters_per_pixel,
)
return td_map
def colorize_topdown_map(
top_down_map: np.ndarray,
fog_of_war_mask: Optional[np.ndarray] = None,
fog_of_war_desat_amount: float = 0.5,
) -> np.ndarray:
r"""Same as `maps.colorize_topdown_map` in Habitat-Lab, but with different map
colors.
"""
_map = TOP_DOWN_MAP_COLORS[top_down_map]
if fog_of_war_mask is not None:
fog_of_war_desat_values = np.array([[fog_of_war_desat_amount], [1.0]])
# Only desaturate things that are valid points as only valid points get revealed
desat_mask = top_down_map != MAP_INVALID_POINT
_map[desat_mask] = (
_map * fog_of_war_desat_values[fog_of_war_mask]
).astype(np.uint8)[desat_mask]
return _map
def static_to_grid(
realworld_x: float,
realworld_y: float,
grid_resolution: Tuple[int, int],
bounds: Dict[str, Tuple[float, float]],
):
r"""Return gridworld index of realworld coordinates assuming top-left corner
is the origin. The real world coordinates of lower left corner are
(coordinate_min, coordinate_min) and of top right corner are
(coordinate_max, coordinate_max). Same as the habitat-Lab maps.to_grid function
but with a static `bounds` instead of requiring a SIM/pathfinder instance.
"""
grid_size = (
abs(bounds["upper"][2] - bounds["lower"][2]) / grid_resolution[0],
abs(bounds["upper"][0] - bounds["lower"][0]) / grid_resolution[1],
)
grid_x = int((realworld_x - bounds["lower"][2]) / grid_size[0])
grid_y = int((realworld_y - bounds["lower"][0]) / grid_size[1])
return grid_x, grid_y
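# Hedged illustration (not part of the original file): with a 100 x 100 grid whose
# world bounds span x and z in [0, 10], a point at roughly (2.5, 7.5) falls near
# grid cell (25, 75); exact indices depend on floating-point truncation.
def _example_static_to_grid():
    bounds = {"lower": (0.0, 0.0, 0.0), "upper": (10.0, 0.0, 10.0)}
    return static_to_grid(2.5, 7.5, (100, 100), bounds)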
def drawline(
img: np.ndarray,
pt1: Union[Tuple[float], List[float]],
pt2: Union[Tuple[float], List[float]],
color: List[int],
thickness: int = 1,
style: str = "dotted",
gap: int = 15,
) -> None:
"""https://stackoverflow.com/questions/26690932/opencv-rectangle-with-dotted-or-dashed-lines
style: "dotted", "dashed", or "filled"
"""
assert style in ["dotted", "dashed", "filled"]
if style == "filled":
cv2.line(img, pt1, pt2, color, thickness)
return
dist = ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5
pts = []
for i in np.arange(0, dist, gap):
r = i / dist
x = int((pt1[0] * (1 - r) + pt2[0] * r) + 0.5)
y = int((pt1[1] * (1 - r) + pt2[1] * r) + 0.5)
pts.append((x, y))
if style == "dotted":
for p in pts:
cv2.circle(img, p, thickness, color, -1)
else:
s = pts[0]
e = pts[0]
for i, p in enumerate(pts):
s = e
e = p
if i % 2 == 1:
cv2.line(img, s, e, color, thickness)
def drawpoint(
img: np.ndarray,
position: Union[Tuple[int], List[int]],
color: List[int],
meters_per_px: float,
pad: float = 0.3,
) -> None:
point_padding = int(pad / meters_per_px)
img[
position[0] - point_padding : position[0] + point_padding + 1,
position[1] - point_padding : position[1] + point_padding + 1,
] = color
def draw_reference_path(
img: np.ndarray,
sim: Simulator,
episode: VLNEpisode,
map_resolution: int,
meters_per_px: float,
):
r"""Draws lines between each waypoint in the reference path."""
shortest_path_points = [
habitat_maps.to_grid(
p[2],
p[0],
img.shape[0:2],
sim,
)[::-1]
for p in episode.reference_path
]
pt_from = None
for i, pt_to in enumerate(shortest_path_points):
if i != 0:
drawline(
img,
(pt_from[0], pt_from[1]),
(pt_to[0], pt_to[1]),
MAP_SHORTEST_PATH_WAYPOINT,
thickness=int(0.4 * map_resolution / MAP_THICKNESS_SCALAR),
style="dashed",
gap=10,
)
pt_from = pt_to
for pt in shortest_path_points:
drawpoint(
img, (pt[1], pt[0]), MAP_SHORTEST_PATH_WAYPOINT, meters_per_px
)
def draw_straight_shortest_path_points(
img: np.ndarray,
sim: Simulator,
map_resolution: int,
shortest_path_points: List[List[float]],
):
r"""Draws the shortest path from start to goal assuming a standard
discrete action space.
"""
shortest_path_points = [
habitat_maps.to_grid(p[2], p[0], img.shape[0:2], sim)[::-1]
for p in shortest_path_points
]
habitat_maps.draw_path(
img,
[(p[1], p[0]) for p in shortest_path_points],
MAP_SHORTEST_PATH_WAYPOINT,
int(0.4 * map_resolution / MAP_THICKNESS_SCALAR),
)
def draw_source_and_target(
img: np.ndarray, sim: Simulator, episode: VLNEpisode, meters_per_px: float
):
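    r"""Marks the episode start and goal positions on the top-down map."""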
s_x, s_y = habitat_maps.to_grid(
episode.start_position[2],
episode.start_position[0],
img.shape[0:2],
sim,
)
drawpoint(img, (s_x, s_y), MAP_SOURCE_POINT_INDICATOR, meters_per_px)
# mark target point
t_x, t_y = habitat_maps.to_grid(
episode.goals[0].position[2],
episode.goals[0].position[0],
img.shape[0:2],
sim,
)
drawpoint(img, (t_x, t_y), MAP_TARGET_POINT_INDICATOR, meters_per_px)
def get_nearest_node(graph: nx.Graph, current_position: List[float]) -> str:
"""Determine the closest MP3D node to the agent's start position as given
by a [x,z] position vector.
Returns:
node ID
"""
nearest = None
dist = float("inf")
for node in graph:
node_pos = graph.nodes[node]["position"]
node_pos = np.take(node_pos, (0, 2))
cur_dist = np.linalg.norm(
np.array(node_pos) - np.array(current_position), ord=2
)
if cur_dist < dist:
dist = cur_dist
nearest = node
return nearest
def update_nearest_node(
graph: nx.Graph, nearest_node: str, current_position: np.array
) -> str:
"""Determine the closest MP3D node to the agent's current position as
given by a [x,z] position vector. The selected node must be reachable
from the previous MP3D node as specified in the nav-graph edges.
Returns:
node ID
"""
nearest = None
dist = float("inf")
for node in [nearest_node] + [e[1] for e in graph.edges(nearest_node)]:
node_pos = graph.nodes[node]["position"]
node_pos = np.take(node_pos, (0, 2))
cur_dist = np.linalg.norm(
np.array(node_pos) - np.array(current_position), ord=2
)
if cur_dist < dist:
dist = cur_dist
nearest = node
return nearest
def draw_mp3d_nodes(
img: np.ndarray,
sim: Simulator,
episode: VLNEpisode,
graph: nx.Graph,
meters_per_px: float,
):
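    r"""Paints every MP3D connectivity-graph node that lies on the same floor as the
    episode start (within 1.0m in height) onto valid cells of the map.
    """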
n = get_nearest_node(
graph, (episode.start_position[0], episode.start_position[2])
)
starting_height = graph.nodes[n]["position"][1]
for node in graph:
pos = graph.nodes[node]["position"]
# no obvious way to differentiate between floors. Use this for now.
if abs(pos[1] - starting_height) < 1.0:
r_x, r_y = habitat_maps.to_grid(
pos[2], pos[0], img.shape[0:2], sim
)
# only paint if over a valid point
if img[r_x, r_y]:
drawpoint(img, (r_x, r_y), MAP_MP3D_WAYPOINT, meters_per_px)
| 9,290 | 29.86711 | 96 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/__init__.py | from habitat_extensions import measures, obs_transformers, sensors, nav
from habitat_extensions.config.default import get_extended_config
from habitat_extensions.task import VLNCEDatasetV1
from habitat_extensions.habitat_simulator import Simulator
| 248 | 48.8 | 71 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/config/__init__.py | 0 | 0 | 0 | py |
|
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/habitat_extensions/config/default.py | from typing import List, Optional, Union
from habitat.config.default import Config as CN
from habitat.config.default import get_config
_C = get_config()
_C.defrost()
# ----------------------------------------------------------------------------
# CUSTOM ACTION: HIGHTOLOWINFER ACTION
# ----------------------------------------------------------------------------
_C.TASK.ACTIONS.HIGHTOLOWINFER = CN()
_C.TASK.ACTIONS.HIGHTOLOWINFER.TYPE = 'MoveHighToLowActionInfer'
# ----------------------------------------------------------------------------
# CUSTOM ACTION: HIGHTOLOWEVAL ACTION
# ----------------------------------------------------------------------------
_C.TASK.ACTIONS.HIGHTOLOWEVAL = CN()
_C.TASK.ACTIONS.HIGHTOLOWEVAL.TYPE = 'MoveHighToLowActionEval'
# ----------------------------------------------------------------------------
# CUSTOM ACTION: HIGHTOLOW ACTION
# ----------------------------------------------------------------------------
_C.TASK.ACTIONS.HIGHTOLOW = CN()
_C.TASK.ACTIONS.HIGHTOLOW.TYPE = 'MoveHighToLowAction'
# ----------------------------------------------------------------------------
# GPS SENSOR
# ----------------------------------------------------------------------------
_C.TASK.GLOBAL_GPS_SENSOR = CN()
_C.TASK.GLOBAL_GPS_SENSOR.TYPE = "GlobalGPSSensor"
_C.TASK.GLOBAL_GPS_SENSOR.DIMENSIONALITY = 3
# ----------------------------------------------------------------------------
# # RXR INSTRUCTION SENSOR
# ----------------------------------------------------------------------------
_C.TASK.RXR_INSTRUCTION_SENSOR = CN()
_C.TASK.RXR_INSTRUCTION_SENSOR.TYPE = "RxRInstructionSensor"
_C.TASK.RXR_INSTRUCTION_SENSOR.features_path = "data/datasets/RxR_VLNCE_v0/text_features/rxr_{split}/{id:06}_{lang}_text_features.npz"
_C.TASK.INSTRUCTION_SENSOR_UUID = "rxr_instruction"
# ----------------------------------------------------------------------------
# SHORTEST PATH SENSOR (previously: VLN_ORACLE_ACTION_SENSOR)
# ----------------------------------------------------------------------------
_C.TASK.SHORTEST_PATH_SENSOR = CN()
_C.TASK.SHORTEST_PATH_SENSOR.TYPE = "ShortestPathSensor"
# all goals can be navigated to within 0.5m.
_C.TASK.SHORTEST_PATH_SENSOR.GOAL_RADIUS = 0.5
# compatibility with the dataset generation oracle and paper results.
# if False, use the ShortestPathFollower in Habitat
_C.TASK.SHORTEST_PATH_SENSOR.USE_ORIGINAL_FOLLOWER = False
# ----------------------------------------------------------------------------
# VLN ORACLE PROGRESS SENSOR
# ----------------------------------------------------------------------------
_C.TASK.VLN_ORACLE_PROGRESS_SENSOR = CN()
_C.TASK.VLN_ORACLE_PROGRESS_SENSOR.TYPE = "VLNOracleProgressSensor"
# ----------------------------------------------------------------------------
# NDTW MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.NDTW = CN()
_C.TASK.NDTW.TYPE = "NDTW"
_C.TASK.NDTW.SPLIT = "val_seen"
_C.TASK.NDTW.FDTW = True # False: DTW
_C.TASK.NDTW.GT_PATH = (
"data/datasets/R2R_VLNCE_v1-2_preprocessed/{split}/{split}_gt.json"
)
_C.TASK.NDTW.SUCCESS_DISTANCE = 3.0
# ----------------------------------------------------------------------------
# SDTW MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.SDTW = CN()
_C.TASK.SDTW.TYPE = "SDTW"
# ----------------------------------------------------------------------------
# PATH_LENGTH MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.PATH_LENGTH = CN()
_C.TASK.PATH_LENGTH.TYPE = "PathLength"
# ----------------------------------------------------------------------------
# ORACLE_NAVIGATION_ERROR MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.ORACLE_NAVIGATION_ERROR = CN()
_C.TASK.ORACLE_NAVIGATION_ERROR.TYPE = "OracleNavigationError"
# ----------------------------------------------------------------------------
# ORACLE_SUCCESS MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.ORACLE_SUCCESS = CN()
_C.TASK.ORACLE_SUCCESS.TYPE = "OracleSuccess"
_C.TASK.ORACLE_SUCCESS.SUCCESS_DISTANCE = 3.0
# ----------------------------------------------------------------------------
# ORACLE_SPL MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.ORACLE_SPL = CN()
_C.TASK.ORACLE_SPL.TYPE = "OracleSPL"
# ----------------------------------------------------------------------------
# STEPS_TAKEN MEASUREMENT
# ----------------------------------------------------------------------------
_C.TASK.STEPS_TAKEN = CN()
_C.TASK.STEPS_TAKEN.TYPE = "StepsTaken"
# ----------------------------------------------------------------------------
# POSITION MEASUREMENT For faster eval
# ----------------------------------------------------------------------------
_C.TASK.POSITION = CN()
_C.TASK.POSITION.TYPE = 'Position'
# -----------------------------------------------------------------------------
# TOP_DOWN_MAP_VLNCE MEASUREMENT
# -----------------------------------------------------------------------------
_C.TASK.TOP_DOWN_MAP_VLNCE = CN()
_C.TASK.TOP_DOWN_MAP_VLNCE.TYPE = "TopDownMapVLNCE"
_C.TASK.TOP_DOWN_MAP_VLNCE.MAX_EPISODE_STEPS = _C.ENVIRONMENT.MAX_EPISODE_STEPS
_C.TASK.TOP_DOWN_MAP_VLNCE.MAP_RESOLUTION = 1024
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_SOURCE_AND_TARGET = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_BORDER = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_SHORTEST_PATH = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_REFERENCE_PATH = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_FIXED_WAYPOINTS = True
_C.TASK.TOP_DOWN_MAP_VLNCE.DRAW_MP3D_AGENT_PATH = True
_C.TASK.TOP_DOWN_MAP_VLNCE.GRAPHS_FILE = "data/connectivity_graphs.pkl"
_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR = CN()
_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.DRAW = True
_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.FOV = 79
_C.TASK.TOP_DOWN_MAP_VLNCE.FOG_OF_WAR.VISIBILITY_DIST = 5.0
# ----------------------------------------------------------------------------
# DATASET EXTENSIONS
# ----------------------------------------------------------------------------
_C.DATASET.ROLES = ["guide"] # options: "*", "guide", "follower"
# language options by region: "*", "te-IN", "hi-IN", "en-US", "en-IN"
_C.DATASET.LANGUAGES = ["*"]
# a list or set of episode IDs to allow in dataset creation. None allows all.
_C.DATASET.EPISODES_ALLOWED = None
def get_extended_config(
config_paths: Optional[Union[List[str], str]] = None,
opts: Optional[list] = None,
) -> CN:
r"""Create a unified config with default values overwritten by values from
:p:`config_paths` and overwritten by options from :p:`opts`.
:param config_paths: List of config paths or string that contains comma
separated list of config paths.
:param opts: Config options (keys, values) in a list (e.g., passed from
command line into the config. For example,
:py:`opts = ['FOO.BAR', 0.5]`. Argument can be used for parameter
sweeping or quick tests.
"""
config = _C.clone()
if config_paths:
if isinstance(config_paths, str):
config_paths = [config_paths]
for config_path in config_paths:
config.merge_from_file(config_path)
if opts:
config.merge_from_list(opts)
config.freeze()
return config
| 7,363 | 46.818182 | 134 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/TRM_net.py | import torch
import torch.nn as nn
import numpy as np
from .utils import get_attention_mask
from .transformer.waypoint_bert import WaypointBert
from pytorch_transformers import BertConfig
class BinaryDistPredictor_TRM(nn.Module):
def __init__(self, hidden_dim=768, n_classes=12, device=None):
super(BinaryDistPredictor_TRM, self).__init__()
self.device = device
self.num_angles = 120
self.num_imgs = 12
        self.n_classes = n_classes  # number of distance bins (use the constructor argument so it matches the classifier head)
self.TRM_LAYER = 2
self.TRM_NEIGHBOR = 1
self.HEATMAP_OFFSET = 5
self.visual_fc_rgb = nn.Sequential(
nn.Flatten(),
nn.Linear(np.prod([2048,7,7]), hidden_dim),
nn.ReLU(True),
)
self.visual_fc_depth = nn.Sequential(
nn.Flatten(),
nn.Linear(np.prod([128,4,4]), hidden_dim),
nn.ReLU(True),
)
self.visual_merge = nn.Sequential(
nn.Linear(hidden_dim*2, hidden_dim),
nn.ReLU(True),
)
config = BertConfig()
config.model_type = 'visual'
config.finetuning_task = 'waypoint_predictor'
config.hidden_dropout_prob = 0.3
config.hidden_size = 768
config.num_attention_heads = 12
config.num_hidden_layers = self.TRM_LAYER
self.waypoint_TRM = WaypointBert(config=config)
self.mask = get_attention_mask(
num_imgs=self.num_imgs,
neighbor=self.TRM_NEIGHBOR).to(self.device)
self.vis_classifier = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim,
int(n_classes*(self.num_angles/self.num_imgs))),
)
def forward(self, rgb_feats, depth_feats):
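        # features arrive stacked over the panoramic views: (batch_size * num_imgs, C, H, W)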
bsi = rgb_feats.size(0) // self.num_imgs
rgb_x = self.visual_fc_rgb(rgb_feats).reshape(
bsi, self.num_imgs, -1)
depth_x = self.visual_fc_depth(depth_feats).reshape(
bsi, self.num_imgs, -1)
vis_x = self.visual_merge(
torch.cat((rgb_x, depth_x), dim=-1)
)
attention_mask = self.mask.repeat(bsi,1,1,1)
vis_rel_x = self.waypoint_TRM(
vis_x, attention_mask=attention_mask
)
vis_logits = self.vis_classifier(vis_rel_x)
vis_logits = vis_logits.reshape(
bsi, self.num_angles, self.n_classes)
# heatmap offset (each image is pointing at the agent's heading)
vis_logits = torch.cat(
(vis_logits[:,self.HEATMAP_OFFSET:,:], vis_logits[:,:self.HEATMAP_OFFSET,:]),
dim=1)
return vis_logits #, vis_rel_x
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
| 3,269 | 32.030303 | 89 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/utils.py |
import torch
import numpy as np
import sys
import glob
import json
def neighborhoods(mu, x_range, y_range, sigma, circular_x=True, gaussian=False):
""" Generate masks centered at mu of the given x and y range with the
origin in the centre of the output
Inputs:
mu: tensor (N, 2)
Outputs:
        tensor (N, y_range, x_range)
"""
x_mu = mu[:,0].unsqueeze(1).unsqueeze(1)
y_mu = mu[:,1].unsqueeze(1).unsqueeze(1)
# Generate bivariate Gaussians centered at position mu
x = torch.arange(start=0,end=x_range, device=mu.device, dtype=mu.dtype).unsqueeze(0).unsqueeze(0)
y = torch.arange(start=0,end=y_range, device=mu.device, dtype=mu.dtype).unsqueeze(1).unsqueeze(0)
y_diff = y - y_mu
x_diff = x - x_mu
if circular_x:
x_diff = torch.min(torch.abs(x_diff), torch.abs(x_diff + x_range))
if gaussian:
output = torch.exp(-0.5 * ((x_diff/sigma[0])**2 + (y_diff/sigma[1])**2 ))
else:
output = torch.logical_and(
torch.abs(x_diff) <= sigma[0], torch.abs(y_diff) <= sigma[1]
).type(mu.dtype)
return output
def nms(pred, max_predictions=10, sigma=(1.0,1.0), gaussian=False):
''' Input (batch_size, 1, height, width) '''
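    # Greedy non-maximum suppression: repeatedly take the global maximum, copy it to the
    # output heatmap, then suppress a (sigma-sized) neighborhood around it before the next pick.
    # Illustrative usage (hypothetical tensor shapes):
    #   peaks = nms(torch.rand(2, 1, 12, 120), max_predictions=5, sigma=(7.0, 5.0))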
shape = pred.shape
output = torch.zeros_like(pred)
flat_pred = pred.reshape((shape[0],-1)) # (BATCH_SIZE, 24*48)
supp_pred = pred.clone()
flat_output = output.reshape((shape[0],-1)) # (BATCH_SIZE, 24*48)
for i in range(max_predictions):
# Find and save max over the entire map
flat_supp_pred = supp_pred.reshape((shape[0],-1))
val, ix = torch.max(flat_supp_pred, dim=1)
indices = torch.arange(0,shape[0])
flat_output[indices,ix] = flat_pred[indices,ix]
# Suppression
        y = ix // shape[-1]  # row index of the maximum (floor division; plain / yields floats on newer PyTorch)
        x = ix % shape[-1]   # column index of the maximum
mu = torch.stack([x,y], dim=1).float()
g = neighborhoods(mu, shape[-1], shape[-2], sigma, gaussian=gaussian)
supp_pred *= (1-g.unsqueeze(1))
output[output < 0] = 0
return output
def print_progress(iteration, total, prefix='', suffix='', decimals=1, bar_length=50):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
bar_length - Optional : character length of bar (Int)
"""
str_format = "{0:." + str(decimals) + "f}"
percents = str_format.format(100 * (iteration / float(total)))
filled_length = int(round(bar_length * iteration / float(total)))
bar = '█' * filled_length + '-' * (bar_length - filled_length)
    sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar, percents, '%', suffix))
if iteration == total:
sys.stdout.write('\n')
sys.stdout.flush()
def get_attention_mask(num_imgs=12, neighbor=1):
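    """Builds a circular band attention mask of shape (1, 1, num_imgs, num_imgs): each of the
    num_imgs panoramic views may attend to itself and to `neighbor` views on either side (wrapping around).
    """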
assert neighbor <= 5
mask = np.zeros((num_imgs,num_imgs))
t = np.zeros(num_imgs)
t[:neighbor+1] = np.ones(neighbor+1)
if neighbor != 0:
t[-neighbor:] = np.ones(neighbor)
for ri in range(num_imgs):
mask[ri] = t
t = np.roll(t, 1)
    return torch.from_numpy(mask).reshape(1,1,num_imgs,num_imgs).long()
| 3,409 | 32.431373 | 101 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/transformer/waypoint_bert.py | # Copyright (c) 2020 Microsoft Corporation. Licensed under the MIT license.
# Modified in Recurrent VLN-BERT, 2020, [email protected]
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss, MSELoss
from .pytorch_transformer.modeling_bert import (BertEmbeddings,
BertSelfAttention, BertAttention, BertEncoder, BertLayer,
BertSelfOutput, BertIntermediate, BertOutput,
BertPooler, BertLayerNorm, BertPreTrainedModel,
BertPredictionHeadTransform)
logger = logging.getLogger(__name__)
class VisPosEmbeddings(nn.Module):
def __init__(self, config):
super(VisPosEmbeddings, self).__init__()
self.position_embeddings = nn.Embedding(24, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
# self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_vis_feats, position_ids=None):
seq_length = input_vis_feats.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_vis_feats.device)
position_ids = position_ids.unsqueeze(0).repeat(input_vis_feats.size(0), 1)
vis_embeddings = input_vis_feats
position_embeddings = self.position_embeddings(position_ids)
embeddings = vis_embeddings + position_embeddings
embeddings = self.LayerNorm(embeddings)
# embeddings = self.dropout(embeddings)
return embeddings
class CaptionBertSelfAttention(BertSelfAttention):
"""
Modified from BertSelfAttention to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertSelfAttention, self).__init__(config)
self.config = config
def forward(self, hidden_states, attention_mask, head_mask=None,
history_state=None):
if history_state is not None:
x_states = torch.cat([history_state, hidden_states], dim=1)
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(x_states)
mixed_value_layer = self.value(x_states)
else:
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
        ''' language features only provide Keys and Values '''
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        # Apply the attention mask (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_scores)
return outputs
class CaptionBertAttention(BertAttention):
"""
Modified from BertAttention to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertAttention, self).__init__(config)
self.self = CaptionBertSelfAttention(config)
self.output = BertSelfOutput(config)
self.config = config
def forward(self, input_tensor, attention_mask, head_mask=None,
history_state=None):
''' transformer processing '''
self_outputs = self.self(input_tensor, attention_mask, head_mask, history_state)
        ''' feed-forward network with residual '''
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class CaptionBertLayer(BertLayer):
"""
Modified from BertLayer to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertLayer, self).__init__(config)
self.attention = CaptionBertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None,
history_state=None):
attention_outputs = self.attention(hidden_states, attention_mask,
head_mask, history_state)
        ''' feed-forward network with residual '''
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:]
return outputs
class CaptionBertEncoder(BertEncoder):
"""
Modified from BertEncoder to add support for output_hidden_states.
"""
def __init__(self, config):
super(CaptionBertEncoder, self).__init__(config)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
# 12 Bert layers
self.layer = nn.ModuleList([CaptionBertLayer(config) for _ in range(config.num_hidden_layers)])
self.config = config
def forward(self, hidden_states, attention_mask, head_mask=None,
encoder_history_states=None):
for i, layer_module in enumerate(self.layer):
history_state = None if encoder_history_states is None else encoder_history_states[i] # default None
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i],
history_state)
hidden_states = layer_outputs[0]
if i == self.config.num_hidden_layers - 1:
slang_attention_score = layer_outputs[1]
outputs = (hidden_states, slang_attention_score)
return outputs
class BertImgModel(nn.Module):
""" Expand from BertModel to handle image region features as input
"""
def __init__(self, config):
super(BertImgModel, self).__init__()
self.config = config
# self.vis_pos_embeds = VisPosEmbeddings(config)
self.encoder = CaptionBertEncoder(config)
def forward(self, input_x, attention_mask=None):
extended_attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
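        # convert the {0, 1} visibility mask into an additive mask: 0 for allowed positions, -10000 for masked ones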
head_mask = [None] * self.config.num_hidden_layers
''' positional encodings '''
# input_x = self.vis_pos_embeds(input_x)
''' pass to the Transformer layers '''
encoder_outputs = self.encoder(input_x,
extended_attention_mask, head_mask=head_mask)
outputs = (encoder_outputs[0],) + encoder_outputs[1:]
return outputs
class WaypointBert(nn.Module):
"""
Modified from BertForMultipleChoice to support oscar training.
"""
def __init__(self, config=None):
super(WaypointBert, self).__init__()
self.config = config
self.bert = BertImgModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_x, attention_mask=None):
outputs = self.bert(input_x, attention_mask=attention_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
        return sequence_output
| 8,306 | 37.281106 | 112 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/transformer/pytorch_transformer/modeling_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import json
import logging
import os
from io import open
import six
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .file_utils import cached_path
logger = logging.getLogger(__name__)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
TF_WEIGHTS_NAME = 'model.ckpt'
try:
from torch.nn import Identity
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, input):
return input
if not six.PY2:
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = ''.join(docstr) + fn.__doc__
return fn
return docstring_decorator
else:
# Not possible to update class docstrings on python2
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
return fn
return docstring_decorator
class PretrainedConfig(object):
""" Base class for all configuration classes.
Handle a few common parameters and methods for loading/downloading/saving configurations.
"""
pretrained_config_archive_map = {}
def __init__(self, **kwargs):
self.finetuning_task = kwargs.pop('finetuning_task', None)
self.num_labels = kwargs.pop('num_labels', 2)
self.output_attentions = kwargs.pop('output_attentions', False)
self.output_hidden_states = kwargs.pop('output_hidden_states', False)
self.torchscript = kwargs.pop('torchscript', False)
def save_pretrained(self, save_directory):
""" Save a configuration object to a directory, so that it
can be re-loaded using the `from_pretrained(save_directory)` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
r""" Instantiate a PretrainedConfig from a pre-trained model configuration.
Params:
**pretrained_model_name_or_path**: either:
- a string with the `shortcut name` of a pre-trained model configuration to load from cache
or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
- a path to a `directory` containing a configuration file saved
using the `save_pretrained(save_directory)` method.
- a path or url to a saved configuration `file`.
**cache_dir**: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
**return_unused_kwargs**: (`optional`) bool:
- If False, then this function returns just the final configuration object.
                - If True, then this function returns a tuple `(config, unused_kwargs)` where `unused_kwargs`
                    is a dictionary consisting of the key/value pairs whose keys are not configuration attributes:
                    i.e. the part of kwargs which has not been used to update `config` and is otherwise ignored.
**kwargs**: (`optional`) dict:
Dictionary of key/value pairs with which to update the configuration object after loading.
- The values in kwargs of any keys which are configuration attributes will be used
to override the loaded values.
- Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
by the `return_unused_kwargs` keyword parameter.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased') # Download configuration from S3 and cache.
>>> config = BertConfig.from_pretrained('./test/saved_model/') # E.g. config (or model) was saved using `save_pretrained('./test/saved_model/')`
>>> config = BertConfig.from_pretrained('./test/saved_model/my_configuration.json')
>>> config = BertConfig.from_pretrained('bert-base-uncased', output_attention=True, foo=False)
>>> assert config.output_attention == True
>>> config, unused_kwargs = BertConfig.from_pretrained('bert-base-uncased', output_attention=True,
>>> foo=False, return_unused_kwargs=True)
>>> assert config.output_attention == True
>>> assert unused_kwargs == {'foo': False}
"""
cache_dir = kwargs.pop('cache_dir', None)
return_unused_kwargs = kwargs.pop('return_unused_kwargs', False)
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
config_file = cls.pretrained_config_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
else:
config_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_config_archive_map:
logger.error(
"Couldn't reach server at '{}' to download pretrained model configuration file.".format(
config_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(cls.pretrained_config_archive_map.keys()),
config_file))
return None
if resolved_config_file == config_file:
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading configuration file {} from cache at {}".format(
config_file, resolved_config_file))
# Load config
config = cls.from_json_file(resolved_config_file)
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", config)
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_dict(cls, json_object):
"""Constructs a `Config` from a Python dictionary of parameters."""
config = cls(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
class PreTrainedModel(nn.Module):
""" Base class for all models. Handle loading/storing model config and
        a simple interface for downloading and loading pretrained models.
"""
config_class = PretrainedConfig
pretrained_model_archive_map = {}
load_tf_weights = lambda model, config, path: None
base_model_prefix = ""
input_embeddings = None
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedModel, self).__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
# Save config in model
self.config = config
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self.init_weights(new_embeddings)
# Copy word embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
    def _tie_or_clone_weights(self, first_module, second_module):
        """ Tie or clone module weights depending on whether we are using TorchScript or not
        """
if self.config.torchscript:
first_module.weight = nn.Parameter(second_module.weight.clone())
else:
first_module.weight = second_module.weight
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: does nothing and just returns a pointer to the input tokens Embedding Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embedding Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
if hasattr(self, 'tie_weights'):
self.tie_weights()
return model_embeds
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Args:
heads_to_prune: dict of {layer_num (int): list of heads to prune in this layer (list of int)}
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
""" Save a model with its configuration file to a directory, so that it
can be re-loaded using the `from_pretrained(save_directory)` class method.
"""
assert os.path.isdir(save_directory), "Saving path should be a directory where the model and configuration can be saved"
        # Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, 'module') else self
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with `model.train()`
Params:
**pretrained_model_name_or_path**: either:
- a string with the `shortcut name` of a pre-trained model to load from cache
or download and cache if not already stored in cache (e.g. 'bert-base-uncased').
- a path to a `directory` containing a configuration file saved
using the `save_pretrained(save_directory)` method.
- a path or url to a tensorflow index checkpoint `file` (e.g. `./tf_model/model.ckpt.index`).
In this case, ``from_tf`` should be set to True and a configuration object should be
provided as `config` argument. This loading option is slower than converting the TensorFlow
checkpoint in a PyTorch model using the provided conversion scripts and loading
the PyTorch model afterwards.
**model_args**: (`optional`) Sequence:
                All remaining positional arguments will be passed to the underlying model's __init__ function
            **config**: an optional configuration for the model to use instead of an automatically loaded configuration.
                Configuration can be automatically loaded when:
                    - the model is a model provided by the library (loaded with a `shortcut name` of a pre-trained model), or
                    - the model was saved using the `save_pretrained(save_directory)` (loaded by supplying the save directory).
            **state_dict**: an optional state dictionary for the model to use instead of a state dictionary loaded
                from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using `save_pretrained(dir)` and `from_pretrained(save_directory)` is not
a simpler option.
**cache_dir**: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
**output_loading_info**: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.
**kwargs**: (`optional`) dict:
Dictionary of key, values to update the configuration object after loading.
Can be used to override selected configuration parameters. E.g. ``output_attention=True``.
- If a configuration is provided with `config`, **kwargs will be directly passed
to the underlying model's __init__ method.
- If a configuration is not provided, **kwargs will be first passed to the pretrained
model configuration class loading function (`PretrainedConfig.from_pretrained`).
Each key of **kwargs that corresponds to a configuration attribute
will be used to override said attribute with the supplied **kwargs value.
Remaining keys that do not correspond to any configuration attribute will
be passed to the underlying model's __init__ function.
Examples::
>>> model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
>>> model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
>>> model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
>>> assert model.config.output_attention == True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
>>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop('config', None)
state_dict = kwargs.pop('state_dict', None)
cache_dir = kwargs.pop('cache_dir', None)
from_tf = kwargs.pop('from_tf', False)
output_loading_info = kwargs.pop('output_loading_info', False)
# Load config
if config is None:
config, model_kwargs = cls.config_class.from_pretrained(
pretrained_model_name_or_path, *model_args,
cache_dir=cache_dir, return_unused_kwargs=True,
**kwargs
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf:
# Directly load from a TensorFlow checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
else:
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
if from_tf:
# Directly load from a TensorFlow checkpoint
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = pretrained_model_name_or_path
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
except EnvironmentError:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
logger.error(
"Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file))
else:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(cls.pretrained_model_archive_map.keys()),
archive_file))
return None
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
state_dict = torch.load(resolved_archive_file, map_location='cpu')
if from_tf:
# Directly load from a TensorFlow checkpoint
return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# Load from a PyTorch state_dict
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
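        # recursively load each submodule so that per-module prefixes are handled and missing/unexpected keys are collected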
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ''
model_to_load = model
if not hasattr(model, cls.base_model_prefix) and any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
start_prefix = cls.base_model_prefix + '.'
if hasattr(model, cls.base_model_prefix) and not any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()):
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
print(" Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
print(" Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
model.__class__.__name__, "\n\t".join(error_msgs)))
if hasattr(model, 'tie_weights'):
model.tie_weights() # make sure word embedding weights are still tied
        # Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs}
return model, loading_info
return model
class Conv1D(nn.Module):
def __init__(self, nf, nx):
""" Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
class PoolerStartLogits(nn.Module):
""" Compute SQuAD start_logits from sequence hidden states. """
def __init__(self, config):
super(PoolerStartLogits, self).__init__()
self.dense = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, p_mask=None):
""" Args:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
invalid position mask such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
x = self.dense(hidden_states).squeeze(-1)
if p_mask is not None:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerEndLogits(nn.Module):
""" Compute SQuAD end_logits from sequence hidden states and start token hidden state.
"""
def __init__(self, config):
super(PoolerEndLogits, self).__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense_1 = nn.Linear(config.hidden_size, 1)
def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
""" Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to hidden_states
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span:
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
"""
assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
if start_positions is not None:
slen, hsz = hidden_states.shape[-2:]
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz)
start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz)
x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
x = self.activation(x)
x = self.LayerNorm(x)
x = self.dense_1(x).squeeze(-1)
if p_mask is not None:
x = x * (1 - p_mask) - 1e30 * p_mask
return x
class PoolerAnswerClass(nn.Module):
""" Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """
def __init__(self, config):
super(PoolerAnswerClass, self).__init__()
self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.activation = nn.Tanh()
self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)
def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
"""
Args:
One of ``start_states``, ``start_positions`` should be not None.
If both are set, ``start_positions`` overrides ``start_states``.
**start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``.
hidden states of the first tokens for the labeled span.
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
note(Original repo):
no dependency on end_feature so that we can obtain one single `cls_logits`
for each sample
"""
hsz = hidden_states.shape[-1]
assert start_states is not None or start_positions is not None, "One of start_states, start_positions should be not None"
if start_positions is not None:
start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz)
if cls_index is not None:
cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz)
cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz)
else:
cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz)
x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
x = self.activation(x)
x = self.dense_1(x).squeeze(-1)
return x
class SQuADHead(nn.Module):
r""" A SQuAD head inspired by XLNet.
Parameters:
config (:class:`~pytorch_transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Inputs:
**hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
hidden states of sequence tokens
**start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the first token for the labeled span.
**end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
position of the last token for the labeled span.
**cls_index**: torch.LongTensor of shape ``(batch_size,)``
position of the CLS token. If None, take the last token.
**is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
Whether the question has a possible answer in the paragraph or not.
**p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
1.0 means token should be masked.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
**start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
**start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
Indices for the top config.start_n_top start token possibilities (beam-search).
**end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
**cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
``torch.FloatTensor`` of shape ``(batch_size,)``
Log probabilities for the ``is_impossible`` label of the answers.
"""
def __init__(self, config):
super(SQuADHead, self).__init__()
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
def forward(self, hidden_states, start_positions=None, end_positions=None,
cls_index=None, is_impossible=None, p_mask=None):
outputs = ()
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(start_log_probs, self.start_n_top, dim=-1) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(start_states) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(end_log_probs, self.end_n_top, dim=1) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs
class SequenceSummary(nn.Module):
    r""" Compute a single vector summary of a sequence's hidden states according to various possibilities:
Args of the config class:
summary_type:
- 'last' => [default] take the last token hidden state (like XLNet)
- 'first' => take the first token hidden state (like Bert)
- 'mean' => take the mean of all tokens hidden states
- 'token_ids' => supply a Tensor of classification token indices (GPT/GPT-2)
- 'attn' => Not implemented now, use multi-head attention
summary_use_proj: Add a projection after the vector extraction
summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default
summary_first_dropout: Add a dropout before the projection and activation
summary_last_dropout: Add a dropout after the projection and activation
"""
def __init__(self, config):
super(SequenceSummary, self).__init__()
        self.summary_type = config.summary_type if hasattr(config, 'summary_type') else 'last'  # guard on the attribute that is actually read
        if self.summary_type == 'attn':
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.summary = Identity()
if hasattr(config, 'summary_use_proj') and config.summary_use_proj:
if hasattr(config, 'summary_proj_to_labels') and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = nn.Linear(config.hidden_size, num_classes)
self.activation = Identity()
if hasattr(config, 'summary_activation') and config.summary_activation == 'tanh':
self.activation = nn.Tanh()
self.first_dropout = Identity()
if hasattr(config, 'summary_first_dropout') and config.summary_first_dropout > 0:
self.first_dropout = nn.Dropout(config.summary_first_dropout)
self.last_dropout = Identity()
if hasattr(config, 'summary_last_dropout') and config.summary_last_dropout > 0:
self.last_dropout = nn.Dropout(config.summary_last_dropout)
def forward(self, hidden_states, token_ids=None):
""" hidden_states: float Tensor in shape [bsz, seq_len, hidden_size], the hidden-states of the last layer.
token_ids: [optional] index of the classification token if summary_type == 'token_ids',
shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
if summary_type == 'token_ids' and token_ids is None:
we take the last token of the sequence as classification token
"""
if self.summary_type == 'last':
output = hidden_states[:, -1]
elif self.summary_type == 'first':
output = hidden_states[:, 0]
elif self.summary_type == 'mean':
output = hidden_states.mean(dim=1)
elif self.summary_type == 'token_ids':
if token_ids is None:
token_ids = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2]-1, dtype=torch.long)
else:
token_ids = token_ids.unsqueeze(-1).unsqueeze(-1)
token_ids = token_ids.expand((-1,) * (token_ids.dim()-1) + (hidden_states.size(-1),))
# shape of token_ids: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = hidden_states.gather(-2, token_ids).squeeze(-2) # shape (bsz, XX, hidden_size)
elif self.summary_type == 'attn':
raise NotImplementedError
output = self.first_dropout(output)
output = self.summary(output)
output = self.activation(output)
output = self.last_dropout(output)
return output
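# Usage sketch (illustrative only; the config attributes below are assumptions for the example,
# not library defaults). With summary_type='first' the module projects the first token's state:
#
#   config.summary_type = 'first'
#   config.summary_use_proj = True
#   config.summary_proj_to_labels = False
#   summary = SequenceSummary(config)
#   hidden_states = torch.rand(2, 7, config.hidden_size)   # (bsz, seq_len, hidden_size)
#   vec = summary(hidden_states)                            # (bsz, hidden_size)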
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
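# Pruning sketch (shapes are example assumptions): keeping output units 0 and 2 of a Linear(4, 3)
# yields a new Linear(4, 2) whose weight rows and bias entries are copies of the retained ones.
#
#   layer = nn.Linear(4, 3)
#   index = torch.tensor([0, 2])
#   pruned = prune_linear_layer(layer, index, dim=0)   # nn.Linear(in_features=4, out_features=2)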
def prune_conv1d_layer(layer, index, dim=1):
""" Prune a Conv1D layer (a model parameters) to keep only entries in index.
A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_layer(layer, index, dim=None):
""" Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__))
| 44611 | 48.513873 | 157 | py
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/transformer/pytorch_transformer/modeling_bert.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import (WEIGHTS_NAME, CONFIG_NAME, PretrainedConfig, PreTrainedModel,
prune_linear_layer, add_start_docstrings)
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
}
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
}
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
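# Conversion sketch (the checkpoint path is a placeholder, not a real file): build a model from the
# classes defined below in this module and copy the TensorFlow weights into it.
#
#   config = BertConfig(vocab_size_or_config_json_file=30522)
#   model = BertForPreTraining(config)
#   load_tf_weights_in_bert(model, config, "/path/to/bert_model.ckpt")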
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(PretrainedConfig):
r"""
:class:`~pytorch_transformers.BertConfig` is the configuration class to store the configuration of a
`BertModel`.
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size_or_config_json_file=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs):
super(BertConfig, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
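# Behaviour sketch for the pure-PyTorch fallback above (illustrative only): each position is
# normalised over the last dimension, then rescaled by the learned weight and bias.
#
#   ln = BertLayerNorm(4)
#   x = torch.randn(2, 3, 4)
#   y = ln(x)
#   # at initialisation (weight=1, bias=0): y.mean(-1) ~ 0 and y.var(-1, unbiased=False) ~ 1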
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
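# Shape sketch (batch and sequence sizes are example assumptions): word, position and token-type
# embeddings are each (bsz, seq_len, hidden_size) and are summed before LayerNorm and dropout.
#
#   embeddings = BertEmbeddings(config)
#   input_ids = torch.randint(0, config.vocab_size, (2, 11))
#   out = embeddings(input_ids)    # (2, 11, config.hidden_size)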
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768]
self.key = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768]
self.value = nn.Linear(config.hidden_size, self.all_head_size) # [768 * 768]
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
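# Shape sketch (sizes are example assumptions; config is assumed to carry the attributes used above):
# with hidden_size=768 and 12 heads, transpose_for_scores maps (bsz, seq_len, 768) to
# (bsz, 12, seq_len, 64); attention scores are (bsz, 12, seq_len, seq_len) and the context is
# reshaped back to (bsz, seq_len, 768).
#
#   attn = BertSelfAttention(config)
#   hidden = torch.rand(2, 5, config.hidden_size)
#   mask = torch.zeros(2, 1, 1, 5)        # additive mask, 0.0 everywhere = attend to all positions
#   context = attn(hidden, mask)[0]       # (2, 5, config.hidden_size)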
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
for head in heads:
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
def forward(self, input_tensor, attention_mask, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, head_mask=None):
attention_outputs = self.attention(hidden_states, attention_mask, head_mask)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, head_mask=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # outputs, (hidden states), (attentions)
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size,
config.vocab_size,
bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = BertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
def __init__(self, *inputs, **kwargs):
super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
BERT_START_DOCSTRING = r""" The BERT model was proposed in
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
pre-trained using a combination of masked language modeling objective and next sentence prediction
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~pytorch_transformers.BertConfig`): Model configuration class with all the parameters of the model.
"""
BERT_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare Bert Model transformer outputing raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertModel(BertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input; you're often better off averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertModel(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, position_ids=None, head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output,
extended_attention_mask,
head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
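# Mask sketch (values are illustrative): the 2D padding mask handled in forward() above becomes an
# additive bias, so padded positions contribute -10000.0 to the attention scores before the softmax.
#
#   attention_mask = torch.tensor([[1, 1, 1, 0]])                  # (bsz=1, seq_len=4)
#   extended = attention_mask.unsqueeze(1).unsqueeze(2).float()    # (1, 1, 1, 4)
#   extended = (1.0 - extended) * -10000.0                         # [[[[0., 0., 0., -10000.]]]]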
@add_start_docstrings("""Bert Model with two heads on top as done during the pre-training:
a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForPreTraining(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForPreTraining(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores, seq_relationship_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `language modeling` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForMaskedLM(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, masked_lm_labels=input_ids)
>>> loss, prediction_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMaskedLM, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config)
self.apply(self.init_weights)
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForNextSentencePrediction(BertPreTrainedModel):
r"""
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Next sequence prediction (classification) loss.
        **seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForNextSentencePrediction(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids)
>>> seq_relationship_scores = outputs[0]
"""
def __init__(self, config):
super(BertForNextSentencePrediction, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
outputs = (next_sentence_loss,) + outputs
return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForSequenceClassification(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, logits = outputs[:2]
"""
def __init__(self, config):
super(BertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
BERT_START_DOCSTRING)
class BertForMultipleChoice(BertPreTrainedModel):
r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Mask to avoid performing attention on padding token indices.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForMultipleChoice(config)
>>> choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
>>> input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
>>> labels = torch.tensor(1).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForMultipleChoice, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
num_choices = input_ids.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
outputs = self.bert(flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings("""Bert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForTokenClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels]``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForTokenClassification(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
>>> outputs = model(input_ids, labels=labels)
>>> loss, scores = outputs[:2]
"""
def __init__(self, config):
super(BertForTokenClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
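# Sketch of the attention-mask filtering in the loss above (hypothetical values): with
# num_labels=3 and a padded sequence whose attention_mask is [1, 1, 0], only the first
# two token positions contribute to the cross-entropy.
#
# logits = torch.randn(1, 3, 3) # (batch, seq_len, num_labels)
# labels = torch.tensor([[0, 2, 1]])
# attention_mask = torch.tensor([[1, 1, 0]])
# active_loss = attention_mask.view(-1) == 1
# loss = CrossEntropyLoss()(logits.view(-1, 3)[active_loss], labels.view(-1)[active_loss])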
@add_start_docstrings("""Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class BertForQuestionAnswering(BertPreTrainedModel):
r"""
**start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
**end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
            Positions outside of the sequence are not taken into account for computing the loss.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
**start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attention weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
>>> config = BertConfig.from_pretrained('bert-base-uncased')
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>>
>>> model = BertForQuestionAnswering(config)
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
        >>> loss, start_scores, end_scores = outputs[:3]
"""
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.apply(self.init_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None,
end_positions=None, position_ids=None, head_mask=None):
outputs = self.bert(input_ids, position_ids=position_ids, token_type_ids=token_type_ids,
attention_mask=attention_mask, head_mask=head_mask)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
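# Shape sketch of the span head above (illustrative): with config.num_labels == 2,
# qa_outputs maps each token's hidden state to two values, which split(1, dim=-1)
# separates into start and end logits of shape (batch_size, seq_len); gold positions
# outside the sequence are clamped to ignored_index so they do not affect the loss.
#
# sequence_output = torch.randn(2, 384, 768) # (batch, seq_len, hidden)
# logits = nn.Linear(768, 2)(sequence_output) # (2, 384, 2)
# start_logits, end_logits = logits.split(1, dim=-1) # two (2, 384, 1) tensors
# start_logits = start_logits.squeeze(-1) # (2, 384)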
| 67,047 | 52.382166 | 187 | py |
Discrete-Continuous-VLN | Discrete-Continuous-VLN-main/waypoint_prediction/transformer/pytorch_transformer/file_utils.py | """
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
import sys
import json
import logging
import os
import shutil
import tempfile
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_transformers')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_PRETRAINED_BERT_CACHE = Path(
os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path))
except (AttributeError, ImportError):
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
default_cache_path)
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
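# Naming convention example (digests shortened and illustrative, not real hashes):
#
# >>> url_to_filename("https://example.com/model.bin")
# '3f6c1a...' # sha256 hex digest of the url
# >>> url_to_filename("https://example.com/model.bin", etag='"abc123"')
# '3f6c1a....b94d27...' # url digest, a period, then the etag digest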
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
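# Usage sketch (URL and paths are hypothetical): remote URLs are downloaded into the
# cache directory and the cached path is returned; an existing local path is returned
# unchanged; a non-existent local path raises EnvironmentError.
#
# >>> cached_path("https://example.com/bert-base-uncased-pytorch_model.bin")
# '/home/user/.cache/torch/pytorch_transformers/<url-hash>.<etag-hash>'
# >>> cached_path("./my_local_model.bin")
# './my_local_model.bin'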
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
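# Example (bucket and key are hypothetical):
#
# >>> split_s3_path("s3://my-bucket/models/weights.bin")
# ('my-bucket', 'models/weights.bin')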
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if sys.version_info[0] == 2 and not isinstance(cache_dir, str):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
if sys.version_info[0] == 2 and etag is not None:
etag = etag.decode('utf-8')
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
if sys.version_info[0] == 2 and isinstance(output_string, str):
output_string = unicode(output_string, 'utf-8') # The beauty of python 2
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
| 8,876 | 33.142308 | 98 | py |
Synthetic2Realistic | Synthetic2Realistic-master/test.py | import os
from options.test_options import TestOptions
from dataloader.data_loader import dataloader
from model.models import create_model
from util.visualizer import Visualizer
from util import html
opt = TestOptions().parse()
dataset = dataloader(opt)
dataset_size = len(dataset) * opt.batchSize
print ('testing images = %d ' % dataset_size)
model = create_model(opt)
visualizer = Visualizer(opt)
web_dir = os.path.join(opt.results_dir,opt.name, '%s_%s' %(opt.phase, opt.which_epoch))
web_page = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
# testing
for i,data in enumerate(dataset):
model.set_input(data)
model.test()
model.save_results(visualizer, web_page) | 737 | 29.75 | 113 | py |
Synthetic2Realistic | Synthetic2Realistic-master/train.py | import time
from options.train_options import TrainOptions
from dataloader.data_loader import dataloader
from model.models import create_model
from util.visualizer import Visualizer
opt = TrainOptions().parse()
dataset = dataloader(opt)
dataset_size = len(dataset) * opt.batchSize
print('training images = %d' % dataset_size)
model = create_model(opt)
visualizer = Visualizer(opt)
total_steps=0
for epoch in range(opt.epoch_count, opt.niter+opt.niter_decay+1):
epoch_start_time = time.time()
epoch_iter = 0
# training
for i, data in enumerate(dataset):
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
model.set_input(data)
model.optimize_parameters(i)
if total_steps % opt.display_freq == 0:
if epoch >= opt.transform_epoch:
model.validation_target()
visualizer.display_current_results(model.get_current_visuals(), epoch)
if total_steps % opt.print_freq == 0:
errors = model.get_current_errors()
t = (time.time() - iter_start_time) / opt.batchSize
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
if opt.display_id > 0:
visualizer.plot_current_errors(epoch, float(epoch_iter)/dataset_size, opt, errors)
if total_steps % opt.save_latest_freq == 0:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
model.save_networks('latest')
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch (epoch %d, iters %d)' % (epoch, total_steps))
model.save_networks('latest')
model.save_networks(epoch)
    print('End of the epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time()-epoch_start_time))
model.update_learning_rate()
| 1,916 | 34.5 | 98 | py |
Synthetic2Realistic | Synthetic2Realistic-master/options/train_options.py | from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
# training epoch
self.parser.add_argument('--epoch_count', type=int, default=1,
help='the starting epoch count')
self.parser.add_argument('--niter', type=int, default=6,
help='# of iter with initial learning rate')
self.parser.add_argument('--niter_decay', type=int, default=4,
help='# of iter to decay learning rate to zero')
self.parser.add_argument('--continue_train', action='store_true',
help='continue training: load the latest model')
self.parser.add_argument('--transform_epoch', type=int, default=0,
help='# of epoch for transform learning')
self.parser.add_argument('--task_epoch', type=int, default=0,
help='# of epoch for task learning')
# learning rate and loss weight
self.parser.add_argument('--lr_policy', type=str, default='lambda',
help='learning rate policy[lambda|step|plateau]')
self.parser.add_argument('--lr_task', type=float, default=1e-4,
help='initial learning rate for adam')
self.parser.add_argument('--lr_trans', type=float, default=5e-5,
help='initial learning rate for discriminator')
self.parser.add_argument('--lambda_rec_img', type=float, default=100.0,
help='weight for image reconstruction loss')
self.parser.add_argument('--lambda_gan_img', type=float, default=1.0,
help='weight for image GAN loss')
self.parser.add_argument('--lambda_gan_feature', type=float, default=0.1,
help='weight for feature GAN loss')
self.parser.add_argument('--lambda_rec_lab', type=float, default=100.0,
help='weight for task loss')
self.parser.add_argument('--lambda_smooth', type=float, default=0.1,
help='weight for depth smooth loss')
# display the results
self.parser.add_argument('--display_freq', type=int, default=100,
help='frequency of showing training results on screen')
self.parser.add_argument('--print_freq', type=int, default=100,
help='frequency of showing training results on console')
self.parser.add_argument('--save_latest_freq', type=int, default=5000,
help='frequency of saving the latest results')
self.parser.add_argument('--save_epoch_freq', type=int, default=1,
help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--no_html', action='store_true',
help='do not save intermediate training results')
# others
self.parser.add_argument('--separate', action='store_true',
help='transform and task network training end-to-end or separate')
self.parser.add_argument('--pool_size', type=int, default=20,
help='the size of image buffer that stores previously generated images')
self.isTrain = True
| 3,503 | 62.709091 | 105 | py |
Synthetic2Realistic | Synthetic2Realistic-master/options/base_options.py | import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.initialized = False
def initialize(self):
# basic define
self.parser.add_argument('--name', type=str, default='experiment_name',
help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints',
                                 help='models are saved here')
self.parser.add_argument('--which_epoch', type=str, default='latest',
help='which epoch to load')
self.parser.add_argument('--gpu_ids', type=str, default='0',
help='gpu ids: e.g. 0, 1, 2 use -1 for CPU')
self.parser.add_argument('--model', type=str, default='wsupervised',
help='choose which model to use, [supervised] | [wsupervised]')
# data pattern define
self.parser.add_argument('--img_source_file', type=str, default='/data/dataset/Image2Depth_SUN_NYU/trainA_SYN10.txt',
help='training and testing dataset for source domain')
self.parser.add_argument('--img_target_file', type=str, default='/data/dataset/Image2Depth_SUN_NYU/trainA.txt',
                                 help='training and testing dataset for target domain')
self.parser.add_argument('--lab_source_file', type=str, default='/data/dataset/Image2Depth_SUN_NYU/trainC_SYN10.txt',
help='training label for source domain')
self.parser.add_argument('--lab_target_file', type=str, default='/data/dataset/Image2Depth_SUN_NYU/trainC.txt',
help='training label for target domain')
self.parser.add_argument('--dataset_mode', type=str, default='paired',
help='chooses how datasets are loaded. [paired| unpaired]')
self.parser.add_argument('--loadSize', type=list, default=[640, 192],
help='load image into same size [256, 192]|[640, 192]')
self.parser.add_argument('--flip', action='store_true',
help='if specified, do flip the image for data augmentation')
self.parser.add_argument('--scale_rate', type=float, default=0,
help='scale images with same rate')
self.parser.add_argument('--rotation', action='store_true',
help='if specified, rotate the images for data augmentation')
self.parser.add_argument('--crop', action='store_true',
help='if specified, crop the images for data augmentation')
self.parser.add_argument('--batchSize', type=int, default=6,
help='input batch size')
self.parser.add_argument('--nThreads', type=int, default=2,
help='# threads for loading data')
self.parser.add_argument('--shuffle', action='store_true',
help='if true, takes images randomly')
# network structure define
self.parser.add_argument('--image_nc', type=int, default=3,
help='# of input image channels')
self.parser.add_argument('--label_nc', type=int, default=1,
help='# of output label channels')
self.parser.add_argument('--ngf', type=int, default=64,
help='# of encoder filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64,
help='# of discriminator filter in first conv layer')
self.parser.add_argument('--image_feature', type=int, default=512,
help='the max channels for image features')
        self.parser.add_argument('--num_D', type=int, default=1,
                                 help='# of discriminators')
        self.parser.add_argument('--transform_layers', type=int, default=9,
                                 help='# of down sample layers for the transform network')
        self.parser.add_argument('--task_layers', type=int, default=4,
                                 help='# of down sample layers for the task network')
        self.parser.add_argument('--image_D_layers', type=int, default=3,
                                 help='# of down sample layers for the image discriminator')
        self.parser.add_argument('--feature_D_layers', type=int, default=2,
                                 help='# of layers for the feature discriminator')
self.parser.add_argument('--task_model_type', type=str, default='UNet',
help='select model for task network [UNet] |[ResNet]')
self.parser.add_argument('--trans_model_type', type=str, default='ResNet',
help='select model for transform network [UNet] |[ResNet]')
self.parser.add_argument('--norm', type=str, default='batch',
help='batch normalization or instance normalization')
self.parser.add_argument('--activation', type=str, default='PReLU',
                                 help='ReLU, LeakyReLU, PReLU, or SELU')
self.parser.add_argument('--init_type', type=str, default='kaiming',
help='network initialization [normal|xavier|kaiming]')
self.parser.add_argument('--drop_rate', type=float, default=0,
help='# of drop rate')
self.parser.add_argument('--U_weight', type=float, default=0.1,
help='weight for Unet')
# display parameter define
self.parser.add_argument('--display_winsize', type=int, default=256,
help='display window size')
self.parser.add_argument('--display_id', type=int, default=1,
help='display id of the web')
self.parser.add_argument('--display_port', type=int, default=8097,
                                 help='visdom port of the web display')
self.parser.add_argument('--display_single_pane_ncols', type=int, default=0,
                                 help='if positive, display all images in a single visdom web panel')
def parse(self):
if not self.initialized:
self.initialize()
self.opt=self.parser.parse_args()
self.opt.isTrain = self.isTrain
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >=0:
self.opt.gpu_ids.append(id)
# set gpu ids
if len(self.opt.gpu_ids):
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('--------------Options--------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('----------------End----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
if self.opt.isTrain:
file_name = os.path.join(expr_dir, 'train_opt.txt')
else:
file_name = os.path.join(expr_dir, 'test_opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('--------------Options--------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('----------------End----------------\n')
return self.opt | 7,866 | 57.708955 | 125 | py |
Synthetic2Realistic | Synthetic2Realistic-master/options/__init__.py | 0 | 0 | 0 | py |
|
Synthetic2Realistic | Synthetic2Realistic-master/options/test_options.py | from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples')
self.parser.add_argument('--results_dir', type=str, default='./results/', help = 'saves results here')
self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
self.isTrain = False | 478 | 42.545455 | 110 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/image_pool.py | import random
import torch
from torch.autograd import Variable
class ImagePool():
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return Variable(images)
return_images = []
for image in images:
image = torch.unsqueeze(image, 0)
if self.num_imgs < self.pool_size:
self.num_imgs += 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0,1)
if p > 0.5:
random_id = random.randint(0, self.pool_size-1)
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = Variable(torch.cat(return_images, 0))
return return_images
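# Behaviour sketch (50/50 odds as coded above): once num_imgs reaches pool_size, each
# incoming fake image is either swapped with a randomly chosen stored image (the old
# one is returned to the caller) or passed through unchanged, so the discriminator is
# trained on a mix of current and historical generator outputs.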
| 1,083 | 29.111111 | 67 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/html.py | import dominate
from dominate.tags import *
import os
class HTML:
def __init__(self, web_dir, title, reflesh=0):
self.title = title
self.web_dir = web_dir
self.img_dir = os.path.join(self.web_dir, 'images')
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
# print(self.img_dir)
self.doc = dominate.document(title=title)
if reflesh > 0:
with self.doc.head:
                meta(http_equiv="refresh", content=str(reflesh))
def get_image_dir(self):
return self.img_dir
def add_header(self, str):
with self.doc:
h3(str)
def add_table(self, border=1):
self.t = table(border=border, style="table-layout: fixed;")
self.doc.add(self.t)
def add_images(self, ims, txts, links, width=400):
self.add_table()
with self.t:
with tr():
for im, txt, link in zip(ims, txts, links):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=os.path.join('images', link)):
img(style="width:%dpx" % width, src=os.path.join('images', im))
br()
p(txt)
def save(self):
html_file = '%s/index.html' % self.web_dir
f = open(html_file, 'wt')
f.write(self.doc.render())
f.close()
if __name__ == '__main__':
html = HTML('web/', 'test_html')
html.add_header('hello world')
ims = []
txts = []
links = []
for n in range(4):
ims.append('image_%d.png' % n)
txts.append('text_%d' % n)
links.append('image_%d.png' % n)
html.add_images(ims, txts, links)
html.save()
| 1,912 | 28.430769 | 95 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/task.py | import torch
import torch.nn.functional as F
###################################################################
# depth function
###################################################################
# calculate the loss
def rec_loss(pred, truth):
mask = truth == -1
mask = mask.float()
errors = torch.abs(pred - truth) * (1.0-mask)
# batch_max = 0.2 * torch.max(errors).data[0]
batch_max = 0.0 * torch.max(errors).item()
if batch_max == 0:
return torch.mean(errors)
errors_mask = errors < batch_max
errors_mask = errors_mask.float()
sqerrors = (errors ** 2 + batch_max*batch_max) / (2*batch_max)
return torch.mean(errors*errors_mask + sqerrors*(1-errors_mask))
def scale_pyramid(img, num_scales):
scaled_imgs = [img]
s = img.size()
h = s[2]
w = s[3]
for i in range(1, num_scales):
ratio = 2**i
nh = h // ratio
nw = w // ratio
        scaled_img = F.interpolate(img, size=(nh, nw), mode='bilinear', align_corners=True)
scaled_imgs.append(scaled_img)
scaled_imgs.reverse()
return scaled_imgs
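# Shape sketch (illustrative sizes): for a (B, C, 192, 640) input and num_scales=4 the
# returned list is ordered coarse-to-fine after the reverse() above:
# (B, C, 24, 80), (B, C, 48, 160), (B, C, 96, 320), (B, C, 192, 640).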
def gradient_x(img):
gx = img[:, :, :-1, :] - img[:, :, 1:, :]
return gx
def gradient_y(img):
gy = img[:, :, :, :-1] - img[:, :, :, 1:]
return gy
# calculate the gradient loss
def get_smooth_weight(depths, Images, num_scales):
depth_gradient_x = [gradient_x(d) for d in depths]
depth_gradient_y = [gradient_y(d) for d in depths]
Image_gradient_x = [gradient_x(img) for img in Images]
Image_gradient_y = [gradient_y(img) for img in Images]
weight_x = [torch.exp(-torch.mean(torch.abs(g), 1, keepdim=True)) for g in Image_gradient_x]
weight_y = [torch.exp(-torch.mean(torch.abs(g), 1, keepdim=True)) for g in Image_gradient_y]
smoothness_x = [depth_gradient_x[i] * weight_x[i] for i in range(num_scales)]
smoothness_y = [depth_gradient_y[i] * weight_y[i] for i in range(num_scales)]
loss_x = [torch.mean(torch.abs(smoothness_x[i]))/2**i for i in range(num_scales)]
loss_y = [torch.mean(torch.abs(smoothness_y[i]))/2**i for i in range(num_scales)]
return sum(loss_x+loss_y) | 2,150 | 27.302632 | 96 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/visualizer.py | import numpy as np
import os
import ntpath
import time
from . import util
from . import html
class Visualizer():
def __init__(self, opt):
# self.opt = opt
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
if self.display_id > 0:
import visdom
self.vis = visdom.Visdom(port = opt.display_port)
self.display_single_pane_ncols = opt.display_single_pane_ncols
if self.use_html:
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
# |visuals|: dictionary of images to display or save
def display_current_results(self, visuals, epoch):
if self.display_id > 0: # show images in the browser
if self.display_single_pane_ncols > 0:
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}
table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h)
ncols = self.display_single_pane_ncols
title = self.name
label_html = ''
label_html_row = ''
nrows = int(np.ceil(len(visuals.items()) / ncols))
images = []
idx = 0
for label, image_numpy in visuals.items():
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1]))*255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
# pane col = image row
self.vis.images(images, nrow=ncols, win=self.display_id + 1,
padding=2, opts=dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win = self.display_id + 2,
opts=dict(title=title + ' labels'))
else:
idx = 1
for label, image_numpy in visuals.items():
#image_numpy = np.flipud(image_numpy)
self.vis.image(image_numpy.transpose([2,0,1]), opts=dict(title=label),
win=self.display_id + idx)
idx += 1
if self.use_html: # save images to a html file
for label, image_numpy in visuals.items():
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
# errors: dictionary of error labels and values
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
if not hasattr(self, 'plot_data'):
self.plot_data = {'X':[],'Y':[], 'legend':list(errors.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
self.vis.line(
X=np.stack([np.array(self.plot_data['X'])]*len(self.plot_data['legend']),1),
Y=np.array(self.plot_data['Y']),
opts={
'title': self.name + ' loss over time',
'legend': self.plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id)
# errors: same format as |errors| of plotCurrentErrors
def print_current_errors(self, epoch, i, errors, t):
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
for k, v in errors.items():
message += '%s: %.3f ' % (k, v)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
# save image to the disk
def save_images(self, webpage, visuals, image_path):
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=self.win_size)
| 6,117 | 42.7 | 96 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/util.py | import numpy as np
import os
import imageio
# convert a tensor into a numpy array
def tensor2im(image_tensor, bytes=255.0, imtype=np.uint8):
if image_tensor.dim() == 3:
image_numpy = image_tensor.cpu().float().numpy()
else:
image_numpy = image_tensor[0].cpu().float().numpy()
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * bytes
return image_numpy.astype(imtype)
def save_image(image_numpy, image_path):
if image_numpy.shape[2] == 1:
image_numpy = image_numpy.reshape(image_numpy.shape[0], image_numpy.shape[1])
imageio.imwrite(image_path, image_numpy)
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path) | 864 | 27.833333 | 85 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/__init__.py | 0 | 0 | 0 | py |
|
Synthetic2Realistic | Synthetic2Realistic-master/util/evaluation.py | import argparse
from data_kitti import *
parser = argparse.ArgumentParser(description='Evaluation ont the dataset')
parser.add_argument('--split', type=str, default='eigen', help='data split')
parser.add_argument('--predicted_depth_path', type=str, default='../dataset/KITTI31_predicted_lsgan/', help='path to estimated depth')
parser.add_argument('--gt_path', type = str, default='/data/dataset/KITTI/',
help = 'path to original kitti dataset /data/dataset/NYU_Test/testB')
parser.add_argument('--file_path', type = str, default='../datasplit/', help = 'path to datasplit files')
parser.add_argument('--save_path', type = str, default='/home/asus/lyndon/program/data/Image2Depth_31_KITTI/', help='path to save the train and test dataset')
parser.add_argument('--min_depth', type=float, default=1, help='minimum depth for evaluation')
parser.add_argument('--max_depth', type=float, default=50, help='maximum depth for evaluation, indoor 8.0, outdoor 50')
parser.add_argument('--normize_depth', type=float, default=80, help='depth normalization value, indoor 8.0, outdoor 80 (training scale)')
parser.add_argument('--eigen_crop',action='store_true', help='if set, crops according to Eigen NIPS14')
parser.add_argument('--garg_crop', action='store_true', help='if set, crops according to Garg ECCV16')
args = parser.parse_args()
if __name__ == "__main__":
predicted_depths = load_depth(args.predicted_depth_path,args.split, args.normize_depth)
if args.split == 'indoor':
ground_truths = load_depth(args.gt_path, args.split, 10)
num_samples = len(ground_truths)
elif args.split == 'eigen':
test_files = read_text_lines(args.file_path + 'eigen_test_files.txt')
gt_files, gt_calib, im_sizes, im_files, cams = read_file_data(test_files, args.gt_path)
num_samples = len(im_files)
ground_truths = []
for t_id in range(num_samples):
camera_id = cams[t_id]
depth = generate_depth_map(gt_calib[t_id], gt_files[t_id], im_sizes[t_id], camera_id, False, True)
ground_truths.append(depth.astype(np.float32))
depth = cv2.resize(predicted_depths[t_id],(im_sizes[t_id][1], im_sizes[t_id][0]),interpolation=cv2.INTER_LINEAR)
predicted_depths[t_id] = depth
abs_rel = np.zeros(num_samples, np.float32)
sq_rel = np.zeros(num_samples,np.float32)
rmse = np.zeros(num_samples,np.float32)
rmse_log = np.zeros(num_samples,np.float32)
a1 = np.zeros(num_samples,np.float32)
a2 = np.zeros(num_samples,np.float32)
a3 = np.zeros(num_samples,np.float32)
for i in range(len(ground_truths)):
# for i in range(1):
ground_depth = ground_truths[i]
predicted_depth = predicted_depths[i]
# print(ground_depth.max(),ground_depth.min())
# print(predicted_depth.max(),predicted_depth.min())
# depth_predicted = (predicted_depth / 7) * 255
# depth_predicted = Image.fromarray(depth_predicted.astype(np.uint8))
# depth_predicted.save(os.path.join('/home/asus/lyndon/program/Image2Depth/results/predicted_depth/', str(i)+'.png'))
# depth = (depth / 80) * 255
# depth = Image.fromarray(depth.astype(np.uint8))
# depth.save(os.path.join('/data/result/syn_real_result/KITTI/ground_truth/{:05d}.png'.format(t_id)))
predicted_depth[predicted_depth < args.min_depth] = args.min_depth
predicted_depth[predicted_depth > args.max_depth] = args.max_depth
if args.split == 'indoor':
ground_depth = ground_depth[12:468, 16:624]
height, width = ground_depth.shape
predicted_depth = cv2.resize(predicted_depth,(width,height),interpolation=cv2.INTER_LINEAR)
mask = np.logical_and(ground_depth > args.min_depth, ground_depth < args.max_depth)
elif args.split == 'eigen':
height, width = ground_depth.shape
mask = np.logical_and(ground_depth > args.min_depth, ground_depth < args.max_depth)
# crop used by Garg ECCV16
if args.garg_crop:
crop = np.array([0.40810811 * height, 0.99189189 * height,
0.03594771 * width, 0.96405229 * width]).astype(np.int32)
# crop we found by trail and error to reproduce Eigen NIPS14 results
elif args.eigen_crop:
crop = np.array([0.3324324 * height, 0.91351351 * height,
0.0359477 * width, 0.96405229 * width]).astype(np.int32)
crop_mask = np.zeros(mask.shape)
crop_mask[crop[0]:crop[1],crop[2]:crop[3]] = 1
mask = np.logical_and(mask, crop_mask)
abs_rel[i], sq_rel[i], rmse[i], rmse_log[i], a1[i], a2[i], a3[i] = compute_errors(ground_depth[mask],predicted_depth[mask])
print('{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}'
.format(i, abs_rel[i], sq_rel[i], rmse[i], rmse_log[i], a1[i], a2[i], a3[i]))
print ('{:>10},{:>10},{:>10},{:>10},{:>10},{:>10},{:>10}'.format('abs_rel','sq_rel','rmse','rmse_log','a1','a2','a3'))
print ('{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f},{:10.4f}'
.format(abs_rel.mean(),sq_rel.mean(),rmse.mean(),rmse_log.mean(),a1.mean(),a2.mean(),a3.mean())) | 5,327 | 49.742857 | 158 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/visual_result.py | import matplotlib.pyplot as plt
import sys,os
sys.path.append('/home/asus/lyndon/program/Image2Depth/dataloader')
from dataloader.image_folder import make_dataset
import numpy as np
import scipy.misc
dataRoot = '/data/dataset/Image2Depth31_KITTI/testB'
dispairtyRoot = '/data/result/disparities_eigen_godard/disparities.npy'
depthSave = '/data/result/KITTI/Godard17'
width_to_focal = dict()
width_to_focal[1242] = 721.5377
width_to_focal[1241] = 718.856
width_to_focal[1224] = 707.0493
width_to_focal[1238] = 718.3351
# dataset, _ = make_dataset(dataRoot)
#
# for data in dataset:
# output_name = os.path.splitext(os.path.basename(data))[0]
# depth = ImageOps.invert(Image.open(data))
# plt.imsave(os.path.join(depthSave, "{}.png".format(output_name)), depth, cmap='plasma')
dispairties = np.load(dispairtyRoot)
num, height, width = dispairties.shape
i = 0
for dispairty in dispairties:
i += 1
# dispairty = dispairty * dispairty.shape[1]
# depth_pred = width_to_focal[1224] * 0.54 / dispairty
# depth_pred[np.isinf(depth_pred)] = 0
# depth_pred[depth_pred > 80] = 80
# depth_pred[depth_pred < 1e-3] = 1e-3
depth_to_img = scipy.misc.imresize(dispairty, [375, 1242])
plt.imsave(os.path.join(depthSave, "{}_disp.png".format(i)), depth_to_img, cmap='plasma')
# depth = width_to_focal[1224] * 0.54 / dispairty
| 1,366 | 28.717391 | 93 | py |
Synthetic2Realistic | Synthetic2Realistic-master/util/data_kitti.py | import numpy as np
import os
import cv2
from collections import Counter
from scipy.interpolate import LinearNDInterpolator
from PIL import Image
from dataloader.image_folder import make_dataset
def compute_errors(ground_truth, predication):
# accuracy
threshold = np.maximum((ground_truth / predication),(predication / ground_truth))
a1 = (threshold < 1.25 ).mean()
a2 = (threshold < 1.25 ** 2 ).mean()
a3 = (threshold < 1.25 ** 3 ).mean()
#MSE
rmse = (ground_truth - predication) ** 2
rmse = np.sqrt(rmse.mean())
#MSE(log)
rmse_log = (np.log(ground_truth) - np.log(predication)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
# Abs Relative difference
abs_rel = np.mean(np.abs(ground_truth - predication) / ground_truth)
# Squared Relative difference
sq_rel = np.mean(((ground_truth - predication) ** 2) / ground_truth)
return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
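# Minimal usage sketch (values are hypothetical): both arguments are flattened arrays
# of strictly positive depths, typically obtained by indexing with a validity mask.
#
# gt = np.array([2.0, 5.0, 10.0], dtype=np.float32)
# pred = np.array([2.2, 4.5, 11.0], dtype=np.float32)
# abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3 = compute_errors(gt, pred)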
def load_depth(file_path, split, max_depth):
depths = []
dataset, _ = make_dataset(file_path)
for data in dataset:
depth = Image.open(data)
if split == 'eigen':
depth = np.array(depth)#[:,:,0]
depth = depth.astype(np.float32) / 255 * max_depth
elif split == 'indoor':
depth = np.array(depth)#[:,:,0]
depth = depth.astype(np.float32) / 255 * max_depth
# depth = (depth - depth.mean()) / depth.std()
depths.append(depth)
return depths
def extract_train_file(args):
if args.split == 'eigen':
train_files = read_text_lines(args.file_path+'eigen_train_files.txt')
gt_files, gt_calib, im_sizes, im_files, cams = read_file_data(train_files,args.data_path)
num_files = len(im_files)
for t_id in range(num_files):
image_name = '%010d.jpg' % (t_id)
depth_path = os.path.join(args.save_path,'trainB/', image_name)
image_path = os.path.join(args.save_path,'trainA/', image_name)
image_name = ''
camera_id = cams[t_id]
depth, depth_interp = generate_depth_map(gt_calib[t_id], gt_files[t_id], im_sizes[t_id], camera_id, True, True)
depth_img = Image.fromarray(np.uint8(depth_interp/80*255))
depth_img.save(depth_path)
rgb_img = cv2.imread(im_files[t_id])
cv2.imwrite(image_path,rgb_img)
###############################################################################
####################### EIGEN
def read_text_lines(file_path):
f = open(file_path, 'r')
lines = f.readlines()
f.close()
lines = [l.rstrip() for l in lines]
return lines
def read_file_data(files, data_root):
gt_files = []
gt_calib = []
im_sizes = []
im_files = []
cams = []
num_probs = 0
for filename in files:
filename = filename.split()[0]
splits = filename.split('/')
camera_id = np.int32(splits[2][-1:]) # 2 is left, 3 is right
date = splits[0]
im_id = splits[4][:10]
file_root = '{}/{}'
im = filename
vel = '{}/{}/velodyne_points/data/{}.bin'.format(splits[0], splits[1], im_id)
if os.path.isfile(data_root + im):
gt_files.append(data_root + vel)
gt_calib.append(data_root + date + '/')
im_sizes.append(cv2.imread(data_root + im).shape[:2])
im_files.append(data_root + im)
cams.append(2)
else:
num_probs += 1
print('{} missing'.format(data_root + im))
print (num_probs, 'files missing')
return gt_files, gt_calib, im_sizes, im_files, cams
def load_velodyne_points(file_name):
# adapted from https://github.com/hunse/kitti
points = np.fromfile(file_name, dtype=np.float32).reshape(-1, 4)
points[:, 3] = 1.0 # homogeneous
return points
def lin_interp(shape, xyd):
# taken from https://github.com/hunse/kitti
m, n = shape
ij, d = xyd[:, 1::-1], xyd[:, 2]
f = LinearNDInterpolator(ij, d, fill_value=0)
J, I = np.meshgrid(np.arange(n), np.arange(m))
IJ = np.vstack([I.flatten(), J.flatten()]).T
disparity = f(IJ).reshape(shape)
return disparity
def read_calib_file(path):
# taken from https://github.com/hunse/kitti
float_chars = set("0123456789.e+- ")
data = {}
with open(path, 'r') as f:
for line in f.readlines():
key, value = line.split(':', 1)
value = value.strip()
data[key] = value
if float_chars.issuperset(value):
# try to cast to float array
try:
data[key] = np.array(list(map(float, value.split(' '))))
except ValueError:
# casting error: data[key] already eq. value, so pass
pass
return data
def get_focal_length_baseline(calib_dir, cam):
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
P2_rect = cam2cam['P_rect_02'].reshape(3, 4)
P3_rect = cam2cam['P_rect_03'].reshape(3, 4)
# cam 2 is left of camera 0 -6cm
# cam 3 is to the right +54cm
b2 = P2_rect[0, 3] / -P2_rect[0, 0]
b3 = P3_rect[0, 3] / -P3_rect[0, 0]
baseline = b3 - b2
if cam == 2:
focal_length = P2_rect[0, 0]
elif cam == 3:
focal_length = P3_rect[0, 0]
return focal_length, baseline
def sub2ind(matrixSize, rowSub, colSub):
m, n = matrixSize
return rowSub * (n - 1) + colSub - 1
def generate_depth_map(calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False):
# load calibration files
cam2cam = read_calib_file(calib_dir + 'calib_cam_to_cam.txt')
velo2cam = read_calib_file(calib_dir + 'calib_velo_to_cam.txt')
# print (velo2cam)
velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))
velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))
# compute projection matrix velodyne->image plane
R_cam2rect = np.eye(4)
R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)
P_rect = cam2cam['P_rect_0' + str(cam)].reshape(3, 4)
P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)
# load velodyne points and remove all behind image plane (approximation)
# each row of the velodyne data is forward, left, up, reflectance
velo = load_velodyne_points(velo_file_name)
velo = velo[velo[:, 0] >= 0, :]
# project the points to the camera
velo_pts_im = np.dot(P_velo2im, velo.T).T
velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis]
if vel_depth:
velo_pts_im[:, 2] = velo[:, 0]
# check if in bounds
# use minus 1 to get the exact same value as KITTI matlab code
velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1
velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1
val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])
velo_pts_im = velo_pts_im[val_inds, :]
# project to image
    depth = np.zeros(im_shape)
    depth[velo_pts_im[:, 1].astype(np.int32), velo_pts_im[:, 0].astype(np.int32)] = velo_pts_im[:, 2]
# find the duplicate points and choose the closest depth
inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
for dd in dupe_inds:
pts = np.where(inds == dd)[0]
x_loc = int(velo_pts_im[pts[0], 0])
y_loc = int(velo_pts_im[pts[0], 1])
depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
depth[depth < 0] = 0
if interp:
# interpolate the depth map to fill in holes
depth_interp = lin_interp(im_shape, velo_pts_im)
return depth, depth_interp
else:
return depth | 7,828 | 33.337719 | 123 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/base_model.py | import os
import torch
from collections import OrderedDict
from util import util
class BaseModel():
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
self.loss_names = []
self.model_names = []
self.visual_names = []
self.image_paths = []
def set_input(self, input):
self.input = input
# update learning rate
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
lr = self.optimizers[0].param_groups[0]['lr']
print('learning rate = %.7f' % lr)
# return training loss
def get_current_errors(self):
errors_ret = OrderedDict()
for name in self.loss_names:
if isinstance(name, str):
errors_ret[name] = getattr(self, 'loss_' + name).item()
return errors_ret
# return visualization images
def get_current_visuals(self):
visual_ret = OrderedDict()
for name in self.visual_names:
if isinstance(name, str):
value = getattr(self, name)
if isinstance(value, list):
visual_ret[name] = util.tensor2im(value[-1].data)
else:
visual_ret[name] = util.tensor2im(value.data)
return visual_ret
# save models
def save_networks(self, which_epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (which_epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net_' + name)
torch.save(net.cpu().state_dict(), save_path)
if len(self.gpu_ids) > 0 and torch.cuda.is_available():
net.cuda()
# load models
def load_networks(self, which_epoch):
for name in self.model_names:
if isinstance(name, str):
save_filename = '%s_net_%s.pth' % (which_epoch, name)
save_path = os.path.join(self.save_dir, save_filename)
net = getattr(self, 'net_'+name)
net.load_state_dict(torch.load(save_path))
if not self.isTrain:
net.eval() | 2,424 | 33.642857 | 71 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/network.py | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
from torchvision import models
import torch.nn.functional as F
from torch.optim import lr_scheduler
######################################################################################
# Functions
######################################################################################
def get_norm_layer(norm_type='batch'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_nonlinearity_layer(activation_type='PReLU'):
if activation_type == 'ReLU':
nonlinearity_layer = nn.ReLU(True)
elif activation_type == 'SELU':
nonlinearity_layer = nn.SELU(True)
elif activation_type == 'LeakyReLU':
nonlinearity_layer = nn.LeakyReLU(0.1, True)
elif activation_type == 'PReLU':
nonlinearity_layer = nn.PReLU()
else:
raise NotImplementedError('activation layer [%s] is not found' % activation_type)
return nonlinearity_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch+1+1+opt.epoch_count-opt.niter) / float(opt.niter_decay+1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'exponent':
scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=0.95)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
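# Worked example of the 'lambda' policy (using the defaults from Synthetic2Realistic's
# train_options.py: epoch_count=1, niter=6, niter_decay=4):
# lambda_rule(epoch) = 1 - max(0, epoch + 1 + 1 + 1 - 6) / 5, so the LR multiplier stays
# at 1.0 for epoch = 0..3 and then decays linearly: 0.8, 0.6, 0.4, 0.2, 0.0 at epoch = 4..8.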
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
            init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('total number of parameters: %.3f M' % (num_params / 1e6))
def init_net(net, init_type='normal', gpu_ids=[]):
print_network(net)
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net = torch.nn.DataParallel(net, gpu_ids)
net.cuda()
init_weights(net, init_type)
return net
def _freeze(*args):
for module in args:
if module:
for p in module.parameters():
p.requires_grad = False
def _unfreeze(*args):
for module in args:
if module:
for p in module.parameters():
p.requires_grad = True
# define the generator(transform, task) network
def define_G(input_nc, output_nc, ngf=64, layers=4, norm='batch', activation='PReLU', model_type='UNet',
init_type='xavier', drop_rate=0, add_noise=False, gpu_ids=[], weight=0.1):
if model_type == 'ResNet':
net = _ResGenerator(input_nc, output_nc, ngf, layers, norm, activation, drop_rate, add_noise, gpu_ids)
elif model_type == 'UNet':
net = _UNetGenerator(input_nc, output_nc, ngf, layers, norm, activation, drop_rate, add_noise, gpu_ids, weight)
# net = _PreUNet16(input_nc, output_nc, ngf, layers, True, norm, activation, drop_rate, gpu_ids)
else:
        raise NotImplementedError('model type [%s] is not implemented' % model_type)
return init_net(net, init_type, gpu_ids)
# define the discriminator network
def define_D(input_nc, ndf = 64, n_layers = 3, num_D = 1, norm = 'batch', activation = 'PReLU', init_type='xavier', gpu_ids = []):
net = _MultiscaleDiscriminator(input_nc, ndf, n_layers, num_D, norm, activation, gpu_ids)
return init_net(net, init_type, gpu_ids)
# define the feature discriminator network
def define_featureD(input_nc, n_layers=2, norm='batch', activation='PReLU', init_type='xavier', gpu_ids=[]):
net = _FeatureDiscriminator(input_nc, n_layers, norm, activation, gpu_ids)
return init_net(net, init_type, gpu_ids)
######################################################################################
# Basic Operation
######################################################################################
class GaussianNoiseLayer(nn.Module):
def __init__(self):
super(GaussianNoiseLayer, self).__init__()
def forward(self, x):
if self.training == False:
return x
noise = Variable((torch.randn(x.size()).cuda(x.data.get_device()) - 0.5) / 10.0)
return x+noise
class _InceptionBlock(nn.Module):
def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), width=1, drop_rate=0, use_bias=False):
super(_InceptionBlock, self).__init__()
self.width = width
self.drop_rate = drop_rate
for i in range(width):
layer = nn.Sequential(
nn.ReflectionPad2d(i*2+1),
nn.Conv2d(input_nc, output_nc, kernel_size=3, padding=0, dilation=i*2+1, bias=use_bias)
)
setattr(self, 'layer'+str(i), layer)
self.norm1 = norm_layer(output_nc * width)
self.norm2 = norm_layer(output_nc)
self.nonlinearity = nonlinearity
self.branch1x1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(output_nc * width, output_nc, kernel_size=3, padding=0, bias=use_bias)
)
def forward(self, x):
result = []
for i in range(self.width):
layer = getattr(self, 'layer'+str(i))
result.append(layer(x))
output = torch.cat(result, 1)
output = self.nonlinearity(self.norm1(output))
output = self.norm2(self.branch1x1(output))
if self.drop_rate > 0:
output = F.dropout(output, p=self.drop_rate, training=self.training)
return self.nonlinearity(output+x)
class _EncoderBlock(nn.Module):
def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_EncoderBlock, self).__init__()
model = [
nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(middle_nc),
nonlinearity,
nn.Conv2d(middle_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(output_nc),
nonlinearity
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _DownBlock(nn.Module):
def __init__(self, input_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_DownBlock, self).__init__()
model = [
nn.Conv2d(input_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(output_nc),
nonlinearity,
nn.MaxPool2d(kernel_size=2, stride=2),
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _ShuffleUpBlock(nn.Module):
def __init__(self, input_nc, up_scale, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_ShuffleUpBlock, self).__init__()
model = [
nn.Conv2d(input_nc, input_nc*up_scale**2, kernel_size=3, stride=1, padding=1, bias=use_bias),
nn.PixelShuffle(up_scale),
nonlinearity,
nn.Conv2d(input_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=use_bias),
norm_layer(output_nc),
nonlinearity
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _DecoderUpBlock(nn.Module):
def __init__(self, input_nc, middle_nc, output_nc, norm_layer=nn.BatchNorm2d, nonlinearity=nn.PReLU(), use_bias=False):
super(_DecoderUpBlock, self).__init__()
model = [
nn.ReflectionPad2d(1),
nn.Conv2d(input_nc, middle_nc, kernel_size=3, stride=1, padding=0, bias=use_bias),
norm_layer(middle_nc),
nonlinearity,
nn.ConvTranspose2d(middle_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(output_nc),
nonlinearity
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
class _OutputBlock(nn.Module):
def __init__(self, input_nc, output_nc, kernel_size=3, use_bias=False):
super(_OutputBlock, self).__init__()
model = [
nn.ReflectionPad2d(int(kernel_size/2)),
nn.Conv2d(input_nc, output_nc, kernel_size=kernel_size, padding=0, bias=use_bias),
nn.Tanh()
]
self.model = nn.Sequential(*model)
def forward(self, x):
return self.model(x)
######################################################################################
# Network structure
######################################################################################
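# _ResGenerator: 7x7 reflection-padded stem, two average-pooled encoder stages, a stack
# of _InceptionBlock residual stages, and a mirrored upsampling decoder. forward()
# returns [bottleneck_feature, generated_image] so callers can reuse the encoder feature.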
class _ResGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_blocks=6, norm='batch', activation='PReLU', drop_rate=0, add_noise=False, gpu_ids=[]):
super(_ResGenerator, self).__init__()
self.gpu_ids = gpu_ids
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
encoder = [
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nonlinearity
]
n_downsampling = 2
mult = 1
for i in range(n_downsampling):
mult_prev = mult
mult = min(2 ** (i+1), 2)
encoder += [
_EncoderBlock(ngf * mult_prev, ngf*mult, ngf*mult, norm_layer, nonlinearity, use_bias),
nn.AvgPool2d(kernel_size=2, stride=2)
]
mult = min(2 ** n_downsampling, 2)
for i in range(n_blocks-n_downsampling):
encoder +=[
_InceptionBlock(ngf*mult, ngf*mult, norm_layer=norm_layer, nonlinearity=nonlinearity, width=1,
drop_rate=drop_rate, use_bias=use_bias)
]
decoder = []
if add_noise:
decoder += [GaussianNoiseLayer()]
for i in range(n_downsampling):
mult_prev = mult
mult = min(2 ** (n_downsampling - i -1), 2)
decoder +=[
_DecoderUpBlock(ngf*mult_prev, ngf*mult_prev, ngf*mult, norm_layer, nonlinearity, use_bias),
]
decoder +=[
nn.ReflectionPad2d(3),
nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
nn.Tanh()
]
self.encoder = nn.Sequential(*encoder)
self.decoder = nn.Sequential(*decoder)
def forward(self, input):
feature = self.encoder(input)
result = [feature]
output = self.decoder(feature)
result.append(output)
return result
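# _PreUNet16: a U-Net variant whose encoder reuses (optionally pretrained) VGG16
# features. Note that the call to it in define_G above is commented out, so this class
# appears to be unused by the current factory functions.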
class _PreUNet16(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64, layers=5, pretrained=False, norm='batch', activation='PReLU',
drop_rate=0, gpu_ids=[]):
super(_PreUNet16, self).__init__()
self.gpu_ids = gpu_ids
self.layers = layers
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
encoder = models.vgg16(pretrained=pretrained).features
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
self.relu = nn.ReLU(inplace=True)
self.conv1 = nn.Sequential(encoder[0], self.relu, encoder[2], self.relu)
self.conv2 = nn.Sequential(encoder[5], self.relu, encoder[7], self.relu)
self.conv3 = nn.Sequential(encoder[10], self.relu, encoder[12], self.relu, encoder[14], self.relu)
self.conv4 = nn.Sequential(encoder[17], self.relu, encoder[19], self.relu, encoder[21], self.relu)
for i in range(layers - 4):
conv = _EncoderBlock(ngf * 8, ngf * 8, ngf * 8, norm_layer, nonlinearity, use_bias)
setattr(self, 'down' + str(i), conv.model)
center = []
for i in range(7 - layers):
center += [
_InceptionBlock(ngf * 8, ngf * 8, norm_layer, nonlinearity, 7 - layers, drop_rate, use_bias)
]
center += [_DecoderUpBlock(ngf * 8, ngf * 8, ngf * 4, norm_layer, nonlinearity, use_bias)]
for i in range(layers - 4):
upconv = _DecoderUpBlock(ngf * (8 + 4), ngf * 8, ngf * 4, norm_layer, nonlinearity, use_bias)
setattr(self, 'up' + str(i), upconv.model)
self.deconv4 = _DecoderUpBlock(ngf * (4 + 4), ngf * 8, ngf * 2, norm_layer, nonlinearity, use_bias)
self.deconv3 = _DecoderUpBlock(ngf * (2 + 2) + output_nc, ngf * 4, ngf, norm_layer, nonlinearity, use_bias)
self.deconv2 = _DecoderUpBlock(ngf * (1 + 1) + output_nc, ngf * 2, int(ngf / 2), norm_layer, nonlinearity, use_bias)
self.deconv1 = _OutputBlock(int(ngf / 2) + output_nc, output_nc, kernel_size=7, use_bias=use_bias)
self.output4 = _OutputBlock(ngf * (4 + 4), output_nc, kernel_size=3, use_bias=use_bias)
self.output3 = _OutputBlock(ngf * (2 + 2) + output_nc, output_nc, kernel_size=3, use_bias=use_bias)
self.output2 = _OutputBlock(ngf * (1 + 1) + output_nc, output_nc, kernel_size=3, use_bias=use_bias)
self.center = nn.Sequential(*center)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
def forward(self, input):
conv1 = self.pool(self.conv1(input))
conv2 = self.pool(self.conv2(conv1))
conv3 = self.pool(self.conv3(conv2))
center_in = self.pool(self.conv4(conv3))
middle = [center_in]
for i in range(self.layers - 4):
model = getattr(self, 'down' + str(i))
center_in = self.pool(model(center_in))
middle.append(center_in)
result = [center_in]
center_out = self.center(center_in)
for i in range(self.layers - 4):
model = getattr(self, 'up' + str(i))
center_out = model(torch.cat([center_out, middle[self.layers - 4 - i]], 1))
deconv4 = self.deconv4.forward(torch.cat([center_out, conv3 * 0.1], 1))
output4 = self.output4.forward(torch.cat([center_out, conv3 * 0.1], 1))
result.append(output4)
deconv3 = self.deconv3.forward(torch.cat([deconv4, conv2 * 0.05, self.upsample(output4)], 1))
output3 = self.output3.forward(torch.cat([deconv4, conv2 * 0.05, self.upsample(output4)], 1))
result.append(output3)
deconv2 = self.deconv2.forward(torch.cat([deconv3, conv1 * 0.01, self.upsample(output3)], 1))
output2 = self.output2.forward(torch.cat([deconv3, conv1 * 0.01, self.upsample(output3)], 1))
result.append(output2)
output1 = self.deconv1.forward(torch.cat([deconv2, self.upsample(output2)], 1))
result.append(output1)
return result
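# _UNetGenerator: U-Net style encoder/decoder with multi-scale side outputs. Encoder skip
# connections are scaled by `weight` (and by 0.5x / 0.1x of it at the finer levels)
# before concatenation, and forward() returns [bottleneck_feature, output4, output3,
# output2, output1] ordered from coarse to fine.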
class _UNetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, layers=4, norm='batch', activation='PReLU', drop_rate=0, add_noise=False, gpu_ids=[],
weight=0.1):
super(_UNetGenerator, self).__init__()
self.gpu_ids = gpu_ids
self.layers = layers
self.weight = weight
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
# encoder part
self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
self.conv1 = nn.Sequential(
nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nonlinearity
)
self.conv2 = _EncoderBlock(ngf, ngf*2, ngf*2, norm_layer, nonlinearity, use_bias)
self.conv3 = _EncoderBlock(ngf*2, ngf*4, ngf*4, norm_layer, nonlinearity, use_bias)
self.conv4 = _EncoderBlock(ngf*4, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias)
for i in range(layers-4):
conv = _EncoderBlock(ngf*8, ngf*8, ngf*8, norm_layer, nonlinearity, use_bias)
setattr(self, 'down'+str(i), conv.model)
center=[]
for i in range(7-layers):
center +=[
_InceptionBlock(ngf*8, ngf*8, norm_layer, nonlinearity, 7-layers, drop_rate, use_bias)
]
center += [
_DecoderUpBlock(ngf*8, ngf*8, ngf*4, norm_layer, nonlinearity, use_bias)
]
if add_noise:
center += [GaussianNoiseLayer()]
self.center = nn.Sequential(*center)
for i in range(layers-4):
upconv = _DecoderUpBlock(ngf*(8+4), ngf*8, ngf*4, norm_layer, nonlinearity, use_bias)
setattr(self, 'up' + str(i), upconv.model)
self.deconv4 = _DecoderUpBlock(ngf*(4+4), ngf*8, ngf*2, norm_layer, nonlinearity, use_bias)
self.deconv3 = _DecoderUpBlock(ngf*(2+2)+output_nc, ngf*4, ngf, norm_layer, nonlinearity, use_bias)
self.deconv2 = _DecoderUpBlock(ngf*(1+1)+output_nc, ngf*2, int(ngf/2), norm_layer, nonlinearity, use_bias)
self.output4 = _OutputBlock(ngf*(4+4), output_nc, 3, use_bias)
self.output3 = _OutputBlock(ngf*(2+2)+output_nc, output_nc, 3, use_bias)
self.output2 = _OutputBlock(ngf*(1+1)+output_nc, output_nc, 3, use_bias)
self.output1 = _OutputBlock(int(ngf/2)+output_nc, output_nc, 7, use_bias)
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
def forward(self, input):
conv1 = self.pool(self.conv1(input))
conv2 = self.pool(self.conv2.forward(conv1))
conv3 = self.pool(self.conv3.forward(conv2))
center_in = self.pool(self.conv4.forward(conv3))
middle = [center_in]
for i in range(self.layers-4):
model = getattr(self, 'down'+str(i))
center_in = self.pool(model.forward(center_in))
middle.append(center_in)
center_out = self.center.forward(center_in)
result = [center_in]
for i in range(self.layers-4):
model = getattr(self, 'up'+str(i))
center_out = model.forward(torch.cat([center_out, middle[self.layers-5-i]], 1))
deconv4 = self.deconv4.forward(torch.cat([center_out, conv3 * self.weight], 1))
output4 = self.output4.forward(torch.cat([center_out, conv3 * self.weight], 1))
result.append(output4)
deconv3 = self.deconv3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1))
output3 = self.output3.forward(torch.cat([deconv4, conv2 * self.weight * 0.5, self.upsample(output4)], 1))
result.append(output3)
deconv2 = self.deconv2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1))
output2 = self.output2.forward(torch.cat([deconv3, conv1 * self.weight * 0.1, self.upsample(output3)], 1))
result.append(output2)
output1 = self.output1.forward(torch.cat([deconv2, self.upsample(output2)], 1))
result.append(output1)
return result
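# _MultiscaleDiscriminator: num_D copies of the patch discriminator defined below, each
# applied to a progressively 2x average-pooled version of the input; forward() returns
# a list with one prediction map per scale.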
class _MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, num_D=1, norm='batch', activation='PReLU', gpu_ids=[]):
super(_MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.gpu_ids = gpu_ids
for i in range(num_D):
netD = _Discriminator(input_nc, ndf, n_layers, norm, activation, gpu_ids)
setattr(self, 'scale'+str(i), netD)
self.downsample = nn.AvgPool2d(kernel_size=3, stride=2, padding=[1, 1], count_include_pad=False)
def forward(self, input):
result = []
for i in range(self.num_D):
netD = getattr(self, 'scale'+str(i))
output = netD.forward(input)
result.append(output)
if i != (self.num_D-1):
input = self.downsample(input)
return result
class _Discriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm='batch', activation='PReLU', gpu_ids=[]):
super(_Discriminator, self).__init__()
self.gpu_ids = gpu_ids
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [
nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=1, bias=use_bias),
nonlinearity,
]
nf_mult=1
for i in range(1, n_layers):
nf_mult_prev = nf_mult
nf_mult = min(2**i, 8)
model += [
nn.Conv2d(ndf*nf_mult_prev, ndf*nf_mult, kernel_size=4, stride=2, padding=1, bias=use_bias),
norm_layer(ndf*nf_mult),
nonlinearity,
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
model += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=4, stride=1, padding=1, bias=use_bias),
            norm_layer(ndf * nf_mult),
nonlinearity,
nn.Conv2d(ndf*nf_mult, 1, kernel_size=4, stride=1, padding=1)
]
self.model = nn.Sequential(*model)
def forward(self, input):
return self.model(input)
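# _FeatureDiscriminator: an MLP over flattened bottleneck features. Note that forward()
# hard-codes input.view(-1, 512 * 40 * 12), so it effectively assumes input_nc == 512
# and a 40x12 spatial feature map; other shapes would require changing both the first
# Linear layer and the view() call.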
class _FeatureDiscriminator(nn.Module):
def __init__(self, input_nc, n_layers=2, norm='batch', activation='PReLU', gpu_ids=[]):
super(_FeatureDiscriminator, self).__init__()
self.gpu_ids = gpu_ids
norm_layer = get_norm_layer(norm_type=norm)
nonlinearity = get_nonlinearity_layer(activation_type=activation)
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [
nn.Linear(input_nc * 40 * 12, input_nc),
nonlinearity,
]
for i in range(1, n_layers):
model +=[
nn.Linear(input_nc, input_nc),
nonlinearity
]
model +=[nn.Linear(input_nc, 1)]
self.model = nn.Sequential(*model)
def forward(self, input):
result = []
input = input.view(-1, 512 * 40 * 12)
output = self.model(input)
result.append(output)
return result | 24,337 | 37.028125 | 140 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/TaskModel.py | import torch
from torch.autograd import Variable
import util.task as task
from .base_model import BaseModel
from . import network
class TNetModel(BaseModel):
def name(self):
return 'TNet Model'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.loss_names = ['lab_s', 'lab_t', 'lab_smooth']
self.visual_names = ['img_s', 'lab_s', 'lab_s_g', 'img_t', 'lab_t', 'lab_t_g']
self.model_names = ['img2task']
# define the task network
self.net_img2task = network.define_G(opt.image_nc, opt.label_nc, opt.ngf, opt.task_layers, opt.norm,
opt.activation, opt.task_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
if self.isTrain:
# define the loss function
self.l1loss = torch.nn.L1Loss()
self.l2loss = torch.nn.MSELoss()
self.optimizer_img2task = torch.optim.Adam(self.net_img2task.parameters(), lr=opt.lr_task, betas=(0.9, 0.999))
self.optimizers = []
self.schedulers = []
self.optimizers.append(self.optimizer_img2task)
for optimizer in self.optimizers:
self.schedulers.append(network.get_scheduler(optimizer, opt))
if not self.isTrain or opt.continue_train:
self.load_networks(opt.which_epoch)
def set_input(self, input):
self.input = input
self.img_source = input['img_source']
self.img_target = input['img_target']
if self.isTrain:
self.lab_source = input['lab_source']
self.lab_target = input['lab_target']
if len(self.gpu_ids) > 0:
            # 'async' is a reserved keyword from Python 3.7 and was renamed to
            # 'non_blocking' in PyTorch; use non_blocking for the asynchronous copy.
            self.img_source = self.img_source.cuda(self.gpu_ids[0], non_blocking=True)
            self.img_target = self.img_target.cuda(self.gpu_ids[0], non_blocking=True)
            if self.isTrain:
                self.lab_source = self.lab_source.cuda(self.gpu_ids[0], non_blocking=True)
                self.lab_target = self.lab_target.cuda(self.gpu_ids[0], non_blocking=True)
def forward(self):
self.img_s = Variable(self.img_source)
self.img_t = Variable(self.img_target)
self.lab_s = Variable(self.lab_source)
self.lab_t = Variable(self.lab_target)
def foreward_G_basic(self, net_G, img_s, img_t):
img = torch.cat([img_s, img_t], 0)
fake = net_G(img)
size = len(fake)
f_s, f_t = fake[0].chunk(2)
img_fake = fake[1:]
img_s_fake = []
img_t_fake = []
for img_fake_i in img_fake:
img_s, img_t = img_fake_i.chunk(2)
img_s_fake.append(img_s)
img_t_fake.append(img_t)
return img_s_fake, img_t_fake, f_s, f_t, size
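    # backward_task below supervises the multi-scale source predictions with an L1 loss
    # against a scale pyramid of the ground-truth labels, and regularises the unlabelled
    # target predictions with an image-guided smoothness term.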
def backward_task(self):
self.lab_s_g, self.lab_t_g, self.lab_f_s, self.lab_f_t, size = \
self.foreward_G_basic(self.net_img2task, self.img_s, self.img_t)
lab_real = task.scale_pyramid(self.lab_s, size-1)
task_loss = 0
for (lab_fake_i, lab_real_i) in zip(self.lab_s_g, lab_real):
task_loss += self.l1loss(lab_fake_i, lab_real_i)
self.loss_lab_s = task_loss * self.opt.lambda_rec_lab
img_real = task.scale_pyramid(self.img_t, size - 1)
self.loss_lab_smooth = task.get_smooth_weight(self.lab_t_g, img_real, size - 1) * self.opt.lambda_smooth
total_loss = self.loss_lab_s + self.loss_lab_smooth
total_loss.backward()
def optimize_parameters(self, epoch_iter):
self.forward()
# task network
self.optimizer_img2task.zero_grad()
self.backward_task()
self.optimizer_img2task.step()
def validation_target(self):
lab_real = task.scale_pyramid(self.lab_t, len(self.lab_t_g))
task_loss = 0
for (lab_fake_i, lab_real_i) in zip(self.lab_t_g, lab_real):
task_loss += self.l1loss(lab_fake_i, lab_real_i)
self.loss_lab_t = task_loss * self.opt.lambda_rec_lab | 4,032 | 33.767241 | 122 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/models.py |
def create_model(opt):
print(opt.model)
if opt.model == 'wsupervised':
from .T2model import T2NetModel
model = T2NetModel()
elif opt.model == 'supervised':
from .TaskModel import TNetModel
model = TNetModel()
elif opt.model == 'test':
from .test_model import TestModel
model = TestModel()
else:
raise ValueError("Model [%s] not recognized." % opt.model)
model.initialize(opt)
print("model [%s] was created." % (model.name()))
return model | 527 | 30.058824 | 66 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/T2model.py | import torch
from torch.autograd import Variable
import itertools
from util.image_pool import ImagePool
import util.task as task
from .base_model import BaseModel
from . import network
class T2NetModel(BaseModel):
def name(self):
return 'T2Net model'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.loss_names = ['img_rec', 'img_G', 'img_D', 'lab_s', 'lab_t', 'f_G', 'f_D', 'lab_smooth']
self.visual_names = ['img_s', 'img_t', 'lab_s', 'lab_t', 'img_s2t', 'img_t2t', 'lab_s_g', 'lab_t_g']
if self.isTrain:
self.model_names = ['img2task', 's2t', 'img_D', 'f_D']
else:
self.model_names = ['img2task', 's2t']
# define the transform network
self.net_s2t = network.define_G(opt.image_nc, opt.image_nc, opt.ngf, opt.transform_layers, opt.norm,
opt.activation, opt.trans_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
# define the task network
self.net_img2task = network.define_G(opt.image_nc, opt.label_nc, opt.ngf, opt.task_layers, opt.norm,
opt.activation, opt.task_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
# define the discriminator
if self.isTrain:
self.net_img_D = network.define_D(opt.image_nc, opt.ndf, opt.image_D_layers, opt.num_D, opt.norm,
opt.activation, opt.init_type, opt.gpu_ids)
self.net_f_D = network.define_featureD(opt.image_feature, opt.feature_D_layers, opt.norm,
opt.activation, opt.init_type, opt.gpu_ids)
if self.isTrain:
self.fake_img_pool = ImagePool(opt.pool_size)
# define loss functions
self.l1loss = torch.nn.L1Loss()
self.nonlinearity = torch.nn.ReLU()
# initialize optimizers
self.optimizer_T2Net = torch.optim.Adam([{'params': filter(lambda p: p.requires_grad, self.net_s2t.parameters())},
{'params': filter(lambda p: p.requires_grad, self.net_img2task.parameters()),
'lr': opt.lr_task, 'betas': (0.95, 0.999)}],
lr=opt.lr_trans, betas=(0.5, 0.9))
self.optimizer_D = torch.optim.Adam(itertools.chain(filter(lambda p: p.requires_grad, self.net_img_D.parameters()),
filter(lambda p: p.requires_grad, self.net_f_D.parameters())),
lr=opt.lr_trans, betas=(0.5, 0.9))
self.optimizers = []
self.schedulers = []
self.optimizers.append(self.optimizer_T2Net)
self.optimizers.append(self.optimizer_D)
for optimizer in self.optimizers:
self.schedulers.append(network.get_scheduler(optimizer, opt))
if not self.isTrain or opt.continue_train:
self.load_networks(opt.which_epoch)
def set_input(self, input):
self.input = input
self.img_source = input['img_source']
self.img_target = input['img_target']
if self.isTrain:
self.lab_source = input['lab_source']
self.lab_target = input['lab_target']
if len(self.gpu_ids) > 0:
            # 'async' is a reserved keyword from Python 3.7 and was renamed to
            # 'non_blocking' in PyTorch; use non_blocking for the asynchronous copy.
            self.img_source = self.img_source.cuda(self.gpu_ids[0], non_blocking=True)
            self.img_target = self.img_target.cuda(self.gpu_ids[0], non_blocking=True)
            if self.isTrain:
                self.lab_source = self.lab_source.cuda(self.gpu_ids[0], non_blocking=True)
                self.lab_target = self.lab_target.cuda(self.gpu_ids[0], non_blocking=True)
def forward(self):
self.img_s = Variable(self.img_source)
self.img_t = Variable(self.img_target)
self.lab_s = Variable(self.lab_source)
self.lab_t = Variable(self.lab_target)
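    # backward_D_basic implements the least-squares GAN discriminator objective,
    # 0.5 * ((D(real) - 1)^2 + D(fake)^2), summed over the scale-pyramid inputs and over
    # each per-scale output of the multiscale discriminator.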
def backward_D_basic(self, netD, real, fake):
D_loss = 0
for (real_i, fake_i) in zip(real, fake):
# Real
D_real = netD(real_i.detach())
# fake
D_fake = netD(fake_i.detach())
for (D_real_i, D_fake_i) in zip(D_real, D_fake):
D_loss += (torch.mean((D_real_i-1.0)**2) + torch.mean((D_fake_i -0.0)**2))*0.5
D_loss.backward()
return D_loss
def backward_D_image(self):
network._freeze(self.net_s2t, self.net_img2task, self.net_f_D)
network._unfreeze(self.net_img_D)
size = len(self.img_s2t)
fake = []
for i in range(size):
fake.append(self.fake_img_pool.query(self.img_s2t[i]))
real = task.scale_pyramid(self.img_t, size)
self.loss_img_D = self.backward_D_basic(self.net_img_D, real, fake)
def backward_D_feature(self):
network._freeze(self.net_s2t, self.net_img2task, self.net_img_D)
network._unfreeze(self.net_f_D)
self.loss_f_D = self.backward_D_basic(self.net_f_D, [self.lab_f_t], [self.lab_f_s])
def foreward_G_basic(self, net_G, img_s, img_t):
img = torch.cat([img_s, img_t], 0)
fake = net_G(img)
size = len(fake)
f_s, f_t = fake[0].chunk(2)
img_fake = fake[1:]
img_s_fake = []
img_t_fake = []
for img_fake_i in img_fake:
img_s, img_t = img_fake_i.chunk(2)
img_s_fake.append(img_s)
img_t_fake.append(img_t)
return img_s_fake, img_t_fake, f_s, f_t, size
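    # The generator update is split into three backward passes below: synthesis-to-real
    # translation (image GAN + reconstruction losses), translated-to-depth (feature GAN +
    # supervised task loss on translated source images), and real-to-depth (smoothness
    # regularisation on target predictions); gradients accumulate before the single
    # optimizer_T2Net.step() in optimize_parameters.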
def backward_synthesis2real(self):
# image to image transform
network._freeze(self.net_img2task, self.net_img_D, self.net_f_D)
network._unfreeze(self.net_s2t)
self.img_s2t, self.img_t2t, self.img_f_s, self.img_f_t, size = \
self.foreward_G_basic(self.net_s2t, self.img_s, self.img_t)
# image GAN loss and reconstruction loss
img_real = task.scale_pyramid(self.img_t, size - 1)
G_loss = 0
rec_loss = 0
for i in range(size - 1):
rec_loss += self.l1loss(self.img_t2t[i], img_real[i])
D_fake = self.net_img_D(self.img_s2t[i])
for D_fake_i in D_fake:
G_loss += torch.mean((D_fake_i - 1.0) ** 2)
self.loss_img_G = G_loss * self.opt.lambda_gan_img
self.loss_img_rec = rec_loss * self.opt.lambda_rec_img
total_loss = self.loss_img_G + self.loss_img_rec
total_loss.backward(retain_graph=True)
def backward_translated2depth(self):
# task network
network._freeze(self.net_img_D, self.net_f_D)
network._unfreeze(self.net_s2t, self.net_img2task)
fake = self.net_img2task.forward(self.img_s2t[-1])
size=len(fake)
self.lab_f_s = fake[0]
self.lab_s_g = fake[1:]
        # feature GAN loss
D_fake = self.net_f_D(self.lab_f_s)
G_loss = 0
for D_fake_i in D_fake:
G_loss += torch.mean((D_fake_i - 1.0) ** 2)
self.loss_f_G = G_loss * self.opt.lambda_gan_feature
# task loss
lab_real = task.scale_pyramid(self.lab_s, size-1)
task_loss = 0
for (lab_fake_i, lab_real_i) in zip(self.lab_s_g, lab_real):
task_loss += self.l1loss(lab_fake_i, lab_real_i)
self.loss_lab_s = task_loss * self.opt.lambda_rec_lab
total_loss = self.loss_f_G + self.loss_lab_s
total_loss.backward()
def backward_real2depth(self):
# image2depth
network._freeze(self.net_s2t, self.net_img_D, self.net_f_D)
network._unfreeze(self.net_img2task)
fake = self.net_img2task.forward(self.img_t)
size = len(fake)
        # bottleneck feature and multi-scale depth predictions for the real target image
self.lab_f_t = fake[0]
self.lab_t_g = fake[1:]
img_real = task.scale_pyramid(self.img_t, size - 1)
self.loss_lab_smooth = task.get_smooth_weight(self.lab_t_g, img_real, size-1) * self.opt.lambda_smooth
total_loss = self.loss_lab_smooth
total_loss.backward()
def optimize_parameters(self, epoch_iter):
self.forward()
# T2Net
self.optimizer_T2Net.zero_grad()
self.backward_synthesis2real()
self.backward_translated2depth()
self.backward_real2depth()
self.optimizer_T2Net.step()
# Discriminator
self.optimizer_D.zero_grad()
self.backward_D_feature()
self.backward_D_image()
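        # The discriminators are stepped only once every 5 iterations and, after each
        # step, the feature discriminator's weights are clipped to [-0.01, 0.01]
        # (WGAN-style weight clipping); the generators are updated every iteration.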
if epoch_iter % 5 == 0:
self.optimizer_D.step()
for p in self.net_f_D.parameters():
p.data.clamp_(-0.01,0.01)
def validation_target(self):
lab_real = task.scale_pyramid(self.lab_t, len(self.lab_t_g))
task_loss = 0
for (lab_fake_i, lab_real_i) in zip(self.lab_t_g, lab_real):
task_loss += task.rec_loss(lab_fake_i, lab_real_i)
self.loss_lab_t = task_loss * self.opt.lambda_rec_lab
| 9,119 | 37.808511 | 130 | py |
Synthetic2Realistic | Synthetic2Realistic-master/model/__init__.py | 0 | 0 | 0 | py |
|
Synthetic2Realistic | Synthetic2Realistic-master/model/test_model.py | import torch
from torch.autograd import Variable
from .base_model import BaseModel
from . import network
from util import util
from collections import OrderedDict
class TestModel(BaseModel):
def name(self):
return 'TestModel'
def initialize(self, opt):
assert (not opt.isTrain)
BaseModel.initialize(self, opt)
self.loss_names = []
self.visual_names =['img_s', 'img_t', 'img_s2t', 'lab_t_g']
self.model_names = ['img2task', 's2t']
#self.model_names = ['img2task']
# define the transform network
self.net_s2t = network.define_G(opt.image_nc, opt.image_nc, opt.ngf, opt.transform_layers, opt.norm,
opt.activation, opt.trans_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
# define the task network
self.net_img2task = network.define_G(opt.image_nc, opt.label_nc, opt.ngf, opt.task_layers, opt.norm,
opt.activation, opt.task_model_type, opt.init_type, opt.drop_rate,
False, opt.gpu_ids, opt.U_weight)
self.load_networks(opt.which_epoch)
def set_input(self, input):
self.input = input
self.img_source = input['img_source']
self.img_target = input['img_target']
if len(self.gpu_ids) > 0:
self.img_source = self.img_source.cuda()
self.img_target = self.img_target.cuda()
self.image_paths = input['img_target_paths']
def test(self):
self.img_s = Variable(self.img_source)
self.img_t = Variable(self.img_target)
with torch.no_grad():
self.img_s2t = self.net_s2t.forward(self.img_s)
self.lab_t_g = self.net_img2task.forward(self.img_t)
# save_results
def save_results(self, visualizer, wed_page):
img_source_paths = self.input['img_source_paths']
img_target_paths = self.input['img_target_paths']
for i in range(self.img_s.size(0)):
img_source = util.tensor2im(self.img_s.data[i])
img_target = util.tensor2im(self.img_t.data[i])
img_source2target = util.tensor2im(self.img_s2t[-1].data[i])
lab_fake_target = util.tensor2im(self.lab_t_g[-1].data[i])
visuals = OrderedDict([('img_s', img_source), ('img_s2t', img_source2target)])
print('process image ......%s' % img_source_paths[0])
visualizer.save_images(wed_page, visuals, img_source_paths)
img_source_paths.pop(0)
visuals = OrderedDict([('img_t', img_target), ('lab_t_g', lab_fake_target)])
print('process image ......%s' % img_target_paths[0])
visualizer.save_images(wed_page, visuals, img_target_paths)
img_target_paths.pop(0) | 2,883 | 39.619718 | 111 | py |
Synthetic2Realistic | Synthetic2Realistic-master/dataloader/data_loader.py | import random
from PIL import Image
import torchvision.transforms as transforms
import torch.utils.data as data
from .image_folder import make_dataset
import torchvision.transforms.functional as F
class CreateDataset(data.Dataset):
def initialize(self, opt):
self.opt = opt
self.img_source_paths, self.img_source_size = make_dataset(opt.img_source_file)
self.img_target_paths, self.img_target_size = make_dataset(opt.img_target_file)
if self.opt.isTrain:
self.lab_source_paths, self.lab_source_size = make_dataset(opt.lab_source_file)
# for visual results, not for training
self.lab_target_paths, self.lab_target_size = make_dataset(opt.lab_target_file)
self.transform_augment = get_transform(opt, True)
self.transform_no_augment = get_transform(opt, False)
def __getitem__(self, item):
index = random.randint(0, self.img_target_size - 1)
img_source_path = self.img_source_paths[item % self.img_source_size]
if self.opt.dataset_mode == 'paired':
img_target_path = self.img_target_paths[item % self.img_target_size]
elif self.opt.dataset_mode == 'unpaired':
img_target_path = self.img_target_paths[index]
else:
raise ValueError('Data mode [%s] is not recognized' % self.opt.dataset_mode)
img_source = Image.open(img_source_path).convert('RGB')
img_target = Image.open(img_target_path).convert('RGB')
img_source = img_source.resize([self.opt.loadSize[0], self.opt.loadSize[1]], Image.BICUBIC)
img_target = img_target.resize([self.opt.loadSize[0], self.opt.loadSize[1]], Image.BICUBIC)
if self.opt.isTrain:
lab_source_path = self.lab_source_paths[item % self.lab_source_size]
if self.opt.dataset_mode == 'paired':
lab_target_path = self.lab_target_paths[item % self.img_target_size]
elif self.opt.dataset_mode == 'unpaired':
lab_target_path = self.lab_target_paths[index]
else:
raise ValueError('Data mode [%s] is not recognized' % self.opt.dataset_mode)
lab_source = Image.open(lab_source_path)#.convert('RGB')
lab_target = Image.open(lab_target_path)#.convert('RGB')
lab_source = lab_source.resize([self.opt.loadSize[0], self.opt.loadSize[1]], Image.BICUBIC)
lab_target = lab_target.resize([self.opt.loadSize[0], self.opt.loadSize[1]], Image.BICUBIC)
img_source, lab_source, scale = paired_transform(self.opt, img_source, lab_source)
img_source = self.transform_augment(img_source)
lab_source = self.transform_no_augment(lab_source)
img_target, lab_target, scale = paired_transform(self.opt, img_target, lab_target)
img_target = self.transform_no_augment(img_target)
lab_target = self.transform_no_augment(lab_target)
return {'img_source': img_source, 'img_target': img_target,
'lab_source': lab_source, 'lab_target': lab_target,
'img_source_paths': img_source_path, 'img_target_paths': img_target_path,
'lab_source_paths': lab_source_path, 'lab_target_paths': lab_target_path
}
else:
img_source = self.transform_augment(img_source)
img_target = self.transform_no_augment(img_target)
return {'img_source': img_source, 'img_target': img_target,
'img_source_paths': img_source_path, 'img_target_paths': img_target_path,
}
def __len__(self):
return max(self.img_source_size, self.img_target_size)
def name(self):
return 'T^2Dataset'
def dataloader(opt):
datasets = CreateDataset()
datasets.initialize(opt)
dataset = data.DataLoader(datasets, batch_size=opt.batchSize, shuffle=opt.shuffle, num_workers=int(opt.nThreads))
return dataset
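# Illustrative usage sketch (hedged): `opt` is assumed to carry the attributes read by
# CreateDataset and get_transform (img_source_file, img_target_file, lab_*_file,
# loadSize, isTrain, dataset_mode, batchSize, shuffle, nThreads, flip, rotation).
#
#   train_loader = dataloader(opt)
#   for batch in train_loader:
#       img_s, img_t = batch['img_source'], batch['img_target']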
def paired_transform(opt, image, depth):
scale_rate = 1.0
if opt.flip:
n_flip = random.random()
if n_flip > 0.5:
image = F.hflip(image)
depth = F.hflip(depth)
if opt.rotation:
n_rotation = random.random()
if n_rotation > 0.5:
degree = random.randrange(-500, 500)/100
image = F.rotate(image, degree, Image.BICUBIC)
depth = F.rotate(depth, degree, Image.BILINEAR)
return image, depth, scale_rate
def get_transform(opt, augment):
transforms_list = []
if augment:
if opt.isTrain:
            # With all parameters set to 0.0 this ColorJitter is effectively a no-op;
            # increase them to enable photometric augmentation.
            transforms_list.append(transforms.ColorJitter(brightness=0.0, contrast=0.0, saturation=0.0, hue=0.0))
transforms_list += [
transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
return transforms.Compose(transforms_list)
| 4,873 | 41.017241 | 117 | py |
Synthetic2Realistic | Synthetic2Realistic-master/dataloader/image_folder.py | import os
import os.path
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(path_files):
if path_files.find('.txt') != -1:
paths, size = make_dataset_txt(path_files)
else:
paths, size = make_dataset_dir(path_files)
return paths, size
def make_dataset_txt(path_files):
# reading txt file
image_paths = []
with open(path_files) as f:
paths = f.readlines()
for path in paths:
path = path.strip()
image_paths.append(path)
return image_paths, len(image_paths)
def make_dataset_dir(dir):
image_paths = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in os.walk(dir):
for fname in sorted(fnames):
if is_image_file(fname):
path = os.path.join(root, fname)
image_paths.append(path)
return image_paths, len(image_paths) | 1,085 | 22.106383 | 76 | py |
Synthetic2Realistic | Synthetic2Realistic-master/dataloader/__init__.py | 0 | 0 | 0 | py |