repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
YouTokenToMe | YouTokenToMe-master/tests/unit_tests/utils_for_testing.py | import random
import shutil
from pathlib import Path
from subprocess import run
BASE_MODEL_FILE = "artifacts/base_model.yttm"
RENAME_ID_MODEL_FILE = "artifacts/rename_model.yttm"
TRAIN_FILE = "artifacts/random_train_text.txt"
TEST_FILE = "artifacts/random_test_text.txt"
BOS_ID = 2
EOS_ID = 3
artifacts_generated = False
def generate_artifacts():
global artifacts_generated
if not artifacts_generated:
shutil.rmtree("artifacts", ignore_errors=True)
Path("artifacts").mkdir()
else:
return
random.seed(19)
artifacts_generated = True
n_lines = 10000
n_characters = 100
for file_name, alphabet in zip([TRAIN_FILE, TEST_FILE], ["abcd ", "abcde "]):
with open(file_name, "w") as fout:
for _ in range(n_lines):
random_line = "".join(
[random.choice(alphabet) for _ in range(n_characters)]
)
print(random_line, file=fout)
cmd_args = [
"yttm",
"bpe",
f"--data={TRAIN_FILE}",
f"--model={BASE_MODEL_FILE}",
"--vocab_size=16000",
"--coverage=0.999",
f"--bos_id={BOS_ID}",
f"--eos_id={EOS_ID}",
]
run(cmd_args, check=True)
cmd_args = [
"yttm",
"bpe",
f"--data={TRAIN_FILE}",
f"--model={RENAME_ID_MODEL_FILE}",
"--vocab_size=16000",
"--coverage=0.999",
"--bos_id=29",
"--eos_id=1148",
"--unk_id=2922",
]
run(cmd_args, check=True)
def file_starts_with(file_name, pattern):
with open(file_name, "r") as fin:
first_line = fin.readline()
res = first_line.startswith(pattern)
if not res:
print("first_line: ", first_line)
assert res
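# --- Hedged usage sketch (illustrative; not part of the original test utilities) ---
# A minimal example of how the helpers above could be combined in a test, assuming
# the `youtokentome` package is installed in the same environment as the `yttm` CLI.
def _example_smoke_test():
    import youtokentome as yttm
    generate_artifacts()
    bpe = yttm.BPE(BASE_MODEL_FILE)
    ids = bpe.encode(["abcd abcd"], output_type=yttm.OutputType.ID, bos=True, eos=True)
    # The base model was trained with --bos_id=2 and --eos_id=3 above.
    assert ids[0][0] == BOS_ID and ids[0][-1] == EOS_ID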
| 1,777 | 24.042254 | 81 | py |
YouTokenToMe | YouTokenToMe-master/youtokentome/yttm_cli.py | import _youtokentome_cython as yttmc
import click
@click.group()
def main():
pass
@click.command()
@click.option(
"--data",
type=click.Path(exists=True),
required=True,
help="Training data file path.",
)
@click.option(
"--model", type=click.Path(), required=True, help="Output model file path."
)
@click.option(
"--vocab_size",
type=click.INT,
required=True,
help="Number of tokens in the final vocabulary.",
)
@click.option(
"--coverage",
type=click.FLOAT,
help="Percentage of characters covered by the model.",
default=1.0,
show_default=True,
)
@click.option(
"--n_threads",
type=click.INT,
help="Number of threads.",
default=-1,
show_default=True,
)
@click.option(
"--pad_id", type=click.INT, help="Padding token id.", default=0, show_default=True
)
@click.option(
"--unk_id", type=click.INT, help="Unknown token id.", default=1, show_default=True
)
@click.option(
"--bos_id",
type=click.INT,
help="Begin of sentence token id.",
default=2,
show_default=True,
)
@click.option(
"--eos_id",
type=click.INT,
help="End of sentence token id.",
default=3,
show_default=True,
)
def bpe(data, model, vocab_size, coverage, n_threads, pad_id, unk_id, bos_id, eos_id):
"""Train BPE model."""
yttmc.BPE.train(
data=data,
model=model,
vocab_size=vocab_size,
coverage=coverage,
n_threads=n_threads,
pad_id=pad_id,
unk_id=unk_id,
bos_id=bos_id,
eos_id=eos_id,
)
@click.command()
@click.option(
"--model",
type=click.Path(exists=True),
required=True,
help="Path to file with learned model.",
)
@click.option(
"--output_type",
type=click.Choice(["id", "subword"]),
required=True,
help="'id' or 'subword'.",
)
@click.option(
"--n_threads",
type=click.INT,
help="Number of threads.",
default=-1,
show_default=True,
)
@click.option("--bos", is_flag=True, help="Add tab begin of sentence.")
@click.option("--eos", is_flag=True, help="Add tab end of sentence.")
@click.option("--reverse", is_flag=True, help="Reverse output sequence of tokens.")
@click.option(
"--stream", is_flag=True, help="Process each line before reading the next one."
)
@click.option(
"--dropout_prob",
type=click.FLOAT,
default=0,
show_default=True,
help="BPE-dropout probability (the probability of a merge being dropped)",
)
def encode(model, output_type, n_threads, bos, eos, reverse, stream, dropout_prob):
"""Encode text to ids or subwords."""
if n_threads < -1 or n_threads == 0:
raise ValueError(
'Invalid value for "--n_threads": must be -1 or positive integer, not "%d"'
% n_threads
)
bpe = yttmc.BPE(model, n_threads)
bpe.encode_cli(output_type, stream, bos, eos, reverse, dropout_prob)
def validate_ignore_ids(ctx, param, value):
try:
if value is not None:
return [int(idx) for idx in value.split(",")]
else:
return None
except ValueError:
        raise click.BadParameter(
            "Bad format: expected list of comma-separated integers, but got {}".format(value)
        )
@click.command()
@click.option(
"--model",
type=click.Path(exists=True),
required=True,
help="Path to file with learned model.",
)
@click.option(
"--ignore_ids",
type=click.STRING,
callback=validate_ignore_ids,
required=False,
help="List of indices to ignore for decoding. Example: --ignore_ids=1,2,3",
)
def decode(model, ignore_ids):
"""Decode ids to text."""
bpe = yttmc.BPE(model)
bpe.decode_cli(ignore_ids)
@click.command()
@click.option(
"--model",
type=click.Path(exists=True),
required=True,
help="Path to file with learned model.",
)
@click.option("--verbose", is_flag=True, help="Add merging rules.")
def vocab(model, verbose):
"""Print list of learned subwords."""
bpe = yttmc.BPE(model)
bpe.vocab_cli(verbose)
main.add_command(bpe)
main.add_command(encode)
main.add_command(decode)
main.add_command(vocab)
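# --- Hedged usage examples (illustrative only; file names are placeholders) ---
# Typical invocations of the commands registered above, assuming the package is
# installed and exposes the `yttm` console entry point:
#   yttm bpe --data train.txt --model model.yttm --vocab_size 16000 --coverage 0.999
#   yttm encode --model model.yttm --output_type id --bos --eos < input.txt > ids.txt
#   yttm decode --model model.yttm --ignore_ids=2,3 < ids.txt > decoded.txt
#   yttm vocab --model model.yttm --verbose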
| 4,130 | 23.3 | 87 | py |
YouTokenToMe | YouTokenToMe-master/youtokentome/__init__.py | from .youtokentome import BPE
from .youtokentome import OutputType
| 67 | 21.666667 | 36 | py |
YouTokenToMe | YouTokenToMe-master/youtokentome/youtokentome.py | import _youtokentome_cython
from enum import Enum
from typing import List, Union, Optional, Collection
class OutputType(Enum):
ID = 1
SUBWORD = 2
class BPE:
def __init__(self, model: str, n_threads: int = -1):
self.model = model
self.n_threads = n_threads
self.bpe_cython = _youtokentome_cython.BPE(
model_path=model, n_threads=n_threads
)
@staticmethod
def train(
data: str,
model: str,
vocab_size: int,
coverage: float = 1.0,
n_threads: int = -1,
pad_id: int = 0,
unk_id: int = 1,
bos_id: int = 2,
eos_id: int = 3,
) -> "BPE":
_youtokentome_cython.BPE.train(
data=data,
model=model,
vocab_size=vocab_size,
n_threads=n_threads,
coverage=coverage,
pad_id=pad_id,
unk_id=unk_id,
bos_id=bos_id,
eos_id=eos_id,
)
return BPE(model=model, n_threads=n_threads)
def encode(
self,
sentences: List[str],
output_type: OutputType = OutputType.ID,
bos: bool = False,
eos: bool = False,
reverse: bool = False,
dropout_prob: float = 0,
) -> Union[List[List[int]], List[List[str]]]:
if not isinstance(output_type, OutputType):
raise TypeError(
"parameter output_type must be youtokentome.OutputType, not %s}"
% str(type(output_type))
)
output_type_str = "id" if output_type == OutputType.ID else "subword"
return self.bpe_cython.encode(
sentences=sentences,
output_type=output_type_str,
bos=bos,
eos=eos,
reverse=reverse,
dropout_prob=dropout_prob,
)
def vocab_size(self) -> int:
return self.bpe_cython.vocab_size()
def vocab(self) -> List[str]:
return self.bpe_cython.vocab()
def subword_to_id(self, subword: str) -> int:
return self.bpe_cython.subword_to_id(subword)
def id_to_subword(self, id: int) -> str:
return self.bpe_cython.id_to_subword(id)
def decode(
self,
ids: Union[List[int], List[List[int]]],
ignore_ids: Optional[Collection[int]] = None,
) -> List[str]:
return self.bpe_cython.decode(ids, ignore_ids)
def __getstate__(self):
return {"model": self.model, "n_threads": self.n_threads}
def __setstate__(self, dict):
self.model = dict["model"]
self.n_threads = dict["n_threads"]
self.bpe_cython = _youtokentome_cython.BPE(
model_path=self.model, n_threads=self.n_threads
)
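# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Minimal end-to-end use of the wrapper above; "train.txt" and "model.yttm" are
# placeholder paths.
def _example_usage():
    bpe = BPE.train(data="train.txt", model="model.yttm", vocab_size=5000)
    ids = bpe.encode(["hello world"], output_type=OutputType.ID, bos=True, eos=True)
    subwords = bpe.encode(["hello world"], output_type=OutputType.SUBWORD)
    return bpe.decode(ids), subwords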
| 2,732 | 26.33 | 80 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/test.py | import numpy as np
import torch
import random
from learner import Learner
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Learning Debiased Representation via Disentangled Feature Augmentation (NeurIPS 21 Oral)')
# training
parser.add_argument("--batch_size", help="batch_size", default=256, type=int)
parser.add_argument("--lr",help='learning rate',default=1e-3, type=float)
parser.add_argument("--weight_decay",help='weight_decay',default=0.0, type=float)
parser.add_argument("--momentum",help='momentum',default=0.9, type=float)
parser.add_argument("--num_workers", help="workers number", default=16, type=int)
parser.add_argument("--exp", help='experiment name', default='Test', type=str)
parser.add_argument("--device", help="cuda or cpu", default='cuda', type=str)
parser.add_argument("--num_steps", help="# of iterations", default= 500 * 100, type=int)
parser.add_argument("--target_attr_idx", help="target_attr_idx", default= 0, type=int)
parser.add_argument("--bias_attr_idx", help="bias_attr_idx", default= 1, type=int)
parser.add_argument("--dataset", help="data to train, [cmnist, cifar10, bffhq]", default= 'cmnist', type=str)
parser.add_argument("--percent", help="percentage of conflict", default= "1pct", type=str)
parser.add_argument("--use_lr_decay", action='store_true', help="whether to use learning rate decay")
parser.add_argument("--lr_decay_step", help="learning rate decay steps", type=int, default=10000)
parser.add_argument("--q", help="GCE parameter q", type=float, default=0.7)
parser.add_argument("--lr_gamma", help="lr gamma", type=float, default=0.1)
parser.add_argument("--lambda_dis_align", help="lambda_dis in Eq.2", type=float, default=1.0)
parser.add_argument("--lambda_swap_align", help="lambda_swap_b in Eq.3", type=float, default=1.0)
parser.add_argument("--lambda_swap", help="lambda swap (lambda_swap in Eq.4)", type=float, default=1.0)
parser.add_argument("--ema_alpha", help="use weight mul", type=float, default=0.7)
parser.add_argument("--curr_step", help="curriculum steps", type=int, default= 0)
parser.add_argument("--use_type0", action='store_true', help="whether to use type 0 CIFAR10C")
parser.add_argument("--use_type1", action='store_true', help="whether to use type 1 CIFAR10C")
parser.add_argument("--use_resnet20", help="Use Resnet20", action="store_true") # ResNet 20 was used in Learning From Failure CifarC10 (We used ResNet18 in our paper)
parser.add_argument("--model", help="which network, [MLP, ResNet18, ResNet20, ResNet50]", default= 'MLP', type=str)
# logging
parser.add_argument("--log_dir", help='path for loading data', default='./log', type=str)
parser.add_argument("--data_dir", help='path for saving models & logs', default='dataset', type=str)
parser.add_argument("--valid_freq", help='frequency to evaluate on valid/test set', default=500, type=int)
parser.add_argument("--log_freq", help='frequency to log on tensorboard', default=500, type=int)
parser.add_argument("--save_freq", help='frequency to save model checkpoint', default=1000, type=int)
parser.add_argument("--wandb", action="store_true", help="whether to use wandb")
parser.add_argument("--tensorboard", action="store_true", help="whether to use tensorboard")
# experiment
parser.add_argument("--pretrained_path", help="path for pretrained model", type=str)
args = parser.parse_args()
# init learner
learner = Learner(args)
# actual training
print('Official Pytorch Code of "Learning Debiased Representation via Disentangled Feature Augmentation (NeurIPS 21 Oral)"')
print('Test starts ...')
learner.test_ours(args)
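# --- Hedged example invocation (illustrative; paths are placeholders) ---
# python test.py --dataset cmnist --percent 1pct --exp eval_run \
#     --pretrained_path log/cmnist/my_experiment/result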
| 3,784 | 63.152542 | 170 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/learner.py | from tqdm import tqdm
import wandb
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import os
import torch.optim as optim
from data.util import get_dataset, IdxDataset
from module.loss import GeneralizedCELoss
from module.util import get_model
from util import EMA
class Learner(object):
def __init__(self, args):
data2model = {'cmnist': "MLP",
'cifar10c': "ResNet18",
'bffhq': "ResNet18"}
data2batch_size = {'cmnist': 256,
'cifar10c': 256,
'bffhq': 64}
data2preprocess = {'cmnist': None,
'cifar10c': True,
'bffhq': True}
if args.wandb:
import wandb
wandb.init(project='Learning-Debiased-Disetangled')
wandb.run.name = args.exp
run_name = args.exp
if args.tensorboard:
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(f'result/summary/{run_name}')
self.model = data2model[args.dataset]
self.batch_size = data2batch_size[args.dataset]
print(f'model: {self.model} || dataset: {args.dataset}')
print(f'working with experiment: {args.exp}...')
        os.makedirs(os.path.join(args.log_dir, args.dataset, args.exp), exist_ok=True)
self.device = torch.device(args.device)
self.args = args
print(self.args)
# logging directories
self.log_dir = os.path.join(args.log_dir, args.dataset, args.exp)
self.summary_dir = os.path.join(args.log_dir, args.dataset, "summary", args.exp)
self.summary_gradient_dir = os.path.join(self.log_dir, "gradient")
self.result_dir = os.path.join(self.log_dir, "result")
os.makedirs(self.summary_dir, exist_ok=True)
os.makedirs(self.result_dir, exist_ok=True)
self.train_dataset = get_dataset(
args.dataset,
data_dir=args.data_dir,
dataset_split="train",
transform_split="train",
percent=args.percent,
use_preprocess=data2preprocess[args.dataset],
use_type0=args.use_type0,
use_type1=args.use_type1
)
self.valid_dataset = get_dataset(
args.dataset,
data_dir=args.data_dir,
dataset_split="valid",
transform_split="valid",
percent=args.percent,
use_preprocess=data2preprocess[args.dataset],
use_type0=args.use_type0,
use_type1=args.use_type1
)
self.test_dataset = get_dataset(
args.dataset,
data_dir=args.data_dir,
dataset_split="test",
transform_split="valid",
percent=args.percent,
use_preprocess=data2preprocess[args.dataset],
use_type0=args.use_type0,
use_type1=args.use_type1
)
train_target_attr = []
for data in self.train_dataset.data:
train_target_attr.append(int(data.split('_')[-2]))
train_target_attr = torch.LongTensor(train_target_attr)
attr_dims = []
attr_dims.append(torch.max(train_target_attr).item() + 1)
self.num_classes = attr_dims[0]
self.train_dataset = IdxDataset(self.train_dataset)
# make loader
self.train_loader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
drop_last=True
)
self.valid_loader = DataLoader(
self.valid_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
)
self.test_loader = DataLoader(
self.test_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True,
)
# define model and optimizer
self.model_b = get_model(self.model, attr_dims[0]).to(self.device)
self.model_d = get_model(self.model, attr_dims[0]).to(self.device)
self.optimizer_b = torch.optim.Adam(
self.model_b.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
self.optimizer_d = torch.optim.Adam(
self.model_d.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
# define loss
self.criterion = nn.CrossEntropyLoss(reduction='none')
self.bias_criterion = nn.CrossEntropyLoss(reduction='none')
print(f'self.criterion: {self.criterion}')
print(f'self.bias_criterion: {self.bias_criterion}')
self.sample_loss_ema_b = EMA(torch.LongTensor(train_target_attr), num_classes=self.num_classes, alpha=args.ema_alpha)
self.sample_loss_ema_d = EMA(torch.LongTensor(train_target_attr), num_classes=self.num_classes, alpha=args.ema_alpha)
print(f'alpha : {self.sample_loss_ema_d.alpha}')
self.best_valid_acc_b, self.best_test_acc_b = 0., 0.
self.best_valid_acc_d, self.best_test_acc_d = 0., 0.
print('finished model initialization....')
# evaluation code for vanilla
def evaluate(self, model, data_loader):
model.eval()
total_correct, total_num = 0, 0
for data, attr, index in tqdm(data_loader, leave=False):
label = attr[:, 0]
data = data.to(self.device)
label = label.to(self.device)
with torch.no_grad():
logit = model(data)
pred = logit.data.max(1, keepdim=True)[1].squeeze(1)
correct = (pred == label).long()
total_correct += correct.sum()
total_num += correct.shape[0]
accs = total_correct/float(total_num)
model.train()
return accs
# evaluation code for ours
def evaluate_ours(self,model_b, model_l, data_loader, model='label'):
model_b.eval()
model_l.eval()
total_correct, total_num = 0, 0
for data, attr, index in tqdm(data_loader, leave=False):
label = attr[:, 0]
# label = attr
data = data.to(self.device)
label = label.to(self.device)
with torch.no_grad():
if self.args.dataset == 'cmnist':
z_l = model_l.extract(data)
z_b = model_b.extract(data)
else:
z_l, z_b = [], []
                    hook_fn = model_l.avgpool.register_forward_hook(self.concat_dummy(z_l))
                    _ = model_l(data)
                    hook_fn.remove()
                    z_l = z_l[0]
                    hook_fn = model_b.avgpool.register_forward_hook(self.concat_dummy(z_b))
                    _ = model_b(data)
                    hook_fn.remove()
                    z_b = z_b[0]
z_origin = torch.cat((z_l, z_b), dim=1)
if model == 'bias':
pred_label = model_b.fc(z_origin)
else:
pred_label = model_l.fc(z_origin)
pred = pred_label.data.max(1, keepdim=True)[1].squeeze(1)
correct = (pred == label).long()
total_correct += correct.sum()
total_num += correct.shape[0]
accs = total_correct/float(total_num)
model_b.train()
model_l.train()
return accs
def save_vanilla(self, step, best=None):
if best:
model_path = os.path.join(self.result_dir, "best_model.th")
else:
model_path = os.path.join(self.result_dir, "model_{}.th".format(step))
state_dict = {
'steps': step,
'state_dict': self.model_b.state_dict(),
'optimizer': self.optimizer_b.state_dict(),
}
with open(model_path, "wb") as f:
torch.save(state_dict, f)
print(f'{step} model saved ...')
def save_ours(self, step, best=None):
if best:
model_path = os.path.join(self.result_dir, "best_model_l.th")
else:
model_path = os.path.join(self.result_dir, "model_l_{}.th".format(step))
state_dict = {
'steps': step,
'state_dict': self.model_l.state_dict(),
'optimizer': self.optimizer_l.state_dict(),
}
with open(model_path, "wb") as f:
torch.save(state_dict, f)
if best:
model_path = os.path.join(self.result_dir, "best_model_b.th")
else:
model_path = os.path.join(self.result_dir, "model_b_{}.th".format(step))
state_dict = {
'steps': step,
'state_dict': self.model_b.state_dict(),
'optimizer': self.optimizer_b.state_dict(),
}
with open(model_path, "wb") as f:
torch.save(state_dict, f)
print(f'{step} model saved ...')
def board_vanilla_loss(self, step, loss_b):
if self.args.wandb:
wandb.log({
"loss_b_train": loss_b,
}, step=step,)
if self.args.tensorboard:
self.writer.add_scalar(f"loss/loss_b_train", loss_b, step)
def board_ours_loss(self, step, loss_dis_conflict, loss_dis_align, loss_swap_conflict, loss_swap_align, lambda_swap):
if self.args.wandb:
wandb.log({
"loss_dis_conflict": loss_dis_conflict,
"loss_dis_align": loss_dis_align,
"loss_swap_conflict": loss_swap_conflict,
"loss_swap_align": loss_swap_align,
"loss": (loss_dis_conflict + loss_dis_align) + lambda_swap * (loss_swap_conflict + loss_swap_align)
}, step=step,)
if self.args.tensorboard:
self.writer.add_scalar(f"loss/loss_dis_conflict", loss_dis_conflict, step)
self.writer.add_scalar(f"loss/loss_dis_align", loss_dis_align, step)
self.writer.add_scalar(f"loss/loss_swap_conflict", loss_swap_conflict, step)
self.writer.add_scalar(f"loss/loss_swap_align", loss_swap_align, step)
self.writer.add_scalar(f"loss/loss", (loss_dis_conflict + loss_dis_align) + lambda_swap * (loss_swap_conflict + loss_swap_align), step)
def board_vanilla_acc(self, step, epoch, inference=None):
valid_accs_b = self.evaluate(self.model_b, self.valid_loader)
test_accs_b = self.evaluate(self.model_b, self.test_loader)
print(f'epoch: {epoch}')
if valid_accs_b >= self.best_valid_acc_b:
self.best_valid_acc_b = valid_accs_b
if test_accs_b >= self.best_test_acc_b:
self.best_test_acc_b = test_accs_b
self.save_vanilla(step, best=True)
if self.args.wandb:
wandb.log({
"acc_b_valid": valid_accs_b,
"acc_b_test": test_accs_b,
},
step=step,)
wandb.log({
"best_acc_b_valid": self.best_valid_acc_b,
"best_acc_b_test": self.best_test_acc_b,
},
step=step, )
print(f'valid_b: {valid_accs_b} || test_b: {test_accs_b}')
if self.args.tensorboard:
self.writer.add_scalar(f"acc/acc_b_valid", valid_accs_b, step)
self.writer.add_scalar(f"acc/acc_b_test", test_accs_b, step)
self.writer.add_scalar(f"acc/best_acc_b_valid", self.best_valid_acc_b, step)
self.writer.add_scalar(f"acc/best_acc_b_test", self.best_test_acc_b, step)
def board_ours_acc(self, step, inference=None):
# check label network
valid_accs_d = self.evaluate_ours(self.model_b, self.model_l, self.valid_loader, model='label')
test_accs_d = self.evaluate_ours(self.model_b, self.model_l, self.test_loader, model='label')
if inference:
print(f'test acc: {test_accs_d.item()}')
import sys
sys.exit(0)
if valid_accs_d >= self.best_valid_acc_d:
self.best_valid_acc_d = valid_accs_d
if test_accs_d >= self.best_test_acc_d:
self.best_test_acc_d = test_accs_d
self.save_ours(step, best=True)
if self.args.wandb:
wandb.log({
"acc_d_valid": valid_accs_d,
"acc_d_test": test_accs_d,
},
step=step, )
wandb.log({
"best_acc_d_valid": self.best_valid_acc_d,
"best_acc_d_test": self.best_test_acc_d,
},
step=step, )
if self.args.tensorboard:
self.writer.add_scalar(f"acc/acc_d_valid", valid_accs_d, step)
self.writer.add_scalar(f"acc/acc_d_test", test_accs_d, step)
self.writer.add_scalar(f"acc/best_acc_d_valid", self.best_valid_acc_d, step)
self.writer.add_scalar(f"acc/best_acc_d_test", self.best_test_acc_d, step)
print(f'valid_d: {valid_accs_d} || test_d: {test_accs_d} ')
def concat_dummy(self, z):
def hook(model, input, output):
z.append(output.squeeze())
return torch.cat((output, torch.zeros_like(output)), dim=1)
return hook
def train_vanilla(self, args):
# training vanilla ...
train_iter = iter(self.train_loader)
train_num = len(self.train_dataset.dataset)
epoch, cnt = 0, 0
for step in tqdm(range(args.num_steps)):
try:
index, data, attr, _ = next(train_iter)
except:
train_iter = iter(self.train_loader)
index, data, attr, _ = next(train_iter)
data = data.to(self.device)
attr = attr.to(self.device)
label = attr[:, args.target_attr_idx]
logit_b = self.model_b(data)
loss_b_update = self.criterion(logit_b, label)
loss = loss_b_update.mean()
self.optimizer_b.zero_grad()
loss.backward()
self.optimizer_b.step()
##################################################
#################### LOGGING #####################
##################################################
if step % args.save_freq == 0:
self.save_vanilla(step)
if step % args.log_freq == 0:
self.board_vanilla_loss(step, loss_b=loss)
if step % args.valid_freq == 0:
self.board_vanilla_acc(step, epoch)
cnt += len(index)
if cnt == train_num:
print(f'finished epoch: {epoch}')
epoch += 1
cnt = 0
def train_ours(self, args):
epoch, cnt = 0, 0
print('************** main training starts... ************** ')
train_num = len(self.train_dataset)
# self.model_l : model for predicting intrinsic attributes ((E_i,C_i) in the main paper)
# self.model_l.fc: fc layer for predicting intrinsic attributes (C_i in the main paper)
# self.model_b : model for predicting bias attributes ((E_b, C_b) in the main paper)
# self.model_b.fc: fc layer for predicting bias attributes (C_b in the main paper)
if args.dataset == 'cmnist':
self.model_l = get_model('mlp_DISENTANGLE', self.num_classes).to(self.device)
self.model_b = get_model('mlp_DISENTANGLE', self.num_classes).to(self.device)
else:
if self.args.use_resnet20: # Use this option only for comparing with LfF
self.model_l = get_model('ResNet20_OURS', self.num_classes).to(self.device)
self.model_b = get_model('ResNet20_OURS', self.num_classes).to(self.device)
print('our resnet20....')
else:
self.model_l = get_model('resnet_DISENTANGLE', self.num_classes).to(self.device)
self.model_b = get_model('resnet_DISENTANGLE', self.num_classes).to(self.device)
self.optimizer_l = torch.optim.Adam(
self.model_l.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
self.optimizer_b = torch.optim.Adam(
self.model_b.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
)
if args.use_lr_decay:
self.scheduler_b = optim.lr_scheduler.StepLR(self.optimizer_b, step_size=args.lr_decay_step, gamma=args.lr_gamma)
self.scheduler_l = optim.lr_scheduler.StepLR(self.optimizer_l, step_size=args.lr_decay_step, gamma=args.lr_gamma)
        self.bias_criterion = GeneralizedCELoss(q=args.q)
print(f'criterion: {self.criterion}')
print(f'bias criterion: {self.bias_criterion}')
train_iter = iter(self.train_loader)
for step in tqdm(range(args.num_steps)):
try:
index, data, attr, image_path = next(train_iter)
except:
train_iter = iter(self.train_loader)
index, data, attr, image_path = next(train_iter)
data = data.to(self.device)
attr = attr.to(self.device)
label = attr[:, args.target_attr_idx].to(self.device)
# Feature extraction
# Prediction by concatenating zero vectors (dummy vectors).
# We do not use the prediction here.
if args.dataset == 'cmnist':
z_l = self.model_l.extract(data)
z_b = self.model_b.extract(data)
else:
z_b = []
# Use this only for reproducing CIFARC10 of LfF
if self.args.use_resnet20:
hook_fn = self.model_b.layer3.register_forward_hook(self.concat_dummy(z_b))
_ = self.model_b(data)
hook_fn.remove()
z_b = z_b[0]
z_l = []
hook_fn = self.model_l.layer3.register_forward_hook(self.concat_dummy(z_l))
_ = self.model_l(data)
hook_fn.remove()
z_l = z_l[0]
else:
hook_fn = self.model_b.avgpool.register_forward_hook(self.concat_dummy(z_b))
_ = self.model_b(data)
hook_fn.remove()
z_b = z_b[0]
z_l = []
hook_fn = self.model_l.avgpool.register_forward_hook(self.concat_dummy(z_l))
_ = self.model_l(data)
hook_fn.remove()
z_l = z_l[0]
# z=[z_l, z_b]
# Gradients of z_b are not backpropagated to z_l (and vice versa) in order to guarantee disentanglement of representation.
z_conflict = torch.cat((z_l, z_b.detach()), dim=1)
z_align = torch.cat((z_l.detach(), z_b), dim=1)
# Prediction using z=[z_l, z_b]
pred_conflict = self.model_l.fc(z_conflict)
pred_align = self.model_b.fc(z_align)
loss_dis_conflict = self.criterion(pred_conflict, label).detach()
loss_dis_align = self.criterion(pred_align, label).detach()
# EMA sample loss
self.sample_loss_ema_d.update(loss_dis_conflict, index)
self.sample_loss_ema_b.update(loss_dis_align, index)
# class-wise normalize
loss_dis_conflict = self.sample_loss_ema_d.parameter[index].clone().detach()
loss_dis_align = self.sample_loss_ema_b.parameter[index].clone().detach()
loss_dis_conflict = loss_dis_conflict.to(self.device)
loss_dis_align = loss_dis_align.to(self.device)
for c in range(self.num_classes):
class_index = torch.where(label == c)[0].to(self.device)
max_loss_conflict = self.sample_loss_ema_d.max_loss(c)
max_loss_align = self.sample_loss_ema_b.max_loss(c)
loss_dis_conflict[class_index] /= max_loss_conflict
loss_dis_align[class_index] /= max_loss_align
loss_weight = loss_dis_align / (loss_dis_align + loss_dis_conflict + 1e-8) # Eq.1 (reweighting module) in the main paper
loss_dis_conflict = self.criterion(pred_conflict, label) * loss_weight.to(self.device) # Eq.2 W(z)CE(C_i(z),y)
loss_dis_align = self.bias_criterion(pred_align, label) # Eq.2 GCE(C_b(z),y)
# feature-level augmentation : augmentation after certain iteration (after representation is disentangled at a certain level)
if step > args.curr_step:
indices = np.random.permutation(z_b.size(0))
z_b_swap = z_b[indices] # z tilde
label_swap = label[indices] # y tilde
# Prediction using z_swap=[z_l, z_b tilde]
# Again, gradients of z_b tilde are not backpropagated to z_l (and vice versa) in order to guarantee disentanglement of representation.
z_mix_conflict = torch.cat((z_l, z_b_swap.detach()), dim=1)
z_mix_align = torch.cat((z_l.detach(), z_b_swap), dim=1)
# Prediction using z_swap
pred_mix_conflict = self.model_l.fc(z_mix_conflict)
pred_mix_align = self.model_b.fc(z_mix_align)
loss_swap_conflict = self.criterion(pred_mix_conflict, label) * loss_weight.to(self.device) # Eq.3 W(z)CE(C_i(z_swap),y)
loss_swap_align = self.bias_criterion(pred_mix_align, label_swap) # Eq.3 GCE(C_b(z_swap),y tilde)
lambda_swap = self.args.lambda_swap # Eq.3 lambda_swap_b
else:
# before feature-level augmentation
loss_swap_conflict = torch.tensor([0]).float()
loss_swap_align = torch.tensor([0]).float()
lambda_swap = 0
loss_dis = loss_dis_conflict.mean() + args.lambda_dis_align * loss_dis_align.mean() # Eq.2 L_dis
loss_swap = loss_swap_conflict.mean() + args.lambda_swap_align * loss_swap_align.mean() # Eq.3 L_swap
loss = loss_dis + lambda_swap * loss_swap # Eq.4 Total objective
self.optimizer_l.zero_grad()
self.optimizer_b.zero_grad()
loss.backward()
self.optimizer_l.step()
self.optimizer_b.step()
if step >= args.curr_step and args.use_lr_decay:
self.scheduler_b.step()
self.scheduler_l.step()
if args.use_lr_decay and step % args.lr_decay_step == 0:
print('******* learning rate decay .... ********')
print(f"self.optimizer_b lr: { self.optimizer_b.param_groups[-1]['lr']}")
print(f"self.optimizer_l lr: { self.optimizer_l.param_groups[-1]['lr']}")
if step % args.save_freq == 0:
self.save_ours(step)
if step % args.log_freq == 0:
bias_label = attr[:, 1]
align_flag = torch.where(label == bias_label)[0]
self.board_ours_loss(
step=step,
loss_dis_conflict=loss_dis_conflict.mean(),
loss_dis_align=args.lambda_dis_align * loss_dis_align.mean(),
loss_swap_conflict=loss_swap_conflict.mean(),
loss_swap_align=args.lambda_swap_align * loss_swap_align.mean(),
lambda_swap=lambda_swap
)
if step % args.valid_freq == 0:
self.board_ours_acc(step)
cnt += data.shape[0]
if cnt == train_num:
print(f'finished epoch: {epoch}')
epoch += 1
cnt = 0
def test_ours(self, args):
if args.dataset == 'cmnist':
self.model_l = get_model('mlp_DISENTANGLE', self.num_classes).to(self.device)
self.model_b = get_model('mlp_DISENTANGLE', self.num_classes).to(self.device)
else:
self.model_l = get_model('resnet_DISENTANGLE', self.num_classes).to(self.device)
self.model_b = get_model('resnet_DISENTANGLE', self.num_classes).to(self.device)
self.model_l.load_state_dict(torch.load(os.path.join(args.pretrained_path, 'best_model_l.th'))['state_dict'])
self.model_b.load_state_dict(torch.load(os.path.join(args.pretrained_path, 'best_model_b.th'))['state_dict'])
self.board_ours_acc(step=0, inference=True)
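# --- Hedged illustration (not part of the original Learner class) ---
# A toy, CPU-only sketch of the relative-difficulty weight in Eq.1: bias-aligned
# samples (small loss under the bias branch) get small weights, while
# bias-conflicting samples (large bias-branch loss) dominate the intrinsic loss.
def _reweighting_example():
    import torch
    loss_dis_align = torch.tensor([0.1, 0.9, 0.5])     # per-sample CE of the bias branch
    loss_dis_conflict = torch.tensor([0.9, 0.1, 0.5])  # per-sample CE of the intrinsic branch
    loss_weight = loss_dis_align / (loss_dis_align + loss_dis_conflict + 1e-8)
    return loss_weight  # ~[0.1, 0.9, 0.5]: the bias-conflicting sample is up-weighted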
| 25,007 | 39.400646 | 161 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/util.py | '''Modified from https://github.com/alinlab/LfF/blob/master/util.py'''
import io
import torch
import numpy as np
import torch.nn as nn
class EMA:
def __init__(self, label, num_classes=None, alpha=0.9):
self.label = label.cuda()
self.alpha = alpha
self.parameter = torch.zeros(label.size(0))
self.updated = torch.zeros(label.size(0))
self.num_classes = num_classes
self.max = torch.zeros(self.num_classes).cuda()
def update(self, data, index, curve=None, iter_range=None, step=None):
self.parameter = self.parameter.to(data.device)
self.updated = self.updated.to(data.device)
index = index.to(data.device)
if curve is None:
self.parameter[index] = self.alpha * self.parameter[index] + (1 - self.alpha * self.updated[index]) * data
else:
alpha = curve ** -(step / iter_range)
self.parameter[index] = alpha * self.parameter[index] + (1 - alpha * self.updated[index]) * data
self.updated[index] = 1
def max_loss(self, label):
label_index = torch.where(self.label == label)[0]
return self.parameter[label_index].max()
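# --- Hedged usage sketch (illustrative; EMA.__init__ calls .cuda(), so a GPU is assumed) ---
def _ema_example():
    labels = torch.LongTensor([0, 0, 1, 1])
    ema = EMA(labels, num_classes=2, alpha=0.9)
    losses = torch.tensor([1.0, 2.0, 3.0, 4.0]).cuda()
    ema.update(losses, torch.arange(4).cuda())
    # On the first update, parameter[i] = alpha*0 + (1 - alpha*0)*loss[i] = loss[i].
    return ema.max_loss(0), ema.max_loss(1)  # (tensor(2.), tensor(4.))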
| 1,178 | 35.84375 | 118 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/train.py | import numpy as np
import torch
import random
from learner import Learner
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Learning Debiased Representation via Disentangled Feature Augmentation (NeurIPS 21 Oral)')
# training
parser.add_argument("--batch_size", help="batch_size", default=256, type=int)
parser.add_argument("--lr",help='learning rate',default=1e-3, type=float)
parser.add_argument("--weight_decay",help='weight_decay',default=0.0, type=float)
parser.add_argument("--momentum",help='momentum',default=0.9, type=float)
parser.add_argument("--num_workers", help="workers number", default=16, type=int)
parser.add_argument("--exp", help='experiment name', default='debugging', type=str)
parser.add_argument("--device", help="cuda or cpu", default='cuda', type=str)
parser.add_argument("--num_steps", help="# of iterations", default= 500 * 100, type=int)
parser.add_argument("--target_attr_idx", help="target_attr_idx", default= 0, type=int)
parser.add_argument("--bias_attr_idx", help="bias_attr_idx", default= 1, type=int)
parser.add_argument("--dataset", help="data to train, [cmnist, cifar10, bffhq]", default= 'cmnist', type=str)
parser.add_argument("--percent", help="percentage of conflict", default= "1pct", type=str)
parser.add_argument("--use_lr_decay", action='store_true', help="whether to use learning rate decay")
parser.add_argument("--lr_decay_step", help="learning rate decay steps", type=int, default=10000)
parser.add_argument("--q", help="GCE parameter q", type=float, default=0.7)
parser.add_argument("--lr_gamma", help="lr gamma", type=float, default=0.1)
parser.add_argument("--lambda_dis_align", help="lambda_dis in Eq.2", type=float, default=1.0)
parser.add_argument("--lambda_swap_align", help="lambda_swap_b in Eq.3", type=float, default=1.0)
parser.add_argument("--lambda_swap", help="lambda swap (lambda_swap in Eq.4)", type=float, default=1.0)
parser.add_argument("--ema_alpha", help="use weight mul", type=float, default=0.7)
parser.add_argument("--curr_step", help="curriculum steps", type=int, default= 0)
parser.add_argument("--use_type0", action='store_true', help="whether to use type 0 CIFAR10C")
parser.add_argument("--use_type1", action='store_true', help="whether to use type 1 CIFAR10C")
parser.add_argument("--use_resnet20", help="Use Resnet20", action="store_true") # ResNet 20 was used in Learning From Failure CifarC10 (We used ResNet18 in our paper)
parser.add_argument("--model", help="which network, [MLP, ResNet18, ResNet20, ResNet50]", default= 'MLP', type=str)
# logging
parser.add_argument("--log_dir", help='path for saving model', default='./log', type=str)
parser.add_argument("--data_dir", help='path for loading data', default='dataset', type=str)
parser.add_argument("--valid_freq", help='frequency to evaluate on valid/test set', default=500, type=int)
parser.add_argument("--log_freq", help='frequency to log on tensorboard', default=500, type=int)
parser.add_argument("--save_freq", help='frequency to save model checkpoint', default=1000, type=int)
parser.add_argument("--wandb", action="store_true", help="whether to use wandb")
parser.add_argument("--tensorboard", action="store_true", help="whether to use tensorboard")
# experiment
parser.add_argument("--train_ours", action="store_true", help="whether to train our method")
parser.add_argument("--train_vanilla", action="store_true", help="whether to train vanilla")
args = parser.parse_args()
# init learner
learner = Learner(args)
# actual training
print('Official Pytorch Code of "Learning Debiased Representation via Disentangled Feature Augmentation (NeurIPS 21 Oral)"')
print('Training starts ...')
if args.train_ours:
learner.train_ours(args)
elif args.train_vanilla:
learner.train_vanilla(args)
else:
print('choose one of the two options ...')
import sys
sys.exit(0)
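# --- Hedged example invocations (illustrative; dataset paths are placeholders) ---
# Vanilla baseline:
#   python train.py --train_vanilla --dataset cmnist --percent 1pct --exp vanilla_cmnist
# Proposed method:
#   python train.py --train_ours --dataset cmnist --percent 1pct --exp ours_cmnist \
#       --lambda_swap 1.0 --curr_step 10000 --use_lr_decay --lr_decay_step 10000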
| 4,084 | 59.970149 | 170 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/module/resnet.py | ''' From https://github.com/alinlab/LfF/blob/master/module/resnet.py '''
"""
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.autograd import Variable
__all__ = [
"ResNet",
"resnet20",
"resnet32",
"resnet44",
"resnet56",
"resnet110",
"resnet1202",
]
def _weights_init(m):
classname = m.__class__.__name__
# print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option="A"):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes,
planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == "A":
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(
lambda x: F.pad(
x[:, :, ::2, ::2],
(0, 0, 0, 0, planes // 4, planes // 4),
"constant",
0,
)
)
elif option == "B":
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes,
self.expansion * planes,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(
3, 16, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
self.fc = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def extract(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
feat = out.view(out.size(0), -1)
return feat
def predict(self, x):
prediction = self.fc(x)
return prediction
def forward(self, x, mode=None):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
# out = F.avg_pool2d(out, out.size()[3])
# out = out.view(out.size(0), -1)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
final_out = self.fc(out)
if mode == 'tsne' or mode == 'mixup':
return out, final_out
else:
return final_out
def resnet20(num_classes):
return ResNet(BasicBlock, [3, 3, 3], num_classes)
def resnet32():
return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
return ResNet(BasicBlock, [200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print(
"Total layers",
len(
list(
filter(
lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters(),
)
)
),
)
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith("resnet"):
print(net_name)
test(globals()[net_name]())
print()
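# --- Hedged shape-check sketch (not part of the original module) ---
# resnet20 is built for 3x32x32 CIFAR-10 inputs; the forward pass should return
# logits of shape (batch, num_classes).
def _shape_check_example():
    net = resnet20(num_classes=10)
    logits = net(torch.randn(2, 3, 32, 32))
    assert logits.shape == (2, 10)
    return logits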
| 6,270 | 27.375566 | 78 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/module/mlp.py | ''' Modified from https://github.com/alinlab/LfF/blob/master/module/mlp.py'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class MLP_DISENTANGLE(nn.Module):
def __init__(self, num_classes = 10):
super(MLP_DISENTANGLE, self).__init__()
self.feature = nn.Sequential(
nn.Linear(3*28*28, 100),
nn.ReLU(),
nn.Linear(100, 100),
nn.ReLU(),
nn.Linear(100, 16),
nn.ReLU()
)
        # fc classifies the concatenated feature [z_l; z_b] (16 + 16 = 32 dims), as used in learner.py
        self.fc = nn.Linear(32, num_classes)
        # classifier operates on the 16-dim feature alone and is what predict()/forward() call below
        self.classifier = nn.Linear(16, num_classes)
def extract(self, x):
x = x.view(x.size(0), -1) / 255
feat = self.feature(x)
return feat
def predict(self, x):
prediction = self.classifier(x)
return prediction
def forward(self, x, mode=None, return_feat=False):
x = x.view(x.size(0), -1) / 255
feat = x = self.feature(x)
final_x = self.classifier(x)
if mode == 'tsne' or mode == 'mixup':
return x, final_x
else:
if return_feat:
return final_x, feat
else:
return final_x
class MLP(nn.Module):
def __init__(self, num_classes = 10):
super(MLP, self).__init__()
self.feature = nn.Sequential(
nn.Linear(3*28*28, 100),
nn.ReLU(),
nn.Linear(100, 100),
nn.ReLU(),
nn.Linear(100, 16),
nn.ReLU()
)
self.classifier = nn.Linear(16, num_classes)
def forward(self, x, mode=None, return_feat=False):
x = x.view(x.size(0), -1) / 255
feat = x = self.feature(x)
final_x = self.classifier(x)
if mode == 'tsne' or mode == 'mixup':
return x, final_x
else:
if return_feat:
return final_x, feat
else:
return final_x
class Noise_MLP(nn.Module):
def __init__(self, n_dim=16, n_layer=3):
super(Noise_MLP, self).__init__()
layers = []
for i in range(n_layer):
layers.append(nn.Linear(n_dim, n_dim))
layers.append(nn.LeakyReLU(0.2))
self.style = nn.Sequential(*layers)
def forward(self, z):
x = self.style(z)
return x
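# --- Hedged shape-check sketch (not part of the original module) ---
# Both MLPs above flatten the input and scale it by 1/255, i.e. they are written
# for 3x28x28 CMNIST images.
def _mlp_example():
    net = MLP(num_classes=10)
    logits = net(torch.rand(4, 3, 28, 28) * 255)
    assert logits.shape == (4, 10)
    return logits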
| 2,245 | 26.728395 | 77 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/module/loss.py | '''From https://github.com/alinlab/LfF/blob/master/module/loss.py'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class GeneralizedCELoss(nn.Module):
def __init__(self, q=0.7):
super(GeneralizedCELoss, self).__init__()
self.q = q
def forward(self, logits, targets):
p = F.softmax(logits, dim=1)
if np.isnan(p.mean().item()):
raise NameError('GCE_p')
Yg = torch.gather(p, 1, torch.unsqueeze(targets, 1))
# modify gradient of cross entropy
loss_weight = (Yg.squeeze().detach()**self.q)*self.q
if np.isnan(Yg.mean().item()):
raise NameError('GCE_Yg')
loss = F.cross_entropy(logits, targets, reduction='none') * loss_weight
return loss
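# --- Hedged illustration (not part of the original module) ---
# The detached weight Yg**q scales each sample's CE loss by its own predicted
# probability, so samples the network already fits confidently (typically the
# "easy", bias-aligned ones) keep a larger share of their gradient than uncertain
# samples; this is how the bias branch is pushed to rely on the bias.
def _gce_example():
    gce = GeneralizedCELoss(q=0.7)
    logits = torch.tensor([[4.0, 0.0], [0.2, 0.0]])  # one confident, one uncertain sample
    targets = torch.tensor([0, 0])
    ce = F.cross_entropy(logits, targets, reduction='none')
    return gce(logits, targets) / ce  # ~[0.69, 0.46]: the confident sample is weighted more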
| 813 | 28.071429 | 79 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/module/util.py | ''' Modified from https://github.com/alinlab/LfF/blob/master/module/util.py '''
import torch.nn as nn
from module.resnet import resnet20
from module.mlp import *
from torchvision.models import resnet18, resnet50
def get_model(model_tag, num_classes):
if model_tag == "ResNet20":
return resnet20(num_classes)
elif model_tag == "ResNet20_OURS":
model = resnet20(num_classes)
model.fc = nn.Linear(128, num_classes)
return model
elif model_tag == "ResNet18":
        print('loading resnet18 without pretrained weights ...')
model = resnet18(pretrained=False)
model.fc = nn.Linear(512, num_classes)
return model
elif model_tag == "MLP":
return MLP(num_classes=num_classes)
elif model_tag == "mlp_DISENTANGLE":
return MLP_DISENTANGLE(num_classes=num_classes)
elif model_tag == 'resnet_DISENTANGLE':
        print('loading resnet18 without pretrained weights (disentangle fc) ...')
model = resnet18(pretrained=False)
model.fc = nn.Linear(1024, num_classes)
return model
else:
raise NotImplementedError
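# --- Hedged usage sketch (not part of the original module) ---
# 'resnet_DISENTANGLE' swaps the 512-dim fc of ResNet18 for a 1024-dim one so it
# can classify the concatenation [z_l; z_b] of two 512-dim features (see the
# concat_dummy hook and the fc calls in learner.py).
def _get_model_example():
    model_l = get_model('resnet_DISENTANGLE', num_classes=10)
    assert model_l.fc.in_features == 1024
    return model_l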
| 1,099 | 34.483871 | 79 | py |
Learning-Debiased-Disentangled | Learning-Debiased-Disentangled-master/data/util.py | '''Modified from https://github.com/alinlab/LfF/blob/master/data/util.py'''
import os
import torch
from torch.utils.data.dataset import Dataset, Subset
from torchvision import transforms as T
from glob import glob
from PIL import Image
class IdxDataset(Dataset):
def __init__(self, dataset):
self.dataset = dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
return (idx, *self.dataset[idx])
class ZippedDataset(Dataset):
def __init__(self, datasets):
super(ZippedDataset, self).__init__()
self.dataset_sizes = [len(d) for d in datasets]
self.datasets = datasets
def __len__(self):
return max(self.dataset_sizes)
def __getitem__(self, idx):
items = []
for dataset_idx, dataset_size in enumerate(self.dataset_sizes):
items.append(self.datasets[dataset_idx][idx % dataset_size])
item = [torch.stack(tensors, dim=0) for tensors in zip(*items)]
return item
class CMNISTDataset(Dataset):
def __init__(self,root,split,transform=None, image_path_list=None):
super(CMNISTDataset, self).__init__()
self.transform = transform
self.root = root
self.image2pseudo = {}
self.image_path_list = image_path_list
if split=='train':
self.align = glob(os.path.join(root, 'align',"*","*"))
self.conflict = glob(os.path.join(root, 'conflict',"*","*"))
self.data = self.align + self.conflict
elif split=='valid':
self.data = glob(os.path.join(root,split,"*"))
elif split=='test':
self.data = glob(os.path.join(root, '../test',"*","*"))
def __len__(self):
return len(self.data)
def __getitem__(self, index):
attr = torch.LongTensor([int(self.data[index].split('_')[-2]),int(self.data[index].split('_')[-1].split('.')[0])])
image = Image.open(self.data[index]).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, attr, self.data[index]
class CIFAR10Dataset(Dataset):
def __init__(self, root, split, transform=None, image_path_list=None, use_type0=None, use_type1=None):
super(CIFAR10Dataset, self).__init__()
self.transform = transform
self.root = root
self.image2pseudo = {}
self.image_path_list = image_path_list
if split=='train':
self.align = glob(os.path.join(root, 'align',"*","*"))
self.conflict = glob(os.path.join(root, 'conflict',"*","*"))
self.data = self.align + self.conflict
elif split=='valid':
self.data = glob(os.path.join(root,split,"*", "*"))
elif split=='test':
self.data = glob(os.path.join(root, '../test',"*","*"))
def __len__(self):
return len(self.data)
def __getitem__(self, index):
attr = torch.LongTensor(
[int(self.data[index].split('_')[-2]), int(self.data[index].split('_')[-1].split('.')[0])])
image = Image.open(self.data[index]).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, attr, self.data[index]
class bFFHQDataset(Dataset):
def __init__(self, root, split, transform=None, image_path_list=None):
super(bFFHQDataset, self).__init__()
self.transform = transform
self.root = root
self.image2pseudo = {}
self.image_path_list = image_path_list
if split=='train':
self.align = glob(os.path.join(root, 'align',"*","*"))
self.conflict = glob(os.path.join(root, 'conflict',"*","*"))
self.data = self.align + self.conflict
elif split=='valid':
self.data = glob(os.path.join(os.path.dirname(root), split, "*"))
elif split=='test':
self.data = glob(os.path.join(os.path.dirname(root), split, "*"))
data_conflict = []
for path in self.data:
target_label = path.split('/')[-1].split('.')[0].split('_')[1]
bias_label = path.split('/')[-1].split('.')[0].split('_')[2]
if target_label != bias_label:
data_conflict.append(path)
self.data = data_conflict
def __len__(self):
return len(self.data)
def __getitem__(self, index):
attr = torch.LongTensor(
[int(self.data[index].split('_')[-2]), int(self.data[index].split('_')[-1].split('.')[0])])
image = Image.open(self.data[index]).convert('RGB')
if self.transform is not None:
image = self.transform(image)
return image, attr, self.data[index]
transforms = {
"cmnist": {
"train": T.Compose([T.ToTensor()]),
"valid": T.Compose([T.ToTensor()]),
"test": T.Compose([T.ToTensor()])
},
"bffhq": {
"train": T.Compose([T.Resize((224,224)), T.ToTensor()]),
"valid": T.Compose([T.Resize((224,224)), T.ToTensor()]),
"test": T.Compose([T.Resize((224,224)), T.ToTensor()])
},
"cifar10c": {
"train": T.Compose([T.ToTensor(),]),
"valid": T.Compose([T.ToTensor(),]),
"test": T.Compose([T.ToTensor(),]),
},
}
transforms_preprcs = {
"cmnist": {
"train": T.Compose([T.ToTensor()]),
"valid": T.Compose([T.ToTensor()]),
"test": T.Compose([T.ToTensor()])
},
"bffhq": {
"train": T.Compose([
T.Resize((224,224)),
T.RandomCrop(224, padding=4),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"valid": T.Compose([
T.Resize((224,224)),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"test": T.Compose([
T.Resize((224,224)),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
},
"cifar10c": {
"train": T.Compose(
[
T.RandomCrop(32, padding=4),
# T.RandomResizedCrop(32),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"valid": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"test": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
},
}
transforms_preprcs_ae = {
"cmnist": {
"train": T.Compose([T.ToTensor()]),
"valid": T.Compose([T.ToTensor()]),
"test": T.Compose([T.ToTensor()])
},
"bffhq": {
"train": T.Compose([
T.Resize((224,224)),
T.RandomCrop(224, padding=4),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"valid": T.Compose([
T.Resize((224,224)),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"test": T.Compose([
T.Resize((224,224)),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
},
"cifar10c": {
"train": T.Compose(
[
# T.RandomCrop(32, padding=4),
T.RandomResizedCrop(32),
T.RandomHorizontalFlip(),
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"valid": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
"test": T.Compose(
[
T.ToTensor(),
T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
),
},
}
def get_dataset(dataset, data_dir, dataset_split, transform_split, percent, use_preprocess=None, image_path_list=None, use_type0=None, use_type1=None):
dataset_category = dataset.split("-")[0]
if use_preprocess:
transform = transforms_preprcs[dataset_category][transform_split]
else:
transform = transforms[dataset_category][transform_split]
dataset_split = "valid" if (dataset_split == "eval") else dataset_split
if dataset == 'cmnist':
root = data_dir + f"/cmnist/{percent}"
dataset = CMNISTDataset(root=root,split=dataset_split,transform=transform, image_path_list=image_path_list)
elif 'cifar10c' in dataset:
# if use_type0:
# root = data_dir + f"/cifar10c_0805_type0/{percent}"
# elif use_type1:
# root = data_dir + f"/cifar10c_0805_type1/{percent}"
# else:
root = data_dir + f"/cifar10c/{percent}"
dataset = CIFAR10Dataset(root=root, split=dataset_split, transform=transform, image_path_list=image_path_list, use_type0=use_type0, use_type1=use_type1)
elif dataset == "bffhq":
root = data_dir + f"/bffhq/{percent}"
dataset = bFFHQDataset(root=root, split=dataset_split, transform=transform, image_path_list=image_path_list)
else:
print('wrong dataset ...')
import sys
sys.exit(0)
return dataset
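# --- Hedged usage sketch (illustrative; the data_dir layout is a placeholder) ---
def _dataloader_example():
    from torch.utils.data import DataLoader
    train_set = get_dataset('cmnist', data_dir='dataset', dataset_split='train',
                            transform_split='train', percent='1pct')
    train_set = IdxDataset(train_set)  # prepend the sample index, as learner.py does
    loader = DataLoader(train_set, batch_size=256, shuffle=True)
    index, image, attr, path = next(iter(loader))  # attr = [target label, bias label]
    return index, image, attr, path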
| 9,788 | 31.73913 | 160 | py |
fast-dpsgd | fast-dpsgd-main/opacusdp.py | '''
Opacus experiments for all the models
'''
import time
import torch
import torch.nn.functional as F
from opacus import PrivacyEngine
from opacus.layers import DPLSTM
from torch import nn, optim
import data
import utils
from pytorch import get_data, model_dict
class LSTMNet(nn.Module):
def __init__(self, vocab_size: int, batch_size):
super().__init__()
        # Input vocabulary: vocab_size + 4 special tokens (<unk>, <pad>, <eos>, <sos>)
self.emb = nn.Embedding(vocab_size + 4, 100)
self.h_init = torch.randn(1, batch_size, 100).cuda()
self.c_init = torch.randn(1, batch_size, 100).cuda()
self.hidden = (self.h_init, self.c_init)
self.lstm = DPLSTM(100, 100, batch_first=True)
self.fc1 = nn.Linear(100, 2)
def forward(self, x):
x = self.emb(x) # batch_size, seq_len, embed_dim
# x has to be of shape [batch_size, seq_len, input_dim]
x, _ = self.lstm(x, self.hidden) # batch_size, seq_len, lstm_dim
x = x.mean(1) # batch_size, lstm_dim
x = self.fc1(x) # batch_size, fc_dim
return x
def main(args):
print(args)
assert args.dpsgd
torch.backends.cudnn.benchmark = True
mdict = model_dict.copy()
mdict['lstm'] = LSTMNet
train_data, train_labels = get_data(args)
model = mdict[args.experiment](vocab_size=args.max_features, batch_size=args.batch_size).cuda()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0)
loss_function = nn.CrossEntropyLoss() if args.experiment != 'logreg' else nn.BCELoss()
privacy_engine = PrivacyEngine(
model,
batch_size=args.batch_size,
sample_size=len(train_data),
alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
noise_multiplier=args.sigma,
max_grad_norm=args.max_per_sample_grad_norm,
)
privacy_engine.attach(optimizer)
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
dataloader = data.dataloader(train_data, train_labels, args.batch_size)
for batch_idx, (x, y) in enumerate(dataloader):
x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
model.zero_grad()
outputs = model(x)
loss = loss_function(outputs, y)
loss.backward()
optimizer.step()
torch.cuda.synchronize()
duration = time.perf_counter() - start
print("Time Taken for Epoch: ", duration)
timings.append(duration)
if args.dpsgd:
epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent(args.delta)
print(f"Train Epoch: {epoch} \t"
# f"Loss: {np.mean(losses):.6f} "
f"(ε = {epsilon:.2f}, δ = {args.delta}) for α = {best_alpha}")
else:
print(f"Train Epoch: {epoch} \t Loss: {np.mean(losses):.6f}")
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument(
"--sigma",
type=float,
default=1.0,
help="Noise multiplier (default 1.0)",
)
parser.add_argument(
"-c",
"--max-per-sample-grad_norm",
type=float,
default=1.0,
help="Clip per-sample gradients to this norm (default 1.0)",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
help="Target delta (default: 1e-5)",
)
args = parser.parse_args()
main(args)
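# --- Hedged example invocation (illustrative) ---
# A differentially private run of one of the registered models, mirroring the
# commands in runtime_experiment.py:
#   CUDA_VISIBLE_DEVICES=0 python opacusdp.py lstm --dpsgd --batch_size 256 --epochs 20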
| 3,656 | 31.078947 | 99 | py |
fast-dpsgd | fast-dpsgd-main/runtime_experiment.py | import argparse
import pprint
import subprocess
from utils import pr_green, pr_red
def launch(expt, batch_size, epochs):
"""Runs expt at batch_size for all the scripts"""
errors = []
# yapf: disable
cmds = [
('jax', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size}'),
('tf2', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla'),
('tf1', f'CUDA_VISIBLE_DEVICES=0 python tf1dp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla'),
('pytorch', f'CUDA_VISIBLE_DEVICES=0 python pytorch.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size}'),
('jaxdp', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('tf2dp', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla'),
('tf1dp', f'CUDA_VISIBLE_DEVICES=0 python tf1dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla'),
('opacusdp', f'CUDA_VISIBLE_DEVICES=0 python opacusdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('backpackdp', f'CUDA_VISIBLE_DEVICES=0 python backpackdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('pyvacydp', f'CUDA_VISIBLE_DEVICES=0 python pyvacydp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('owkindp', f'CUDA_VISIBLE_DEVICES=0 python owkindp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size}'),
('tf2xla', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size} --xla'),
('tf2dpxla', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --xla'),
('tf1xla', f'TF_XLA_FLAGS=--tf_xla_auto_jit=2 CUDA_VISIBLE_DEVICES=0 python tf1dp.py {expt} --no_dpsgd --epochs {epochs} --batch_size {batch_size} --xla'),
('tf1dpxla', f'TF_XLA_FLAGS=--tf_xla_auto_jit=2 CUDA_VISIBLE_DEVICES=0 python tf1dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --xla'),
# For Ablations:
('jaxdp_nv', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_vmap'),
# Outside of JIT compilation, the dynamic_unroll's LSTM (using scan) is faster than the static_unroll'd version.
('jaxdp_nj', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_jit --dynamic_unroll'),
('jaxdp_nvj', f'CUDA_VISIBLE_DEVICES=0 python jaxdp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_vmap --no_jit --dynamic_unroll'),
('tf2dp_nv', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla --no_vmap'),
('tf2dp_nvj', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --no_xla --no_vmap --no_jit'),
('tf2dpxla_nv', f'CUDA_VISIBLE_DEVICES=0 python tf2dp.py {expt} --dpsgd --epochs {epochs} --batch_size {batch_size} --xla --no_vmap'),
]
# yapf: enable
for name, cmd in cmds:
if expt == 'lstm':
if 'jax' in name:
# Due to https://github.com/deepmind/dm-haiku/issues/77, we disable
# omnistaging when running the LSTM in JAX (it will fail to compile).
cmd = 'JAX_OMNISTAGING=0 ' + cmd
if name in ('tf1', 'tf2', 'tf1xla', 'tf2dp_nv'):
# The dynamically unrolled LSTM uses the cudNN LSTM implementation
# in the non-vectorized_map case, making it faster.
cmd = cmd + ' --no_unroll'
pr_green(f'Starting {name}: {cmd}')
out = subprocess.run([cmd],
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True)
print(out.stdout)
if out.returncode != 0:
errors.append(name)
            # stderr was merged into stdout above, so out.stderr is None and the output was already printed.
print()
pr_red(f'Done {name}: {cmd} \n')
else:
pr_green(f'Done {name}: {cmd} \n')
pr_green(f'Done {expt} at batch size {batch_size}.')
return errors
def main(args):
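    """Launch every framework script for each experiment and batch size, then report which runs failed."""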
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(args)
failed = {}
for expt in args.experiments:
for bs in args.batch_sizes:
failed[(expt, bs)] = launch(expt, bs, args.epochs)
pr_red('\nFailed Experiments: \n')
pp.pprint(failed)
if __name__ == '__main__':
parser = argparse.ArgumentParser('Returns Max Batch Size before OOM')
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--experiments',
default=['logreg', 'ffnn', 'mnist', 'embed', 'lstm', 'cifar10'],
nargs='+')
parser.add_argument('--batch_sizes', default=[256, 128, 64, 32, 16], nargs='+', type=int)
args = parser.parse_args()
main(args)
| 5,190 | 59.360465 | 163 | py |
fast-dpsgd | fast-dpsgd-main/pytorch.py | '''
Model file and non-differentially private file
'''
import time
import torch
import torch.nn.functional as F
from torch import nn, optim
import data
import utils
class EmbeddingNet(nn.Module):
def __init__(self, vocab_size: int, **_):
super().__init__()
        # Embedding vocabulary size: vocab_size + 4 special tokens (<unk>, <pad>, <eos>, <sos>)
self.emb = nn.Embedding(vocab_size + 4, 16)
self.fc1 = nn.Linear(16, 2)
def forward(self, x):
# x: batch_size, seq_len
x = self.emb(x) # batch_size, seq_len, embed_dim
x = x.mean(1) # batch_size, embed_dim
x = self.fc1(x) # batch_size, fc_dim
return x
class LSTMNet(nn.Module):
def __init__(self, vocab_size: int, **_):
super().__init__()
        # Embedding vocabulary size: vocab_size + 4 special tokens (<unk>, <pad>, <eos>, <sos>)
self.emb = nn.Embedding(vocab_size + 4, 100)
self.lstm = nn.LSTM(100, 100)
self.fc1 = nn.Linear(100, 2)
def forward(self, x):
# x: batch_size, seq_len
x = self.emb(x) # batch_size, seq_len, embed_dim
x = x.transpose(0, 1) # seq_len, batch_size, embed_dim
x, _ = self.lstm(x) # seq_len, batch_size, lstm_dim
x = x.mean(0) # batch_size, lstm_dim
x = self.fc1(x) # batch_size, fc_dim
return x
class MNISTNet(nn.Module):
def __init__(self, **_):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = nn.Conv2d(16, 32, 4, 2)
self.fc1 = nn.Linear(32 * 4 * 4, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = F.relu(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = F.relu(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = F.relu(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
class FFNN(nn.Module):
def __init__(self, **_):
super().__init__()
self.fc1 = nn.Linear(104, 50)
self.fc2 = nn.Linear(50, 2)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
return out
class Logistic(nn.Module):
def __init__(self, **_):
super().__init__()
self.fc1 = nn.Linear(104, 1)
def forward(self, x):
out = self.fc1(x)
        out = torch.sigmoid(out)  # F.sigmoid is deprecated in favor of torch.sigmoid
return out
class CIFAR10Model(nn.Module):
def __init__(self, **_):
super().__init__()
self.layer_list = nn.ModuleList([
nn.Sequential(nn.Conv2d(3, 32, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(32, 32, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(32, 64, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(64, 64, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(64, 128, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Sequential(nn.Conv2d(128, 128, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
nn.Sequential(nn.Conv2d(128, 256, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.Conv2d(256, 10, (3, 3), padding=1, stride=(1, 1)),
])
def forward(self, x):
for layer in self.layer_list:
x = layer(x)
# print(x.shape)
return torch.mean(x, dim=(2, 3))
model_dict = {
'mnist': MNISTNet,
'lstm': LSTMNet,
'embed': EmbeddingNet,
'ffnn': FFNN,
'logreg': Logistic,
'cifar10': CIFAR10Model,
}
def get_data(args):
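    """Yield training data then labels as torch tensors (int32 -> long; float32 for logreg)."""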
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NCHW',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
train_data, _ = data_fn(**kwargs)
for d in train_data: # train_data, train_labels
d = torch.from_numpy(d)
if d.dtype == torch.int32:
d = d.long()
if args.experiment == 'logreg' and d.dtype != torch.float32:
d = d.float()
yield d
def main(args):
print(args)
assert not args.dpsgd
torch.backends.cudnn.benchmark = True
train_data, train_labels = get_data(args)
model = model_dict[args.experiment](vocab_size=args.max_features).cuda()
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate)
loss_function = nn.CrossEntropyLoss() if args.experiment != 'logreg' else nn.BCELoss()
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
dataloader = data.dataloader(train_data, train_labels, args.batch_size)
for batch_idx, (x, y) in enumerate(dataloader):
x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
model.zero_grad()
outputs = model(x)
loss = loss_function(outputs, y)
loss.backward()
optimizer.step()
        torch.cuda.synchronize()  # wait for queued GPU work so the epoch time is measured correctly
        duration = time.perf_counter() - start
print("Time Taken for Epoch: ", duration)
timings.append(duration)
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
args = parser.parse_args()
main(args)
| 5,651 | 30.4 | 92 | py |
fast-dpsgd | fast-dpsgd-main/utils.py | import argparse
import pickle
def get_parser(experiments):
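    """Build the argparse parser shared by all the benchmark scripts.
    Example invocation (flags as used in runtime_experiment.py):
    python pytorch.py mnist --no_dpsgd --batch_size 64 --epochs 20
    """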
parser = argparse.ArgumentParser()
parser.add_argument('experiment', choices=experiments)
parser.add_argument('--dpsgd', dest='dpsgd', action='store_true')
parser.add_argument('--no_dpsgd', dest='dpsgd', action='store_false')
parser.add_argument('--learning_rate', default=0.15, type=float)
parser.add_argument('--noise_multiplier', default=1.1, type=float)
parser.add_argument('--l2_norm_clip', default=1.0, type=float)
parser.add_argument('--batch_size', default=100, type=int)
parser.add_argument('--microbatches', default=None)
parser.add_argument('--epochs', default=60, type=int)
parser.add_argument('--dummy_data', default=False, action='store_true')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--no_save', dest='no_save', action='store_true')
# imdb specific
parser.add_argument('--max_features', default=10_000, type=int)
parser.add_argument('--max_len', default=256, type=int)
return parser
def save_runtimes(filename, args, timings, append_to_name=''):
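    """Pickle the run's args and per-epoch timings to results/raw/ under a descriptive file name."""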
d = {'args': args, 'timings': timings}
pickle_name = f'{filename}_{args.experiment}_bs_{args.batch_size}_priv_{args.dpsgd}'
if hasattr(args, 'use_xla'):
pickle_name += f'_xla_{args.use_xla}'
pickle_name += append_to_name
full_path = './results/raw/' + pickle_name + '.pkl'
print('Saving to: ', full_path)
with open(full_path, 'wb') as handle:
pickle.dump(d, handle, protocol=pickle.HIGHEST_PROTOCOL)
def pr_red(text):
print("\033[91m{}\033[00m".format(text))
def pr_green(text):
print("\033[92m{}\033[00m".format(text))
| 1,703 | 36.043478 | 88 | py |
fast-dpsgd | fast-dpsgd-main/data.py | import numpy as np
import tensorflow as tf
from keras.preprocessing import sequence
def dataloader(x, y, batch_size):
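    """Yield successive (x, y) slices of size batch_size; the final batch may be smaller."""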
if batch_size > len(x):
raise ValueError('Batch Size too big.')
num_eg = len(x)
assert num_eg == len(y)
for i in range(0, num_eg, batch_size):
yield x[i:i + batch_size], y[i:i + batch_size]
def load_cifar10(format='NHWC', **_):
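    """Load CIFAR-10 via Keras, scale pixels to [0, 1] and lay images out as NHWC or NCHW."""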
train, test = tf.keras.datasets.cifar10.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = np.asarray(train_data, dtype=np.float32) / 255.
test_data = np.asarray(test_data, dtype=np.float32) / 255.
if format == 'NHWC':
pass
elif format == 'NCHW':
train_data = train_data.transpose((0, 3, 1, 2))
test_data = test_data.transpose((0, 3, 1, 2))
else:
raise ValueError('Invalid format.')
train_labels = np.asarray(train_labels, dtype=np.int32).squeeze()
test_labels = np.asarray(test_labels, dtype=np.int32).squeeze()
return (train_data, train_labels), (test_data, test_labels)
def load_dummy_cifar10(num_examples, format='NHWC', **_):
train_labels = np.random.randint(0, 10, num_examples).astype(np.int32)
if format == 'NHWC':
train_data = np.random.random((num_examples, 32, 32, 3)).astype(np.float32)
elif format == 'NCHW':
train_data = np.random.random((num_examples, 3, 32, 32)).astype(np.float32)
else:
raise ValueError('Invalid format.')
return (train_data, train_labels), (train_data, train_labels)
def load_mnist(format='NHWC', **_):
"""Loads MNIST and preprocesses to combine training and validation data."""
train, test = tf.keras.datasets.mnist.load_data()
train_data, train_labels = train
test_data, test_labels = test
train_data = np.asarray(train_data, dtype=np.float32) / 255.
test_data = np.asarray(test_data, dtype=np.float32) / 255.
if format == 'NHWC':
train_data, test_data = train_data[..., None], test_data[..., None]
elif format == 'NCHW':
train_data, test_data = train_data[:, None], test_data[:, None]
else:
raise ValueError('Invalid format.')
train_labels = np.asarray(train_labels, dtype=np.int32)
test_labels = np.asarray(test_labels, dtype=np.int32)
assert train_data.min() == 0.
assert train_data.max() == 1.
assert test_data.min() == 0.
assert test_data.max() == 1.
assert train_labels.ndim == 1
assert test_labels.ndim == 1
return (train_data, train_labels), (test_data, test_labels)
def load_dummy_mnist(num_examples, format='NHWC', **_):
train_data = np.random.random((num_examples, 28, 28)).astype(np.float32)
train_labels = np.random.randint(0, 10, num_examples).astype(np.int32)
if format == 'NHWC':
train_data = train_data[..., None]
elif format == 'NCHW':
train_data = train_data[:, None]
else:
raise ValueError('Invalid format.')
return (train_data, train_labels), (train_data, train_labels)
def load_imdb(max_features=10_000, max_len=256, **_):
"""Load IMDB movie reviews data."""
train, test = tf.keras.datasets.imdb.load_data(num_words=max_features)
(train_data, train_labels), (test_data, test_labels) = train, test
train_data = sequence.pad_sequences(train_data, maxlen=max_len).astype(np.int32)
test_data = sequence.pad_sequences(test_data, maxlen=max_len).astype(np.int32)
train_labels, test_labels = train_labels.astype(np.int32), test_labels.astype(np.int32)
return (train_data, train_labels), (test_data, test_labels)
def load_dummy_imdb(num_examples, max_features=10_000, max_len=256, **_):
train_data = np.random.randint(0, max_features, (num_examples, max_len)).astype(np.int32)
train_labels = np.random.random(num_examples).round().astype(np.int32)
return (train_data, train_labels), (train_data, train_labels)
def load_adult(**_):
"""Loads ADULT a2a as in LIBSVM and preprocesses to combine training and validation data."""
# https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html
data_x = np.load('adult_processed_x.npy')
data_y = np.load('adult_processed_y.npy')
data_y[data_y == -1] = 0
train_data = data_x.astype(np.float32)
train_labels = data_y.astype(np.int32)
return (train_data, train_labels), None
def load_dummy_adult(num_examples, **_):
train_data = np.random.random((num_examples, 104)).astype(np.float32)
train_labels = np.random.random(num_examples).round().astype(np.int32)
return (train_data, train_labels), None
data_fn_dict = {
'mnist': (load_mnist, load_dummy_mnist),
'lstm': (load_imdb, load_dummy_imdb),
'embed': (load_imdb, load_dummy_imdb),
'ffnn': (load_adult, load_dummy_adult),
'logreg': (load_adult, load_dummy_adult),
'cifar10': (load_cifar10, load_dummy_cifar10),
}
if __name__ == '__main__':
# Test Functionality
names = ['mnist', 'imdb', 'adult', 'cifar10']
data_fns = [load_mnist, load_imdb, load_adult, load_cifar10]
dummy_data_fns = [load_dummy_mnist, load_dummy_imdb, load_dummy_adult, load_dummy_cifar10]
for name, data_fn, dummy_data_fn in zip(names, data_fns, dummy_data_fns):
print(f'Checking {name}')
(x, y), _ = data_fn()
(dx, dy), _ = dummy_data_fn(x.shape[0])
assert x.shape == dx.shape, f'Original: {x.shape}, Dummy: {dx.shape}'
assert y.shape == dy.shape, f'Original: {y.shape}, Dummy: {dy.shape}'
assert x.dtype == dx.dtype, f'Original: {x.dtype}, Dummy: {dx.dtype}'
assert y.dtype == dy.dtype, f'Original: {y.dtype}, Dummy: {dy.dtype}'
| 5,648 | 36.410596 | 96 | py |
fast-dpsgd | fast-dpsgd-main/jaxdp.py | '''
Code for JAX implementations presented in: Enabling Fast
Differentially Private SGD via Just-in-Time Compilation and Vectorization
'''
import itertools
import time
from functools import partial
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from jax import grad, jit, random, vmap
from jax.experimental import optimizers, stax
from jax.lib import pytree
from jax.tree_util import tree_flatten, tree_multimap, tree_unflatten
from keras.utils.np_utils import to_categorical
from tensorflow_privacy.privacy.analysis.rdp_accountant import (compute_rdp, get_privacy_spent)
import data
import utils
def logistic_model(features, **_):
return hk.Sequential([hk.Linear(1), jax.nn.sigmoid])(features)
def ffnn_model(features, **_):
return hk.Sequential([hk.Linear(50), jax.nn.relu, hk.Linear(2)])(features)
def mnist_model(features, **_):
return hk.Sequential([
hk.Conv2D(16, (8, 8), padding='SAME', stride=(2, 2)),
jax.nn.relu,
hk.MaxPool(2, 1, padding='VALID'), # matches stax
hk.Conv2D(32, (4, 4), padding='VALID', stride=(2, 2)),
jax.nn.relu,
hk.MaxPool(2, 1, padding='VALID'), # matches stax
hk.Flatten(),
hk.Linear(32),
jax.nn.relu,
hk.Linear(10),
])(features)
def lstm_model(x, vocab_size=10_000, seq_len=256, args=None, **_):
embed_init = hk.initializers.TruncatedNormal(stddev=0.02)
token_embedding_map = hk.Embed(vocab_size + 4, 100, w_init=embed_init)
o2 = token_embedding_map(x)
o2 = jnp.reshape(o2, (o2.shape[1], o2.shape[0], o2.shape[2]))
# LSTM Part of Network
core = hk.LSTM(100)
if args and args.dynamic_unroll:
outs, state = hk.dynamic_unroll(core, o2, core.initial_state(x.shape[0]))
else:
outs, state = hk.static_unroll(core, o2, core.initial_state(x.shape[0]))
outs = outs.reshape(outs.shape[1], outs.shape[0], outs.shape[2])
# Avg Pool -> Linear
red_dim_outs = hk.avg_pool(outs, seq_len, seq_len, "SAME").squeeze()
final_layer = hk.Linear(2)
ret = final_layer(red_dim_outs)
return ret
def embedding_model(arr, vocab_size=10_000, seq_len=256, **_):
# embedding part of network
x = arr
embed_init = hk.initializers.TruncatedNormal(stddev=0.02)
token_embedding_map = hk.Embed(vocab_size + 4, 16, w_init=embed_init)
o2 = token_embedding_map(x)
# avg pool -> linear
o3 = hk.avg_pool(o2, seq_len, seq_len, "SAME").squeeze()
fcnn = hk.Sequential([hk.Linear(16), jax.nn.relu, hk.Linear(2)])
return fcnn(o3)
def cifar_model(features, **_):
out = hk.Conv2D(32, (3, 3), padding='SAME', stride=(1, 1))(features)
out = jax.nn.relu(out)
out = hk.Conv2D(32, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(64, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(64, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(128, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(128, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.AvgPool(2, strides=2, padding='VALID')(out)
out = hk.Conv2D(256, (3, 3), padding='SAME', stride=(1, 1))(out)
out = jax.nn.relu(out)
out = hk.Conv2D(10, (3, 3), padding='SAME', stride=(1, 1))(out)
return out.mean((1, 2))
def multiclass_loss(model, params, batch):
inputs, targets = batch
logits = model.apply(params, None, inputs)
# convert the outputs to one hot shape according to the same shape as
# logits for vectorized dot product
one_hot = jax.nn.one_hot(targets, logits.shape[-1])
logits = stax.logsoftmax(logits) # log normalize
return -jnp.mean(jnp.sum(logits * one_hot, axis=-1)) # cross entropy loss
def logistic_loss(model, params, batch):
inputs, targets = batch[0], batch[1]
# have to always supply the RNG field
logits = model.apply(params, None, inputs)
logits = jnp.reshape(logits, -1) # needs to be only scalar per index
# max_val is required for numerical stability
max_val = jnp.clip(logits, 0, None)
loss = jnp.mean(logits - logits * targets + max_val +
jnp.log(jnp.exp(-max_val) + jnp.exp((-logits - max_val))))
return loss
def accuracy(model, params, batch):
inputs, targets = batch
target_class = jnp.argmax(targets, axis=1)
predicted_class = jnp.argmax(model.apply(params, None, inputs), axis=1)
return jnp.mean(predicted_class == target_class)
def clipped_grad(model, loss, params, l2_norm_clip, single_example_batch):
"""Evaluate gradient for a single-example batch and clip its grad norm."""
grads = grad(partial(loss, model))(params, single_example_batch)
nonempty_grads, tree_def = tree_flatten(grads)
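    # The global l2 norm of the full gradient equals the norm of the per-leaf norms; dividing by
    # max(norm / clip, 1) leaves small gradients untouched and rescales large ones onto the clip ball.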
total_grad_norm = jnp.linalg.norm([jnp.linalg.norm(neg.ravel()) for neg in nonempty_grads])
divisor = jnp.maximum(total_grad_norm / l2_norm_clip, 1.)
normalized_nonempty_grads = [g / divisor for g in nonempty_grads]
return tree_unflatten(tree_def, normalized_nonempty_grads)
def private_grad(model, loss, params, batch, rng, l2_norm_clip, noise_multiplier, batch_size):
"""Return differentially private gradients for params, evaluated on batch."""
clipped_grads = vmap(partial(clipped_grad, model, loss), (None, None, 0))(params, l2_norm_clip,
batch)
clipped_grads_flat, grads_treedef = tree_flatten(clipped_grads)
aggregated_clipped_grads = [g.sum(0) for g in clipped_grads_flat]
rngs = random.split(rng, len(aggregated_clipped_grads))
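    # Each parameter leaf receives independent Gaussian noise with std = l2_norm_clip * noise_multiplier,
    # the usual Gaussian-mechanism step of DP-SGD.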
noised_aggregated_clipped_grads = [
g + l2_norm_clip * noise_multiplier * random.normal(r, g.shape)
for r, g in zip(rngs, aggregated_clipped_grads)
]
normalized_noised_aggregated_clipped_grads = [
g / batch_size for g in noised_aggregated_clipped_grads
]
return tree_unflatten(grads_treedef, normalized_noised_aggregated_clipped_grads)
def private_grad_no_vmap(model, loss, params, batch, rng, l2_norm_clip, noise_multiplier,
batch_size):
"""Return differentially private gradients for params, evaluated on batch."""
clipped_grads = tree_multimap(
lambda *xs: jnp.stack(xs),
*(clipped_grad(model, loss, params, l2_norm_clip, eg) for eg in zip(*batch)))
clipped_grads_flat, grads_treedef = tree_flatten(clipped_grads)
aggregated_clipped_grads = [g.sum(0) for g in clipped_grads_flat]
rngs = random.split(rng, len(aggregated_clipped_grads))
noised_aggregated_clipped_grads = [
g + l2_norm_clip * noise_multiplier * random.normal(r, g.shape)
for r, g in zip(rngs, aggregated_clipped_grads)
]
normalized_noised_aggregated_clipped_grads = [
g / batch_size for g in noised_aggregated_clipped_grads
]
return tree_unflatten(grads_treedef, normalized_noised_aggregated_clipped_grads)
model_dict = {
'mnist': mnist_model,
'lstm': lstm_model,
'embed': embedding_model,
'ffnn': ffnn_model,
'logreg': logistic_model,
'cifar10': cifar_model,
}
def main(args):
print(args)
if args.microbatches:
raise NotImplementedError('Microbatches < batch size not currently supported')
if args.experiment == 'lstm' and args.no_jit:
raise ValueError('LSTM with no JIT will fail.')
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NHWC',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
(train_data, train_labels), _ = data_fn(**kwargs)
# train_labels, test_labels = to_categorical(train_labels), to_categorical(
# test_labels)
num_train = train_data.shape[0]
num_complete_batches, leftover = divmod(num_train, args.batch_size)
num_batches = num_complete_batches + bool(leftover)
key = random.PRNGKey(args.seed)
model = hk.transform(
partial(model_dict[args.experiment],
args=args,
vocab_size=args.max_features,
seq_len=args.max_len))
rng = jax.random.PRNGKey(42)
init_params = model.init(key, train_data[:args.batch_size])
opt_init, opt_update, get_params = optimizers.sgd(args.learning_rate)
loss = logistic_loss if args.experiment == 'logreg' else multiclass_loss
if args.dpsgd:
train_data, train_labels = train_data[:, None], train_labels[:, None]
# regular update -- non-private
def update(_, i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(partial(loss, model))(params, batch), opt_state)
grad_fn = private_grad_no_vmap if args.no_vmap else private_grad
# differentially private update
def private_update(rng, i, opt_state, batch):
params = get_params(opt_state)
rng = random.fold_in(rng, i) # get new key for new random numbers
return opt_update(
i,
grad_fn(model, loss, params, batch, rng, args.l2_norm_clip, args.noise_multiplier,
args.batch_size), opt_state)
opt_state = opt_init(init_params)
itercount = itertools.count()
train_fn = private_update if args.dpsgd else update
if args.no_vmap:
print('No vmap for dpsgd!')
if not args.no_jit:
train_fn = jit(train_fn)
else:
print('No jit!')
dummy = jnp.array(1.)
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
for i, batch in enumerate(data.dataloader(train_data, train_labels, args.batch_size)):
opt_state = train_fn(
key,
next(itercount),
opt_state,
batch,
)
(dummy * dummy).block_until_ready() # synchronize CUDA.
duration = time.perf_counter() - start
print("Time Taken: ", duration)
timings.append(duration)
if args.dpsgd:
print('Trained with DP SGD optimizer')
else:
print('Trained with vanilla non-private SGD optimizer')
if not args.no_save:
append_to_name = ''
if args.no_jit: append_to_name += '_nojit'
if args.no_vmap: append_to_name += '_novmap'
utils.save_runtimes(__file__.split('.')[0], args, timings, append_to_name)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument('--no_vmap', dest='no_vmap', action='store_true')
parser.add_argument('--no_jit', dest='no_jit', action='store_true')
parser.add_argument('--dynamic_unroll', dest='dynamic_unroll', action='store_true')
args = parser.parse_args()
main(args)
| 11,056 | 35.734219 | 99 | py |
fast-dpsgd | fast-dpsgd-main/tf1dp.py | """Based on: https://github.com/tensorflow/privacy/blob/master/tutorials/mnist_dpsgd_tutorial_vectorized.py"""
import os
import time
from functools import partial
import tensorflow.compat.v1 as tf
from tensorflow_privacy.privacy.analysis.gdp_accountant import (compute_eps_poisson,
compute_mu_poisson)
from tensorflow_privacy.privacy.analysis.rdp_accountant import (compute_rdp, get_privacy_spent)
from tensorflow_privacy.privacy.optimizers import dp_optimizer_vectorized
import data
import utils
from tf2dp import model_dict
def nn_model_fn(model, loss_fn, args, features, labels, mode):
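    """Estimator model_fn; uses the vectorized DP-SGD optimizer when args.dpsgd is set."""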
# the model has to be created inside the estimator function to be on the right graph.
logits = model()(features['x'])
vector_loss = loss_fn(labels=labels, logits=logits)
scalar_loss = tf.reduce_mean(vector_loss)
if mode == tf.estimator.ModeKeys.TRAIN:
if args.dpsgd:
# Use DP version of GradientDescentOptimizer. Other optimizers are
# available in dp_optimizer. Most optimizers inheriting from
# tf.train.Optimizer should be wrappable in differentially private
# counterparts by calling dp_optimizer.optimizer_from_args().
optimizer = dp_optimizer_vectorized.VectorizedDPSGD(
l2_norm_clip=args.l2_norm_clip,
noise_multiplier=args.noise_multiplier,
num_microbatches=args.microbatches,
learning_rate=args.learning_rate)
opt_loss = vector_loss
else:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
opt_loss = scalar_loss
global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss=opt_loss, global_step=global_step)
# In the following, we pass the mean of the loss (scalar_loss) rather than
# the vector_loss because tf.estimator requires a scalar loss. This is only
# used for evaluation and debugging by tf.estimator. The actual loss being
# minimized is opt_loss defined above and passed to optimizer.minimize().
return tf.estimator.EstimatorSpec(mode=mode, loss=scalar_loss, train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
        # This branch is unused (but kept from the TFP tutorial).
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels,
predictions=tf.argmax(input=logits, axis=1))
}
return tf.estimator.EstimatorSpec(mode=mode,
loss=scalar_loss,
eval_metric_ops=eval_metric_ops)
def compute_epsilon(epoch, num_train_eg, args):
"""Computes epsilon value for given hyperparameters."""
steps = epoch * num_train_eg // args.batch_size
if args.noise_multiplier == 0.0:
return float('inf')
orders = [1 + x / 10. for x in range(1, 100)] + list(range(12, 64))
sampling_probability = args.batch_size / num_train_eg
rdp = compute_rdp(q=sampling_probability,
noise_multiplier=args.noise_multiplier,
steps=steps,
orders=orders)
# Delta is set to approximate 1 / (number of training points).
return get_privacy_spent(orders, rdp, target_delta=1e-5)[0]
def main(args):
print(args)
tf.disable_eager_execution() # TFP is designed to run in TF1 graph mode.
if args.memory_limit: # Option to limit GPU memory
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=args.memory_limit)])
assert args.microbatches is None # vectorized TFP only supports microbatches=1
args.microbatches = args.batch_size
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NHWC',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
(train_data, train_labels), _ = data_fn(**kwargs)
num_train_eg = train_data.shape[0]
loss_fn = tf.nn.sparse_softmax_cross_entropy_with_logits
if args.experiment == 'logreg':
loss_fn = lambda labels, logits: tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=tf.squeeze(logits))
train_labels = train_labels.astype('float32')
model = partial(model_dict[args.experiment],
features=train_data,
max_features=args.max_features,
args=args)
if args.use_xla:
# Setting both the environment flag and session_config is redundant, but
# we do this just in case.
assert os.environ['TF_XLA_FLAGS'] == '--tf_xla_auto_jit=2'
session_config = tf.ConfigProto()
session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_2
run_config = tf.estimator.RunConfig(session_config=session_config)
print('Using XLA!')
else:
run_config = None
print('NOT using XLA!')
model_obj = tf.estimator.Estimator(model_fn=partial(nn_model_fn, model, loss_fn, args),
config=run_config)
train_input_fn = tf.estimator.inputs.numpy_input_fn(x={'x': train_data},
y=train_labels,
batch_size=args.batch_size,
num_epochs=args.epochs,
shuffle=True)
steps_per_epoch = num_train_eg // args.batch_size
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
model_obj.train(input_fn=train_input_fn, steps=steps_per_epoch)
duration = time.perf_counter() - start
print("Time Taken: ", duration)
timings.append(duration)
if args.dpsgd:
# eps = compute_epsilon(epoch, num_train_eg, args)
# print('For delta=1e-5, the current epsilon is: %.2f' % eps)
print('Trained with DPSGD optimizer')
else:
print('Trained with vanilla non-private SGD optimizer')
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument('--memory_limit', default=None, type=int)
parser.add_argument('--xla', dest='use_xla', action='store_true')
parser.add_argument('--no_xla', dest='use_xla', action='store_false')
parser.add_argument('--no_unroll', dest='no_unroll', action='store_true')
args = parser.parse_args()
main(args)
| 7,118 | 43.49375 | 110 | py |
fast-dpsgd | fast-dpsgd-main/tf2dp.py | import time
from functools import partial
import tensorflow as tf
from tensorflow_privacy.privacy.analysis.gdp_accountant import (compute_eps_poisson,
compute_mu_poisson)
from jax.tree_util import tree_multimap
import data
import utils
def get_logreg_model(features, batch_size=None, **_):
return tf.keras.Sequential(
[tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Dense(1)])
def get_ffnn_model(features, batch_size=None, **_):
return tf.keras.Sequential([
tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Dense(50, activation='relu'),
tf.keras.layers.Dense(2, activation='relu')
])
def get_mnist_model(features, batch_size=None, **_):
return tf.keras.Sequential([
tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Conv2D(16, 8, strides=2, padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Conv2D(32, 4, strides=2, padding='valid', activation='relu'),
tf.keras.layers.MaxPool2D(2, 1),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.Dense(10)
])
def get_imdb_model(features, max_features, args, batch_size=None, **_):
return tf.keras.Sequential([
tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Embedding(max_features + 4, 100),
tf.keras.layers.LSTM(100, return_sequences=True, unroll=(not args.no_unroll)),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(2)
])
def get_embedding_model(features, max_features, batch_size=None, **_):
return tf.keras.Sequential([
tf.keras.Input(shape=features.shape[1:], batch_size=batch_size),
tf.keras.layers.Embedding(max_features + 4, 16),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(2)
])
class CIFAR10Model(tf.keras.Model):
def __init__(self, features, batch_size=None, **_):
super().__init__()
layers = tf.keras.layers
self.layer_list = [
layers.Conv2D(32, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.Conv2D(32, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.AveragePooling2D(2, strides=2, padding='VALID'),
layers.Conv2D(64, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.Conv2D(64, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.AveragePooling2D(2, strides=2, padding='VALID'),
layers.Conv2D(128, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.Conv2D(128, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.AveragePooling2D(2, strides=2, padding='VALID'),
layers.Conv2D(256, (3, 3), padding='SAME', strides=(1, 1), activation='relu'),
layers.Conv2D(10, (3, 3), padding='SAME', strides=(1, 1)),
]
def call(self, x):
for layer in self.layer_list:
x = layer(x)
# print(x.shape)
return tf.reduce_mean(x, axis=(1, 2))
def reduce_noise_normalize_batch(args, stacked_grads):
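    """Sum per-example clipped grads, add Gaussian noise (std = clip * multiplier), then average."""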
summed_grads = tf.reduce_sum(input_tensor=stacked_grads, axis=0)
noise_stddev = args.l2_norm_clip * args.noise_multiplier
noise = tf.random.normal(tf.shape(input=summed_grads), stddev=noise_stddev)
noised_grads = summed_grads + noise
return noised_grads / tf.cast(args.microbatches, tf.float32)
def compute_per_eg_grad(model, optimizer, loss_fn, args, data):
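    """Compute the loss and the l2-clipped gradient for a single (features, labels) example."""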
features, labels = data
with tf.GradientTape() as tape:
# We need to add the extra dimension to features because model
# expects batched input.
logits = model(features[None])
loss = loss_fn(labels=labels, logits=tf.squeeze(logits))
grads_list = tape.gradient(
loss,
model.trainable_variables,
# This argument should not be necessary, but we include it in case:
unconnected_gradients=tf.UnconnectedGradients.ZERO)
# We expect grads_list to be flat already, but we use this structure to mirror TFP.
grads_flat = tf.nest.flatten(grads_list)
squared_l2_norms = [tf.reduce_sum(input_tensor=tf.square(g)) for g in grads_flat]
global_norm = tf.sqrt(tf.add_n(squared_l2_norms))
div = tf.maximum(global_norm / args.l2_norm_clip, 1.)
clipped_flat = [g / div for g in grads_flat]
clipped_grads = tf.nest.pack_sequence_as(grads_list, clipped_flat)
return loss, clipped_grads
def private_train_step(model, optimizer, loss_fn, args, data):
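    """One DP-SGD step: clip per-example grads, then noise, average and apply them.
    Per-example gradients come from tf.vectorized_map, or from a Python loop when --no_vmap is set.
    """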
if args.no_vmap:
x, y = data
# Manually compute per-example gradients via a loop, then stack the results.
loss, clipped_grads = tree_multimap(
lambda *xs: tf.stack(xs),
*(compute_per_eg_grad(model, optimizer, loss_fn, args, (x[i], y[i]))
for i in range(x.shape[0])))
else:
loss, clipped_grads = tf.vectorized_map(
partial(compute_per_eg_grad, model, optimizer, loss_fn, args),
data) # , fallback_to_while_loop=False)
final_grads = tf.nest.map_structure(partial(reduce_noise_normalize_batch, args), clipped_grads)
optimizer.apply_gradients(zip(final_grads, model.trainable_variables))
return loss
def train_step(model, optimizer, loss_fn, args, data):
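    """Standard (non-private) SGD step on a full batch."""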
features, labels = data
with tf.GradientTape() as tape:
logits = model(features)
loss = tf.reduce_mean(loss_fn(labels=labels, logits=logits))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
def evaluate(model, test_data, test_labels):
# This function is unused.
loss_mean = tf.keras.metrics.Mean()
acc_mean = tf.keras.metrics.SparseCategoricalAccuracy()
for features, labels in zip(batch_gen(test_data), batch_gen(test_labels)):
loss, logits = compute_scalar_loss(model, features, labels)
loss_mean.update_state(loss)
acc_mean.update_state(labels, logits)
return {'loss': loss_mean.result(), 'accuracy': acc_mean.result()}
model_dict = {
'mnist': get_mnist_model,
'lstm': get_imdb_model,
'embed': get_embedding_model,
'ffnn': get_ffnn_model,
'logreg': get_logreg_model,
'cifar10': CIFAR10Model,
}
def main(args):
print(args)
if args.memory_limit: # Option to limit GPU memory.
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.experimental.set_virtual_device_configuration(
physical_devices[0],
[tf.config.experimental.VirtualDeviceConfiguration(memory_limit=args.memory_limit)])
assert args.microbatches is None # Only support microbatches=1.
args.microbatches = args.batch_size
data_fn = data.data_fn_dict[args.experiment][int(args.dummy_data)]
kwargs = {
'max_features': args.max_features,
'max_len': args.max_len,
'format': 'NHWC',
}
if args.dummy_data:
kwargs['num_examples'] = args.batch_size * 2
(train_data, train_labels), _ = data_fn(**kwargs)
train_data, train_labels = tf.constant(train_data), tf.constant(train_labels)
    num_train_eg = train_data.shape[0]  # number of training examples
loss_fn = tf.nn.sparse_softmax_cross_entropy_with_logits
if args.experiment == 'logreg':
loss_fn = lambda labels, logits: tf.nn.sigmoid_cross_entropy_with_logits(
labels=labels, logits=tf.squeeze(logits))
train_labels = tf.cast(train_labels, tf.float32)
model_bs = 1 if args.dpsgd else args.batch_size
model = model_dict[args.experiment](
train_data,
max_features=args.max_features,
# batch_size=model_bs,
args=args)
optimizer = tf.keras.optimizers.SGD(learning_rate=args.learning_rate)
train_fn = private_train_step if args.dpsgd else train_step
train_fn = partial(train_fn, model, optimizer, loss_fn, args)
if args.no_vmap:
print('No vmap for dpsgd!')
if args.no_jit:
print('No jit!')
else:
train_fn = tf.function(experimental_compile=args.use_xla)(train_fn)
with tf.device('GPU'):
        dummy = tf.convert_to_tensor(1.)  # we use this to force CUDA synchronization
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
for i, batch in enumerate(data.dataloader(train_data, train_labels, args.batch_size)):
train_fn(batch)
            _ = dummy.numpy()  # forces a device->host transfer, synchronizing CUDA.
duration = time.perf_counter() - start
print("Time Taken: ", duration)
timings.append(duration)
if args.dpsgd:
# eps = compute_eps_poisson(epoch, args.noise_multiplier, num_train_eg, args.batch_size,
# 1e-5)
# mu = compute_mu_poisson(epoch, args.noise_multiplier, num_train_eg, args.batch_size)
# print('For delta=1e-5, the current epsilon is: %.2f' % eps)
# print('For delta=1e-5, the current mu is: %.2f' % mu)
print('Trained with DPSGD optimizer')
else:
print('Trained with vanilla non-private SGD optimizer')
if not args.no_save:
append_to_name = ''
if args.no_jit: append_to_name += '_nojit'
if args.no_vmap: append_to_name += '_novmap'
utils.save_runtimes(__file__.split('.')[0], args, timings, append_to_name)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument('--xla', dest='use_xla', action='store_true')
parser.add_argument('--no_xla', dest='use_xla', action='store_false')
parser.add_argument('--memory_limit', default=None, type=int)
parser.add_argument('--no_unroll', dest='no_unroll', action='store_true')
parser.add_argument('--no_vmap', dest='no_vmap', action='store_true')
parser.add_argument('--no_jit', dest='no_jit', action='store_true')
args = parser.parse_args()
main(args)
| 10,368 | 39.346304 | 100 | py |
fast-dpsgd | fast-dpsgd-main/pyvacydp.py | '''
Pyvacy implementations
'''
import time
import torch
import torch.nn.functional as F
from pyvacy import analysis, optim
from torch import nn
import data
import utils
from pytorch import get_data, model_dict
def main(args):
print(args)
assert args.dpsgd
torch.backends.cudnn.benchmark = True
train_data, train_labels = get_data(args)
num_complete_batches, leftover = divmod(len(train_data), args.batch_size)
num_batches = num_complete_batches + bool(leftover)
model = model_dict[args.experiment](vocab_size=args.max_features).cuda()
loss_function = nn.CrossEntropyLoss() if args.experiment != 'logreg' else nn.BCELoss()
opt = optim.DPSGD(params=model.parameters(),
l2_norm_clip=args.l2_norm_clip,
noise_multiplier=args.noise_multiplier,
minibatch_size=args.batch_size,
microbatch_size=1,
lr=args.learning_rate)
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
dataloader = data.dataloader(train_data, train_labels, args.batch_size)
for batch_idx, (x_mb, y_mb) in enumerate(dataloader):
x_mb, y_mb = x_mb.cuda(non_blocking=True), y_mb.cuda(non_blocking=True)
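            # PyVacy processes one-example microbatches: each example's gradient is clipped and
            # accumulated via microbatch_step(), and opt.step() then noises and applies the aggregate.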
for x, y in zip(x_mb, y_mb):
opt.zero_microbatch_grad()
out = model(x[None])
curr_loss = loss_function(out, y[None])
curr_loss.backward()
opt.microbatch_step()
opt.step()
duration = time.perf_counter() - start
print("Time Taken for Epoch: ", duration)
timings.append(duration)
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
args = parser.parse_args()
main(args)
| 1,971 | 29.8125 | 90 | py |
fast-dpsgd | fast-dpsgd-main/owkindp.py | '''
Code for Grad-CNN implementations
'''
import time
import torch
import torch.nn.functional as F
from gradcnn import crb, make_optimizer
from torch import nn, optim
import data
import utils
from pytorch import get_data
class MNISTNet(crb.Module):
def __init__(self, **_):
super().__init__()
self.conv1 = crb.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = crb.Conv2d(16, 32, 4, 2)
self.fc1 = crb.Linear(32 * 4 * 4, 32)
self.fc2 = crb.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = F.relu(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = F.relu(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = F.relu(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
class FFNN(crb.Module):
def __init__(self, **_):
super().__init__()
self.fc1 = crb.Linear(104, 50)
self.fc2 = crb.Linear(50, 2)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
return out
class Logistic(crb.Module):
def __init__(self, **_):
super().__init__()
self.fc1 = crb.Linear(104, 1)
def forward(self, x):
out = self.fc1(x)
        out = torch.sigmoid(out)  # F.sigmoid is deprecated in favor of torch.sigmoid
return out
class CIFAR10Model(crb.Module):
def __init__(self, **_):
super().__init__()
self.layer_list = crb.ModuleList([
crb.Sequential(crb.Conv2d(3, 32, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
crb.Sequential(crb.Conv2d(32, 32, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
crb.Sequential(crb.Conv2d(32, 64, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
crb.Sequential(crb.Conv2d(64, 64, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
crb.Sequential(crb.Conv2d(64, 128, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
crb.Sequential(crb.Conv2d(128, 128, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
nn.AvgPool2d(2, stride=2),
crb.Sequential(crb.Conv2d(128, 256, (3, 3), padding=1, stride=(1, 1)), nn.ReLU()),
crb.Conv2d(256, 10, (3, 3), padding=1, stride=(1, 1)),
])
def forward(self, x):
for layer in self.layer_list:
x = layer(x)
# print(x.shape)
return torch.mean(x, dim=(2, 3))
model_dict = {
'mnist': MNISTNet,
'ffnn': FFNN,
'logreg': Logistic,
'cifar10': CIFAR10Model,
}
def main(args):
print(args)
assert args.dpsgd
torch.backends.cudnn.benchmark = True
train_data, train_labels = get_data(args)
model = model_dict[args.experiment](vocab_size=args.max_features).cuda()
model.get_detail(True)
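    # get_detail(True) appears to switch gradcnn into tracking per-example gradient details,
    # which the DP optimizer wrapper needs for clipping (assumption based on library usage here).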
optimizer = make_optimizer(
cls=optim.SGD,
noise_multiplier=args.noise_multiplier,
l2_norm_clip=args.l2_norm_clip,
)(model.parameters(), lr=args.learning_rate)
loss_function = nn.CrossEntropyLoss() if args.experiment != 'logreg' else nn.BCELoss()
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
dataloader = data.dataloader(train_data, train_labels, args.batch_size)
for batch_idx, (x, y) in enumerate(dataloader):
x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
model.zero_grad()
outputs = model(x)
loss = loss_function(outputs, y)
loss.backward()
optimizer.step()
torch.cuda.synchronize()
duration = time.perf_counter() - start
print("Time Taken for Epoch: ", duration)
timings.append(duration)
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(model_dict.keys())
parser.add_argument(
"--sigma",
type=float,
default=1.0,
help="Noise multiplier (default 1.0)",
)
parser.add_argument(
"-c",
"--max-per-sample-grad_norm",
type=float,
default=1.0,
help="Clip per-sample gradients to this norm (default 1.0)",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
help="Target delta (default: 1e-5)",
)
args = parser.parse_args()
main(args)
| 4,604 | 28.519231 | 94 | py |
fast-dpsgd | fast-dpsgd-main/memory_experiment.py | import argparse
import pickle
import subprocess
from utils import pr_green, pr_red
# yapf: disable
CMDS = dict((
('jax', 'python jaxdp.py {} --no_dpsgd --no_save --dummy_data'),
('tf2', 'python tf2dp.py {} --no_dpsgd --no_xla --no_save --dummy_data'),
('tf1', 'python tf1dp.py {} --no_dpsgd --no_xla --no_save --dummy_data'),
('pytorch', 'python pytorch.py {} --no_dpsgd --no_save --dummy_data'),
('jaxdp', 'python jaxdp.py {} --dpsgd --no_save --dummy_data'),
('tf2dp', 'python tf2dp.py {} --dpsgd --no_xla --no_save --dummy_data'),
('tf1dp', 'python tf1dp.py {} --dpsgd --no_xla --no_save --dummy_data'),
('opacusdp', 'python opacusdp.py {} --dpsgd --no_save --dummy_data'),
('backpackdp', 'python backpackdp.py {} --dpsgd --no_save --dummy_data'),
('owkindp', 'python owkindp.py {} --dpsgd --no_save --dummy_data'),
('tf2xla', 'python tf2dp.py {} --no_dpsgd --xla --no_save --dummy_data'),
('tf2dpxla', 'python tf2dp.py {} --dpsgd --xla --no_save --dummy_data'),
('tf1xla', 'TF_XLA_FLAGS=--tf_xla_auto_jit=2 python tf1dp.py {} --no_dpsgd --xla --no_save --dummy_data'),
('tf1dpxla', 'TF_XLA_FLAGS=--tf_xla_auto_jit=2 python tf1dp.py {} --dpsgd --xla --no_save --dummy_data'),
# PyVacy processes examples individually irrespective of batch size, so it won't OOM, so we don't test it.
# ('pyvacydp', 'python pyvacydp.py {} --dpsgd --no_save --dummy_data'),
))
# yapf: enable
def oom_fn(bs, cmd, print_error=False):
"""Runs script at batch size bs and checks if the script OOMs"""
proc = subprocess.run(
[cmd + f' --batch_size {bs}'],
# check=True,
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True)
lower = proc.stdout.lower()
    # last condition is because of a (hard to reproduce) PyTorch bug. When the batch size
# is slightly too big, we'll get a CuDNN error instead of an OOM error.
oom = ('out of memory' in lower or 'oom' in lower or 'resourceexhausted' in lower
or 'cudnn error' in lower)
if oom and print_error:
pr_red(proc.stdout)
pr_red(proc.stderr)
if not oom and proc.returncode != 0:
pr_red(proc.stdout)
pr_red(proc.stderr)
raise ValueError('Not OOM but returncode != 0')
s = '' if oom else 'not'
print(f'Batch Size {bs} {s} OOM.')
return oom
def binary_search(low, high, cmd, args):
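    """Binary search between `low` (assumed to fit) and `high` (known to OOM), to within args.thresh."""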
if high - low > args.thresh:
mid = int((high + low) // 2)
oom = oom_fn(mid, cmd)
if oom:
return binary_search(low, mid, cmd, args)
else:
return binary_search(mid, high, cmd, args)
else:
return low
def get_max_batchsize(run, expt, args):
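    """Double the batch size until it OOMs, then binary search for the largest batch size that fits.
    Returns (max_ok, max_ok + thresh), or (-1, -1) if the initial run errors out.
    """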
bs = args.init_bs
oom = False
cmd = f'CUDA_VISIBLE_DEVICES={args.device} {CMDS[run].format(expt)} --epochs {args.epochs}'
if expt == 'lstm':
if 'jax' in run:
cmd = 'JAX_OMNISTAGING=0 ' + cmd
if run in ('tf1', 'tf2', 'tf1xla'):
cmd = cmd + ' --no_unroll'
pr_green(cmd)
out = subprocess.run([cmd + f' --batch_size {bs}'],
shell=True,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
universal_newlines=True).stdout
print(out)
if 'Error' in out:
return (-1, -1)
# Get a reasonable range for the batch size
while not oom:
bs *= 2
oom = oom_fn(bs, cmd, print_error=True)
max_bs = binary_search(bs / 2, bs, cmd, args)
pr_green(f'Max Batch Size: {max_bs}')
return (max_bs, max_bs + args.thresh)
def main(args):
print(args)
name = '_' + args.name if args.name else ''
save_list = []
for run in args.runs:
for expt in args.experiments:
save_list.append((run, expt, *get_max_batchsize(run, expt, args)))
with open(f'results/raw/memory_expt{name}.pkl', 'wb') as handle:
pickle.dump(save_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(f'results/raw/memory_expt{name}.pkl', 'wb') as handle:
pickle.dump(save_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
print(f'Done! Saved to results/raw/memory_expt{name}.pkl')
if __name__ == '__main__':
parser = argparse.ArgumentParser('Returns Max Batch Size before OOM')
parser.add_argument('--epochs', default=2, type=int)
parser.add_argument('--name', default='', type=str)
parser.add_argument('--init_bs', default=64, type=int)
parser.add_argument('--thresh', default=8, type=int)
parser.add_argument('--device', default=0, type=int)
parser.add_argument('--experiments',
default=['logreg', 'ffnn', 'mnist', 'embed', 'lstm', 'cifar10'],
nargs='+')
parser.add_argument('--runs', default=CMDS.keys(), nargs='+')
args = parser.parse_args()
main(args)
| 5,000 | 37.767442 | 114 | py |
fast-dpsgd | fast-dpsgd-main/backpackdp.py | '''
BackPACK experiments in this file
'''
import time
import torch
import torch.nn.functional as F
from backpack import backpack, extend
from backpack.extensions import BatchGrad, BatchL2Grad
from torch import nn
from torch.optim import Optimizer
import data
import utils
from pytorch import get_data, model_dict
def make_broadcastable(v, X):
"""Returns a view of `v` that can be broadcast with `X`.
If `v` is a one-dimensional tensor [N] and `X` is a tensor of shape
`[N, ..., ]`, returns a view of v with singleton dimensions appended.
Example:
`v` is a tensor of shape `[10]` and `X` is a tensor of shape `[10, 3, 3]`.
We want to multiply each `[3, 3]` element of `X` by the corresponding
element of `v` to get a matrix `Y` of shape `[10, 3, 3]` such that
`Y[i, a, b] = v[i] * X[i, a, b]`.
`w = make_broadcastable(v, X)` gives a `w` of shape `[10, 1, 1]`,
and we can now broadcast `Y = w * X`.
"""
broadcasting_shape = (-1, *[1 for _ in X.shape[1:]])
return v.reshape(broadcasting_shape)
class DP_SGD(Optimizer):
"""Differentially Private SGD.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): coefficient that scale delta before it is applied
to the parameters (default: 1.0)
max_norm (float, optional): maximum norm of the individual gradient,
to which they will be clipped if exceeded (default: 0.01)
stddev (float, optional): standard deviation of the added noise
(default: 1.0)
"""
def __init__(self, params, lr=0.1, max_norm=1.0, stddev=1.0):
self.lr = lr
self.max_norm = max_norm
self.stddev = stddev
super().__init__(params, dict())
def step(self):
"""Performs a single optimization step.
The function expects the gradients to have been computed by BackPACK
and the parameters to have a ``batch_l2`` and ``grad_batch`` attribute.
"""
l2_norms_all_params_list = []
for group in self.param_groups:
for p in group["params"]:
l2_norms_all_params_list.append(p.batch_l2)
l2_norms_all_params = torch.stack(l2_norms_all_params_list)
total_norms = torch.sqrt(torch.sum(l2_norms_all_params, dim=0))
scaling_factors = torch.clamp_max(total_norms / self.max_norm, 1.0)
for group in self.param_groups:
for p in group["params"]:
clipped_grads = p.grad_batch * make_broadcastable(scaling_factors, p.grad_batch)
clipped_grad = torch.sum(clipped_grads, dim=0)
noise_magnitude = self.stddev * self.max_norm
noise = torch.randn_like(clipped_grad) * noise_magnitude
perturbed_update = clipped_grad + noise
p.data.add_(-self.lr * perturbed_update)
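# Per-experiment DP-SGD settings (max gradient norm and noise stddev); the lstm and embed entries
# are left commented out in this configuration.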
dpsgd_kwargs = {
'mnist': {
'max_norm': 0.01,
'stddev': 2.0
},
# 'lstm': {'max_norm': 1.0, 'stddev': 1.1},
# 'embed': {'max_norm': 1.0, 'stddev': 1.1},
'ffnn': {
'max_norm': 1.0,
'stddev': 1.1
},
'logreg': {
'max_norm': 1.0,
'stddev': 1.1
},
'cifar10': {
'max_norm': 1.0,
'stddev': 1.1
},
}
def main(args):
print(args)
assert args.dpsgd
torch.backends.cudnn.benchmark = True
train_data, train_labels = get_data(args)
model = model_dict[args.experiment](vocab_size=args.max_features).cuda()
model = extend(model)
optimizer = DP_SGD(model.parameters(), lr=args.learning_rate, **dpsgd_kwargs[args.experiment])
loss_function = nn.CrossEntropyLoss() if args.experiment != 'logreg' else nn.BCELoss()
timings = []
for epoch in range(1, args.epochs + 1):
start = time.perf_counter()
dataloader = data.dataloader(train_data, train_labels, args.batch_size)
for batch_idx, (x, y) in enumerate(dataloader):
x, y = x.cuda(non_blocking=True), y.cuda(non_blocking=True)
model.zero_grad()
outputs = model(x)
loss = loss_function(outputs, y)
with backpack(BatchGrad(), BatchL2Grad()):
loss.backward()
optimizer.step()
torch.cuda.synchronize()
duration = time.perf_counter() - start
print("Time Taken for Epoch: ", duration)
timings.append(duration)
if not args.no_save:
utils.save_runtimes(__file__.split('.')[0], args, timings)
else:
print('Not saving!')
print('Done!')
if __name__ == '__main__':
parser = utils.get_parser(dpsgd_kwargs.keys())
args = parser.parse_args()
main(args)
| 4,755 | 31.8 | 98 | py |
nocturne | nocturne-main/setup.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run via ```python setup.py develop``` to install Nocturne in your environment."""
import logging
import multiprocessing
import os
import re
import subprocess
import sys
from distutils.version import LooseVersion
from setuptools import Extension, setup
from setuptools.command.build_ext import build_ext
# Reference:
# https://www.benjack.io/2017/06/12/python-cpp-tests.html
class CMakeExtension(Extension):
"""Use CMake to construct the Nocturne extension."""
def __init__(self, name, src_dir=""):
Extension.__init__(self, name, sources=[])
self.src_dir = os.path.abspath(src_dir)
class CMakeBuild(build_ext):
"""Utility class for building Nocturne."""
def run(self):
"""Run cmake."""
try:
cmake_version = subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
cmake_version = LooseVersion(
re.search(r"version\s*([\d.]+)", cmake_version.decode()).group(1))
if cmake_version < "3.14":
raise RuntimeError("CMake >= 3.14 is required.")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
"""Run the C++ build commands."""
ext_dir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = [
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + ext_dir,
"-DPYTHON_EXECUTABLE=" + sys.executable
]
cfg = "Debug" if self.debug else "Release"
build_args = ["--config", cfg]
cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg]
build_args += ["--", f"-j{multiprocessing.cpu_count()}"]
env = os.environ.copy()
env["CXXFLAGS"] = f'{env.get("CXXFLAGS", "")} \
-DVERSION_INFO="{self.distribution.get_version()}"'
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
cmd = ["cmake", ext.src_dir] + cmake_args
try:
subprocess.check_call(cmd, cwd=self.build_temp, env=env)
except subprocess.CalledProcessError:
logging.error(f"Aborting due to errors when running command {cmd}")
sys.exit(1)
cmd = ["cmake", "--build", "."] + build_args
try:
subprocess.check_call(cmd, cwd=self.build_temp)
except subprocess.CalledProcessError:
logging.error(f"Aborting due to errors when running command {cmd}")
sys.exit(1)
print() # Add an empty line for cleaner output
def main():
"""Build the C++ code."""
# with open("./requirements.txt", "r") as f:
# requires = f.read().splitlines()
setup(
ext_modules=[CMakeExtension("nocturne", "./nocturne")],
cmdclass=dict(build_ext=CMakeBuild),
)
if __name__ == "__main__":
main()
| 3,205 | 30.431373 | 84 | py |
nocturne | nocturne-main/cfgs/config.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Set path to all the Waymo data and the parsed Waymo files."""
import os
from pathlib import Path
from hydra import compose, initialize
from hydra.core.global_hydra import GlobalHydra
from omegaconf import OmegaConf
from pyvirtualdisplay import Display
VERSION_NUMBER = 2
PROJECT_PATH = Path.resolve(Path(__file__).parent.parent)
DATA_FOLDER = '/checkpoint/eugenevinitsky/waymo_open/motion_v1p1/uncompressed/scenario/'
TRAIN_DATA_PATH = os.path.join(DATA_FOLDER, 'training')
VALID_DATA_PATH = os.path.join(DATA_FOLDER, 'validation')
TEST_DATA_PATH = os.path.join(DATA_FOLDER, 'testing')
PROCESSED_TRAIN_NO_TL = os.path.join(
DATA_FOLDER, f'formatted_json_v{VERSION_NUMBER}_no_tl_train')
PROCESSED_VALID_NO_TL = os.path.join(
DATA_FOLDER, f'formatted_json_v{VERSION_NUMBER}_no_tl_valid')
PROCESSED_TRAIN = os.path.join(DATA_FOLDER,
f'formatted_json_v{VERSION_NUMBER}_train')
PROCESSED_VALID = os.path.join(DATA_FOLDER,
f'formatted_json_v{VERSION_NUMBER}_valid')
ERR_VAL = -1e4
def get_scenario_dict(hydra_cfg):
"""Convert the `scenario` key in the hydra config to a true dict."""
if isinstance(hydra_cfg['scenario'], dict):
return hydra_cfg['scenario']
else:
return OmegaConf.to_container(hydra_cfg['scenario'], resolve=True)
def get_default_scenario_dict():
"""Construct the `scenario` dict without w/o hydra decorator."""
GlobalHydra.instance().clear()
initialize(config_path="./")
cfg = compose(config_name="config")
return get_scenario_dict(cfg)
def set_display_window():
"""Set a virtual display for headless machines."""
if "DISPLAY" not in os.environ:
disp = Display()
disp.start()
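# Typical usage from an example script (illustrative note, not part of the
# original helpers): call set_display_window() before rendering on a headless
# machine, then pass get_scenario_dict(cfg) (inside a hydra-decorated main) or
# get_default_scenario_dict() to nocturne.Simulation as its `config` argument.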
| 1,939 | 35.603774 | 88 | py |
nocturne | nocturne-main/examples/create_env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test step and rendering functions."""
import hydra
from cfgs.config import set_display_window
from nocturne import Action
from nocturne.envs.wrappers import create_env
@hydra.main(config_path="../cfgs/", config_name="config")
def create_rl_env(cfg):
"""Test step and rendering functions."""
set_display_window()
env = create_env(cfg)
_ = env.reset()
# quick check that rendering works
_ = env.scenario.getConeImage(
env.scenario.getVehicles()[0],
# how far the agent can see
view_dist=cfg['subscriber']['view_dist'],
# the angle formed by the view cone
view_angle=cfg['subscriber']['view_angle'],
# the agent's head angle
head_angle=0.0,
# whether to draw the goal position in the image
draw_target_position=False)
for _ in range(80):
# grab the list of vehicles that actually need to
# move some distance to get to their goal
moving_vehs = env.scenario.getObjectsThatMoved()
# obs, rew, done, info
# each of these objects is a dict keyed by the vehicle ID
# info[veh_id] contains the following useful keys:
# 'collided': did the agent collide with a road object or edge
# 'veh_veh_collision': did the agent collide with a vehicle
# 'veh_edge_collision': did the agent collide with a road edge
# 'goal_achieved': did we get to our target
_, _, _, _ = env.step({
veh.id: Action(acceleration=2.0, steering=1.0, head_angle=0.5)
for veh in moving_vehs
})
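        # Sketch (assumption, not in the original example): to use the info
        # values described above, capture the step outputs instead of
        # discarding them, e.g.
        #   obs, rew, done, info = env.step(actions)
        #   n_done = sum(v.get('goal_achieved', False) for v in info.values())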
if __name__ == '__main__':
create_rl_env()
| 1,824 | 36.244898 | 74 | py |
nocturne | nocturne-main/examples/rendering.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Example of how to make movies of Nocturne scenarios."""
import os
import hydra
import imageio
import matplotlib.pyplot as plt
import numpy as np
from cfgs.config import PROJECT_PATH, get_scenario_dict, set_display_window
from nocturne import Simulation
def get_sim(cfg):
"""Initialize the scenario."""
# load scenario, set vehicles to be expert-controlled
sim = Simulation(scenario_path=str(PROJECT_PATH / 'examples' /
'example_scenario.json'),
config=get_scenario_dict(cfg))
for obj in sim.getScenario().getObjectsThatMoved():
obj.expert_control = True
return sim
def make_movie(cfg,
scenario_fn,
output_path='./vid.mp4',
dt=0.1,
steps=90,
fps=10):
"""Make a movie from the scenario."""
sim = get_sim(cfg)
scenario = sim.getScenario()
movie_frames = []
timestep = 0
movie_frames.append(scenario_fn(scenario, timestep))
for i in range(steps):
sim.step(dt)
timestep += 1
movie_frames.append(scenario_fn(scenario, timestep))
movie_frames = np.array(movie_frames)
imageio.mimwrite(output_path, movie_frames, fps=fps)
print('>', output_path)
del sim
del movie_frames
def make_image(cfg, scenario_fn, output_path='./img.png'):
"""Make a single image from the scenario."""
sim = get_sim(cfg)
scenario = sim.getScenario()
img = scenario_fn(scenario)
dpi = 100
height, width, depth = img.shape
figsize = width / float(dpi), height / float(dpi)
plt.figure(figsize=figsize, dpi=dpi)
plt.axis('off')
plt.imshow(img)
plt.savefig(output_path)
print('>', output_path)
@hydra.main(config_path="../cfgs/", config_name="config")
def main(cfg):
"""See file docstring."""
# NOTE: don't run this file all at once since the memory usage for
# rendering all the videos will be dozens of gigabytes
set_display_window()
if not os.path.exists(PROJECT_PATH / 'examples/rendering'):
os.makedirs(PROJECT_PATH / 'examples/rendering')
# movie of whole scenario
make_movie(
cfg,
scenario_fn=lambda scenario, _: scenario.getImage(
img_width=1600,
img_height=1600,
draw_target_positions=True,
padding=50.0,
),
output_path=PROJECT_PATH / 'examples/rendering' /
'movie_whole_scenario.mp4',
)
# movie around a vehicle
make_movie(
cfg,
scenario_fn=lambda scenario, _: scenario.getImage(
img_width=1600,
img_height=1600,
draw_target_positions=True,
padding=50.0,
source=scenario.getVehicles()[3],
view_width=120,
view_height=120,
rotate_with_source=True,
),
output_path=PROJECT_PATH / 'examples/rendering' /
'movie_around_vehicle.mp4',
)
# movie around a vehicle (without rotating with source)
make_movie(
cfg,
scenario_fn=lambda scenario, _: scenario.getImage(
img_width=1600,
img_height=1600,
draw_target_positions=True,
padding=50.0,
source=scenario.getObjectsThatMoved()[0],
view_width=120,
view_height=120,
rotate_with_source=False,
),
output_path=PROJECT_PATH / 'examples/rendering' /
'movie_around_vehicle_stable.mp4',
)
# movie of cone around vehicle
make_movie(
cfg,
scenario_fn=lambda scenario, _: scenario.getConeImage(
source=scenario.getObjectsThatMoved()[0],
view_dist=80,
view_angle=np.pi * (120 / 180),
head_angle=0.0,
img_width=1600,
img_height=1600,
padding=50.0,
draw_target_position=True,
),
output_path=PROJECT_PATH / 'examples/rendering' / 'movie_cone.mp4',
)
# movie of cone around vehicle with varying head angle
make_movie(
cfg,
scenario_fn=lambda scenario, timestep: scenario.getConeImage(
source=scenario.getVehicles()[6],
view_dist=80.0,
view_angle=np.pi * (120 / 180),
head_angle=0.8 * np.sin(timestep / 10),
img_width=1600,
img_height=1600,
padding=50.0,
draw_target_position=True,
),
output_path=PROJECT_PATH / 'examples/rendering' /
'movie_cone_head_angle.mp4',
)
# image of whole scenario
make_image(
cfg,
scenario_fn=lambda scenario: scenario.getImage(
img_width=2000,
img_height=2000,
padding=50.0,
draw_target_positions=True,
),
output_path=PROJECT_PATH / 'examples/rendering' / 'img_scenario.png',
)
# image of cone
make_image(
cfg,
scenario_fn=lambda scenario: scenario.getConeImage(
source=scenario.getVehicles()[9],
view_dist=80,
view_angle=np.pi * (120 / 180),
head_angle=np.pi / 8.0,
img_width=2000,
img_height=2000,
padding=50.0,
draw_target_position=True,
),
output_path=PROJECT_PATH / 'examples/rendering' /
'img_cone_tilted.png',
)
# image of visible state
make_image(
cfg,
scenario_fn=lambda scenario: scenario.getFeaturesImage(
source=scenario.getVehicles()[9],
view_dist=80,
view_angle=np.pi * (120 / 180),
head_angle=np.pi / 8.0,
img_width=2000,
img_height=2000,
padding=50.0,
draw_target_position=True,
),
output_path=PROJECT_PATH / 'examples/rendering' /
'img_features_tilted.png',
)
if __name__ == '__main__':
main()
| 6,141 | 28.960976 | 77 | py |
nocturne | nocturne-main/examples/nocturne_functions.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Example of how to make movies of Nocturne scenarios."""
import os
import hydra
import matplotlib.pyplot as plt
import numpy as np
from cfgs.config import PROJECT_PATH, get_scenario_dict, set_display_window
from nocturne import Simulation, Action
def save_image(img, output_path='./img.png'):
"""Make a single image from the scenario."""
dpi = 100
height, width, depth = img.shape
figsize = width / float(dpi), height / float(dpi)
plt.figure(figsize=figsize, dpi=dpi)
plt.axis('off')
plt.imshow(img)
plt.savefig(output_path)
print('>', output_path)
@hydra.main(config_path="../cfgs/", config_name="config")
def main(cfg):
"""Initialize the scenario."""
set_display_window()
if not os.path.exists(PROJECT_PATH / 'examples/rendering'):
os.makedirs(PROJECT_PATH / 'examples/rendering')
# load scenario. by default this won't have pedestrians or cyclists
sim = Simulation(scenario_path=str(PROJECT_PATH / 'examples' /
'example_scenario.json'),
config=get_scenario_dict(cfg))
scenario = sim.getScenario()
img = scenario.getImage(
img_width=2000,
img_height=2000,
padding=50.0,
draw_target_positions=True,
)
save_image(img,
PROJECT_PATH / 'examples/rendering' / 'scene_with_no_peds.png')
# grab all the vehicles
vehs = scenario.getVehicles()
# grab all the vehicles that moved and show some things
# we can do with them
vehs = scenario.getObjectsThatMoved()
vehs[0].highlight = True # draw a circle around it on the rendered image
# setting a vehicle to expert_control will cause
    # this agent to replay expert data starting from
# the current time in the simulation
vehs[0].expert_control = True
print(f'width is {vehs[0].width}, length is {vehs[0].length}')
print(f'speed is {vehs[0].speed}, heading is {vehs[0].heading}')
# for efficiency, we return position as a custom Vector2D object
# this object can be converted to and from numpy and comes with
# support for a variety of algebraic operations
print(f'position is {vehs[0].position}')
print(f'position as numpy array is {vehs[0].position.numpy()}')
print(f'norm of position is {vehs[0].position.norm()}')
print(f'angle in a world-centered frame {vehs[0].position.angle()}')
print(f'rotated position is {vehs[0].position.rotate(np.pi).numpy()}')
# we can set vehicle accel, steering, head angle directly
vehs[0].acceleration = -1
vehs[0].steering = 1
vehs[0].head_angle = np.pi
# we can also set them all directly using an action object
vehs[0].apply_action(Action(acceleration=-1, steering=1, head_angle=np.pi))
# we can grab the state for this vehicle in two way:
# 1) a flattened vector corresponding to the set of visible objects
# concatenated according to [visible objects, visible road points,
# visible stop signs, visible traffic lights]
# note that since we want to make a fixed length vector, for each of these
# types the config, under the scenario key has the following items
# max_visible_objects: 16
# max_visible_road_points: 1000
# max_visible_traffic_lights: 20
# max_visible_stop_signs: 4
# we grab all the visible items for each type, sort them by distance from
# the vehicle and return the closest. If we have fewer than the maximum
# we pad with 0s.
flattened_vector = scenario.flattened_visible_state(object=vehs[0],
view_dist=80,
view_angle=120 *
(np.pi / 180),
head_angle=0.0)
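    # Sketch (not part of the original walkthrough): thanks to the padding
    # described above, the flattened vector always has the same length for a
    # given config, which can be checked directly.
    print(f'flattened state length is {np.asarray(flattened_vector).size}')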
# we can also grab a dict of all of the objects
# if padding is true we will add extra objects to the dict
# to ensure we hit the maximum number of objects for each type
visible_dict = scenario.visible_state(object=vehs[0],
view_dist=80,
view_angle=120 * (np.pi / 180),
padding=False)
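    # Sketch (illustrative): the dict form keeps each object type separate;
    # its keys can be inspected directly, while the contents depend on what
    # is currently visible to the vehicle.
    print(f'visible state keys are {list(visible_dict.keys())}')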
# step the scenario. By default we step at 0.1s.
    # you can use any step you want, but if you do, make sure
    # not to make any vehicle an expert, as the expert positions / speeds / headings
# are only available in increments of 0.1 seconds
sim.step(cfg['dt'])
# load scenario, this time with pedestrians and cyclists
cfg['scenario']['allow_non_vehicles'] = True
sim = Simulation(scenario_path=str(PROJECT_PATH / 'examples' /
'example_scenario.json'),
config=get_scenario_dict(cfg))
scenario = sim.getScenario()
img = scenario.getImage(
img_width=2000,
img_height=2000,
padding=50.0,
draw_target_positions=True,
)
save_image(img,
PROJECT_PATH / 'examples/rendering' / 'scene_with_peds.png')
# now we need to be slightly more careful about how we select objects
# since getMovingObjects will return pedestrians and cyclists
# and getVehicles will return vehicles that don't necessarily need to move
objects_that_moved = scenario.getObjectsThatMoved()
objects_of_interest = [
obj for obj in scenario.getVehicles() if obj in objects_that_moved
    ] # noqa: F841
vehicles = scenario.getVehicles()
cyclists = scenario.getCyclists()
pedestrians = scenario.getPedestrians()
all_objects = scenario.getObjects()
if __name__ == '__main__':
main()
| 5,998 | 43.768657 | 83 | py |
nocturne | nocturne-main/examples/imitation_learning/waymo_data_loader.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Dataloader for imitation learning in Nocturne."""
from collections import defaultdict
import random
import torch
from pathlib import Path
import numpy as np
from cfgs.config import ERR_VAL
from nocturne import Simulation
def _get_waymo_iterator(paths, dataloader_config, scenario_config):
# if worker has no paths, return an empty iterator
if len(paths) == 0:
return
# load dataloader config
tmin = dataloader_config.get('tmin', 0)
tmax = dataloader_config.get('tmax', 90)
view_dist = dataloader_config.get('view_dist', 80)
view_angle = dataloader_config.get('view_angle', np.radians(120))
dt = dataloader_config.get('dt', 0.1)
expert_action_bounds = dataloader_config.get('expert_action_bounds',
[[-3, 3], [-0.7, 0.7]])
expert_position = dataloader_config.get('expert_position', True)
state_normalization = dataloader_config.get('state_normalization', 100)
n_stacked_states = dataloader_config.get('n_stacked_states', 5)
while True:
# select a random scenario path
scenario_path = np.random.choice(paths)
# create simulation
sim = Simulation(str(scenario_path), scenario_config)
scenario = sim.getScenario()
# set objects to be expert-controlled
for obj in scenario.getObjects():
obj.expert_control = True
# we are interested in imitating vehicles that moved
objects_that_moved = scenario.getObjectsThatMoved()
objects_of_interest = [
obj for obj in scenario.getVehicles() if obj in objects_that_moved
]
# initialize values if stacking states
stacked_state = defaultdict(lambda: None)
initial_warmup = n_stacked_states - 1
state_list = []
action_list = []
# iterate over timesteps and objects of interest
for time in range(tmin, tmax):
for obj in objects_of_interest:
# get state
ego_state = scenario.ego_state(obj)
visible_state = scenario.flattened_visible_state(
obj, view_dist=view_dist, view_angle=view_angle)
state = np.concatenate((ego_state, visible_state))
# normalize state
state /= state_normalization
# stack state
if n_stacked_states > 1:
if stacked_state[obj.getID()] is None:
stacked_state[obj.getID()] = np.zeros(
len(state) * n_stacked_states, dtype=state.dtype)
stacked_state[obj.getID()] = np.roll(
stacked_state[obj.getID()], len(state))
stacked_state[obj.getID()][:len(state)] = state
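                    # the stacked vector is laid out newest-first: np.roll
                    # shifts the older frames back by len(state) and the
                    # freshest observation overwrites the leading slot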
if np.isclose(obj.position.x, ERR_VAL):
continue
if not expert_position:
# get expert action
expert_action = scenario.expert_action(obj, time)
# check for invalid action (because no value available for taking derivative)
# or because the vehicle is at an invalid state
if expert_action is None:
continue
expert_action = expert_action.numpy()
                    # throw out expert actions that contain NaN
                    # or out-of-bound values
if np.isnan(expert_action).any() \
or expert_action[0] < expert_action_bounds[0][0] \
or expert_action[0] > expert_action_bounds[0][1] \
or expert_action[1] < expert_action_bounds[1][0] \
or expert_action[1] > expert_action_bounds[1][1]:
continue
else:
expert_pos_shift = scenario.expert_pos_shift(obj, time)
if expert_pos_shift is None:
continue
expert_pos_shift = expert_pos_shift.numpy()
expert_heading_shift = scenario.expert_heading_shift(
obj, time)
if expert_heading_shift is None \
or expert_pos_shift[0] < expert_action_bounds[0][0] \
or expert_pos_shift[0] > expert_action_bounds[0][1] \
or expert_pos_shift[1] < expert_action_bounds[1][0] \
or expert_pos_shift[1] > expert_action_bounds[1][1] \
or expert_heading_shift < expert_action_bounds[2][0] \
or expert_heading_shift > expert_action_bounds[2][1]:
continue
expert_action = np.concatenate(
(expert_pos_shift, [expert_heading_shift]))
# yield state and expert action
if stacked_state[obj.getID()] is not None:
if initial_warmup <= 0: # warmup to wait for stacked state to be filled up
state_list.append(stacked_state[obj.getID()])
action_list.append(expert_action)
else:
state_list.append(state)
action_list.append(expert_action)
# step the simulation
sim.step(dt)
if initial_warmup > 0:
initial_warmup -= 1
if len(state_list) > 0:
temp = list(zip(state_list, action_list))
random.shuffle(temp)
state_list, action_list = zip(*temp)
for state_return, action_return in zip(state_list, action_list):
yield (state_return, action_return)
class WaymoDataset(torch.utils.data.IterableDataset):
"""Waymo dataset loader."""
def __init__(self,
data_path,
dataloader_config={},
scenario_config={},
file_limit=None):
super(WaymoDataset).__init__()
# save configs
self.dataloader_config = dataloader_config
self.scenario_config = scenario_config
# get paths of dataset files (up to file_limit paths)
self.file_paths = list(
Path(data_path).glob('tfrecord*.json'))[:file_limit]
print(f'WaymoDataset: loading {len(self.file_paths)} files.')
# sort the paths for reproducibility if testing on a small set of files
self.file_paths.sort()
def __iter__(self):
"""Partition files for each worker and return an (state, expert_action) iterable."""
# get info on current worker process
worker_info = torch.utils.data.get_worker_info()
if worker_info is None:
# single-process data loading, return the whole set of files
return _get_waymo_iterator(self.file_paths, self.dataloader_config,
self.scenario_config)
# distribute a unique set of file paths to each worker process
worker_file_paths = np.array_split(
self.file_paths, worker_info.num_workers)[worker_info.id]
return _get_waymo_iterator(list(worker_file_paths),
self.dataloader_config,
self.scenario_config)
if __name__ == '__main__':
dataset = WaymoDataset(data_path='dataset/tf_records',
file_limit=20,
dataloader_config={
'view_dist': 80,
'n_stacked_states': 3,
},
scenario_config={
'start_time': 0,
'allow_non_vehicles': True,
'spawn_invalid_objects': True,
})
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=32,
num_workers=4,
pin_memory=True,
)
for i, x in zip(range(100), data_loader):
print(i, x[0].shape, x[1].shape)
| 8,395 | 40.564356 | 97 | py |
nocturne | nocturne-main/examples/imitation_learning/model.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Model for an imitation learning agent."""
import torch
from torch import nn
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.categorical import Categorical
from examples.imitation_learning.filters import MeanStdFilter
class ImitationAgent(nn.Module):
"""Pytorch Module for imitation. Output is a Multivariable Gaussian."""
def __init__(self, cfg):
"""Initialize."""
super(ImitationAgent, self).__init__()
self.n_states = cfg['n_inputs']
self.hidden_layers = cfg.get('hidden_layers', [256, 256])
self.discrete = cfg['discrete']
if self.discrete:
self.actions_discretizations = cfg['actions_discretizations']
self.actions_bounds = cfg['actions_bounds']
self.actions_grids = [
torch.linspace(a_min, a_max, a_count,
requires_grad=False).to(cfg['device'])
for (a_min, a_max), a_count in zip(
self.actions_bounds, self.actions_discretizations)
]
else:
# neural network outputs between -1 and 1 (tanh filter)
# then output is sampled from a Gaussian distribution
# N(nn output * mean_scalings, std_devs)
self.mean_scalings = torch.tensor(cfg['mean_scalings'])
self.std_devs = torch.tensor(cfg['std_devs'])
self.covariance_matrix = torch.diag_embed(self.std_devs)
self._build_model()
def _build_model(self):
"""Build agent MLP that outputs an action mean and variance from a state input."""
if self.hidden_layers is None or len(self.hidden_layers) == 0:
self.nn = nn.Identity()
pre_head_size = self.n_states
else:
self.nn = nn.Sequential(
MeanStdFilter(self.n_states),
nn.Linear(self.n_states, self.hidden_layers[0]),
nn.Tanh(),
*[
nn.Sequential(
nn.Linear(self.hidden_layers[i],
self.hidden_layers[i + 1]),
nn.Tanh(),
) for i in range(len(self.hidden_layers) - 1)
],
)
pre_head_size = self.hidden_layers[-1]
if self.discrete:
self.heads = nn.ModuleList([
nn.Linear(pre_head_size, discretization)
for discretization in self.actions_discretizations
])
else:
self.head = nn.Sequential(
nn.Linear(pre_head_size, len(self.mean_scalings)), nn.Tanh())
def dist(self, state):
"""Construct a distribution from tensor input."""
x_out = self.nn(state)
if self.discrete:
return [Categorical(logits=head(x_out)) for head in self.heads]
else:
return MultivariateNormal(
self.head(x_out) * self.mean_scalings, self.covariance_matrix)
def forward(self, state, deterministic=False, return_indexes=False):
"""Generate an output from tensor input."""
dists = self.dist(state)
if self.discrete:
actions_idx = [
d.logits.argmax(axis=-1) if deterministic else d.sample()
for d in dists
]
actions = [
action_grid[action_idx] for action_grid, action_idx in zip(
self.actions_grids, actions_idx)
]
return (actions, actions_idx) if return_indexes else actions
else:
            # in the continuous case, `dists` is a single MultivariateNormal
            return dists.mean if deterministic else dists.sample()
def log_prob(self, state, ground_truth_action, return_indexes=False):
"""Compute the log prob of the expert action for a given input tensor."""
dist = self.dist(state)
if self.discrete:
# find indexes in actions grids whose values are the closest to the ground truth actions
actions_idx = self.action_to_grid_idx(ground_truth_action)
            # sum log probs of actions indexes wrt. Categorical variables for each action dimension
log_prob = sum(
[d.log_prob(actions_idx[:, i]) for i, d in enumerate(dist)])
return (log_prob, actions_idx) if return_indexes else log_prob
else:
return dist.log_prob(ground_truth_action)
def action_to_grid_idx(self, action):
"""Convert a batch of actions to a batch of action indexes (for discrete actions only)."""
# action is of shape (batch_size, n_actions)
# we want to transform it into an array of same shape, but with indexes instead of actions
# credits https://stackoverflow.com/a/46184652/16207351
output = torch.zeros_like(action)
for i, action_grid in enumerate(self.actions_grids):
actions = action[:, i]
# get indexes where actions would be inserted in action_grid to keep it sorted
idxs = torch.searchsorted(action_grid, actions)
# if it would be inserted at the end, we're looking at the last action
idxs[idxs == len(action_grid)] -= 1
# find indexes where previous index is closer (simple grid has constant sampling intervals)
idxs[action_grid[idxs] - actions > torch.diff(action_grid).mean() *
0.5] -= 1
# write indexes in output
output[:, i] = idxs
return output
if __name__ == '__main__':
model_cfg = {
'n_inputs': 100,
'hidden_layers': [256, 256],
'discrete': False,
'mean_scalings': [1, 10, 10000],
'std_devs': [1.0, 1.0, 1.0],
        'device': 'cpu',  # needed when 'discrete' is True (grids live on this device)
    }
if True:
model_cfg.update({
'discrete': True,
'actions_discretizations': [5, 10],
'actions_bounds': [[-3, 3], [0, 10]],
})
model = ImitationAgent(model_cfg)
sample_states = torch.rand(3, model_cfg['n_inputs'])
actions = model(sample_states)
print(actions)
    if model_cfg['discrete']:
        # log_prob expects ground-truth actions as a (batch, n_actions) tensor
        actions = torch.stack(actions, dim=-1)
    print(model.log_prob(sample_states, actions))
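    # Sketch (illustrative, relies on the discrete settings above): map raw
    # continuous action values onto the nearest grid indexes, which is what
    # log_prob does internally before scoring expert actions.
    if model_cfg['discrete']:
        raw_actions = torch.tensor([[-2.9, 0.4], [2.9, 9.6]])
        print(model.action_to_grid_idx(raw_actions))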
| 6,354 | 39.221519 | 103 | py |
nocturne | nocturne-main/examples/imitation_learning/filters.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""A streaming mean-std filter used to whiten inputs."""
import torch
from torch import nn
class MeanStdFilter(nn.Module):
"""adapted from https://www.johndcook.com/blog/standard_deviation/."""
def __init__(self, input_shape, eps=1e-05):
super().__init__()
self.input_shape = input_shape
self.eps = eps
self.track_running_states = True
self.counter = 0
self._M = nn.Parameter(torch.zeros(input_shape), requires_grad=False)
self._S = nn.Parameter(torch.zeros(input_shape), requires_grad=False)
self._n = 0
    def train(self, mode=True):
        """Toggle updates to the running mean and standard deviation."""
        self.track_running_states = mode
def eval(self):
"""Turn off updates to mean and standard deviation."""
self.track_running_states = False
def forward(self, x):
"""Whiten and optionally update."""
if self.track_running_states:
for i in range(x.shape[0]):
self.push(x[i])
x = x - self.mean
x = x / (self.std + self.eps)
return x
def push(self, x):
"""Unvectorized update of the running statistics."""
if x.shape != self._M.shape:
raise ValueError(
"Unexpected input shape {}, expected {}, value = {}".format(
x.shape, self._M.shape, x))
n1 = self._n
self._n += 1
if self._n == 1:
self._M[...] = x
else:
delta = x - self._M
self._M[...] += delta / self._n
self._S[...] += delta * delta * n1 / self._n
@property
def n(self):
"""Return the number of samples."""
return self._n
@property
def mean(self):
"""Return the mean."""
return self._M
@property
def var(self):
"""Compute the variance."""
return self._S / (self._n - 1) if self._n > 1 else torch.square(
self._M)
@property
def std(self):
"""Compute the standard deviation."""
return torch.sqrt(self.var)
@property
def shape(self):
"""Get the means shape."""
return self._M.shape
| 2,385 | 28.825 | 77 | py |
nocturne | nocturne-main/examples/imitation_learning/train.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Imitation learning training script (behavioral cloning)."""
from datetime import datetime
from pathlib import Path
import pickle
import random
import json
import hydra
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.optim import Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
import wandb
from examples.imitation_learning.model import ImitationAgent
from examples.imitation_learning.waymo_data_loader import WaymoDataset
def set_seed_everywhere(seed):
"""Ensure determinism."""
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
@hydra.main(config_path="../../cfgs/imitation", config_name="config")
def main(args):
"""Train an IL model."""
set_seed_everywhere(args.seed)
# create dataset and dataloader
if args.actions_are_positions:
expert_bounds = [[-0.5, 3], [-3, 3], [-0.07, 0.07]]
actions_discretizations = [21, 21, 21]
actions_bounds = [[-0.5, 3], [-3, 3], [-0.07, 0.07]]
mean_scalings = [3, 3, 0.07]
std_devs = [0.1, 0.1, 0.02]
else:
expert_bounds = [[-6, 6], [-0.7, 0.7]]
actions_bounds = expert_bounds
actions_discretizations = [15, 43]
mean_scalings = [3, 0.7]
std_devs = [0.1, 0.02]
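    # Sketch (inspection only; mirrors how ImitationAgent builds its action
    # grids internally from these bounds and discretizations):
    for (act_lo, act_hi), n_bins in zip(actions_bounds,
                                        actions_discretizations):
        print(f'discrete action grid: {n_bins} bins over [{act_lo}, {act_hi}]')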
dataloader_cfg = {
'tmin': 0,
'tmax': 90,
'view_dist': args.view_dist,
'view_angle': args.view_angle,
'dt': 0.1,
'expert_action_bounds': expert_bounds,
'expert_position': args.actions_are_positions,
'state_normalization': 100,
'n_stacked_states': args.n_stacked_states,
}
scenario_cfg = {
'start_time': 0,
'allow_non_vehicles': True,
'spawn_invalid_objects': True,
'max_visible_road_points': args.max_visible_road_points,
'sample_every_n': 1,
'road_edge_first': False,
}
dataset = WaymoDataset(
data_path=args.path,
file_limit=args.num_files,
dataloader_config=dataloader_cfg,
scenario_config=scenario_cfg,
)
data_loader = iter(
DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.n_cpus,
pin_memory=True,
))
# create model
sample_state, _ = next(data_loader)
n_states = sample_state.shape[-1]
model_cfg = {
'n_inputs': n_states,
'hidden_layers': [1024, 256, 128],
'discrete': args.discrete,
'mean_scalings': mean_scalings,
'std_devs': std_devs,
'actions_discretizations': actions_discretizations,
'actions_bounds': actions_bounds,
'device': args.device
}
model = ImitationAgent(model_cfg).to(args.device)
model.train()
print(model)
# create optimizer
optimizer = Adam(model.parameters(), lr=args.lr)
# create exp dir
time_str = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
exp_dir = Path.cwd() / Path('train_logs') / time_str
exp_dir.mkdir(parents=True, exist_ok=True)
# save configs
configs_path = exp_dir / 'configs.json'
configs = {
'scenario_cfg': scenario_cfg,
'dataloader_cfg': dataloader_cfg,
'model_cfg': model_cfg,
}
with open(configs_path, 'w') as fp:
json.dump(configs, fp, sort_keys=True, indent=4)
print('Wrote configs at', configs_path)
# tensorboard writer
if args.write_to_tensorboard:
writer = SummaryWriter(log_dir=str(exp_dir))
# wandb logging
if args.wandb:
wandb_mode = "online"
wandb.init(config=args,
project=args.wandb_project,
name=args.experiment,
group=args.experiment,
resume="allow",
settings=wandb.Settings(start_method="fork"),
mode=wandb_mode)
# train loop
print('Exp dir created at', exp_dir)
print(f'`tensorboard --logdir={exp_dir}`\n')
for epoch in range(args.epochs):
print(f'\nepoch {epoch+1}/{args.epochs}')
n_samples = epoch * args.batch_size * (args.samples_per_epoch //
args.batch_size)
for i in tqdm(range(args.samples_per_epoch // args.batch_size),
unit='batch'):
# get states and expert actions
states, expert_actions = next(data_loader)
states = states.to(args.device)
expert_actions = expert_actions.to(args.device)
# compute loss
if args.discrete:
log_prob, expert_idxs = model.log_prob(states,
expert_actions,
return_indexes=True)
else:
dist = model.dist(states)
log_prob = dist.log_prob(expert_actions.float())
loss = -log_prob.mean()
metrics_dict = {}
# optim step
optimizer.zero_grad()
loss.backward()
# grad clipping
total_norm = 0
for p in model.parameters():
if p.grad is not None:
param_norm = p.grad.detach().data.norm(2)
total_norm += param_norm.item()**2
total_norm = total_norm**0.5
metrics_dict['train/grad_norm'] = total_norm
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
total_norm = 0
for p in model.parameters():
if p.grad is not None:
param_norm = p.grad.detach().data.norm(2)
total_norm += param_norm.item()**2
total_norm = total_norm**0.5
metrics_dict['train/post_clip_grad_norm'] = total_norm
optimizer.step()
# tensorboard logging
metrics_dict['train/loss'] = loss.item()
if args.actions_are_positions:
metrics_dict['train/x_logprob'] = log_prob[0]
metrics_dict['train/y_logprob'] = log_prob[1]
metrics_dict['train/steer_logprob'] = log_prob[2]
else:
metrics_dict['train/accel_logprob'] = log_prob[0]
metrics_dict['train/steer_logprob'] = log_prob[1]
if not model_cfg['discrete']:
diff_actions = torch.mean(torch.abs(dist.mean -
expert_actions),
axis=0)
metrics_dict['train/accel_diff'] = diff_actions[0]
metrics_dict['train/steer_diff'] = diff_actions[1]
metrics_dict['train/l2_dist'] = torch.norm(
dist.mean - expert_actions.float())
if model_cfg['discrete']:
with torch.no_grad():
model_actions, model_idxs = model(states,
deterministic=True,
return_indexes=True)
accuracy = [
(model_idx == expert_idx).float().mean(axis=0)
for model_idx, expert_idx in zip(model_idxs, expert_idxs.T)
]
if args.actions_are_positions:
metrics_dict['train/x_pos_acc'] = accuracy[0]
metrics_dict['train/y_pos_acc'] = accuracy[1]
metrics_dict['train/heading_acc'] = accuracy[2]
else:
metrics_dict['train/accel_acc'] = accuracy[0]
metrics_dict['train/steer_acc'] = accuracy[1]
for key, val in metrics_dict.items():
if args.write_to_tensorboard:
writer.add_scalar(key, val, n_samples)
if args.wandb:
wandb.log(metrics_dict, step=n_samples)
# save model checkpoint
if (epoch + 1) % 10 == 0 or epoch == args.epochs - 1:
model_path = exp_dir / f'model_{epoch+1}.pth'
torch.save(model, str(model_path))
print(f'\nSaved model at {model_path}')
if args.discrete:
if args.actions_are_positions:
print('xpos')
print('model: ', model_idxs[0][0:10])
print('expert: ', expert_idxs[0:10, 0])
print('ypos')
print('model: ', model_idxs[1][0:10])
print('expert: ', expert_idxs[0:10, 1])
print('steer')
print('model: ', model_idxs[2][0:10])
print('expert: ', expert_idxs[0:10, 2])
else:
print('accel')
print('model: ', model_idxs[0][0:10])
print('expert: ', expert_idxs[0:10, 0])
print('steer')
print('model: ', model_idxs[1][0:10])
print('expert: ', expert_idxs[0:10, 1])
print('Done, exp dir is', exp_dir)
    if args.write_to_tensorboard:
        writer.flush()
        writer.close()
if __name__ == '__main__':
main()
| 9,424 | 35.111111 | 79 | py |
nocturne | nocturne-main/examples/imitation_learning/replay_video.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Replay a video of a trained controller."""
from collections import defaultdict
import json
from pathlib import Path
import sys
import imageio
import numpy as np
import subprocess
import torch
from cfgs.config import PROCESSED_TRAIN_NO_TL, PROJECT_PATH, set_display_window
from nocturne import Simulation, Vector2D
OUTPUT_PATH = str(PROJECT_PATH / 'vids')
MODEL_PATH = Path(
'/checkpoint/eugenevinitsky/nocturne/test/2022.06.05/test/14.23.17/\
++device=cuda,++file_limit=1000/train_logs/2022_06_05_14_23_23/model_220.pth'
)
CONFIG_PATH = MODEL_PATH.parent / 'configs.json'
GOAL_TOLERANCE = 1.0
if __name__ == '__main__':
set_display_window()
output_dir = Path(OUTPUT_PATH)
output_dir.mkdir(exist_ok=True)
with open(CONFIG_PATH, 'r') as f:
configs = json.load(f)
data_path = PROCESSED_TRAIN_NO_TL
files = [
file for file in Path(data_path).iterdir() if 'tfrecord' in file.stem
]
scenario_config = configs['scenario_cfg']
dataloader_config = configs['dataloader_cfg']
files = files[:600]
np.random.shuffle(files)
model = torch.load(MODEL_PATH).to('cpu')
model.eval()
for traj_path in files:
sim = Simulation(str(traj_path), scenario_config)
output_str = traj_path.stem.split('.')[0].split('/')[-1]
def policy(state):
"""Get model output."""
state = torch.as_tensor(np.array([state]), dtype=torch.float32)
return model.forward(state,
deterministic=True,
return_indexes=False)
with torch.no_grad():
for expert_control_vehicles, mp4_name in [
(False, f'{output_str}_policy_rollout.mp4'),
(True, f'{output_str}_true_rollout.mp4')
]:
frames = []
sim.reset()
scenario = sim.getScenario()
objects_of_interest = [
obj for obj in scenario.getVehicles()
if obj in scenario.getObjectsThatMoved()
]
for obj in objects_of_interest:
obj.expert_control = True
relevant_obj_ids = [
obj.getID() for obj in objects_of_interest[0:2]
]
view_dist = configs['dataloader_cfg']['view_dist']
view_angle = configs['dataloader_cfg']['view_angle']
state_normalization = configs['dataloader_cfg'][
'state_normalization']
dt = configs['dataloader_cfg']['dt']
n_stacked_states = configs['dataloader_cfg'][
'n_stacked_states']
state_size = configs['model_cfg'][
'n_inputs'] // n_stacked_states
state_dict = defaultdict(
lambda: np.zeros(state_size * n_stacked_states))
for i in range(n_stacked_states):
for veh in objects_of_interest:
ego_state = scenario.ego_state(veh)
visible_state = scenario.flattened_visible_state(
veh, view_dist=view_dist, view_angle=view_angle)
state = np.concatenate(
(ego_state, visible_state)) / state_normalization
state_dict[veh.getID()] = np.roll(
state_dict[veh.getID()], len(state))
state_dict[veh.getID()][:len(state)] = state
sim.step(dt)
for obj in scenario.getObjectsThatMoved():
obj.expert_control = True
# we only actually want to take control once the vehicle
# has been placed into the network
for veh in objects_of_interest:
if np.isclose(veh.position.x, -10000.0):
veh.expert_control = True
else:
if veh.getID() in relevant_obj_ids:
veh.expert_control = expert_control_vehicles
veh.highlight = True
for i in range(90 - n_stacked_states):
# we only actually want to take control once the vehicle
# has been placed into the network
# so vehicles that should be controlled by our agent
# are overriden to be expert controlled
# until they are actually spawned in the scene
for veh in objects_of_interest:
if np.isclose(veh.position.x, -10000.0):
veh.expert_control = True
else:
if veh.getID() in relevant_obj_ids:
veh.expert_control = expert_control_vehicles
veh.highlight = True
print(
f'...{i+1}/{90 - n_stacked_states} ({traj_path} ; {mp4_name})'
)
img = scenario.getImage(
img_width=1600,
img_height=1600,
draw_target_positions=True,
padding=50.0,
)
frames.append(img)
for veh in objects_of_interest:
veh_state = np.concatenate(
(np.array(scenario.ego_state(veh), copy=False),
np.array(scenario.flattened_visible_state(
veh,
view_dist=view_dist,
view_angle=view_angle),
copy=False)))
ego_state = scenario.ego_state(veh)
visible_state = scenario.flattened_visible_state(
veh, view_dist=view_dist, view_angle=view_angle)
state = np.concatenate(
(ego_state, visible_state)) / state_normalization
state_dict[veh.getID()] = np.roll(
state_dict[veh.getID()], len(state))
state_dict[veh.getID()][:len(state)] = state
action = policy(state_dict[veh.getID()])
if dataloader_config['expert_position']:
if configs['model_cfg']['discrete']:
pos_diff = np.array([
pos.cpu().numpy()[0] for pos in action[0:2]
])
heading = action[2:3][0].cpu().numpy()[0]
else:
pos_diff = action[0:2]
heading = action[2:3]
veh.position = Vector2D.from_numpy(
pos_diff + veh.position.numpy())
veh.heading += heading
else:
veh.acceleration = action[0].cpu().numpy()
veh.steering = action[1].cpu().numpy()
sim.step(dt)
for veh in scenario.getObjectsThatMoved():
if (veh.position -
veh.target_position).norm() < GOAL_TOLERANCE:
scenario.removeVehicle(veh)
imageio.mimsave(mp4_name, np.stack(frames, axis=0), fps=30)
print(f'> {mp4_name}')
# stack the movies side by side
output_name = traj_path.stem.split('.')[0].split('/')[-1]
output_path = f'{output_name}_output.mp4'
ffmpeg_command = f'ffmpeg -y -i {output_str}_true_rollout.mp4 ' \
f'-i {output_str}_policy_rollout.mp4 -filter_complex hstack {output_path}'
print(ffmpeg_command)
subprocess.call(ffmpeg_command.split(' '))
print(f'> {output_path}')
sys.exit()
| 8,334 | 43.572193 | 86 | py |
nocturne | nocturne-main/examples/sample_factory_files/visualize_sample_factory.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Use to create movies of trained policies."""
import argparse
from collections import deque
import json
import sys
import time
import os
import imageio
import matplotlib.pyplot as plt
import numpy as np
import torch
from sample_factory.algorithms.appo.actor_worker import transform_dict_observations
from sample_factory.algorithms.appo.learner import LearnerWorker
from sample_factory.algorithms.appo.model import create_actor_critic
from sample_factory.algorithms.appo.model_utils import get_hidden_size
from sample_factory.algorithms.utils.action_distributions import ContinuousActionDistribution, \
CategoricalActionDistribution
from sample_factory.algorithms.utils.arguments import load_from_checkpoint
from sample_factory.algorithms.utils.multi_agent_wrapper import MultiAgentWrapper, is_multiagent_env
from sample_factory.envs.create_env import create_env
from sample_factory.utils.utils import log, AttrDict
from run_sample_factory import register_custom_components
from cfgs.config import PROCESSED_TRAIN_NO_TL, PROCESSED_VALID_NO_TL, PROJECT_PATH, set_display_window # noqa: F401
def run_eval(cfg_dict, max_num_frames=1e9):
"""Run evaluation over a single file. Exits when one episode finishes.
Args:
        cfg_dict (dict): configuration for instantiating the agents and environment.
max_num_frames (int, optional): Deprecated. Should be removed.
Returns
-------
None: None
"""
cfg = load_from_checkpoint(cfg_dict)
render_action_repeat = cfg.render_action_repeat if cfg.render_action_repeat is not None else cfg.env_frameskip
if render_action_repeat is None:
log.warning('Not using action repeat!')
render_action_repeat = 1
log.debug('Using action repeat %d during evaluation', render_action_repeat)
cfg.env_frameskip = 1 # for evaluation
cfg.num_envs = 1
cfg.seed = np.random.randint(10000)
cfg.scenario_path = cfg_dict.scenario_path
def make_env_func(env_config):
return create_env(cfg.env, cfg=cfg, env_config=env_config)
env = make_env_func(AttrDict({'worker_index': 0, 'vector_index': 0}))
is_multiagent = is_multiagent_env(env)
if not is_multiagent:
env = MultiAgentWrapper(env)
if hasattr(env.unwrapped, 'reset_on_init'):
# reset call ruins the demo recording for VizDoom
env.unwrapped.reset_on_init = False
actor_critic = create_actor_critic(cfg, env.observation_space,
env.action_space)
device = torch.device('cpu' if cfg.device == 'cpu' else 'cuda')
actor_critic.model_to_device(device)
policy_id = cfg.policy_index
checkpoints = LearnerWorker.get_checkpoints(
LearnerWorker.checkpoint_dir(cfg, policy_id))
checkpoint_dict = LearnerWorker.load_checkpoint(checkpoints, device)
actor_critic.load_state_dict(checkpoint_dict['model'])
episode_rewards = [deque([], maxlen=100) for _ in range(env.num_agents)]
true_rewards = [deque([], maxlen=100) for _ in range(env.num_agents)]
num_frames = 0
last_render_start = time.time()
def max_frames_reached(frames):
return max_num_frames is not None and frames > max_num_frames
obs = env.reset()
print(os.path.join(env.cfg['scenario_path'], env.unwrapped.file))
rnn_states = torch.zeros(
[env.num_agents, get_hidden_size(cfg)],
dtype=torch.float32,
device=device)
episode_reward = np.zeros(env.num_agents)
finished_episode = [False] * env.num_agents
if not cfg.no_render:
fig = plt.figure()
frames = []
ego_frames = []
feature_frames = []
with torch.no_grad():
while not max_frames_reached(num_frames):
obs_torch = AttrDict(transform_dict_observations(obs))
for key, x in obs_torch.items():
obs_torch[key] = torch.from_numpy(x).to(device).float()
policy_outputs = actor_critic(obs_torch,
rnn_states,
with_action_distribution=True)
# sample actions from the distribution by default
actions = policy_outputs.actions
action_distribution = policy_outputs.action_distribution
if isinstance(action_distribution, ContinuousActionDistribution):
if not cfg.continuous_actions_sample: # TODO: add similar option for discrete actions
actions = action_distribution.means
if isinstance(action_distribution, CategoricalActionDistribution):
if not cfg.discrete_actions_sample:
actions = policy_outputs['action_logits'].argmax(axis=1)
actions = actions.cpu().numpy()
rnn_states = policy_outputs.rnn_states
for _ in range(render_action_repeat):
if not cfg.no_render:
target_delay = 1.0 / cfg.fps if cfg.fps > 0 else 0
current_delay = time.time() - last_render_start
time_wait = target_delay - current_delay
if time_wait > 0:
# log.info('Wait time %.3f', time_wait)
time.sleep(time_wait)
last_render_start = time.time()
img = env.render()
frames.append(img)
ego_img = env.render_ego()
if ego_img is not None:
ego_frames.append(ego_img)
feature_img = env.render_features()
if feature_img is not None:
feature_frames.append(feature_img)
obs, rew, done, infos = env.step(actions)
episode_reward += rew
num_frames += 1
for agent_i, done_flag in enumerate(done):
if done_flag:
finished_episode[agent_i] = True
episode_rewards[agent_i].append(
episode_reward[agent_i])
true_rewards[agent_i].append(infos[agent_i].get(
'true_reward', episode_reward[agent_i]))
log.info(
'Episode finished for agent %d at %d frames. Reward: %.3f, true_reward: %.3f',
agent_i, num_frames, episode_reward[agent_i],
true_rewards[agent_i][-1])
rnn_states[agent_i] = torch.zeros(
[get_hidden_size(cfg)],
dtype=torch.float32,
device=device)
episode_reward[agent_i] = 0
# if episode terminated synchronously for all agents, pause a bit before starting a new one
if all(done):
if not cfg.no_render:
imageio.mimsave(os.path.join(PROJECT_PATH,
'animation.mp4'),
np.array(frames),
fps=30)
plt.close(fig)
imageio.mimsave(os.path.join(PROJECT_PATH,
'animation_ego.mp4'),
np.array(ego_frames),
fps=30)
plt.close(fig)
imageio.mimsave(os.path.join(PROJECT_PATH,
'animation_feature.mp4'),
np.array(feature_frames),
fps=30)
plt.close(fig)
if not cfg.no_render:
env.render()
time.sleep(0.05)
if all(finished_episode):
finished_episode = [False] * env.num_agents
avg_episode_rewards_str, avg_true_reward_str = '', ''
for agent_i in range(env.num_agents):
avg_rew = np.mean(episode_rewards[agent_i])
avg_true_rew = np.mean(true_rewards[agent_i])
if not np.isnan(avg_rew):
if avg_episode_rewards_str:
avg_episode_rewards_str += ', '
avg_episode_rewards_str += f'#{agent_i}: {avg_rew:.3f}'
if not np.isnan(avg_true_rew):
if avg_true_reward_str:
avg_true_reward_str += ', '
avg_true_reward_str += f'#{agent_i}: {avg_true_rew:.3f}'
avg_goal = infos[0]['episode_extra_stats']['goal_achieved']
avg_collisions = infos[0]['episode_extra_stats'][
'collided']
log.info(f'Avg goal achieved, {avg_goal}')
log.info(f'Avg num collisions, {avg_collisions}')
log.info('Avg episode rewards: %s, true rewards: %s',
avg_episode_rewards_str, avg_true_reward_str)
log.info(
'Avg episode reward: %.3f, avg true_reward: %.3f',
np.mean([
np.mean(episode_rewards[i])
for i in range(env.num_agents)
]),
np.mean([
np.mean(true_rewards[i])
for i in range(env.num_agents)
]))
return avg_goal
env.close()
def main():
"""Script entry point."""
set_display_window()
register_custom_components()
parser = argparse.ArgumentParser()
parser.add_argument('cfg_path', type=str)
args = parser.parse_args()
file_path = os.path.join(args.cfg_path, 'cfg.json')
with open(file_path, 'r') as file:
cfg_dict = json.load(file)
cfg_dict['cli_args'] = {}
cfg_dict['fps'] = 0
cfg_dict['render_action_repeat'] = None
cfg_dict['no_render'] = False
cfg_dict['policy_index'] = 0
cfg_dict['record_to'] = os.path.join(os.getcwd(), '..', 'recs')
cfg_dict['continuous_actions_sample'] = True
cfg_dict['discrete_actions_sample'] = False
cfg_dict['remove_at_collide'] = True
cfg_dict['remove_at_goal'] = True
cfg_dict['scenario_path'] = PROCESSED_VALID_NO_TL
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
cfg = Bunch(cfg_dict)
avg_goals = []
for _ in range(1):
avg_goal = run_eval(cfg)
avg_goals.append(avg_goal)
print(avg_goals)
print('the total average goal achieved is {}'.format(np.mean(avg_goals)))
if __name__ == '__main__':
sys.exit(main())
| 11,171 | 39.923077 | 116 | py |
nocturne | nocturne-main/examples/sample_factory_files/run_sample_factory.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Runner script for sample factory.
To run in single agent mode on one file for testing.
python -m run_sample_factory algorithm=APPO ++algorithm.train_in_background_thread=True \
++algorithm.num_workers=10 ++algorithm.experiment=EXPERIMENT_NAME \
++max_num_vehicles=1 ++num_files=1
To run in multiagent mode on one file for testing
python -m run_sample_factory algorithm=APPO ++algorithm.train_in_background_thread=True \
++algorithm.num_workers=10 ++algorithm.experiment=EXPERIMENT_NAME \
++num_files=1
To run on all files set ++num_files=-1
For debugging
python -m run_sample_factory algorithm=APPO ++algorithm.train_in_background_thread=False \
++algorithm.num_workers=1 ++force_envs_single_thread=False
After training for a desired period of time, evaluate the policy by running:
python -m sample_factory_examples.enjoy_custom_multi_env --algo=APPO \
--env=my_custom_multi_env_v1 --experiment=example
"""
import os
import sys
import hydra
import numpy as np
from omegaconf import OmegaConf
from sample_factory.envs.env_registry import global_env_registry
from sample_factory.run_algorithm import run_algorithm
from sample_factory_examples.train_custom_env_custom_model import override_default_params_func
from sample_factory.algorithms.appo.model_utils import get_obs_shape, EncoderBase, nonlinearity, register_custom_encoder
from torch import nn
from nocturne.envs.wrappers import create_env
class SampleFactoryEnv():
"""Wrapper environment that converts between our dicts and Sample Factory format."""
def __init__(self, env):
"""Initialize wrapper.
Args
----
env (BaseEnv): Base environment that we are wrapping.
"""
self.env = env
self.num_agents = self.env.cfg['max_num_vehicles']
self.agent_ids = [i for i in range(self.num_agents)]
self.is_multiagent = True
_ = self.env.reset()
# used to track which agents are done
self.already_done = [False for _ in self.agent_ids]
self.episode_rewards = np.zeros(self.num_agents)
def step(self, actions):
"""Convert between environment dicts and sample factory lists.
Important to note:
1) Items in info['episode_extra_stats'] will be logged by sample factory.
2) sample factory does not reset the environment for you
so we reset it if the env returns __all__ in its done dict
Args:
actions ({str: numpy array}): agent actions
Returns
-------
obs_n ([np.array]): N length list of agent observations
rew_n ([float]): N length list of agent rewards
info_n ([{str: float}]): N length list of info dicts
done_n ([bool]): N length list of whether agents are done
"""
agent_actions = {}
for action, agent_id, already_done in zip(actions, self.agent_ids,
self.already_done):
if already_done:
continue
agent_actions[self.agent_id_to_env_id_map[agent_id]] = action
next_obses, rew, done, info = self.env.step(agent_actions)
rew_n = []
done_n = []
info_n = []
for agent_id in self.agent_ids:
# first check that the agent_id ever had a corresponding vehicle
# and then check that there's actually an observation for it i.e. it's not done
if agent_id in self.agent_id_to_env_id_map.keys(
) and self.agent_id_to_env_id_map[agent_id] in next_obses.keys():
map_key = self.agent_id_to_env_id_map[agent_id]
# since the environment may have just reset, we don't actually have
# reward objects yet
rew_n.append(rew.get(map_key, 0))
agent_info = info.get(map_key, {})
# track the per-agent reward for later logging
self.episode_rewards[agent_id] += rew.get(map_key, 0)
self.num_steps[agent_id] += 1
self.goal_achieved[agent_id] = self.goal_achieved[
agent_id] or agent_info['goal_achieved']
self.collided[agent_id] = self.collided[
agent_id] or agent_info['collided']
self.veh_edge_collided[agent_id] = self.veh_edge_collided[
agent_id] or agent_info['veh_edge_collision']
self.veh_veh_collided[agent_id] = self.veh_veh_collided[
agent_id] or agent_info['veh_veh_collision']
else:
rew_n.append(0)
agent_info = {}
if self.already_done[agent_id]:
agent_info['is_active'] = False
else:
agent_info['is_active'] = True
info_n.append(agent_info)
# now stick in some extra state information if needed
# anything in episode_extra_stats is logged at the end of the episode
if done['__all__']:
# log any extra info that you need
avg_rew = np.mean(self.episode_rewards[self.valid_indices])
avg_len = np.mean(self.num_steps[self.valid_indices])
avg_goal_achieved = np.mean(self.goal_achieved[self.valid_indices])
avg_collided = np.mean(self.collided[self.valid_indices])
avg_veh_edge_collided = np.mean(
self.veh_edge_collided[self.valid_indices])
avg_veh_veh_collided = np.mean(
self.veh_veh_collided[self.valid_indices])
for info in info_n:
info['episode_extra_stats'] = {}
info['episode_extra_stats']['avg_rew'] = avg_rew
info['episode_extra_stats']['avg_agent_len'] = avg_len
info['episode_extra_stats'][
'goal_achieved'] = avg_goal_achieved
info['episode_extra_stats']['collided'] = avg_collided
info['episode_extra_stats'][
'veh_edge_collision'] = avg_veh_edge_collided
info['episode_extra_stats'][
'veh_veh_collision'] = avg_veh_veh_collided
# update the dones so we know if we need to reset
# sample factory does not call reset for you
for env_id, done_val in done.items():
# handle the __all__ signal that's just in there for
# telling when the environment should stop
if env_id == '__all__':
continue
if done_val:
agent_id = self.env_id_to_agent_id_map[env_id]
self.already_done[agent_id] = True
# okay, now if all the agents are done set done to True for all of them
# otherwise, False. Sample factory uses info['is_active'] to track if agents
# are done, not the done signal
# also, convert the obs_dict into the right format
if done['__all__']:
done_n = [True] * self.num_agents
obs_n = self.reset()
else:
done_n = [False] * self.num_agents
obs_n = self.obs_dict_to_list(next_obses)
return obs_n, rew_n, done_n, info_n
def obs_dict_to_list(self, obs_dict):
"""Convert the dictionary returned by the environment into a fixed size list of arrays.
Args:
obs_dict ({agent id in environment: observation}): dict mapping ID to observation
Returns
-------
[np.array]: List of arrays ordered by which agent ID they correspond to.
"""
obs_n = []
for agent_id in self.agent_ids:
# first check that the agent_id ever had a corresponding vehicle
# and then check that there's actually an observation for it i.e. it's not done
if agent_id in self.agent_id_to_env_id_map.keys(
) and self.agent_id_to_env_id_map[agent_id] in obs_dict.keys():
map_key = self.agent_id_to_env_id_map[agent_id]
obs_n.append(obs_dict[map_key])
else:
obs_n.append(self.dead_feat)
return obs_n
def reset(self):
"""Reset the environment.
Key things done here:
1) build a map between the agent IDs in the environment (which are not necessarily 0-N)
and the agent IDs for sample factory which are from 0 to the maximum number of agents
2) sample factory (until some bugs are fixed) requires a fixed number of agents. Some of these
agents will be dummy agents that do not act in the environment. So, here we build valid
indices which can be used to figure out which agent IDs correspond
Returns
-------
[np.array]: List of numpy arrays, one for each agent.
"""
# track the agent_ids that actually take an action during the episode
self.valid_indices = []
self.episode_rewards = np.zeros(self.num_agents)
self.num_steps = np.zeros(self.num_agents)
self.goal_achieved = np.zeros(self.num_agents)
self.collided = np.zeros(self.num_agents)
self.veh_veh_collided = np.zeros(self.num_agents)
self.veh_edge_collided = np.zeros(self.num_agents)
self.already_done = [False for _ in self.agent_ids]
next_obses = self.env.reset()
env_keys = sorted(list(next_obses.keys()))
# agent ids is a list going from 0 to (num_agents - 1)
# however, the vehicle IDs might go from 0 to anything
# we want to initialize a mapping that is maintained through the episode and always
# uniquely convert the vehicle ID to an agent id
self.agent_id_to_env_id_map = {
agent_id: env_id
for agent_id, env_id in zip(self.agent_ids, env_keys)
}
self.env_id_to_agent_id_map = {
env_id: agent_id
for agent_id, env_id in zip(self.agent_ids, env_keys)
}
# if there isn't a mapping from an agent id to a vehicle id, that agent should be
# set to permanently inactive
for agent_id in self.agent_ids:
if agent_id not in self.agent_id_to_env_id_map.keys():
self.already_done[agent_id] = True
else:
# check that this isn't actually a fake padding agent used
# when keep_inactive_agents is True
if agent_id in self.agent_id_to_env_id_map.keys(
) and self.agent_id_to_env_id_map[
agent_id] not in self.env.dead_agent_ids:
self.valid_indices.append(agent_id)
obs_n = self.obs_dict_to_list(next_obses)
return obs_n
@property
def observation_space(self):
"""See superclass."""
return self.env.observation_space
@property
def action_space(self):
"""See superclass."""
return self.env.action_space
def render(self, mode=None):
"""See superclass."""
return self.env.render(mode)
def seed(self, seed=None):
"""Pass the seed to the environment."""
self.env.seed(seed)
def __getattr__(self, name):
"""Pass attributes directly through to the wrapped env. TODO(remove)."""
return getattr(self.env, name)
class CustomEncoder(EncoderBase):
"""Encoder for the input."""
def __init__(self, cfg, obs_space, timing):
super().__init__(cfg, timing)
obs_shape = get_obs_shape(obs_space)
assert len(obs_shape.obs) == 1
fc_encoder_layer = cfg.encoder_hidden_size
encoder_layers = [
nn.Linear(obs_shape.obs[0], fc_encoder_layer),
nonlinearity(cfg),
nn.Linear(fc_encoder_layer, fc_encoder_layer),
nonlinearity(cfg),
]
self.mlp_head = nn.Sequential(*encoder_layers)
self.init_fc_blocks(fc_encoder_layer)
def forward(self, obs_dict):
"""See superclass."""
x = self.mlp_head(obs_dict['obs'])
x = self.forward_fc_blocks(x)
return x
def make_custom_multi_env_func(full_env_name, cfg, env_config=None):
"""Return a wrapped base environment.
Args:
full_env_name (str): Unused.
cfg (dict): Dict needed to configure the environment.
env_config (dict, optional): Deprecated. Will be removed from SampleFactory later.
Returns
-------
SampleFactoryEnv: Wrapped environment.
"""
env = create_env(cfg)
return SampleFactoryEnv(env)
def register_custom_components():
"""Register needed constructors for custom environments."""
global_env_registry().register_env(
env_name_prefix='my_custom_multi_env_',
make_env_func=make_custom_multi_env_func,
override_default_params_func=override_default_params_func,
)
register_custom_encoder('custom_env_encoder', CustomEncoder)
@hydra.main(config_path="../../cfgs/", config_name="config")
def main(cfg):
"""Script entry point."""
register_custom_components()
# cfg = parse_args()
# TODO(ev) hacky renaming and restructuring, better to do this cleanly
cfg_dict = OmegaConf.to_container(cfg, resolve=True)
# copy algo keys into the main keys
for key, value in cfg_dict['algorithm'].items():
cfg_dict[key] = value
# we didn't set a train directory so use the hydra one
if cfg_dict['train_dir'] is None:
cfg_dict['train_dir'] = os.getcwd()
print(f'storing the results in {os.getcwd()}')
else:
output_dir = cfg_dict['train_dir']
print(f'storing results in {output_dir}')
# recommendation from Aleksei to keep horizon length fixed
# and number of agents fixed and just pad missing / exited
# agents with a vector of -1s
cfg_dict['subscriber']['keep_inactive_agents'] = True
# put it into a namespace so sample factory code runs correctly
class Bunch(object):
def __init__(self, adict):
self.__dict__.update(adict)
cfg = Bunch(cfg_dict)
status = run_algorithm(cfg)
return status
if __name__ == '__main__':
sys.exit(main())
| 14,308 | 39.535411 | 120 | py |
nocturne | nocturne-main/examples/sample_factory_files/results/plot_successes.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Util for plotting eval_sample_factory.py output."""
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
plt.figure()
num_arr = np.load('success_by_veh_number.npy')
for i in range(num_arr.shape[0]):
veh_num_arr = num_arr[i, i]
plt.figure()
plt.plot(list(range(len(veh_num_arr))), veh_num_arr[:, 0])
plt.plot(list(range(len(veh_num_arr))), veh_num_arr[:, 1])
plt.plot(list(range(len(veh_num_arr))),
veh_num_arr[:, 1] + veh_num_arr[:, 0])
plt.xlabel('num vehicles')
plt.ylabel('rate')
plt.legend(['goal rate', 'collide rate', 'sum'])
plt.title('goal rate as function of number of vehicles')
plt.savefig(f'{i}_goal_func_num.png')
plt.close()
num_arr = np.load('success_by_dist.npy')
for i in range(num_arr.shape[0]):
dist_arr = num_arr[i, i]
plt.figure()
plt.plot(10 * np.array(list(range(len(dist_arr)))), dist_arr[:, 0])
plt.plot(10 * np.array(list(range(len(dist_arr)))), dist_arr[:, 1])
plt.plot(10 * np.array(list(range(len(dist_arr)))),
dist_arr[:, 1] + dist_arr[:, 0])
plt.xlabel('distance')
plt.ylabel('rate')
plt.legend(['goal rate', 'collide rate', 'sum'])
plt.title('goal rate as function of start distance')
plt.savefig(f'{i}_goal_func_dist.png')
plt.close()
| 1,625 | 40.692308 | 75 | py |
nocturne | nocturne-main/examples/rllib_files/run_rllib.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Example run script for RLlib."""
import os
import hydra
from omegaconf import OmegaConf
from cfgs.config import set_display_window
import ray
from ray import tune
from ray.tune.registry import register_env
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from nocturne.envs.wrappers import create_env
class RLlibWrapperEnv(MultiAgentEnv):
"""Thin wrapper making our env look like a MultiAgentEnv."""
metadata = {
"render.modes": ["rgb_array"],
}
def __init__(self, env):
"""See wrapped env class."""
self._skip_env_checking = True # temporary fix for rllib env checking issue
super().__init__()
self._env = env
def step(self, actions):
"""See wrapped env class."""
next_obs, rew, done, info = self._env.step(actions)
return next_obs, rew, done, info
def reset(self):
"""See wrapped env class."""
obses = self._env.reset()
return obses
@property
def observation_space(self):
"""See wrapped env class."""
return self._env.observation_space
@property
def action_space(self):
"""See wrapped env class."""
return self._env.action_space
def render(self, mode=None):
"""See wrapped env class."""
return self._env.render()
def seed(self, seed=None):
"""Set seed on the wrapped env."""
self._env.seed(seed)
def __getattr__(self, name):
"""Return attributes from the wrapped env."""
return getattr(self._env, name)
def create_rllib_env(cfg):
"""Return an MultiAgentEnv wrapped environment."""
return RLlibWrapperEnv(create_env(cfg))
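# Note (assumption for exposition): RLlib's MultiAgentEnv API expects step()
# to return per-agent dicts of obs/reward/done/info, which the underlying
# Nocturne env already produces, so the wrapper above only forwards calls.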
@hydra.main(config_path="../../cfgs/", config_name="config")
def main(cfg):
"""Run RLlib example."""
set_display_window()
cfg = OmegaConf.to_container(cfg, resolve=True)
# TODO(eugenevinitsky) move these into a config
if cfg['debug']:
ray.init(local_mode=True)
num_workers = 0
num_envs_per_worker = 1
num_gpus = 0
use_lstm = False
else:
num_workers = 15
num_envs_per_worker = 5
num_gpus = 1
use_lstm = True
register_env("nocturne", lambda cfg: create_rllib_env(cfg))
username = os.environ["USER"]
tune.run(
"PPO",
# TODO(eugenevinitsky) move into config
local_dir=f"/checkpoint/{username}/nocturne/ray_results",
stop={"episodes_total": 60000},
checkpoint_freq=1000,
config={
            # Environment specific.
"env":
"nocturne",
"env_config":
cfg,
# General
"framework":
"torch",
"num_gpus":
num_gpus,
"num_workers":
num_workers,
"num_envs_per_worker":
num_envs_per_worker,
"observation_filter":
"MeanStdFilter",
# Method specific.
"entropy_coeff":
0.0,
"num_sgd_iter":
5,
"train_batch_size":
max(100 * num_workers * num_envs_per_worker, 512),
"rollout_fragment_length":
20,
"sgd_minibatch_size":
max(int(100 * num_workers * num_envs_per_worker / 4), 512),
"multiagent": {
# We only have one policy (calling it "shared").
# Class, obs/act-spaces, and config will be derived
# automatically.
"policies": {"shared_policy"},
# Always use "shared" policy.
"policy_mapping_fn":
(lambda agent_id, episode, **kwargs: "shared_policy"),
# each agent step is counted towards train_batch_size
# rather than environment steps
"count_steps_by":
"agent_steps",
},
"model": {
"use_lstm": use_lstm
},
# Evaluation stuff
"evaluation_interval":
50,
            # Run evaluation on (at least) one episode
"evaluation_duration":
1,
# ... using one evaluation worker (setting this to 0 will cause
# evaluation to run on the local evaluation worker, blocking
# training until evaluation is done).
# TODO: if this is not 0, it seems to error out
"evaluation_num_workers":
0,
# Special evaluation config. Keys specified here will override
# the same keys in the main config, but only for evaluation.
"evaluation_config": {
# Store videos in this relative directory here inside
# the default output dir (~/ray_results/...).
# Alternatively, you can specify an absolute path.
# Set to True for using the default output dir (~/ray_results/...).
# Set to False for not recording anything.
"record_env": "videos_test",
# "record_env": "/Users/xyz/my_videos/",
# Render the env while evaluating.
# Note that this will always only render the 1st RolloutWorker's
# env and only the 1st sub-env in a vectorized env.
"render_env": True,
},
},
)
if __name__ == "__main__":
main()
| 5,607 | 31.229885 | 84 | py |
nocturne | nocturne-main/examples/on_policy_files/nocturne_runner.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
"""Runner for PPO from https://github.com/marlbenchmark/on-policy."""
from pathlib import Path
import os
import time
import hydra
from cfgs.config import set_display_window
import imageio
import numpy as np
import setproctitle
import torch
import wandb
from algos.ppo.base_runner import Runner
from algos.ppo.env_wrappers import SubprocVecEnv, DummyVecEnv
from nocturne.envs.wrappers import create_ppo_env
def _t2n(x):
"""Convert torch tensor to a numpy array."""
return x.detach().cpu().numpy()
def make_train_env(cfg):
"""Construct a training environment."""
def get_env_fn(rank):
def init_env():
env = create_ppo_env(cfg, rank)
# TODO(eugenevinitsky) implement this
env.seed(cfg.seed + rank * 1000)
return env
return init_env
if cfg.algorithm.n_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv(
[get_env_fn(i) for i in range(cfg.algorithm.n_rollout_threads)])
def make_eval_env(cfg):
"""Construct an eval environment."""
def get_env_fn(rank):
def init_env():
env = create_ppo_env(cfg)
# TODO(eugenevinitsky) implement this
env.seed(cfg.seed + rank * 1000)
return env
return init_env
if cfg.algorithm.n_eval_rollout_threads == 1:
return DummyVecEnv([get_env_fn(0)])
else:
return SubprocVecEnv(
[get_env_fn(i) for i in range(cfg.algorithm.n_eval_rollout_threads)])
def make_render_env(cfg):
"""Construct a rendering environment."""
def get_env_fn(rank):
def init_env():
env = create_ppo_env(cfg)
# TODO(eugenevinitsky) implement this
env.seed(cfg.seed + rank * 1000)
return env
return init_env
return DummyVecEnv([get_env_fn(0)])
class NocturneSharedRunner(Runner):
"""
Runner class to perform training, evaluation and data collection for the Nocturne envs.
WARNING: Assumes a shared policy.
"""
def __init__(self, config):
"""Initialize."""
super(NocturneSharedRunner, self).__init__(config)
self.cfg = config['cfg.algo']
self.render_envs = config['render_envs']
def run(self):
"""Run the training code."""
self.warmup()
start = time.time()
        episodes = (int(self.num_env_steps) // self.episode_length //
                    self.n_rollout_threads)
for episode in range(episodes):
if self.use_linear_lr_decay:
self.trainer.policy.lr_decay(episode, episodes)
for step in range(self.episode_length):
# Sample actions
values, actions, action_log_probs, rnn_states, rnn_states_critic, actions_env = self.collect(
step)
                # Observe reward and next obs
obs, rewards, dones, infos = self.envs.step(actions_env)
data = obs, rewards, dones, infos, values, actions, action_log_probs, rnn_states, rnn_states_critic
# insert data into buffer
self.insert(data)
# compute return and update network
self.compute()
train_infos = self.train()
# post process
total_num_steps = (
episode + 1) * self.episode_length * self.n_rollout_threads
# save model
if (episode % self.save_interval == 0 or episode == episodes - 1):
self.save()
# log information
if episode % self.log_interval == 0:
end = time.time()
print(
"\n Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n"
.format(self.algorithm_name, self.experiment_name,
episode * self.n_rollout_threads,
episodes * self.n_rollout_threads, total_num_steps,
self.num_env_steps,
int(total_num_steps / (end - start))))
if self.use_wandb:
wandb.log({'fps': int(total_num_steps / (end - start))},
step=total_num_steps)
env_infos = {}
for agent_id in range(self.num_agents):
idv_rews = []
for info in infos:
if 'individual_reward' in info[agent_id].keys():
idv_rews.append(
info[agent_id]['individual_reward'])
agent_k = 'agent%i/individual_rewards' % agent_id
env_infos[agent_k] = idv_rews
# TODO(eugenevinitsky) this does not correctly account for the fact that there could be
# two episodes in the buffer
train_infos["average_episode_rewards"] = np.mean(
self.buffer.rewards) * self.episode_length
print("average episode rewards is {}".format(
train_infos["average_episode_rewards"]))
print(
f"maximum per step reward is {np.max(self.buffer.rewards)}"
)
self.log_train(train_infos, total_num_steps)
self.log_env(env_infos, total_num_steps)
# eval
if episode % self.eval_interval == 0 and self.use_eval:
self.eval(total_num_steps)
# save videos
if episode % self.cfg.render_interval == 0:
self.render(total_num_steps)
def warmup(self):
"""Initialize the buffers."""
# reset env
obs = self.envs.reset()
# replay buffer
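        # With a centralized critic, each agent's share_obs is the flattened
        # concatenation of all agents' observations in its env copy;
        # otherwise each agent only sees its own observation.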
if self.use_centralized_V:
share_obs = obs.reshape(self.n_rollout_threads, -1)
share_obs = np.expand_dims(share_obs, 1).repeat(self.num_agents,
axis=1)
else:
share_obs = obs
self.buffer.share_obs[0] = share_obs.copy()
self.buffer.obs[0] = obs.copy()
@torch.no_grad()
def collect(self, step):
"""Collect rollout data."""
self.trainer.prep_rollout()
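        # Query the shared policy for every (thread, agent) pair at once by
        # flattening the leading dimensions; the outputs are split back into
        # per-rollout-thread arrays below.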
value, action, action_log_prob, rnn_states, rnn_states_critic \
= self.trainer.policy.get_actions(np.concatenate(self.buffer.share_obs[step]),
np.concatenate(self.buffer.obs[step]),
np.concatenate(self.buffer.rnn_states[step]),
np.concatenate(self.buffer.rnn_states_critic[step]),
np.concatenate(self.buffer.masks[step]))
# [self.envs, agents, dim]
values = np.array(np.split(_t2n(value), self.n_rollout_threads))
actions = np.array(np.split(_t2n(action), self.n_rollout_threads))
action_log_probs = np.array(
np.split(_t2n(action_log_prob), self.n_rollout_threads))
rnn_states = np.array(
np.split(_t2n(rnn_states), self.n_rollout_threads))
rnn_states_critic = np.array(
np.split(_t2n(rnn_states_critic), self.n_rollout_threads))
        # rearrange actions: convert the sampled discrete indices into one-hot
        # (or concatenated multi-discrete one-hot) actions for the env
if self.envs.action_space[0].__class__.__name__ == 'MultiDiscrete':
for i in range(self.envs.action_space[0].shape):
uc_actions_env = np.eye(self.envs.action_space[0].high[i] +
1)[actions[:, :, i]]
if i == 0:
actions_env = uc_actions_env
else:
actions_env = np.concatenate((actions_env, uc_actions_env),
axis=2)
elif self.envs.action_space[0].__class__.__name__ == 'Discrete':
actions_env = np.squeeze(
np.eye(self.envs.action_space[0].n)[actions], 2)
else:
raise NotImplementedError
return values, actions, action_log_probs, rnn_states, rnn_states_critic, actions_env
def insert(self, data):
"""Store the data in the buffers."""
obs, rewards, dones, _, values, actions, action_log_probs, rnn_states, rnn_states_critic = data
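        # masks zero out RNN state and bootstrapping when an entire env copy
        # is done, while active_masks additionally zeroes individual agents
        # that exited early so their padded transitions do not contribute to
        # the policy/value loss.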
dones_env = np.all(dones, axis=1)
rnn_states[dones_env] = np.zeros(((dones_env).sum(), self.num_agents,
self.recurrent_N, self.hidden_size),
dtype=np.float32)
rnn_states_critic[dones_env] = np.zeros(
((dones_env).sum(), self.num_agents,
*self.buffer.rnn_states_critic.shape[3:]),
dtype=np.float32)
masks = np.ones((self.n_rollout_threads, self.num_agents, 1),
dtype=np.float32)
masks[dones_env] = np.zeros(((dones_env).sum(), self.num_agents, 1),
dtype=np.float32)
active_masks = np.ones((self.n_rollout_threads, self.num_agents, 1),
dtype=np.float32)
active_masks[dones] = np.zeros(((dones).sum(), 1), dtype=np.float32)
active_masks[dones_env] = np.ones(
((dones_env).sum(), self.num_agents, 1), dtype=np.float32)
if self.use_centralized_V:
share_obs = obs.reshape(self.n_rollout_threads, -1)
share_obs = np.expand_dims(share_obs, 1).repeat(self.num_agents,
axis=1)
else:
share_obs = obs
self.buffer.insert(share_obs,
obs,
rnn_states,
rnn_states_critic,
actions,
action_log_probs,
values,
rewards,
masks,
active_masks=active_masks)
@torch.no_grad()
def eval(self, total_num_steps):
"""Get the policy returns in deterministic mode."""
eval_episode = 0
eval_episode_rewards = []
one_episode_rewards = [[] for _ in range(self.n_eval_rollout_threads)]
num_achieved_goals = 0
num_collisions = 0
i = 0
eval_obs = self.eval_envs.reset()
eval_rnn_states = np.zeros(
(self.n_eval_rollout_threads, self.num_agents, self.recurrent_N,
self.hidden_size),
dtype=np.float32)
eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1),
dtype=np.float32)
while eval_episode < self.cfg.eval_episodes:
i += 1
self.trainer.prep_rollout()
eval_actions, eval_rnn_states = \
self.trainer.policy.act(np.concatenate(eval_obs),
np.concatenate(eval_rnn_states),
np.concatenate(eval_masks),
deterministic=True)
eval_actions = np.array(
np.split(_t2n(eval_actions), self.n_eval_rollout_threads))
eval_rnn_states = np.array(
np.split(_t2n(eval_rnn_states), self.n_eval_rollout_threads))
            # Observe reward and next obs
eval_obs, eval_rewards, eval_dones, eval_infos = self.eval_envs.step(
eval_actions)
for info_arr in eval_infos:
for agent_info_arr in info_arr:
if 'goal_achieved' in agent_info_arr and agent_info_arr[
'goal_achieved']:
num_achieved_goals += 1
if 'collided' in agent_info_arr and agent_info_arr[
'collided']:
num_collisions += 1
for i in range(self.n_eval_rollout_threads):
one_episode_rewards[i].append(eval_rewards[i])
eval_dones_env = np.all(eval_dones, axis=1)
eval_rnn_states[eval_dones_env] = np.zeros(
((eval_dones_env).sum(), self.num_agents, self.recurrent_N,
self.hidden_size),
dtype=np.float32)
eval_masks = np.ones(
(self.n_eval_rollout_threads, self.num_agents, 1),
dtype=np.float32)
eval_masks[eval_dones_env] = np.zeros(
((eval_dones_env).sum(), self.num_agents, 1), dtype=np.float32)
for eval_i in range(self.n_eval_rollout_threads):
if eval_dones_env[eval_i]:
eval_episode += 1
eval_episode_rewards.append(
np.sum(one_episode_rewards[eval_i], axis=0).mean())
one_episode_rewards[eval_i] = []
eval_episode_rewards = np.array(eval_episode_rewards)
eval_episode_rewards = np.mean(eval_episode_rewards)
if self.use_wandb:
wandb.log({'eval_episode_rewards': eval_episode_rewards},
step=total_num_steps)
wandb.log(
{
'avg_eval_goals_achieved':
num_achieved_goals / self.num_agents /
self.cfg.eval_episodes
},
step=total_num_steps)
wandb.log(
{
'avg_eval_num_collisions':
num_collisions / self.num_agents / self.cfg.eval_episodes
},
step=total_num_steps)
@torch.no_grad()
def render(self, total_num_steps):
"""Visualize the env."""
envs = self.render_envs
all_frames = []
for episode in range(self.cfg.render_episodes):
obs = envs.reset()
if self.cfg.save_gifs:
image = envs.envs[0].render('rgb_array')
all_frames.append(image)
else:
envs.render('human')
rnn_states = np.zeros(
(1, self.num_agents, self.recurrent_N, self.hidden_size),
dtype=np.float32)
masks = np.ones((1, self.num_agents, 1), dtype=np.float32)
episode_rewards = []
self.trainer.prep_rollout()
for step in range(self.episode_length):
calc_start = time.time()
action, rnn_states = self.trainer.policy.act(
np.concatenate(obs),
np.concatenate(rnn_states),
np.concatenate(masks),
deterministic=True)
actions = np.array(np.split(_t2n(action), 1))
rnn_states = np.array(np.split(_t2n(rnn_states), 1))
if envs.action_space[0].__class__.__name__ == 'MultiDiscrete':
for i in range(envs.action_space[0].shape):
uc_actions_env = np.eye(envs.action_space[0].high[i] +
1)[actions[:, :, i]]
if i == 0:
actions_env = uc_actions_env
else:
actions_env = np.concatenate(
(actions_env, uc_actions_env), axis=2)
elif envs.action_space[0].__class__.__name__ == 'Discrete':
actions_env = np.squeeze(
np.eye(envs.action_space[0].n)[actions], 2)
else:
raise NotImplementedError
                # Observe reward and next obs
obs, rewards, dones, infos = envs.step(actions_env)
episode_rewards.append(rewards)
rnn_states[dones] = np.zeros(
((dones).sum(), self.recurrent_N, self.hidden_size),
dtype=np.float32)
masks = np.ones((1, self.num_agents, 1), dtype=np.float32)
masks[dones] = np.zeros(((dones).sum(), 1), dtype=np.float32)
if self.cfg.save_gifs:
image = envs.envs[0].render('rgb_array')
all_frames.append(image)
calc_end = time.time()
elapsed = calc_end - calc_start
if elapsed < self.cfg.ifi:
time.sleep(self.cfg.ifi - elapsed)
else:
envs.render('human')
if np.all(dones[0]):
break
# note, every rendered episode is exactly the same since there's no randomness in the env and our actions
# are deterministic
# TODO(eugenevinitsky) why is this lower than the non-render reward?
render_val = np.mean(np.sum(np.array(episode_rewards), axis=0))
print("episode reward of rendered episode is: " + str(render_val))
if self.use_wandb:
wandb.log({'render_rew': render_val}, step=total_num_steps)
if self.cfg.save_gifs:
if self.use_wandb:
np_arr = np.stack(all_frames).transpose((0, 3, 1, 2))
wandb.log({"video": wandb.Video(np_arr, fps=4, format="gif")},
step=total_num_steps)
# else:
imageio.mimsave(os.getcwd() + '/render.gif',
all_frames,
duration=self.cfg.ifi)
@hydra.main(config_path='../../cfgs/', config_name='config')
def main(cfg):
"""Run the on-policy code."""
set_display_window()
logdir = Path(os.getcwd())
if cfg.wandb_id is not None:
wandb_id = cfg.wandb_id
else:
wandb_id = wandb.util.generate_id()
# with open(os.path.join(logdir, 'wandb_id.txt'), 'w+') as f:
# f.write(wandb_id)
wandb_mode = "disabled" if (cfg.debug or not cfg.wandb) else "online"
if cfg.wandb:
run = wandb.init(config=cfg,
project=cfg.wandb_name,
name=wandb_id,
group='ppov2_' + cfg.experiment,
resume="allow",
settings=wandb.Settings(start_method="fork"),
mode=wandb_mode)
else:
if not logdir.exists():
curr_run = 'run1'
else:
exst_run_nums = [
int(str(folder.name).split('run')[1])
for folder in logdir.iterdir()
if str(folder.name).startswith('run')
]
if len(exst_run_nums) == 0:
curr_run = 'run1'
else:
curr_run = 'run%i' % (max(exst_run_nums) + 1)
logdir = logdir / curr_run
if not logdir.exists():
os.makedirs(str(logdir))
if cfg.algorithm.algorithm_name == "rmappo":
assert (cfg.algorithm.use_recurrent_policy
or cfg.algorithm.use_naive_recurrent_policy), (
"check recurrent policy!")
elif cfg.algorithm.algorithm_name == "mappo":
assert (not cfg.algorithm.use_recurrent_policy
and not cfg.algorithm.use_naive_recurrent_policy), (
"check recurrent policy!")
else:
raise NotImplementedError
# cuda
if 'cpu' not in cfg.algorithm.device and torch.cuda.is_available():
print("choose to use gpu...")
device = torch.device(cfg.algorithm.device)
torch.set_num_threads(cfg.algorithm.n_training_threads)
# if cfg.algorithm.cuda_deterministic:
# import torch.backends.cudnn as cudnn
# cudnn.benchmark = False
# cudnn.deterministic = True
else:
print("choose to use cpu...")
device = torch.device("cpu")
torch.set_num_threads(cfg.algorithm.n_training_threads)
setproctitle.setproctitle(
str(cfg.algorithm.algorithm_name) + "-" + str(cfg.experiment))
# seed
torch.manual_seed(cfg.algorithm.seed)
torch.cuda.manual_seed_all(cfg.algorithm.seed)
np.random.seed(cfg.algorithm.seed)
# env init
# TODO(eugenevinitsky) this code requires a fixed number of agents but this
# should be done by overriding in the hydra config rather than here
cfg.subscriber.keep_inactive_agents = True
envs = make_train_env(cfg)
eval_envs = make_eval_env(cfg)
render_envs = make_render_env(cfg)
# TODO(eugenevinitsky) hacky
num_agents = envs.reset().shape[1]
config = {
"cfg.algo": cfg.algorithm,
"envs": envs,
"eval_envs": eval_envs,
"render_envs": render_envs,
"num_agents": num_agents,
"device": device,
"logdir": logdir
}
# run experiments
runner = NocturneSharedRunner(config)
runner.run()
# post process
envs.close()
if cfg.algorithm.use_eval and eval_envs is not envs:
eval_envs.close()
if cfg.wandb:
run.finish()
else:
runner.writter.export_scalars_to_json(
str(runner.log_dir + '/summary.json'))
runner.writter.close()
if __name__ == '__main__':
main()
| 21,461 | 37.120782 | 117 | py |
nocturne | nocturne-main/algos/ppo/env_wrappers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
"""
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
import torch
from multiprocessing import Process, Pipe
from abc import ABC, abstractmethod
from algos.ppo.utils.util import tile_images
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class ShareVecEnv(ABC):
"""
An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that
    each observation becomes a batch of observations, and the expected action is a batch of actions
    to be applied per-environment.
"""
closed = False
viewer = None
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self, num_envs, observation_space, share_observation_space,
action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.share_observation_space = share_observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
"""
Reset all the environments and return an array of
observations, or a dict of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
@abstractmethod
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
@abstractmethod
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close_extras(self):
"""
Clean up the extra resources, beyond what's in this base class.
Only runs when not self.closed.
"""
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self, actions):
"""
Step the environments synchronously.
This is available for backwards compatibility.
"""
self.step_async(actions)
return self.step_wait()
def render(self, mode='human'):
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
"""
Return RGB images from each environment
"""
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
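    # Each worker owns a single env instance and services commands sent over
    # the pipe ('step', 'reset', 'render', ...) until it receives 'close'.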
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob = env.reset()
else:
if np.all(done):
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send((ob))
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space,
env.action_space))
else:
raise NotImplementedError
class GuardSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(target=worker,
args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote,
env_fn) in zip(self.work_remotes, self.remotes, env_fns)
]
for p in self.ps:
p.daemon = False # could cause zombie process
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[
0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
class SubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(target=worker,
args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote,
env_fn) in zip(self.work_remotes, self.remotes, env_fns)
]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[
0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def shareworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
if 'bool' in done.__class__.__name__:
if done:
ob, s_ob, available_actions = env.reset()
else:
if np.all(done):
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset()
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space,
env.action_space))
elif cmd == 'render_vulnerability':
fr = env.render_vulnerability(data)
remote.send((fr))
else:
raise NotImplementedError
class ShareSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(target=shareworker,
args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote,
env_fn) in zip(self.work_remotes, self.remotes, env_fns)
]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[
0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(
dones), infos, np.stack(available_actions)
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def choosesimpleworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset(data)
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
if data == "rgb_array":
fr = env.render(mode=data)
remote.send(fr)
elif data == "human":
env.render(mode=data)
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space,
env.action_space))
else:
raise NotImplementedError
class ChooseSimpleSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(target=choosesimpleworker,
args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote,
env_fn) in zip(self.work_remotes, self.remotes, env_fns)
]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[
0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def render(self, mode="rgb_array"):
for remote in self.remotes:
remote.send(('render', mode))
if mode == "rgb_array":
frame = [remote.recv() for remote in self.remotes]
return np.stack(frame)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, s_ob, reward, done, info, available_actions = env.step(data)
remote.send((ob, s_ob, reward, done, info, available_actions))
elif cmd == 'reset':
ob, s_ob, available_actions = env.reset(data)
remote.send((ob, s_ob, available_actions))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space,
env.action_space))
else:
raise NotImplementedError
class ChooseSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(target=chooseworker,
args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote,
env_fn) in zip(self.work_remotes, self.remotes, env_fns)
]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[
0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, share_obs, rews, dones, infos, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(rews), np.stack(
dones), infos, np.stack(available_actions)
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
results = [remote.recv() for remote in self.remotes]
obs, share_obs, available_actions = zip(*results)
return np.stack(obs), np.stack(share_obs), np.stack(available_actions)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def chooseguardworker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset(data)
remote.send((ob))
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
env.close()
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.share_observation_space,
env.action_space))
else:
raise NotImplementedError
class ChooseGuardSubprocVecEnv(ShareVecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [
Process(target=chooseguardworker,
args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote,
env_fn) in zip(self.work_remotes, self.remotes, env_fns)
]
for p in self.ps:
            p.daemon = False # could cause zombie process
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, share_observation_space, action_space = self.remotes[
0].recv()
ShareVecEnv.__init__(self, len(env_fns), observation_space,
share_observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self, reset_choose):
for remote, choose in zip(self.remotes, reset_choose):
remote.send(('reset', choose))
obs = [remote.recv() for remote in self.remotes]
return np.stack(obs)
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
# single env
class DummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(env_fns), env.observation_space,
env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
# TODO(eugenevinitsky) remove this
obs, rews, dones, infos = map(np.array, zip(*results))
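        # Auto-reset convention: once an env copy reports done (a scalar bool
        # or an all-agents-done array), replace its terminal obs with the obs
        # from a fresh reset so the rollout can continue without interruption.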
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i] = self.envs[i].reset()
else:
if np.all(done):
obs[i] = self.envs[i].reset()
self.actions = None
return obs, rews, dones, infos
def reset(self):
obs = [env.reset() for env in self.envs]
return np.array(obs)
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ShareDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(env_fns), env.observation_space,
env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
for (i, done) in enumerate(dones):
if 'bool' in done.__class__.__name__:
if done:
obs[i], share_obs[i], available_actions[i] = self.envs[
i].reset()
else:
if np.all(done):
obs[i], share_obs[i], available_actions[i] = self.envs[
i].reset()
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self):
results = [env.reset() for env in self.envs]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ChooseDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(env_fns), env.observation_space,
env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, share_obs, rews, dones, infos, available_actions = map(
np.array, zip(*results))
self.actions = None
return obs, share_obs, rews, dones, infos, available_actions
def reset(self, reset_choose):
results = [
env.reset(choose) for (env, choose) in zip(self.envs, reset_choose)
]
obs, share_obs, available_actions = map(np.array, zip(*results))
return obs, share_obs, available_actions
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
class ChooseSimpleDummyVecEnv(ShareVecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
ShareVecEnv.__init__(self, len(env_fns), env.observation_space,
env.share_observation_space, env.action_space)
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
results = [env.step(a) for (a, env) in zip(self.actions, self.envs)]
obs, rews, dones, infos = map(np.array, zip(*results))
self.actions = None
return obs, rews, dones, infos
def reset(self, reset_choose):
obs = [
env.reset(choose) for (env, choose) in zip(self.envs, reset_choose)
]
return np.array(obs)
def close(self):
for env in self.envs:
env.close()
def render(self, mode="human"):
if mode == "rgb_array":
return np.array([env.render(mode=mode) for env in self.envs])
elif mode == "human":
for env in self.envs:
env.render(mode=mode)
else:
raise NotImplementedError
| 29,079 | 32.502304 | 99 | py |
nocturne | nocturne-main/algos/ppo/__init__.py | 0 | 0 | 0 | py |
|
nocturne | nocturne-main/algos/ppo/base_runner.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import wandb
import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
from algos.ppo.utils.shared_buffer import SharedReplayBuffer
def _t2n(x):
"""Convert torch tensor to a numpy array."""
return x.detach().cpu().numpy()
class Runner(object):
"""
Base class for training recurrent policies.
:param config: (dict) Config dictionary containing parameters for training.
"""
def __init__(self, config):
self.all_args = config['cfg.algo']
self.envs = config['envs']
self.eval_envs = config['eval_envs']
self.device = config['device']
self.num_agents = config['num_agents']
        if "render_envs" in config:
self.render_envs = config['render_envs']
# parameters
# self.env_name = self.all_args.env_name
self.algorithm_name = self.all_args.algorithm_name
self.experiment_name = self.all_args.experiment
self.use_centralized_V = self.all_args.use_centralized_V
self.use_obs_instead_of_state = self.all_args.use_obs_instead_of_state
self.num_env_steps = self.all_args.num_env_steps
self.episode_length = self.all_args.episode_length
# self.episodes_per_thread = self.all_args.episodes_per_thread
self.n_rollout_threads = self.all_args.n_rollout_threads
self.n_eval_rollout_threads = self.all_args.n_eval_rollout_threads
self.n_render_rollout_threads = self.all_args.n_render_rollout_threads
self.use_linear_lr_decay = self.all_args.use_linear_lr_decay
self.hidden_size = self.all_args.hidden_size
self.use_wandb = self.all_args.wandb
self.use_render = self.all_args.use_render
self.recurrent_N = self.all_args.recurrent_N
# interval
self.save_interval = self.all_args.save_interval
self.use_eval = self.all_args.use_eval
self.eval_interval = self.all_args.eval_interval
self.log_interval = self.all_args.log_interval
# dir
self.model_dir = self.all_args.model_dir
if self.use_wandb:
self.save_dir = str(wandb.run.dir)
self.run_dir = str(wandb.run.dir)
else:
self.run_dir = config["logdir"]
self.log_dir = str(self.run_dir / 'logs')
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.writter = SummaryWriter(self.log_dir)
self.save_dir = str(self.run_dir / 'models')
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
from algos.ppo.r_mappo.r_mappo import R_MAPPO as TrainAlgo
from algos.ppo.r_mappo.algorithm.rMAPPOPolicy import R_MAPPOPolicy as Policy
share_observation_space = self.envs.share_observation_space[
0] if self.use_centralized_V else self.envs.observation_space[0]
# policy network
self.policy = Policy(self.all_args,
self.envs.observation_space[0],
share_observation_space,
self.envs.action_space[0],
device=self.device)
if self.model_dir is not None:
self.restore()
# algorithm
self.trainer = TrainAlgo(self.all_args,
self.policy,
device=self.device)
# buffer
self.buffer = SharedReplayBuffer(self.all_args, self.num_agents,
self.envs.observation_space[0],
share_observation_space,
self.envs.action_space[0])
def run(self):
"""Collect training data, perform training updates, and evaluate policy."""
raise NotImplementedError
def warmup(self):
"""Collect warmup pre-training data."""
raise NotImplementedError
def collect(self, step):
"""Collect rollouts for training."""
raise NotImplementedError
def insert(self, data):
"""
Insert data into buffer.
:param data: (Tuple) data to insert into training buffer.
"""
raise NotImplementedError
@torch.no_grad()
def compute(self):
"""Calculate returns for the collected data."""
self.trainer.prep_rollout()
next_values = self.trainer.policy.get_values(
np.concatenate(self.buffer.share_obs[-1]),
np.concatenate(self.buffer.rnn_states_critic[-1]),
np.concatenate(self.buffer.masks[-1]))
next_values = np.array(
np.split(_t2n(next_values), self.n_rollout_threads))
self.buffer.compute_returns(next_values, self.trainer.value_normalizer)
def train(self):
"""Train policies with data in buffer. """
self.trainer.prep_training()
train_infos = self.trainer.train(self.buffer)
self.buffer.after_update()
return train_infos
def save(self):
"""Save policy's actor and critic networks."""
policy_actor = self.trainer.policy.actor
torch.save(policy_actor.state_dict(), str(self.save_dir) + "/actor.pt")
policy_critic = self.trainer.policy.critic
torch.save(policy_critic.state_dict(),
str(self.save_dir) + "/critic.pt")
def restore(self):
"""Restore policy's networks from a saved model."""
policy_actor_state_dict = torch.load(str(self.model_dir) + '/actor.pt')
self.policy.actor.load_state_dict(policy_actor_state_dict)
if not self.all_args.use_render:
policy_critic_state_dict = torch.load(
str(self.model_dir) + '/critic.pt')
self.policy.critic.load_state_dict(policy_critic_state_dict)
def log_train(self, train_infos, total_num_steps):
"""
Log training info.
:param train_infos: (dict) information about training update.
:param total_num_steps: (int) total number of training env steps.
"""
for k, v in train_infos.items():
if self.use_wandb:
wandb.log({k: v}, step=total_num_steps)
else:
self.writter.add_scalars(k, {k: v}, total_num_steps)
def log_env(self, env_infos, total_num_steps):
"""
Log env info.
:param env_infos: (dict) information about env state.
:param total_num_steps: (int) total number of training env steps.
"""
for k, v in env_infos.items():
if len(v) > 0:
if self.use_wandb:
wandb.log({k: np.mean(v)}, step=total_num_steps)
else:
self.writter.add_scalars(k, {k: np.mean(v)},
total_num_steps)
| 7,111 | 38.292818 | 84 | py |
nocturne | nocturne-main/algos/ppo/r_mappo/r_mappo.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import numpy as np
import torch
import torch.nn as nn
from algos.ppo.utils.util import get_gard_norm, huber_loss, mse_loss
from algos.ppo.utils.valuenorm import ValueNorm
from algos.ppo.ppo_utils.util import check
class R_MAPPO():
"""
Trainer class for MAPPO to update policies.
:param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param policy: (R_MAPPO_Policy) policy to update.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
def __init__(self, args, policy, device=torch.device("cpu")):
self.device = device
self.tpdv = dict(dtype=torch.float32, device=device)
self.policy = policy
self.clip_param = args.clip_param
self.ppo_epoch = args.ppo_epoch
self.num_mini_batch = args.num_mini_batch
self.data_chunk_length = args.data_chunk_length
self.value_loss_coef = args.value_loss_coef
self.entropy_coef = args.entropy_coef
self.max_grad_norm = args.max_grad_norm
self.huber_delta = args.huber_delta
self._use_recurrent_policy = args.use_recurrent_policy
self._use_naive_recurrent = args.use_naive_recurrent_policy
self._use_max_grad_norm = args.use_max_grad_norm
self._use_clipped_value_loss = args.use_clipped_value_loss
self._use_huber_loss = args.use_huber_loss
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_value_active_masks = args.use_value_active_masks
self._use_policy_active_masks = args.use_policy_active_masks
        assert not (self._use_popart and self._use_valuenorm), (
"self._use_popart and self._use_valuenorm can not be set True simultaneously"
)
if self._use_popart:
self.value_normalizer = self.policy.critic.v_out
elif self._use_valuenorm:
self.value_normalizer = ValueNorm(1, device=self.device)
else:
self.value_normalizer = None
def cal_value_loss(self, values, value_preds_batch, return_batch,
active_masks_batch):
"""
Calculate value function loss.
:param values: (torch.Tensor) value function predictions.
:param value_preds_batch: (torch.Tensor) "old" value predictions from data batch (used for value clip loss)
:param return_batch: (torch.Tensor) reward to go returns.
        :param active_masks_batch: (torch.Tensor) denotes if agent is active or dead at a given timestep.
:return value_loss: (torch.Tensor) value function loss.
"""
value_pred_clipped = value_preds_batch + (
values - value_preds_batch).clamp(-self.clip_param,
self.clip_param)
if self._use_popart or self._use_valuenorm:
self.value_normalizer.update(return_batch)
error_clipped = self.value_normalizer.normalize(
return_batch) - value_pred_clipped
error_original = self.value_normalizer.normalize(
return_batch) - values
else:
error_clipped = return_batch - value_pred_clipped
error_original = return_batch - values
if self._use_huber_loss:
value_loss_clipped = huber_loss(error_clipped, self.huber_delta)
value_loss_original = huber_loss(error_original, self.huber_delta)
else:
value_loss_clipped = mse_loss(error_clipped)
value_loss_original = mse_loss(error_original)
if self._use_clipped_value_loss:
value_loss = torch.max(value_loss_original, value_loss_clipped)
else:
value_loss = value_loss_original
if self._use_value_active_masks:
value_loss = (value_loss *
active_masks_batch).sum() / active_masks_batch.sum()
else:
value_loss = value_loss.mean()
return value_loss
def ppo_update(self, sample, update_actor=True):
"""
Update actor and critic networks.
:param sample: (Tuple) contains data batch with which to update networks.
        :param update_actor: (bool) whether to update actor network.
        :return value_loss: (torch.Tensor) value function loss.
        :return critic_grad_norm: (torch.Tensor) gradient norm from critic update.
        :return policy_loss: (torch.Tensor) actor (policy) loss value.
:return dist_entropy: (torch.Tensor) action entropies.
:return actor_grad_norm: (torch.Tensor) gradient norm from actor update.
:return imp_weights: (torch.Tensor) importance sampling weights.
"""
share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, \
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, \
adv_targ, available_actions_batch = sample
old_action_log_probs_batch = check(old_action_log_probs_batch).to(
**self.tpdv)
adv_targ = check(adv_targ).to(**self.tpdv)
value_preds_batch = check(value_preds_batch).to(**self.tpdv)
return_batch = check(return_batch).to(**self.tpdv)
active_masks_batch = check(active_masks_batch).to(**self.tpdv)
# Reshape to do in a single forward pass for all steps
values, action_log_probs, dist_entropy = self.policy.evaluate_actions(
share_obs_batch, obs_batch, rnn_states_batch,
rnn_states_critic_batch, actions_batch, masks_batch,
available_actions_batch, active_masks_batch)
# actor update
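        # Clipped PPO surrogate: imp_weights is the probability ratio
        # pi_new / pi_old; the objective keeps the minimum of the unclipped
        # and clipped advantage-weighted terms.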
imp_weights = torch.exp(action_log_probs - old_action_log_probs_batch)
surr1 = imp_weights * adv_targ
surr2 = torch.clamp(imp_weights, 1.0 - self.clip_param,
1.0 + self.clip_param) * adv_targ
if self._use_policy_active_masks:
policy_action_loss = (
-torch.sum(torch.min(surr1, surr2), dim=-1, keepdim=True) *
active_masks_batch).sum() / active_masks_batch.sum()
else:
policy_action_loss = -torch.sum(
torch.min(surr1, surr2), dim=-1, keepdim=True).mean()
policy_loss = policy_action_loss
self.policy.actor_optimizer.zero_grad()
if update_actor:
(policy_loss - dist_entropy * self.entropy_coef).backward()
if self._use_max_grad_norm:
actor_grad_norm = nn.utils.clip_grad_norm_(
self.policy.actor.parameters(), self.max_grad_norm)
else:
actor_grad_norm = get_gard_norm(self.policy.actor.parameters())
self.policy.actor_optimizer.step()
# critic update
value_loss = self.cal_value_loss(values, value_preds_batch,
return_batch, active_masks_batch)
self.policy.critic_optimizer.zero_grad()
(value_loss * self.value_loss_coef).backward()
if self._use_max_grad_norm:
critic_grad_norm = nn.utils.clip_grad_norm_(
self.policy.critic.parameters(), self.max_grad_norm)
else:
critic_grad_norm = get_gard_norm(self.policy.critic.parameters())
self.policy.critic_optimizer.step()
return value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights
def train(self, buffer, update_actor=True):
"""
Perform a training update using minibatch GD.
:param buffer: (SharedReplayBuffer) buffer containing training data.
:param update_actor: (bool) whether to update actor network.
:return train_info: (dict) contains information regarding training update (e.g. loss, grad norms, etc).
"""
if self._use_popart or self._use_valuenorm:
advantages = buffer.returns[:
-1] - self.value_normalizer.denormalize(
buffer.value_preds[:-1])
else:
advantages = buffer.returns[:-1] - buffer.value_preds[:-1]
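        # Normalize advantages over the batch while ignoring inactive-agent
        # entries (temporarily marked NaN) so padded agents don't skew the
        # statistics.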
advantages_copy = advantages.copy()
advantages_copy[buffer.active_masks[:-1] == 0.0] = np.nan
mean_advantages = np.nanmean(advantages_copy)
std_advantages = np.nanstd(advantages_copy)
advantages = (advantages - mean_advantages) / (std_advantages + 1e-5)
train_info = {}
train_info['value_loss'] = 0
train_info['policy_loss'] = 0
train_info['dist_entropy'] = 0
train_info['actor_grad_norm'] = 0
train_info['critic_grad_norm'] = 0
train_info['ratio'] = 0
for _ in range(self.ppo_epoch):
if self._use_recurrent_policy:
data_generator = buffer.recurrent_generator(
advantages, self.num_mini_batch, self.data_chunk_length)
elif self._use_naive_recurrent:
data_generator = buffer.naive_recurrent_generator(
advantages, self.num_mini_batch)
else:
data_generator = buffer.feed_forward_generator(
advantages, self.num_mini_batch)
for sample in data_generator:
value_loss, critic_grad_norm, policy_loss, dist_entropy, actor_grad_norm, imp_weights \
= self.ppo_update(sample, update_actor)
train_info['value_loss'] += value_loss.item()
train_info['policy_loss'] += policy_loss.item()
train_info['dist_entropy'] += dist_entropy.item()
train_info['actor_grad_norm'] += actor_grad_norm
train_info['critic_grad_norm'] += critic_grad_norm
train_info['ratio'] += imp_weights.mean()
num_updates = self.ppo_epoch * self.num_mini_batch
for k in train_info.keys():
train_info[k] /= num_updates
return train_info
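    # Rough usage sketch (illustrative; assumes rollouts have been collected and
    # buffer.compute_returns(...) has already been called):
    #   trainer.prep_training()
    #   train_info = trainer.train(buffer)
    #   buffer.after_update()
    #   trainer.prep_rollout()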
def prep_training(self):
self.policy.actor.train()
self.policy.critic.train()
def prep_rollout(self):
self.policy.actor.eval()
self.policy.critic.eval()
| 10,421 | 41.538776 | 116 | py |
nocturne | nocturne-main/algos/ppo/r_mappo/__init__.py | 0 | 0 | 0 | py |
|
nocturne | nocturne-main/algos/ppo/r_mappo/algorithm/r_actor_critic.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import torch
import torch.nn as nn
from algos.ppo.ppo_utils.util import init, check
from algos.ppo.ppo_utils.mlp import MLPBase
from algos.ppo.ppo_utils.rnn import RNNLayer
from algos.ppo.ppo_utils.act import ACTLayer
from algos.ppo.ppo_utils.popart import PopArt
from algos.ppo.utils.util import get_shape_from_obs_space
class R_Actor(nn.Module):
"""
Actor network class for MAPPO. Outputs actions given observations.
:param args: (argparse.Namespace) arguments containing relevant model information.
:param obs_space: (gym.Space) observation space.
:param action_space: (gym.Space) action space.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
def __init__(self,
args,
obs_space,
action_space,
device=torch.device("cpu")):
super(R_Actor, self).__init__()
self.hidden_size = args.hidden_size
self._gain = args.gain
self._use_orthogonal = args.use_orthogonal
self._use_policy_active_masks = args.use_policy_active_masks
self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
self._use_recurrent_policy = args.use_recurrent_policy
self._recurrent_N = args.recurrent_N
self.tpdv = dict(dtype=torch.float32, device=device)
obs_shape = get_shape_from_obs_space(obs_space)
base = MLPBase
self.base = base(args, obs_shape)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
self.rnn = RNNLayer(self.hidden_size, self.hidden_size,
self._recurrent_N, self._use_orthogonal,
device)
self.act = ACTLayer(action_space, self.hidden_size,
self._use_orthogonal, self._gain, device)
self.to(device)
def forward(self,
obs,
rnn_states,
masks,
available_actions=None,
deterministic=False):
"""
Compute actions from the given inputs.
:param obs: (np.ndarray / torch.Tensor) observation inputs into network.
:param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.
:param masks: (np.ndarray / torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.
:param available_actions: (np.ndarray / torch.Tensor) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether to sample from action distribution or return the mode.
:return actions: (torch.Tensor) actions to take.
:return action_log_probs: (torch.Tensor) log probabilities of taken actions.
:return rnn_states: (torch.Tensor) updated RNN hidden states.
"""
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states,
masks)
actions, action_log_probs = self.act(actor_features, available_actions,
deterministic)
return actions, action_log_probs, rnn_states
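    # Rough rollout usage (illustrative): the runner repeatedly calls
    #   actions, log_probs, rnn_states = actor(obs, rnn_states, masks)
    # and passes deterministic=True at evaluation time to take the distribution
    # mode instead of sampling.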
def evaluate_actions(self,
obs,
rnn_states,
action,
masks,
available_actions=None,
active_masks=None):
"""
Compute log probability and entropy of given actions.
:param obs: (torch.Tensor) observation inputs into network.
:param action: (torch.Tensor) actions whose entropy and log probability to evaluate.
:param rnn_states: (torch.Tensor) if RNN network, hidden states for RNN.
:param masks: (torch.Tensor) mask tensor denoting if hidden states should be reinitialized to zeros.
:param available_actions: (torch.Tensor) denotes which actions are available to agent
(if None, all actions available)
:param active_masks: (torch.Tensor) denotes whether an agent is active or dead.
:return action_log_probs: (torch.Tensor) log probabilities of the input actions.
:return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
"""
obs = check(obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
action = check(action).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
if available_actions is not None:
available_actions = check(available_actions).to(**self.tpdv)
if active_masks is not None:
active_masks = check(active_masks).to(**self.tpdv)
actor_features = self.base(obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
actor_features, rnn_states = self.rnn(actor_features, rnn_states,
masks)
action_log_probs, dist_entropy = self.act.evaluate_actions(
actor_features,
action,
available_actions,
active_masks=active_masks
if self._use_policy_active_masks else None)
return action_log_probs, dist_entropy
class R_Critic(nn.Module):
"""
Critic network class for MAPPO. Outputs value function predictions given centralized input (MAPPO) or
local observations (IPPO).
:param args: (argparse.Namespace) arguments containing relevant model information.
:param cent_obs_space: (gym.Space) (centralized) observation space.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
def __init__(self, args, cent_obs_space, device=torch.device("cpu")):
super(R_Critic, self).__init__()
self.hidden_size = args.hidden_size
self._use_orthogonal = args.use_orthogonal
self._use_naive_recurrent_policy = args.use_naive_recurrent_policy
self._use_recurrent_policy = args.use_recurrent_policy
self._recurrent_N = args.recurrent_N
self._use_popart = args.use_popart
self.tpdv = dict(dtype=torch.float32, device=device)
init_method = [nn.init.xavier_uniform_,
nn.init.orthogonal_][self._use_orthogonal]
cent_obs_shape = get_shape_from_obs_space(cent_obs_space)
base = MLPBase
self.base = base(args, cent_obs_shape)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
self.rnn = RNNLayer(self.hidden_size, self.hidden_size,
self._recurrent_N, self._use_orthogonal,
device)
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0))
if self._use_popart:
self.v_out = init_(PopArt(self.hidden_size, 1, device=device))
else:
self.v_out = init_(nn.Linear(self.hidden_size, 1))
self.to(device)
def forward(self, cent_obs, rnn_states, masks):
"""
Compute actions from the given inputs.
:param cent_obs: (np.ndarray / torch.Tensor) observation inputs into network.
:param rnn_states: (np.ndarray / torch.Tensor) if RNN network, hidden states for RNN.
:param masks: (np.ndarray / torch.Tensor) mask tensor denoting if RNN states should be reinitialized to zeros.
:return values: (torch.Tensor) value function predictions.
:return rnn_states: (torch.Tensor) updated RNN hidden states.
"""
cent_obs = check(cent_obs).to(**self.tpdv)
rnn_states = check(rnn_states).to(**self.tpdv)
masks = check(masks).to(**self.tpdv)
critic_features = self.base(cent_obs)
if self._use_naive_recurrent_policy or self._use_recurrent_policy:
critic_features, rnn_states = self.rnn(critic_features, rnn_states,
masks)
values = self.v_out(critic_features)
return values, rnn_states
| 8,798 | 43.439394 | 121 | py |
nocturne | nocturne-main/algos/ppo/r_mappo/algorithm/rMAPPOPolicy.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import torch
from algos.ppo.r_mappo.algorithm.r_actor_critic import R_Actor, R_Critic
from algos.ppo.utils.util import update_linear_schedule
class R_MAPPOPolicy:
"""
MAPPO Policy class. Wraps actor and critic networks to compute actions and value function predictions.
:param args: (argparse.Namespace) arguments containing relevant model and policy information.
:param obs_space: (gym.Space) observation space.
:param cent_obs_space: (gym.Space) value function input space (centralized input for MAPPO, decentralized for IPPO).
:param action_space: (gym.Space) action space.
:param device: (torch.device) specifies the device to run on (cpu/gpu).
"""
def __init__(self,
args,
obs_space,
cent_obs_space,
act_space,
device=torch.device("cpu")):
self.device = device
self.lr = args.lr
self.critic_lr = args.critic_lr
self.opti_eps = args.opti_eps
self.weight_decay = args.weight_decay
self.obs_space = obs_space
self.share_obs_space = cent_obs_space
self.act_space = act_space
self.actor = R_Actor(args, self.obs_space, self.act_space, self.device)
self.critic = R_Critic(args, self.share_obs_space, self.device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
lr=self.lr,
eps=self.opti_eps,
weight_decay=self.weight_decay)
self.critic_optimizer = torch.optim.Adam(
self.critic.parameters(),
lr=self.critic_lr,
eps=self.opti_eps,
weight_decay=self.weight_decay)
def lr_decay(self, episode, episodes):
"""
Decay the actor and critic learning rates.
:param episode: (int) current training episode.
:param episodes: (int) total number of training episodes.
"""
update_linear_schedule(self.actor_optimizer, episode, episodes,
self.lr)
update_linear_schedule(self.critic_optimizer, episode, episodes,
self.critic_lr)
def get_actions(self,
cent_obs,
obs,
rnn_states_actor,
rnn_states_critic,
masks,
available_actions=None,
deterministic=False):
"""
Compute actions and value function predictions for the given inputs.
:param cent_obs (np.ndarray): centralized input to the critic.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether the action should be mode of distribution or should be sampled.
:return values: (torch.Tensor) value function predictions.
:return actions: (torch.Tensor) actions to take.
:return action_log_probs: (torch.Tensor) log probabilities of chosen actions.
:return rnn_states_actor: (torch.Tensor) updated actor network RNN states.
:return rnn_states_critic: (torch.Tensor) updated critic network RNN states.
"""
actions, action_log_probs, rnn_states_actor = self.actor(
obs, rnn_states_actor, masks, available_actions, deterministic)
values, rnn_states_critic = self.critic(cent_obs, rnn_states_critic,
masks)
return values, actions, action_log_probs, rnn_states_actor, rnn_states_critic
def get_values(self, cent_obs, rnn_states_critic, masks):
"""
Get value function predictions.
:param cent_obs (np.ndarray): centralized input to the critic.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:return values: (torch.Tensor) value function predictions.
"""
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values
def evaluate_actions(self,
cent_obs,
obs,
rnn_states_actor,
rnn_states_critic,
action,
masks,
available_actions=None,
active_masks=None):
"""
Get action logprobs / entropy and value function predictions for actor update.
:param cent_obs (np.ndarray): centralized input to the critic.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param rnn_states_critic: (np.ndarray) if critic is RNN, RNN states for critic.
:param action: (np.ndarray) actions whose log probabilites and entropy to compute.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param active_masks: (torch.Tensor) denotes whether an agent is active or dead.
:return values: (torch.Tensor) value function predictions.
:return action_log_probs: (torch.Tensor) log probabilities of the input actions.
:return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
"""
action_log_probs, dist_entropy = self.actor.evaluate_actions(
obs, rnn_states_actor, action, masks, available_actions,
active_masks)
values, _ = self.critic(cent_obs, rnn_states_critic, masks)
return values, action_log_probs, dist_entropy
def act(self,
obs,
rnn_states_actor,
masks,
available_actions=None,
deterministic=False):
"""
Compute actions using the given inputs.
:param obs (np.ndarray): local agent inputs to the actor.
:param rnn_states_actor: (np.ndarray) if actor is RNN, RNN states for actor.
:param masks: (np.ndarray) denotes points at which RNN states should be reset.
:param available_actions: (np.ndarray) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether the action should be mode of distribution or should be sampled.
"""
actions, _, rnn_states_actor = self.actor(obs, rnn_states_actor, masks,
available_actions,
deterministic)
return actions, rnn_states_actor
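    # Rough rollout sketch (illustrative names; `env` and `buffer` are assumed to
    # exist in the runner):
    #   values, actions, logp, h_a, h_c = policy.get_actions(cent_obs, obs, h_a, h_c, masks)
    #   obs, rewards, dones, infos = env.step(actions)
    #   buffer.insert(cent_obs, obs, h_a, h_c, actions, logp, values, rewards, masks)
    # `act` is the lighter variant used at evaluation time, when value predictions
    # are not needed.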
| 7,556 | 47.133758 | 120 | py |
nocturne | nocturne-main/algos/ppo/utils/multi_discrete.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import gym
import numpy as np
# An old version of OpenAI Gym's multi_discrete.py. (Was getting affected by Gym updates)
# (https://github.com/openai/gym/blob/1fb81d4e3fb780ccf77fec731287ba07da35eb84/gym/spaces/multi_discrete.py)
class MultiDiscrete(gym.Space):
"""
- The multi-discrete action space consists of a series of discrete action spaces with different parameters
- It can be adapted to both a Discrete action space or a continuous (Box) action space
- It is useful to represent game controllers or keyboards where each key can be represented as a discrete action space
- It is parametrized by passing an array of arrays containing [min, max] for each discrete action space where the discrete action space can take any integers from `min` to `max` (both inclusive)
    Note: A value of 0 always needs to represent the NOOP action.
e.g. Nintendo Game Controller
- Can be conceptualized as 3 discrete action spaces:
1) Arrow Keys: Discrete 5 - NOOP[0], UP[1], RIGHT[2], DOWN[3], LEFT[4] - params: min: 0, max: 4
2) Button A: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
3) Button B: Discrete 2 - NOOP[0], Pressed[1] - params: min: 0, max: 1
- Can be initialized as
MultiDiscrete([ [0,4], [0,1], [0,1] ])
"""
def __init__(self, array_of_param_array):
self.low = np.array([x[0] for x in array_of_param_array])
self.high = np.array([x[1] for x in array_of_param_array])
self.num_discrete_space = self.low.shape[0]
self.n = np.sum(self.high) + 2
def sample(self):
""" Returns a array with one sample from each discrete action space """
# For each row: round(random .* (max - min) + min, 0)
random_array = np.random.rand(self.num_discrete_space)
return [
int(x) for x in np.floor(
np.multiply((self.high - self.low + 1.), random_array) +
self.low)
]
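    # Example: MultiDiscrete([[0, 4], [0, 1], [0, 1]]).sample() might return
    # [2, 0, 1] -- one integer per sub-space, each within its own [min, max] range.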
def contains(self, x):
return len(x) == self.num_discrete_space and (
np.array(x) >= self.low).all() and (np.array(x) <=
self.high).all()
@property
def shape(self):
return self.num_discrete_space
def __repr__(self):
return "MultiDiscrete" + str(self.num_discrete_space)
def __eq__(self, other):
return np.array_equal(self.low, other.low) and np.array_equal(
self.high, other.high)
| 2,738 | 45.423729 | 198 | py |
nocturne | nocturne-main/algos/ppo/utils/valuenorm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import numpy as np
import torch
import torch.nn as nn
class ValueNorm(nn.Module):
""" Normalize a vector of observations - across the first norm_axes dimensions"""
def __init__(self,
input_shape,
norm_axes=1,
beta=0.99999,
per_element_update=False,
epsilon=1e-5,
device=torch.device("cpu")):
super(ValueNorm, self).__init__()
self.input_shape = input_shape
self.norm_axes = norm_axes
self.epsilon = epsilon
self.beta = beta
self.per_element_update = per_element_update
self.tpdv = dict(dtype=torch.float32, device=device)
self.running_mean = nn.Parameter(torch.zeros(input_shape),
requires_grad=False).to(**self.tpdv)
self.running_mean_sq = nn.Parameter(
torch.zeros(input_shape), requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0),
requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
self.running_mean.zero_()
self.running_mean_sq.zero_()
self.debiasing_term.zero_()
def running_mean_var(self):
debiased_mean = self.running_mean / self.debiasing_term.clamp(
min=self.epsilon)
debiased_mean_sq = self.running_mean_sq / self.debiasing_term.clamp(
min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean**2).clamp(min=1e-2)
return debiased_mean, debiased_var
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector**2).mean(
dim=tuple(range(self.norm_axes)))
if self.per_element_update:
batch_size = np.prod(input_vector.size()[:self.norm_axes])
weight = self.beta**batch_size
else:
weight = self.beta
self.running_mean.mul_(weight).add_(batch_mean * (1.0 - weight))
self.running_mean_sq.mul_(weight).add_(batch_sq_mean * (1.0 - weight))
self.debiasing_term.mul_(weight).add_(1.0 * (1.0 - weight))
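        # The running statistics are exponential moving averages; dividing by the
        # debiasing term in running_mean_var() corrects the bias toward zero caused
        # by the zero initialization, in the same spirit as Adam's bias correction.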
def normalize(self, input_vector):
# Make sure input is float32
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.running_mean_var()
out = (input_vector - mean[(None, ) * self.norm_axes]
) / torch.sqrt(var)[(None, ) * self.norm_axes]
return out
def denormalize(self, input_vector):
""" Transform normalized data back into original distribution """
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.running_mean_var()
out = input_vector * torch.sqrt(var)[(None, ) * self.norm_axes] + mean[
(None, ) * self.norm_axes]
out = out.cpu().numpy()
return out
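    # Typical pattern in the PPO update (a sketch): call update(returns) to refresh
    # the running statistics, train the critic against normalize(returns), and use
    # denormalize(value_preds) wherever raw-scale values are needed (e.g. for GAE).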
| 3,604 | 35.785714 | 85 | py |
nocturne | nocturne-main/algos/ppo/utils/shared_buffer.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import torch
import numpy as np
from algos.ppo.utils.util import get_shape_from_obs_space, get_shape_from_act_space
def _flatten(T, N, x):
return x.reshape(T * N, *x.shape[2:])
def _cast(x):
return x.transpose(1, 2, 0, 3).reshape(-1, *x.shape[3:])
class SharedReplayBuffer(object):
"""
Buffer to store training data.
:param args: (argparse.Namespace) arguments containing relevant model, policy, and env information.
:param num_agents: (int) number of agents in the env.
:param obs_space: (gym.Space) observation space of agents.
:param cent_obs_space: (gym.Space) centralized observation space of agents.
:param act_space: (gym.Space) action space for agents.
"""
def __init__(self, args, num_agents, obs_space, cent_obs_space, act_space):
self.episode_length = args.episode_length
self.n_rollout_threads = args.n_rollout_threads
self.hidden_size = args.hidden_size
self.recurrent_N = args.recurrent_N
self.gamma = args.gamma
self.gae_lambda = args.gae_lambda
self._use_gae = args.use_gae
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_proper_time_limits = args.use_proper_time_limits
obs_shape = get_shape_from_obs_space(obs_space)
share_obs_shape = get_shape_from_obs_space(cent_obs_space)
if type(obs_shape[-1]) == list:
obs_shape = obs_shape[:1]
if type(share_obs_shape[-1]) == list:
share_obs_shape = share_obs_shape[:1]
self.share_obs = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, num_agents,
*share_obs_shape),
dtype=np.float32)
self.obs = np.zeros((self.episode_length + 1, self.n_rollout_threads,
num_agents, *obs_shape),
dtype=np.float32)
self.rnn_states = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, num_agents,
self.recurrent_N, self.hidden_size),
dtype=np.float32)
self.rnn_states_critic = np.zeros_like(self.rnn_states)
self.value_preds = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, num_agents, 1),
dtype=np.float32)
self.returns = np.zeros_like(self.value_preds)
if act_space.__class__.__name__ == 'Discrete':
self.available_actions = np.ones(
(self.episode_length + 1, self.n_rollout_threads, num_agents,
act_space.n),
dtype=np.float32)
else:
self.available_actions = None
act_shape = get_shape_from_act_space(act_space)
self.actions = np.zeros((self.episode_length, self.n_rollout_threads,
num_agents, act_shape),
dtype=np.float32)
self.action_log_probs = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents,
act_shape),
dtype=np.float32)
self.rewards = np.zeros(
(self.episode_length, self.n_rollout_threads, num_agents, 1),
dtype=np.float32)
self.masks = np.ones(
(self.episode_length + 1, self.n_rollout_threads, num_agents, 1),
dtype=np.float32)
self.bad_masks = np.ones_like(self.masks)
self.active_masks = np.ones_like(self.masks)
self.step = 0
def insert(self,
share_obs,
obs,
rnn_states_actor,
rnn_states_critic,
actions,
action_log_probs,
value_preds,
rewards,
masks,
bad_masks=None,
active_masks=None,
available_actions=None):
"""
Insert data into the buffer.
        :param share_obs: (np.ndarray) centralized observation inputs for the critic.
:param obs: (np.ndarray) local agent observations.
:param rnn_states_actor: (np.ndarray) RNN states for actor network.
:param rnn_states_critic: (np.ndarray) RNN states for critic network.
:param actions:(np.ndarray) actions taken by agents.
:param action_log_probs:(np.ndarray) log probs of actions taken by agents
:param value_preds: (np.ndarray) value function prediction at each step.
:param rewards: (np.ndarray) reward collected at each step.
:param masks: (np.ndarray) denotes whether the environment has terminated or not.
        :param bad_masks: (np.ndarray) denotes whether an episode ended in a true terminal state or because the time limit was reached.
:param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.
:param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.
"""
self.share_obs[self.step + 1] = share_obs.copy()
self.obs[self.step + 1] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states_actor.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step + 1] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step + 1] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
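        # Observations, RNN states and masks lead actions/rewards by one step, so the
        # data observed at t+1 is stored next to the action and reward taken at t;
        # the modulo turns the arrays into a ring buffer over `episode_length` steps.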
def chooseinsert(self,
share_obs,
obs,
rnn_states,
rnn_states_critic,
actions,
action_log_probs,
value_preds,
rewards,
masks,
bad_masks=None,
active_masks=None,
available_actions=None):
"""
Insert data into the buffer. This insert function is used specifically for Hanabi, which is turn based.
        :param share_obs: (np.ndarray) centralized observation inputs for the critic.
:param obs: (np.ndarray) local agent observations.
:param rnn_states_actor: (np.ndarray) RNN states for actor network.
:param rnn_states_critic: (np.ndarray) RNN states for critic network.
:param actions:(np.ndarray) actions taken by agents.
:param action_log_probs:(np.ndarray) log probs of actions taken by agents
:param value_preds: (np.ndarray) value function prediction at each step.
:param rewards: (np.ndarray) reward collected at each step.
:param masks: (np.ndarray) denotes whether the environment has terminated or not.
        :param bad_masks: (np.ndarray) denotes whether an episode ended in a true terminal state or because the time limit was reached.
:param active_masks: (np.ndarray) denotes whether an agent is active or dead in the env.
:param available_actions: (np.ndarray) actions available to each agent. If None, all actions are available.
"""
self.share_obs[self.step] = share_obs.copy()
self.obs[self.step] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def after_update(self):
"""Copy last timestep data to first index. Called after update to model."""
self.share_obs[0] = self.share_obs[-1].copy()
self.obs[0] = self.obs[-1].copy()
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
self.active_masks[0] = self.active_masks[-1].copy()
if self.available_actions is not None:
self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
"""Copy last timestep data to first index. This method is used for Hanabi."""
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
def compute_returns(self, next_value, value_normalizer=None):
"""
Compute returns either as discounted sum of rewards, or using GAE.
:param next_value: (np.ndarray) value predictions for the step after the last episode step.
:param value_normalizer: (PopArt) If not None, PopArt value normalizer instance.
"""
if self._use_proper_time_limits:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
# step + 1
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(
self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * gae * self.masks[
step + 1]
gae = gae * self.bad_masks[step + 1]
self.returns[
step] = gae + value_normalizer.denormalize(
self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \
self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[
step + 1] * gae
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[
step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(
self.value_preds[step])
else:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[
step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * self.value_preds[step]
else:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
delta = self.rewards[step] + self.gamma * value_normalizer.denormalize(
self.value_preds[step + 1]) * self.masks[step + 1] \
- value_normalizer.denormalize(self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * self.masks[
step + 1] * gae
self.returns[
step] = gae + value_normalizer.denormalize(
self.value_preds[step])
else:
delta = self.rewards[step] + self.gamma * self.value_preds[step + 1] * self.masks[step + 1] - \
self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[
step + 1] * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
self.returns[step] = self.returns[
step + 1] * self.gamma * self.masks[
step + 1] + self.rewards[step]
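    # GAE recap for the branches above, with masks m_{t+1} that are 0 at episode ends:
    #   delta_t = r_t + gamma * V(s_{t+1}) * m_{t+1} - V(s_t)
    #   A_t     = delta_t + gamma * lambda * m_{t+1} * A_{t+1}
    # and returns are reconstructed as R_t = A_t + V(s_t). Without GAE the code falls
    # back to plain discounted returns R_t = r_t + gamma * m_{t+1} * R_{t+1}.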
def feed_forward_generator(self,
advantages,
num_mini_batch=None,
mini_batch_size=None):
"""
Yield training data for MLP policies.
:param advantages: (np.ndarray) advantage estimates.
:param num_mini_batch: (int) number of minibatches to split the batch into.
:param mini_batch_size: (int) number of samples in each minibatch.
"""
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) * number of agents ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(n_rollout_threads, episode_length, num_agents,
n_rollout_threads * episode_length * num_agents,
num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
rand = torch.randperm(batch_size).numpy()
sampler = [
rand[i * mini_batch_size:(i + 1) * mini_batch_size]
for i in range(num_mini_batch)
]
share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].reshape(-1, *self.obs.shape[3:])
rnn_states = self.rnn_states[:-1].reshape(-1,
*self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].reshape(
-1, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions[:-1].reshape(
-1, self.available_actions.shape[-1])
value_preds = self.value_preds[:-1].reshape(-1, 1)
returns = self.returns[:-1].reshape(-1, 1)
masks = self.masks[:-1].reshape(-1, 1)
active_masks = self.active_masks[:-1].reshape(-1, 1)
action_log_probs = self.action_log_probs.reshape(
-1, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, 1)
for indices in sampler:
# obs size [T+1 N M Dim]-->[T N M Dim]-->[T*N*M,Dim]-->[index,Dim]
share_obs_batch = share_obs[indices]
obs_batch = obs[indices]
rnn_states_batch = rnn_states[indices]
rnn_states_critic_batch = rnn_states_critic[indices]
actions_batch = actions[indices]
if self.available_actions is not None:
available_actions_batch = available_actions[indices]
else:
available_actions_batch = None
value_preds_batch = value_preds[indices]
return_batch = returns[indices]
masks_batch = masks[indices]
active_masks_batch = active_masks[indices]
old_action_log_probs_batch = action_log_probs[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages[indices]
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
adv_targ, available_actions_batch
def naive_recurrent_generator(self, advantages, num_mini_batch):
"""
Yield training data for non-chunked RNN training.
:param advantages: (np.ndarray) advantage estimates.
:param num_mini_batch: (int) number of minibatches to split the batch into.
"""
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * num_agents
assert n_rollout_threads * num_agents >= num_mini_batch, (
"PPO requires the number of processes ({})* number of agents ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(n_rollout_threads, num_agents,
num_mini_batch))
num_envs_per_batch = batch_size // num_mini_batch
perm = torch.randperm(batch_size).numpy()
share_obs = self.share_obs.reshape(-1, batch_size,
*self.share_obs.shape[3:])
obs = self.obs.reshape(-1, batch_size, *self.obs.shape[3:])
rnn_states = self.rnn_states.reshape(-1, batch_size,
*self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic.reshape(
-1, batch_size, *self.rnn_states_critic.shape[3:])
actions = self.actions.reshape(-1, batch_size, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions.reshape(
-1, batch_size, self.available_actions.shape[-1])
value_preds = self.value_preds.reshape(-1, batch_size, 1)
returns = self.returns.reshape(-1, batch_size, 1)
masks = self.masks.reshape(-1, batch_size, 1)
active_masks = self.active_masks.reshape(-1, batch_size, 1)
action_log_probs = self.action_log_probs.reshape(
-1, batch_size, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, batch_size, 1)
for start_ind in range(0, batch_size, num_envs_per_batch):
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
share_obs_batch.append(share_obs[:-1, ind])
obs_batch.append(obs[:-1, ind])
rnn_states_batch.append(rnn_states[0:1, ind])
rnn_states_critic_batch.append(rnn_states_critic[0:1, ind])
actions_batch.append(actions[:, ind])
if self.available_actions is not None:
available_actions_batch.append(available_actions[:-1, ind])
value_preds_batch.append(value_preds[:-1, ind])
return_batch.append(returns[:-1, ind])
masks_batch.append(masks[:-1, ind])
active_masks_batch.append(active_masks[:-1, ind])
old_action_log_probs_batch.append(action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
# [N[T, dim]]
T, N = self.episode_length, num_envs_per_batch
# These are all from_numpys of size (T, N, -1)
share_obs_batch = np.stack(share_obs_batch, 1)
obs_batch = np.stack(obs_batch, 1)
actions_batch = np.stack(actions_batch, 1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, 1)
value_preds_batch = np.stack(value_preds_batch, 1)
return_batch = np.stack(return_batch, 1)
masks_batch = np.stack(masks_batch, 1)
active_masks_batch = np.stack(active_masks_batch, 1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch,
1)
adv_targ = np.stack(adv_targ, 1)
# States is just a (N, dim) from_numpy [N[1,dim]]
rnn_states_batch = np.stack(rnn_states_batch).reshape(
N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(
rnn_states_critic_batch).reshape(
N, *self.rnn_states_critic.shape[3:])
# Flatten the (T, N, ...) from_numpys to (T * N, ...)
share_obs_batch = _flatten(T, N, share_obs_batch)
obs_batch = _flatten(T, N, obs_batch)
actions_batch = _flatten(T, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(T, N,
available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(T, N, value_preds_batch)
return_batch = _flatten(T, N, return_batch)
masks_batch = _flatten(T, N, masks_batch)
active_masks_batch = _flatten(T, N, active_masks_batch)
old_action_log_probs_batch = _flatten(T, N,
old_action_log_probs_batch)
adv_targ = _flatten(T, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
adv_targ, available_actions_batch
def recurrent_generator(self, advantages, num_mini_batch,
data_chunk_length):
"""
Yield training data for chunked RNN training.
:param advantages: (np.ndarray) advantage estimates.
:param num_mini_batch: (int) number of minibatches to split the batch into.
:param data_chunk_length: (int) length of sequence chunks with which to train RNN.
"""
episode_length, n_rollout_threads, num_agents = self.rewards.shape[0:3]
batch_size = n_rollout_threads * episode_length * num_agents
data_chunks = batch_size // data_chunk_length # [C=r*T*M/L]
mini_batch_size = data_chunks // num_mini_batch
rand = torch.randperm(data_chunks).numpy()
sampler = [
rand[i * mini_batch_size:(i + 1) * mini_batch_size]
for i in range(num_mini_batch)
]
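        # Trajectories are cut into chunks of `data_chunk_length` consecutive steps;
        # each minibatch is a random set of such chunks, and only the RNN hidden state
        # at the start of a chunk is passed in -- the rest is re-unrolled in the update.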
if len(self.share_obs.shape) > 4:
share_obs = self.share_obs[:-1].transpose(
1, 2, 0, 3, 4, 5).reshape(-1, *self.share_obs.shape[3:])
obs = self.obs[:-1].transpose(1, 2, 0, 3, 4,
5).reshape(-1, *self.obs.shape[3:])
else:
share_obs = _cast(self.share_obs[:-1])
obs = _cast(self.obs[:-1])
actions = _cast(self.actions)
action_log_probs = _cast(self.action_log_probs)
advantages = _cast(advantages)
value_preds = _cast(self.value_preds[:-1])
returns = _cast(self.returns[:-1])
masks = _cast(self.masks[:-1])
active_masks = _cast(self.active_masks[:-1])
# rnn_states = _cast(self.rnn_states[:-1])
# rnn_states_critic = _cast(self.rnn_states_critic[:-1])
rnn_states = self.rnn_states[:-1].transpose(1, 2, 0, 3, 4).reshape(
-1, *self.rnn_states.shape[3:])
rnn_states_critic = self.rnn_states_critic[:-1].transpose(
1, 2, 0, 3, 4).reshape(-1, *self.rnn_states_critic.shape[3:])
if self.available_actions is not None:
available_actions = _cast(self.available_actions[:-1])
for indices in sampler:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for index in indices:
ind = index * data_chunk_length
# size [T+1 N M Dim]-->[T N M Dim]-->[N,M,T,Dim]-->[N*M*T,Dim]-->[L,Dim]
share_obs_batch.append(share_obs[ind:ind + data_chunk_length])
obs_batch.append(obs[ind:ind + data_chunk_length])
actions_batch.append(actions[ind:ind + data_chunk_length])
if self.available_actions is not None:
available_actions_batch.append(
available_actions[ind:ind + data_chunk_length])
value_preds_batch.append(value_preds[ind:ind +
data_chunk_length])
return_batch.append(returns[ind:ind + data_chunk_length])
masks_batch.append(masks[ind:ind + data_chunk_length])
active_masks_batch.append(active_masks[ind:ind +
data_chunk_length])
old_action_log_probs_batch.append(
action_log_probs[ind:ind + data_chunk_length])
adv_targ.append(advantages[ind:ind + data_chunk_length])
# size [T+1 N M Dim]-->[T N M Dim]-->[N M T Dim]-->[N*M*T,Dim]-->[1,Dim]
rnn_states_batch.append(rnn_states[ind])
rnn_states_critic_batch.append(rnn_states_critic[ind])
L, N = data_chunk_length, mini_batch_size
# These are all from_numpys of size (L, N, Dim)
share_obs_batch = np.stack(share_obs_batch, axis=1)
obs_batch = np.stack(obs_batch, axis=1)
actions_batch = np.stack(actions_batch, axis=1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch,
axis=1)
value_preds_batch = np.stack(value_preds_batch, axis=1)
return_batch = np.stack(return_batch, axis=1)
masks_batch = np.stack(masks_batch, axis=1)
active_masks_batch = np.stack(active_masks_batch, axis=1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch,
axis=1)
adv_targ = np.stack(adv_targ, axis=1)
# States is just a (N, -1) from_numpy
rnn_states_batch = np.stack(rnn_states_batch).reshape(
N, *self.rnn_states.shape[3:])
rnn_states_critic_batch = np.stack(
rnn_states_critic_batch).reshape(
N, *self.rnn_states_critic.shape[3:])
# Flatten the (L, N, ...) from_numpys to (L * N, ...)
share_obs_batch = _flatten(L, N, share_obs_batch)
obs_batch = _flatten(L, N, obs_batch)
actions_batch = _flatten(L, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(L, N,
available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(L, N, value_preds_batch)
return_batch = _flatten(L, N, return_batch)
masks_batch = _flatten(L, N, masks_batch)
active_masks_batch = _flatten(L, N, active_masks_batch)
old_action_log_probs_batch = _flatten(L, N,
old_action_log_probs_batch)
adv_targ = _flatten(L, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch,\
value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch,\
adv_targ, available_actions_batch
| 29,299 | 49.08547 | 120 | py |
nocturne | nocturne-main/algos/ppo/utils/util.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import numpy as np
import math
import torch
def check(input):
    # Convert numpy arrays to tensors; pass tensors (and anything else) through.
    if type(input) == np.ndarray:
        return torch.from_numpy(input)
    return input
def get_gard_norm(it):
sum_grad = 0
for x in it:
if x.grad is None:
continue
sum_grad += x.grad.norm()**2
return math.sqrt(sum_grad)
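# Note: this returns the global L2 norm over all parameter gradients, the same
# quantity nn.utils.clip_grad_norm_ reports; the "gard" spelling is kept as-is
# because callers import the function under this name.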
def update_linear_schedule(optimizer, epoch, total_num_epochs, initial_lr):
"""Decreases the learning rate linearly"""
lr = initial_lr - (initial_lr * (epoch / float(total_num_epochs)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def huber_loss(e, d):
a = (abs(e) <= d).float()
    b = (abs(e) > d).float()
return a * e**2 / 2 + b * d * (abs(e) - d / 2)
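# Huber loss: quadratic for |e| <= d and linear (slope d) beyond, so outlier value
# errors influence the critic update less than they would under plain MSE.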
def mse_loss(e):
return e**2 / 2
def get_shape_from_obs_space(obs_space):
if obs_space.__class__.__name__ == 'Box':
obs_shape = obs_space.shape
elif obs_space.__class__.__name__ == 'list':
obs_shape = obs_space
else:
raise NotImplementedError
return obs_shape
def get_shape_from_act_space(act_space):
if act_space.__class__.__name__ == 'Discrete':
act_shape = 1
elif act_space.__class__.__name__ == "MultiDiscrete":
act_shape = act_space.shape
elif act_space.__class__.__name__ == "Box":
act_shape = act_space.shape[0]
elif act_space.__class__.__name__ == "MultiBinary":
act_shape = act_space.shape[0]
else: # agar
act_shape = act_space[0].shape[0] + 1
return act_shape
def tile_images(img_nhwc):
"""
Tile N images into one big PxQ image
    (P, Q) are chosen to be as close to each other as possible, and if N
    is a perfect square, then P = Q.
input: img_nhwc, list or array of images, ndim=4 once turned into array
n = batch index, h = height, w = width, c = channel
returns:
bigim_HWc, ndarray with ndim=3
"""
img_nhwc = np.asarray(img_nhwc)
N, h, w, c = img_nhwc.shape
H = int(np.ceil(np.sqrt(N)))
W = int(np.ceil(float(N) / H))
img_nhwc = np.array(
list(img_nhwc) + [img_nhwc[0] * 0 for _ in range(N, H * W)])
img_HWhwc = img_nhwc.reshape(H, W, h, w, c)
img_HhWwc = img_HWhwc.transpose(0, 2, 1, 3, 4)
img_Hh_Ww_c = img_HhWwc.reshape(H * h, W * w, c)
return img_Hh_Ww_c
| 2,524 | 28.360465 | 75 | py |
nocturne | nocturne-main/algos/ppo/utils/separated_buffer.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import torch
import numpy as np
from collections import defaultdict
from algos.ppo.utils.util import check, get_shape_from_obs_space, get_shape_from_act_space
def _flatten(T, N, x):
return x.reshape(T * N, *x.shape[2:])
def _cast(x):
return x.transpose(1, 0, 2).reshape(-1, *x.shape[2:])
class SeparatedReplayBuffer(object):
def __init__(self, args, obs_space, share_obs_space, act_space):
self.episode_length = args.episode_length
self.n_rollout_threads = args.n_rollout_threads
self.rnn_hidden_size = args.hidden_size
self.recurrent_N = args.recurrent_N
self.gamma = args.gamma
self.gae_lambda = args.gae_lambda
self._use_gae = args.use_gae
self._use_popart = args.use_popart
self._use_valuenorm = args.use_valuenorm
self._use_proper_time_limits = args.use_proper_time_limits
obs_shape = get_shape_from_obs_space(obs_space)
share_obs_shape = get_shape_from_obs_space(share_obs_space)
if type(obs_shape[-1]) == list:
obs_shape = obs_shape[:1]
if type(share_obs_shape[-1]) == list:
share_obs_shape = share_obs_shape[:1]
self.share_obs = np.zeros((self.episode_length + 1,
self.n_rollout_threads, *share_obs_shape),
dtype=np.float32)
self.obs = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, *obs_shape),
dtype=np.float32)
self.rnn_states = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, self.recurrent_N,
self.rnn_hidden_size),
dtype=np.float32)
self.rnn_states_critic = np.zeros_like(self.rnn_states)
self.value_preds = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, 1),
dtype=np.float32)
self.returns = np.zeros(
(self.episode_length + 1, self.n_rollout_threads, 1),
dtype=np.float32)
if act_space.__class__.__name__ == 'Discrete':
self.available_actions = np.ones(
(self.episode_length + 1, self.n_rollout_threads, act_space.n),
dtype=np.float32)
else:
self.available_actions = None
act_shape = get_shape_from_act_space(act_space)
self.actions = np.zeros(
(self.episode_length, self.n_rollout_threads, act_shape),
dtype=np.float32)
self.action_log_probs = np.zeros(
(self.episode_length, self.n_rollout_threads, act_shape),
dtype=np.float32)
self.rewards = np.zeros(
(self.episode_length, self.n_rollout_threads, 1), dtype=np.float32)
self.masks = np.ones(
(self.episode_length + 1, self.n_rollout_threads, 1),
dtype=np.float32)
self.bad_masks = np.ones_like(self.masks)
self.active_masks = np.ones_like(self.masks)
self.step = 0
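    # Same layout as SharedReplayBuffer but without the per-agent dimension: when
    # policies are not shared, each agent typically gets its own SeparatedReplayBuffer.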
def insert(self,
share_obs,
obs,
rnn_states,
rnn_states_critic,
actions,
action_log_probs,
value_preds,
rewards,
masks,
bad_masks=None,
active_masks=None,
available_actions=None):
self.share_obs[self.step + 1] = share_obs.copy()
self.obs[self.step + 1] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step + 1] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step + 1] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def chooseinsert(self,
share_obs,
obs,
rnn_states,
rnn_states_critic,
actions,
action_log_probs,
value_preds,
rewards,
masks,
bad_masks=None,
active_masks=None,
available_actions=None):
self.share_obs[self.step] = share_obs.copy()
self.obs[self.step] = obs.copy()
self.rnn_states[self.step + 1] = rnn_states.copy()
self.rnn_states_critic[self.step + 1] = rnn_states_critic.copy()
self.actions[self.step] = actions.copy()
self.action_log_probs[self.step] = action_log_probs.copy()
self.value_preds[self.step] = value_preds.copy()
self.rewards[self.step] = rewards.copy()
self.masks[self.step + 1] = masks.copy()
if bad_masks is not None:
self.bad_masks[self.step + 1] = bad_masks.copy()
if active_masks is not None:
self.active_masks[self.step] = active_masks.copy()
if available_actions is not None:
self.available_actions[self.step] = available_actions.copy()
self.step = (self.step + 1) % self.episode_length
def after_update(self):
self.share_obs[0] = self.share_obs[-1].copy()
self.obs[0] = self.obs[-1].copy()
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
self.active_masks[0] = self.active_masks[-1].copy()
if self.available_actions is not None:
self.available_actions[0] = self.available_actions[-1].copy()
def chooseafter_update(self):
self.rnn_states[0] = self.rnn_states[-1].copy()
self.rnn_states_critic[0] = self.rnn_states_critic[-1].copy()
self.masks[0] = self.masks[-1].copy()
self.bad_masks[0] = self.bad_masks[-1].copy()
def compute_returns(self, next_value, value_normalizer=None):
if self._use_proper_time_limits:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
delta = self.rewards[
step] + self.gamma * value_normalizer.denormalize(
self.value_preds[step + 1]) * self.masks[
step + 1] - value_normalizer.denormalize(
self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * self.masks[
step + 1] * gae
gae = gae * self.bad_masks[step + 1]
self.returns[
step] = gae + value_normalizer.denormalize(
self.value_preds[step])
else:
delta = self.rewards[
step] + self.gamma * self.value_preds[
step + 1] * self.masks[
step + 1] - self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[
step + 1] * gae
gae = gae * self.bad_masks[step + 1]
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * value_normalizer.denormalize(self.value_preds[step])
else:
self.returns[step] = (self.returns[step + 1] * self.gamma * self.masks[step + 1] + self.rewards[step]) * self.bad_masks[step + 1] \
+ (1 - self.bad_masks[step + 1]) * self.value_preds[step]
else:
if self._use_gae:
self.value_preds[-1] = next_value
gae = 0
for step in reversed(range(self.rewards.shape[0])):
if self._use_popart or self._use_valuenorm:
delta = self.rewards[
step] + self.gamma * value_normalizer.denormalize(
self.value_preds[step + 1]) * self.masks[
step + 1] - value_normalizer.denormalize(
self.value_preds[step])
gae = delta + self.gamma * self.gae_lambda * self.masks[
step + 1] * gae
self.returns[
step] = gae + value_normalizer.denormalize(
self.value_preds[step])
else:
delta = self.rewards[
step] + self.gamma * self.value_preds[
step + 1] * self.masks[
step + 1] - self.value_preds[step]
gae = delta + self.gamma * self.gae_lambda * self.masks[
step + 1] * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.shape[0])):
self.returns[step] = self.returns[
step + 1] * self.gamma * self.masks[
step + 1] + self.rewards[step]
def feed_forward_generator(self,
advantages,
num_mini_batch=None,
mini_batch_size=None):
episode_length, n_rollout_threads = self.rewards.shape[0:2]
batch_size = n_rollout_threads * episode_length
if mini_batch_size is None:
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(n_rollout_threads, episode_length,
n_rollout_threads * episode_length, num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
rand = torch.randperm(batch_size).numpy()
sampler = [
rand[i * mini_batch_size:(i + 1) * mini_batch_size]
for i in range(num_mini_batch)
]
share_obs = self.share_obs[:-1].reshape(-1, *self.share_obs.shape[2:])
obs = self.obs[:-1].reshape(-1, *self.obs.shape[2:])
rnn_states = self.rnn_states[:-1].reshape(-1,
*self.rnn_states.shape[2:])
rnn_states_critic = self.rnn_states_critic[:-1].reshape(
-1, *self.rnn_states_critic.shape[2:])
actions = self.actions.reshape(-1, self.actions.shape[-1])
if self.available_actions is not None:
available_actions = self.available_actions[:-1].reshape(
-1, self.available_actions.shape[-1])
value_preds = self.value_preds[:-1].reshape(-1, 1)
returns = self.returns[:-1].reshape(-1, 1)
masks = self.masks[:-1].reshape(-1, 1)
active_masks = self.active_masks[:-1].reshape(-1, 1)
action_log_probs = self.action_log_probs.reshape(
-1, self.action_log_probs.shape[-1])
advantages = advantages.reshape(-1, 1)
for indices in sampler:
# obs size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[index,Dim]
share_obs_batch = share_obs[indices]
obs_batch = obs[indices]
rnn_states_batch = rnn_states[indices]
rnn_states_critic_batch = rnn_states_critic[indices]
actions_batch = actions[indices]
if self.available_actions is not None:
available_actions_batch = available_actions[indices]
else:
available_actions_batch = None
value_preds_batch = value_preds[indices]
return_batch = returns[indices]
masks_batch = masks[indices]
active_masks_batch = active_masks[indices]
old_action_log_probs_batch = action_log_probs[indices]
if advantages is None:
adv_targ = None
else:
adv_targ = advantages[indices]
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def naive_recurrent_generator(self, advantages, num_mini_batch):
n_rollout_threads = self.rewards.shape[1]
assert n_rollout_threads >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(n_rollout_threads, num_mini_batch))
num_envs_per_batch = n_rollout_threads // num_mini_batch
perm = torch.randperm(n_rollout_threads).numpy()
for start_ind in range(0, n_rollout_threads, num_envs_per_batch):
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
share_obs_batch.append(self.share_obs[:-1, ind])
obs_batch.append(self.obs[:-1, ind])
rnn_states_batch.append(self.rnn_states[0:1, ind])
rnn_states_critic_batch.append(self.rnn_states_critic[0:1,
ind])
actions_batch.append(self.actions[:, ind])
if self.available_actions is not None:
available_actions_batch.append(self.available_actions[:-1,
ind])
value_preds_batch.append(self.value_preds[:-1, ind])
return_batch.append(self.returns[:-1, ind])
masks_batch.append(self.masks[:-1, ind])
active_masks_batch.append(self.active_masks[:-1, ind])
old_action_log_probs_batch.append(self.action_log_probs[:,
ind])
adv_targ.append(advantages[:, ind])
# [N[T, dim]]
T, N = self.episode_length, num_envs_per_batch
# These are all from_numpys of size (T, N, -1)
share_obs_batch = np.stack(share_obs_batch, 1)
obs_batch = np.stack(obs_batch, 1)
actions_batch = np.stack(actions_batch, 1)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch, 1)
value_preds_batch = np.stack(value_preds_batch, 1)
return_batch = np.stack(return_batch, 1)
masks_batch = np.stack(masks_batch, 1)
active_masks_batch = np.stack(active_masks_batch, 1)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch,
1)
adv_targ = np.stack(adv_targ, 1)
# States is just a (N, -1) from_numpy [N[1,dim]]
rnn_states_batch = np.stack(rnn_states_batch,
1).reshape(N,
*self.rnn_states.shape[2:])
rnn_states_critic_batch = np.stack(
rnn_states_critic_batch,
1).reshape(N, *self.rnn_states_critic.shape[2:])
# Flatten the (T, N, ...) from_numpys to (T * N, ...)
share_obs_batch = _flatten(T, N, share_obs_batch)
obs_batch = _flatten(T, N, obs_batch)
actions_batch = _flatten(T, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(T, N,
available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(T, N, value_preds_batch)
return_batch = _flatten(T, N, return_batch)
masks_batch = _flatten(T, N, masks_batch)
active_masks_batch = _flatten(T, N, active_masks_batch)
old_action_log_probs_batch = _flatten(T, N,
old_action_log_probs_batch)
adv_targ = _flatten(T, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
def recurrent_generator(self, advantages, num_mini_batch,
data_chunk_length):
episode_length, n_rollout_threads = self.rewards.shape[0:2]
batch_size = n_rollout_threads * episode_length
data_chunks = batch_size // data_chunk_length # [C=r*T/L]
mini_batch_size = data_chunks // num_mini_batch
        assert episode_length * n_rollout_threads >= data_chunk_length, (
            "PPO requires the number of processes ({}) * episode length ({}) "
            "to be greater than or equal to the "
            "data chunk length ({}).".format(n_rollout_threads, episode_length,
                                             data_chunk_length))
assert data_chunks >= 2, ("need larger batch size")
rand = torch.randperm(data_chunks).numpy()
sampler = [
rand[i * mini_batch_size:(i + 1) * mini_batch_size]
for i in range(num_mini_batch)
]
if len(self.share_obs.shape) > 3:
share_obs = self.share_obs[:-1].transpose(1, 0, 2, 3, 4).reshape(
-1, *self.share_obs.shape[2:])
obs = self.obs[:-1].transpose(1, 0, 2, 3,
4).reshape(-1, *self.obs.shape[2:])
else:
share_obs = _cast(self.share_obs[:-1])
obs = _cast(self.obs[:-1])
actions = _cast(self.actions)
action_log_probs = _cast(self.action_log_probs)
advantages = _cast(advantages)
value_preds = _cast(self.value_preds[:-1])
returns = _cast(self.returns[:-1])
masks = _cast(self.masks[:-1])
active_masks = _cast(self.active_masks[:-1])
# rnn_states = _cast(self.rnn_states[:-1])
# rnn_states_critic = _cast(self.rnn_states_critic[:-1])
rnn_states = self.rnn_states[:-1].transpose(1, 0, 2, 3).reshape(
-1, *self.rnn_states.shape[2:])
rnn_states_critic = self.rnn_states_critic[:-1].transpose(
1, 0, 2, 3).reshape(-1, *self.rnn_states_critic.shape[2:])
if self.available_actions is not None:
available_actions = _cast(self.available_actions[:-1])
for indices in sampler:
share_obs_batch = []
obs_batch = []
rnn_states_batch = []
rnn_states_critic_batch = []
actions_batch = []
available_actions_batch = []
value_preds_batch = []
return_batch = []
masks_batch = []
active_masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for index in indices:
ind = index * data_chunk_length
# size [T+1 N M Dim]-->[T N Dim]-->[N T Dim]-->[T*N,Dim]-->[L,Dim]
share_obs_batch.append(share_obs[ind:ind + data_chunk_length])
obs_batch.append(obs[ind:ind + data_chunk_length])
actions_batch.append(actions[ind:ind + data_chunk_length])
if self.available_actions is not None:
available_actions_batch.append(
available_actions[ind:ind + data_chunk_length])
value_preds_batch.append(value_preds[ind:ind +
data_chunk_length])
return_batch.append(returns[ind:ind + data_chunk_length])
masks_batch.append(masks[ind:ind + data_chunk_length])
active_masks_batch.append(active_masks[ind:ind +
data_chunk_length])
old_action_log_probs_batch.append(
action_log_probs[ind:ind + data_chunk_length])
adv_targ.append(advantages[ind:ind + data_chunk_length])
# size [T+1 N Dim]-->[T N Dim]-->[T*N,Dim]-->[1,Dim]
rnn_states_batch.append(rnn_states[ind])
rnn_states_critic_batch.append(rnn_states_critic[ind])
L, N = data_chunk_length, mini_batch_size
# These are all from_numpys of size (N, L, Dim)
share_obs_batch = np.stack(share_obs_batch)
obs_batch = np.stack(obs_batch)
actions_batch = np.stack(actions_batch)
if self.available_actions is not None:
available_actions_batch = np.stack(available_actions_batch)
value_preds_batch = np.stack(value_preds_batch)
return_batch = np.stack(return_batch)
masks_batch = np.stack(masks_batch)
active_masks_batch = np.stack(active_masks_batch)
old_action_log_probs_batch = np.stack(old_action_log_probs_batch)
adv_targ = np.stack(adv_targ)
# States is just a (N, -1) from_numpy
rnn_states_batch = np.stack(rnn_states_batch).reshape(
N, *self.rnn_states.shape[2:])
rnn_states_critic_batch = np.stack(
rnn_states_critic_batch).reshape(
N, *self.rnn_states_critic.shape[2:])
# Flatten the (L, N, ...) from_numpys to (L * N, ...)
share_obs_batch = _flatten(L, N, share_obs_batch)
obs_batch = _flatten(L, N, obs_batch)
actions_batch = _flatten(L, N, actions_batch)
if self.available_actions is not None:
available_actions_batch = _flatten(L, N,
available_actions_batch)
else:
available_actions_batch = None
value_preds_batch = _flatten(L, N, value_preds_batch)
return_batch = _flatten(L, N, return_batch)
masks_batch = _flatten(L, N, masks_batch)
active_masks_batch = _flatten(L, N, active_masks_batch)
old_action_log_probs_batch = _flatten(L, N,
old_action_log_probs_batch)
adv_targ = _flatten(L, N, adv_targ)
yield share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch, actions_batch, value_preds_batch, return_batch, masks_batch, active_masks_batch, old_action_log_probs_batch, adv_targ, available_actions_batch
| 24,402 | 47.227273 | 231 | py |
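The three generators above all yield the same 12-tuple of flattened minibatch tensors. Below is a hedged sketch of how a PPO-style trainer might consume `recurrent_generator`; the names `buffer`, `advantages`, `ppo_epochs` and `ppo_update` are assumptions for illustration, not part of the repository.
# Minimal consumption sketch (assumed names: `buffer` is the rollout buffer above,
# `advantages` is precomputed as returns minus value predictions, `ppo_update` is
# a hypothetical function applying the clipped PPO loss to one minibatch).
for epoch in range(ppo_epochs):
    data_generator = buffer.recurrent_generator(advantages,
                                                num_mini_batch=2,
                                                data_chunk_length=10)
    for sample in data_generator:
        (share_obs_batch, obs_batch, rnn_states_batch, rnn_states_critic_batch,
         actions_batch, value_preds_batch, return_batch, masks_batch,
         active_masks_batch, old_action_log_probs_batch, adv_targ,
         available_actions_batch) = sample
        ppo_update(sample)  # hypothetical: one gradient step on this chunked minibatch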
nocturne | nocturne-main/algos/ppo/utils/__init__.py | 0 | 0 | 0 | py |
|
nocturne | nocturne-main/algos/ppo/ppo_utils/distributions.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import torch
import torch.nn as nn
from .util import init
"""
Modify standard PyTorch distributions so they are compatible with this codebase.
"""
#
# Standardize distribution interfaces
#
# Categorical
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (super().log_prob(actions.squeeze(-1)).view(
actions.size(0), -1).sum(-1).unsqueeze(-1))
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
# Normal
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum(-1, keepdim=True)
    def entropy(self):
        return super().entropy().sum(-1)
def mode(self):
return self.mean
# Bernoulli
class FixedBernoulli(torch.distributions.Bernoulli):
def log_probs(self, actions):
        return super().log_prob(actions).view(actions.size(0),
                                              -1).sum(-1).unsqueeze(-1)
def entropy(self):
return super().entropy().sum(-1)
def mode(self):
return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
def __init__(self,
num_inputs,
num_outputs,
use_orthogonal=True,
gain=0.01):
super(Categorical, self).__init__()
init_method = [nn.init.xavier_uniform_,
nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0),
gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x, available_actions=None):
x = self.linear(x)
if available_actions is not None:
x[available_actions == 0] = -1e10
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self,
num_inputs,
num_outputs,
use_orthogonal=True,
gain=0.01,
device='cpu'):
super(DiagGaussian, self).__init__()
init_method = [nn.init.xavier_uniform_,
nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0),
gain)
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self.logstd = AddBias(torch.zeros(num_outputs))
self.to(device)
self.device = device
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size()).to(self.device)
# if x.is_cuda:
# zeros = zeros.cuda()
action_logstd = self.logstd(zeros)
return FixedNormal(action_mean, action_logstd.exp())
class Bernoulli(nn.Module):
def __init__(self,
num_inputs,
num_outputs,
use_orthogonal=True,
gain=0.01):
super(Bernoulli, self).__init__()
init_method = [nn.init.xavier_uniform_,
nn.init.orthogonal_][use_orthogonal]
def init_(m):
return init(m, init_method, lambda x: nn.init.constant_(x, 0),
gain)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedBernoulli(logits=x)
class AddBias(nn.Module):
def __init__(self, bias):
super(AddBias, self).__init__()
self._bias = nn.Parameter(bias.unsqueeze(1))
def forward(self, x):
if x.dim() == 2:
bias = self._bias.t().view(1, -1)
else:
bias = self._bias.t().view(1, -1, 1, 1)
return x + bias
| 4,168 | 26.427632 | 85 | py |
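A small usage sketch of the Categorical head above, showing how an availability mask suppresses actions before sampling. The feature size, batch size and mask values are illustrative assumptions.
# Illustrative only: build a Categorical head and mask out unavailable actions.
import torch
head = Categorical(num_inputs=64, num_outputs=5)
features = torch.randn(2, 64)                        # batch of 2 feature vectors
available = torch.tensor([[1, 1, 0, 1, 1],
                          [1, 0, 0, 1, 1]])          # 0 => logit pushed to -1e10
dist = head(features, available_actions=available)   # FixedCategorical
actions = dist.sample()                               # shape (2, 1) due to unsqueeze(-1)
log_probs = dist.log_probs(actions)                   # shape (2, 1)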
nocturne | nocturne-main/algos/ppo/ppo_utils/cnn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
from torchvision import transforms
import torch.nn as nn
from .util import init
"""CNN Modules and utils."""
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class CNNLayer(nn.Module):
def __init__(self,
obs_shape,
hidden_size,
use_orthogonal,
use_ReLU,
kernel_size=3,
stride=1):
super(CNNLayer, self).__init__()
active_func = [nn.Tanh(), nn.ReLU()][use_ReLU]
init_method = [nn.init.xavier_uniform_,
nn.init.orthogonal_][use_orthogonal]
gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU])
self.resize = transforms.Resize(84)
def init_(m):
return init(m,
init_method,
lambda x: nn.init.constant_(x, 0),
gain=gain)
input_channel = obs_shape[0]
input_width = obs_shape[1]
input_height = obs_shape[2]
self.cnn = nn.Sequential(
init_(
nn.Conv2d(in_channels=input_channel,
out_channels=hidden_size // 2,
kernel_size=kernel_size,
stride=stride)), active_func, Flatten(),
init_(
nn.Linear(
hidden_size // 2 * (input_width - kernel_size + stride) *
(input_height - kernel_size + stride),
hidden_size)), active_func,
init_(nn.Linear(hidden_size, hidden_size)), active_func)
def forward(self, x):
# TODO(eugenevinitsky) hardcoding is bad
x = self.resize(x) / 255.0
x = self.cnn(x)
return x
class CNNBase(nn.Module):
def __init__(self, args, obs_shape):
super(CNNBase, self).__init__()
self._use_orthogonal = args.use_orthogonal
self._use_ReLU = args.use_ReLU
self.hidden_size = args.hidden_size
self.cnn = CNNLayer(obs_shape, self.hidden_size, self._use_orthogonal,
self._use_ReLU)
def forward(self, x):
x = self.cnn(x)
return x
| 2,471 | 29.518519 | 78 | py |
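The size of the first Linear layer in CNNLayer comes from a single valid convolution over the input, so the flattened feature count is `(hidden_size // 2) * (W - k + s) * (H - k + s)`. A quick arithmetic check, under the assumption that obs_shape is already (C, 84, 84) so the resize in forward() leaves the spatial size unchanged, and with an illustrative hidden_size of 64:
# Quick shape check (assumes obs_shape is (C, 84, 84); hidden_size=64 is illustrative).
kernel_size, stride = 3, 1
input_width = input_height = 84
hidden_size = 64
conv_out_side = input_width - kernel_size + stride             # 82 for a valid conv
flat_features = (hidden_size // 2) * conv_out_side * (input_height - kernel_size + stride)
assert flat_features == 32 * 82 * 82                            # 215,168 inputs to the Linear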
nocturne | nocturne-main/algos/ppo/ppo_utils/mlp.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import torch.nn as nn
from .util import init, get_clones
"""MLP modules."""
class MLPLayer(nn.Module):
def __init__(self, input_dim, hidden_size, layer_N, use_orthogonal,
use_ReLU):
super(MLPLayer, self).__init__()
self._layer_N = layer_N
active_func = [nn.Tanh(), nn.ReLU()][use_ReLU]
init_method = [nn.init.xavier_uniform_,
nn.init.orthogonal_][use_orthogonal]
gain = nn.init.calculate_gain(['tanh', 'relu'][use_ReLU])
def init_(m):
return init(m,
init_method,
lambda x: nn.init.constant_(x, 0),
gain=gain)
self.fc1 = nn.Sequential(init_(nn.Linear(input_dim, hidden_size)),
active_func, nn.LayerNorm(hidden_size))
self.fc_h = nn.Sequential(init_(nn.Linear(hidden_size, hidden_size)),
active_func, nn.LayerNorm(hidden_size))
self.fc2 = get_clones(self.fc_h, self._layer_N)
def forward(self, x):
x = self.fc1(x)
for i in range(self._layer_N):
x = self.fc2[i](x)
return x
class MLPBase(nn.Module):
def __init__(self, args, obs_shape, cat_self=True, attn_internal=False):
super(MLPBase, self).__init__()
self._use_feature_normalization = args.use_feature_normalization
self._use_orthogonal = args.use_orthogonal
self._use_ReLU = args.use_ReLU
self._stacked_frames = args.stacked_frames
self._layer_N = args.layer_N
self.hidden_size = args.hidden_size
obs_dim = obs_shape[0]
if self._use_feature_normalization:
self.feature_norm = nn.LayerNorm(obs_dim)
self.mlp = MLPLayer(obs_dim, self.hidden_size, self._layer_N,
self._use_orthogonal, self._use_ReLU)
def forward(self, x):
if self._use_feature_normalization:
x = self.feature_norm(x)
x = self.mlp(x)
return x
| 2,308 | 32.463768 | 77 | py |
nocturne | nocturne-main/algos/ppo/ppo_utils/encoder.py | 0 | 0 | 0 | py |
|
nocturne | nocturne-main/algos/ppo/ppo_utils/popart.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PopArt(torch.nn.Module):
def __init__(self,
input_shape,
output_shape,
norm_axes=1,
beta=0.99999,
epsilon=1e-5,
device=torch.device("cpu")):
super(PopArt, self).__init__()
self.beta = beta
self.epsilon = epsilon
self.norm_axes = norm_axes
self.tpdv = dict(dtype=torch.float32, device=device)
self.input_shape = input_shape
self.output_shape = output_shape
self.weight = nn.Parameter(torch.Tensor(output_shape,
input_shape)).to(**self.tpdv)
self.bias = nn.Parameter(torch.Tensor(output_shape)).to(**self.tpdv)
self.stddev = nn.Parameter(torch.ones(output_shape),
requires_grad=False).to(**self.tpdv)
self.mean = nn.Parameter(torch.zeros(output_shape),
requires_grad=False).to(**self.tpdv)
self.mean_sq = nn.Parameter(torch.zeros(output_shape),
requires_grad=False).to(**self.tpdv)
self.debiasing_term = nn.Parameter(torch.tensor(0.0),
requires_grad=False).to(**self.tpdv)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(
self.weight)
bound = 1 / math.sqrt(fan_in)
torch.nn.init.uniform_(self.bias, -bound, bound)
self.mean.zero_()
self.mean_sq.zero_()
self.debiasing_term.zero_()
def forward(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
return F.linear(input_vector, self.weight, self.bias)
@torch.no_grad()
def update(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
old_mean, old_var = self.debiased_mean_var()
old_stddev = torch.sqrt(old_var)
batch_mean = input_vector.mean(dim=tuple(range(self.norm_axes)))
batch_sq_mean = (input_vector**2).mean(
dim=tuple(range(self.norm_axes)))
self.mean.mul_(self.beta).add_(batch_mean * (1.0 - self.beta))
self.mean_sq.mul_(self.beta).add_(batch_sq_mean * (1.0 - self.beta))
self.debiasing_term.mul_(self.beta).add_(1.0 * (1.0 - self.beta))
self.stddev = (self.mean_sq - self.mean**2).sqrt().clamp(min=1e-4)
new_mean, new_var = self.debiased_mean_var()
new_stddev = torch.sqrt(new_var)
self.weight = self.weight * old_stddev / new_stddev
self.bias = (old_stddev * self.bias + old_mean - new_mean) / new_stddev
def debiased_mean_var(self):
debiased_mean = self.mean / self.debiasing_term.clamp(min=self.epsilon)
debiased_mean_sq = self.mean_sq / self.debiasing_term.clamp(
min=self.epsilon)
debiased_var = (debiased_mean_sq - debiased_mean**2).clamp(min=1e-2)
return debiased_mean, debiased_var
def normalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = (input_vector - mean[(None, ) * self.norm_axes]
) / torch.sqrt(var)[(None, ) * self.norm_axes]
return out
def denormalize(self, input_vector):
if type(input_vector) == np.ndarray:
input_vector = torch.from_numpy(input_vector)
input_vector = input_vector.to(**self.tpdv)
mean, var = self.debiased_mean_var()
out = input_vector * torch.sqrt(var)[(None, ) * self.norm_axes] + mean[
(None, ) * self.norm_axes]
out = out.cpu().numpy()
return out
| 4,510 | 36.280992 | 79 | py |
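A rough sketch of the intended PopArt usage: `update` tracks running return statistics, `normalize` produces targets for the value loss, and `denormalize` maps value predictions back to the return scale. The tensor shapes and fake returns are assumptions for illustration.
# Illustrative only; in a trainer, `returns` would come from the rollout buffer.
import torch
value_head = PopArt(input_shape=64, output_shape=1)
returns = torch.randn(256, 1) * 10 + 5                 # fake returns with non-trivial mean/std
value_head.update(returns)                             # refresh running mean/std and rescale weights
normalized_targets = value_head.normalize(returns)     # roughly zero-mean targets for the critic loss
features = torch.randn(256, 64)
raw_values = value_head(features)                      # predictions on the normalized scale
values = value_head.denormalize(raw_values.detach())   # back on the return scale (numpy array)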
nocturne | nocturne-main/algos/ppo/ppo_utils/util.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import copy
import numpy as np
import torch
import torch.nn as nn
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def check(input):
output = torch.from_numpy(input) if type(input) == np.ndarray else input
return output
| 690 | 25.576923 | 76 | py |
nocturne | nocturne-main/algos/ppo/ppo_utils/act.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
from .distributions import Bernoulli, Categorical, DiagGaussian
import torch
import torch.nn as nn
class ACTLayer(nn.Module):
"""
MLP Module to compute actions.
:param action_space: (gym.Space) action space.
:param inputs_dim: (int) dimension of network input.
:param use_orthogonal: (bool) whether to use orthogonal initialization.
:param gain: (float) gain of the output layer of the network.
"""
def __init__(self, action_space, inputs_dim, use_orthogonal, gain, device):
super(ACTLayer, self).__init__()
self.mixed_action = False
self.multi_discrete = False
if action_space.__class__.__name__ == "Discrete":
action_dim = action_space.n
self.action_out = Categorical(inputs_dim, action_dim,
use_orthogonal, gain)
elif action_space.__class__.__name__ == "Box":
action_dim = action_space.shape[0]
self.action_out = DiagGaussian(inputs_dim, action_dim,
use_orthogonal, gain, device)
elif action_space.__class__.__name__ == "MultiBinary":
action_dim = action_space.shape[0]
self.action_out = Bernoulli(inputs_dim, action_dim, use_orthogonal,
gain)
elif action_space.__class__.__name__ == "MultiDiscrete":
self.multi_discrete = True
action_dims = action_space.high - action_space.low + 1
self.action_outs = []
for action_dim in action_dims:
self.action_outs.append(
Categorical(inputs_dim, action_dim, use_orthogonal, gain))
self.action_outs = nn.ModuleList(self.action_outs)
        else:  # discrete + continuous
            self.mixed_action = True
            continuous_dim = action_space[0].shape[0]
            discrete_dim = action_space[1].n
            self.action_outs = nn.ModuleList([
                DiagGaussian(inputs_dim, continuous_dim, use_orthogonal, gain),
                Categorical(inputs_dim, discrete_dim, use_orthogonal, gain)
            ])
self.to(device)
def forward(self, x, available_actions=None, deterministic=False):
"""
Compute actions and action logprobs from given input.
:param x: (torch.Tensor) input to network.
:param available_actions: (torch.Tensor) denotes which actions are available to agent
(if None, all actions available)
:param deterministic: (bool) whether to sample from action distribution or return the mode.
:return actions: (torch.Tensor) actions to take.
:return action_log_probs: (torch.Tensor) log probabilities of taken actions.
"""
if self.mixed_action:
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode(
) if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action.float())
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.sum(torch.cat(action_log_probs, -1),
-1,
keepdim=True)
elif self.multi_discrete:
actions = []
action_log_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action = action_logit.mode(
) if deterministic else action_logit.sample()
action_log_prob = action_logit.log_probs(action)
actions.append(action)
action_log_probs.append(action_log_prob)
actions = torch.cat(actions, -1)
action_log_probs = torch.cat(action_log_probs, -1)
else:
action_logits = self.action_out(x)
actions = action_logits.mode(
) if deterministic else action_logits.sample()
action_log_probs = action_logits.log_probs(actions)
return actions, action_log_probs
def get_probs(self, x, available_actions=None):
"""
Compute action probabilities from inputs.
:param x: (torch.Tensor) input to network.
:param available_actions: (torch.Tensor) denotes which actions are available to agent
(if None, all actions available)
        :return action_probs: (torch.Tensor) action probabilities under the current policy.
"""
if self.mixed_action or self.multi_discrete:
action_probs = []
for action_out in self.action_outs:
action_logit = action_out(x)
action_prob = action_logit.probs
action_probs.append(action_prob)
action_probs = torch.cat(action_probs, -1)
else:
action_logits = self.action_out(x, available_actions)
action_probs = action_logits.probs
return action_probs
def evaluate_actions(self,
x,
action,
available_actions=None,
active_masks=None):
"""
Compute log probability and entropy of given actions.
:param x: (torch.Tensor) input to network.
:param action: (torch.Tensor) actions whose entropy and log probability to evaluate.
:param available_actions: (torch.Tensor) denotes which actions are available to agent
(if None, all actions available)
:param active_masks: (torch.Tensor) denotes whether an agent is active or dead.
:return action_log_probs: (torch.Tensor) log probabilities of the input actions.
:return dist_entropy: (torch.Tensor) action distribution entropy for the given inputs.
"""
if self.mixed_action:
a, b = action.split((2, 1), -1)
b = b.long()
action = [a, b]
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
if len(action_logit.entropy().shape) == len(
active_masks.shape):
dist_entropy.append(
(action_logit.entropy() * active_masks).sum() /
active_masks.sum())
else:
dist_entropy.append((action_logit.entropy() *
active_masks.squeeze(-1)).sum() /
active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.sum(torch.cat(action_log_probs, -1),
-1,
keepdim=True)
            dist_entropy = dist_entropy[0] / 2.0 + dist_entropy[
                1] / 0.98  #! doesn't make sense
elif self.multi_discrete:
action = torch.transpose(action, 0, 1)
action_log_probs = []
dist_entropy = []
for action_out, act in zip(self.action_outs, action):
action_logit = action_out(x)
action_log_probs.append(action_logit.log_probs(act))
if active_masks is not None:
dist_entropy.append(
(action_logit.entropy() *
active_masks.squeeze(-1)).sum() / active_masks.sum())
else:
dist_entropy.append(action_logit.entropy().mean())
action_log_probs = torch.cat(action_log_probs,
-1) # ! could be wrong
dist_entropy = torch.tensor(dist_entropy).mean()
else:
action_logits = self.action_out(x, available_actions)
action_log_probs = action_logits.log_probs(action)
if active_masks is not None:
dist_entropy = (
action_logits.entropy() *
active_masks.squeeze(-1)).sum() / active_masks.sum()
else:
dist_entropy = action_logits.entropy().mean()
return action_log_probs, dist_entropy
| 8,915 | 43.58 | 99 | py |
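A hedged sketch of ACTLayer with a Discrete action space; the feature size, batch size and the gym import are illustrative assumptions.
# Illustrative only: discrete action head on top of a 64-dim feature vector.
import torch
from gym.spaces import Discrete
act_layer = ACTLayer(action_space=Discrete(9), inputs_dim=64,
                     use_orthogonal=True, gain=0.01, device='cpu')
features = torch.randn(4, 64)
actions, log_probs = act_layer(features, deterministic=False)     # both (4, 1)
new_log_probs, entropy = act_layer.evaluate_actions(features, actions)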
nocturne | nocturne-main/algos/ppo/ppo_utils/rnn.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from https://github.com/marlbenchmark/on-policy
import torch
import torch.nn as nn
"""RNN modules."""
class RNNLayer(nn.Module):
def __init__(self, inputs_dim, outputs_dim, recurrent_N, use_orthogonal,
device):
super(RNNLayer, self).__init__()
self._recurrent_N = recurrent_N
self._use_orthogonal = use_orthogonal
self.rnn = nn.GRU(inputs_dim,
outputs_dim,
num_layers=self._recurrent_N)
for name, param in self.rnn.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
if self._use_orthogonal:
nn.init.orthogonal_(param)
else:
nn.init.xavier_uniform_(param)
self.norm = nn.LayerNorm(outputs_dim)
self.to(device)
def forward(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x, hxs = self.rnn(
x.unsqueeze(0),
(hxs *
masks.repeat(1, self._recurrent_N).unsqueeze(-1)).transpose(
0, 1).contiguous())
x = x.squeeze(0)
hxs = hxs.transpose(0, 1)
else:
# x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0).any(
dim=-1).nonzero().squeeze().cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = hxs.transpose(0, 1)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
temp = (hxs * masks[start_idx].view(1, -1, 1).repeat(
self._recurrent_N, 1, 1)).contiguous()
rnn_scores, hxs = self.rnn(x[start_idx:end_idx], temp)
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.reshape(T * N, -1)
hxs = hxs.transpose(0, 1)
x = self.norm(x)
return x, hxs
| 3,188 | 34.043956 | 88 | py |
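The mask handling in RNNLayer.forward splits a flattened (T, N) rollout into maximal segments that contain no episode resets, so the GRU hidden state only needs to be re-masked at reset boundaries. A small worked example of that segmentation, with made-up mask values:
# Illustrative only: T=6 steps, N=2 environments; a 0 marks the step after a reset.
import torch
masks = torch.tensor([[1., 1.],
                      [1., 1.],
                      [0., 1.],   # env 0 was reset before step 2
                      [1., 1.],
                      [1., 0.],   # env 1 was reset before step 4
                      [1., 1.]])
has_zeros = (masks[1:] == 0.0).any(dim=-1).nonzero().squeeze().cpu()
has_zeros = (has_zeros + 1).numpy().tolist()       # -> [2, 4]
segments = [0] + has_zeros + [masks.shape[0]]      # -> [0, 2, 4, 6]
# The GRU is then run on x[0:2], x[2:4] and x[4:6], re-masking the hidden state at 2 and 4.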
nocturne | nocturne-main/nocturne/__init__.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Import file for Nocturne objects."""
from nocturne_cpp import (Action, CollisionType, ObjectType, Object, RoadLine,
RoadType, Scenario, Simulation, Vector2D, Vehicle,
Pedestrian, Cyclist)
__all__ = [
"Action",
"CollisionType",
"ObjectType",
"Object",
"RoadLine",
"RoadType",
"Scenario",
"Simulation",
"Vector2D",
"Vehicle",
"Pedestrian",
"Cyclist",
"envs",
]
import os
from cfgs.config import PROCESSED_TRAIN_NO_TL, PROCESSED_VALID_NO_TL, PROJECT_PATH
os.environ["PROCESSED_TRAIN_NO_TL"] = str(PROCESSED_TRAIN_NO_TL)
os.environ["PROCESSED_VALID_NO_TL"] = str(PROCESSED_VALID_NO_TL)
os.environ["NOCTURNE_LOG_DIR"] = str(os.path.join(PROJECT_PATH, 'logs'))
| 963 | 29.125 | 82 | py |
nocturne | nocturne-main/nocturne/envs/base_env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Default environment for Nocturne."""
from typing import Any, Dict, Sequence, Union
from collections import defaultdict, deque
from itertools import islice
import json
import os
from gym import Env
from gym.spaces import Box, Discrete
import numpy as np
import torch
from cfgs.config import ERR_VAL as INVALID_POSITION, get_scenario_dict
from nocturne import Action, Simulation
class BaseEnv(Env):
"""Default environment for Nocturne."""
def __init__(self, cfg: Dict[str, Any], rank: int = 0) -> None:
"""Initialize the environment.
Args
----
cfg (dict): configuration file describing the experiment
rank (int, optional): [description]. Defaults to 0.
"""
super().__init__()
self.cfg = cfg
with open(os.path.join(cfg['scenario_path'],
'valid_files.json')) as file:
self.valid_veh_dict = json.load(file)
self.files = list(self.valid_veh_dict.keys())
# sort the files so that we have a consistent order
self.files = sorted(self.files)
if cfg['num_files'] != -1:
self.files = self.files[0:cfg['num_files']]
self.file = self.files[np.random.randint(len(self.files))]
self.simulation = Simulation(os.path.join(cfg['scenario_path'],
self.file),
config=get_scenario_dict(cfg))
self.scenario = self.simulation.getScenario()
self.controlled_vehicles = self.scenario.getObjectsThatMoved()
self.cfg = cfg
self.n_frames_stacked = self.cfg['subscriber'].get(
'n_frames_stacked', 1)
if self.n_frames_stacked > 1:
            print(
                'WARNING: you are frame stacking and may want to turn off '
                'recurrence if it is enabled in your agent, as frame stacking '
                'may not be needed when using recurrent policies.')
self.single_agent_mode = cfg['single_agent_mode']
self.seed(cfg['seed'])
self.episode_length = cfg['episode_length']
self.t = 0
self.step_num = 0
self.rank = rank
self.seed(cfg['seed'])
obs_dict = self.reset()
self.observation_space = Box(low=-np.infty,
high=np.infty,
shape=(obs_dict[list(
obs_dict.keys())[0]].shape[0], ))
if self.cfg['discretize_actions']:
self.accel_discretization = self.cfg['accel_discretization']
self.steering_discretization = self.cfg['steering_discretization']
self.head_angle_discretization = self.cfg[
'head_angle_discretization']
self.action_space = Discrete(self.accel_discretization *
self.steering_discretization *
self.head_angle_discretization)
self.accel_grid = np.linspace(
-np.abs(self.cfg['accel_lower_bound']),
self.cfg['accel_upper_bound'], self.accel_discretization)
self.steering_grid = np.linspace(
-np.abs(self.cfg['steering_lower_bound']),
self.cfg['steering_upper_bound'], self.steering_discretization)
self.head_angle_grid = np.linspace(
-np.abs(self.cfg['head_angle_lower_bound']),
self.cfg['head_angle_upper_bound'],
self.head_angle_discretization)
# compute the indexing only once
self.idx_to_actions = {}
i = 0
for accel in self.accel_grid:
for steer in self.steering_grid:
for head_angle in self.head_angle_grid:
self.idx_to_actions[i] = [accel, steer, head_angle]
i += 1
else:
self.action_space = Box(
low=-np.array([
np.abs(self.cfg['accel_lower_bound']),
self.cfg['steering_lower_bound'],
self.cfg['head_angle_lower_bound']
]),
high=np.array([
np.abs(self.cfg['accel_upper_bound']),
self.cfg['steering_upper_bound'],
self.cfg['head_angle_upper_bound']
]),
)
def apply_actions(
self, action_dict: Dict[int, Union[Action, np.ndarray, Sequence[float],
int]]
) -> None:
"""Apply a dict of actions to the vehicle objects."""
for veh_obj in self.scenario.getObjectsThatMoved():
action = action_dict.get(veh_obj.id, None)
if action is None:
continue
# TODO: Make this a util function.
if isinstance(action, Action):
veh_obj.apply_action(action)
elif isinstance(action, np.ndarray):
veh_obj.apply_action(Action.from_numpy(action))
elif isinstance(action, (tuple, list)):
veh_obj.acceleration = action[0]
veh_obj.steering = action[1]
veh_obj.head_angle = action[2]
else:
accel, steer, head_angle = self.idx_to_actions[action]
veh_obj.acceleration = accel
veh_obj.steering = steer
veh_obj.head_angle = head_angle
def step(
self, action_dict: Dict[int, Union[Action, np.ndarray, Sequence[float],
int]]
) -> None:
"""See superclass."""
obs_dict = {}
rew_dict = {}
done_dict = {}
info_dict = defaultdict(dict)
rew_cfg = self.cfg['rew_cfg']
self.apply_actions(action_dict)
self.simulation.step(self.cfg['dt'])
self.t += self.cfg['dt']
self.step_num += 1
objs_to_remove = []
for veh_obj in self.controlled_vehicles:
veh_id = veh_obj.getID()
if veh_id in self.done_ids:
continue
self.context_dict[veh_id].append(self.get_observation(veh_obj))
if self.n_frames_stacked > 1:
veh_deque = self.context_dict[veh_id]
context_list = list(
islice(veh_deque,
len(veh_deque) - self.n_frames_stacked,
len(veh_deque)))
obs_dict[veh_id] = np.concatenate(context_list)
else:
obs_dict[veh_id] = self.context_dict[veh_id][-1]
rew_dict[veh_id] = 0
done_dict[veh_id] = False
info_dict[veh_id]['goal_achieved'] = False
info_dict[veh_id]['collided'] = False
info_dict[veh_id]['veh_veh_collision'] = False
info_dict[veh_id]['veh_edge_collision'] = False
obj_pos = veh_obj.position
goal_pos = veh_obj.target_position
'''############################################
Compute rewards
############################################'''
position_target_achieved = True
speed_target_achieved = True
heading_target_achieved = True
if rew_cfg['position_target']:
position_target_achieved = (
goal_pos -
obj_pos).norm() < rew_cfg['position_target_tolerance']
if rew_cfg['speed_target']:
speed_target_achieved = np.abs(
veh_obj.speed -
veh_obj.target_speed) < rew_cfg['speed_target_tolerance']
if rew_cfg['heading_target']:
heading_target_achieved = np.abs(
self.angle_sub(veh_obj.heading, veh_obj.target_heading)
) < rew_cfg['heading_target_tolerance']
if position_target_achieved and speed_target_achieved and heading_target_achieved:
info_dict[veh_id]['goal_achieved'] = True
rew_dict[veh_id] += rew_cfg['goal_achieved_bonus'] / rew_cfg[
'reward_scaling']
if rew_cfg['shaped_goal_distance'] and rew_cfg['position_target']:
# penalize the agent for its distance from goal
# we scale by goal_dist_normalizers to ensure that this value is always less than the penalty for
# collision
if rew_cfg['goal_distance_penalty']:
rew_dict[veh_id] -= rew_cfg.get(
'shaped_goal_distance_scaling', 1.0) * (
(goal_pos - obj_pos).norm() /
self.goal_dist_normalizers[veh_id]
) / rew_cfg['reward_scaling']
else:
# the minus one is to ensure that it's not beneficial to collide
# we divide by goal_achieved_bonus / episode_length to ensure that
# acquiring the maximum "get-close-to-goal" reward at every time-step is
# always less than just acquiring the goal reward once
# we also assume that vehicles are never more than 400 meters from their goal
# which makes sense as the episodes are 9 seconds long i.e. we'd have to go more than
# 40 m/s to get there
rew_dict[veh_id] += rew_cfg.get(
'shaped_goal_distance_scaling',
1.0) * (1 - (goal_pos - obj_pos).norm() /
self.goal_dist_normalizers[veh_id]
) / rew_cfg['reward_scaling']
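                    # Rough illustrative numbers (assumed, not from any config):
                    # with scaling 1.0, reward_scaling 100 and a vehicle 30m from
                    # a goal that started 60m away, this adds (1 - 30/60)/100 =
                    # 0.005 per step, keeping the per-step shaping well below the
                    # one-off goal bonus as described above.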
# repeat the same thing for speed and heading
if rew_cfg['shaped_goal_distance'] and rew_cfg['speed_target']:
if rew_cfg['goal_distance_penalty']:
rew_dict[veh_id] -= rew_cfg.get(
'shaped_goal_distance_scaling', 1.0) * (
np.abs(veh_obj.speed - veh_obj.target_speed) /
40.0) / rew_cfg['reward_scaling']
else:
rew_dict[veh_id] += rew_cfg.get(
'shaped_goal_distance_scaling', 1.0
) * (1 - np.abs(veh_obj.speed - veh_obj.target_speed) /
40.0) / rew_cfg['reward_scaling']
if rew_cfg['shaped_goal_distance'] and rew_cfg[
'heading_target']:
if rew_cfg['goal_distance_penalty']:
rew_dict[veh_id] -= rew_cfg.get(
'shaped_goal_distance_scaling',
1.0) * (np.abs(
self.angle_sub(veh_obj.heading,
veh_obj.target_heading)) /
(2 * np.pi)) / rew_cfg['reward_scaling']
else:
rew_dict[veh_id] += rew_cfg.get(
'shaped_goal_distance_scaling',
1.0) * (1 - np.abs(
self.angle_sub(veh_obj.heading,
veh_obj.target_heading)) /
(2 * np.pi)) / rew_cfg['reward_scaling']
'''############################################
Handle potential done conditions
############################################'''
# achieved our goal
if info_dict[veh_id]['goal_achieved'] and self.cfg.get(
'remove_at_goal', True):
done_dict[veh_id] = True
if veh_obj.getCollided():
info_dict[veh_id]['collided'] = True
if int(veh_obj.collision_type) == 1:
info_dict[veh_id]['veh_veh_collision'] = True
if int(veh_obj.collision_type) == 2:
info_dict[veh_id]['veh_edge_collision'] = True
rew_dict[veh_id] -= np.abs(
rew_cfg['collision_penalty']) / rew_cfg['reward_scaling']
if self.cfg.get('remove_at_collide', True):
done_dict[veh_id] = True
# remove the vehicle so that its trajectory doesn't continue. This is important
# in the multi-agent setting.
if done_dict[veh_id]:
self.done_ids.append(veh_id)
if (info_dict[veh_id]['goal_achieved']
and self.cfg.get('remove_at_goal', True)) or (
info_dict[veh_id]['collided']
and self.cfg.get('remove_at_collide', True)):
objs_to_remove.append(veh_obj)
for veh_obj in objs_to_remove:
self.scenario.removeVehicle(veh_obj)
if self.cfg['rew_cfg']['shared_reward']:
total_reward = np.sum([rew_dict[key] for key in rew_dict.keys()])
rew_dict = {key: total_reward for key in rew_dict.keys()}
# fill in the missing observations if we should be doing so
if self.cfg['subscriber']['keep_inactive_agents']:
# force all vehicles done to be false since they should persist through the episode
done_dict = {key: False for key in self.all_vehicle_ids}
for key in self.all_vehicle_ids:
if key not in obs_dict.keys():
obs_dict[key] = self.dead_feat
rew_dict[key] = 0.0
info_dict[key]['goal_achieved'] = False
info_dict[key]['collided'] = False
info_dict[key]['veh_veh_collision'] = False
info_dict[key]['veh_edge_collision'] = False
if self.step_num >= self.episode_length:
done_dict = {key: True for key in done_dict.keys()}
all_done = True
for value in done_dict.values():
all_done *= value
done_dict['__all__'] = all_done
return obs_dict, rew_dict, done_dict, info_dict
def reset(self):
"""See superclass."""
self.t = 0
self.step_num = 0
enough_vehicles = False
# we don't want to initialize scenes with 0 actors after satisfying
# all the conditions on a scene that we have
while not enough_vehicles:
self.file = self.files[np.random.randint(len(self.files))]
self.simulation = Simulation(os.path.join(
self.cfg['scenario_path'], self.file),
config=get_scenario_dict(self.cfg))
self.scenario = self.simulation.getScenario()
'''##################################################################
Construct context dictionary of observations that can be used to
warm up policies by stepping all vehicles as experts.
#####################################################################'''
dead_obs = self.get_observation(self.scenario.getVehicles()[0])
self.dead_feat = -np.ones(
dead_obs.shape[0] * self.n_frames_stacked)
# step all the vehicles forward by one second and record their observations as context
context_len = max(10, self.n_frames_stacked)
self.context_dict = {
veh.getID():
deque([self.dead_feat for _ in range(context_len)],
maxlen=context_len)
for veh in self.scenario.getObjectsThatMoved()
}
for veh in self.scenario.getObjectsThatMoved():
veh.expert_control = True
for _ in range(10):
for veh in self.scenario.getObjectsThatMoved():
self.context_dict[veh.getID()].append(
self.get_observation(veh))
self.simulation.step(self.cfg['dt'])
# now hand back control to our actual controllers
for veh in self.scenario.getObjectsThatMoved():
veh.expert_control = False
# remove all the objects that are in collision or are already in goal dist
# additionally set the objects that have infeasible goals to be experts
for veh_obj in self.simulation.getScenario().getObjectsThatMoved():
obj_pos = veh_obj.getPosition()
obj_pos = np.array([obj_pos.x, obj_pos.y])
goal_pos = veh_obj.getGoalPosition()
goal_pos = np.array([goal_pos.x, goal_pos.y])
'''############################################
Remove vehicles at goal
############################################'''
norm = np.linalg.norm(goal_pos - obj_pos)
if norm < self.cfg['rew_cfg'][
'goal_tolerance'] or veh_obj.getCollided():
self.scenario.removeVehicle(veh_obj)
'''############################################
Set all vehicles with unachievable goals to be experts
############################################'''
if self.file in self.valid_veh_dict and veh_obj.getID(
) in self.valid_veh_dict[self.file]:
veh_obj.expert_control = True
'''############################################
Pick out the vehicles that we are controlling
############################################'''
# ensure that we have no more than max_num_vehicles are controlled
temp_vehicles = self.scenario.getObjectsThatMoved()
np.random.shuffle(temp_vehicles)
curr_index = 0
self.controlled_vehicles = []
self.expert_controlled_vehicles = []
self.vehicles_to_delete = []
for vehicle in temp_vehicles:
# this vehicle was invalid at the end of the 1 second context
# step so we need to remove it.
if np.isclose(vehicle.position.x, INVALID_POSITION):
self.vehicles_to_delete.append(vehicle)
# we don't want to include vehicles that had unachievable goals
# as controlled vehicles
elif not vehicle.expert_control and curr_index < self.cfg[
'max_num_vehicles']:
self.controlled_vehicles.append(vehicle)
curr_index += 1
else:
self.expert_controlled_vehicles.append(vehicle)
self.all_vehicle_ids = [
veh.getID() for veh in self.controlled_vehicles
]
# make all the vehicles that are in excess of max_num_vehicles controlled by an expert
for veh in self.expert_controlled_vehicles:
veh.expert_control = True
# remove vehicles that are currently at an invalid position
for veh in self.vehicles_to_delete:
self.scenario.removeVehicle(veh)
# check that we have at least one vehicle or if we have just one file, exit anyways
# or else we might be stuck in an infinite loop
if len(self.all_vehicle_ids) > 0 or len(self.files) == 1:
enough_vehicles = True
# for one reason or another (probably we had a file where all the agents achieved their goals)
# we have no controlled vehicles
# just grab a vehicle even if it hasn't moved so that we have something
# to return obs for even if it's not controlled
# NOTE: this case only occurs during our eval procedure where we set the
# self.files list to be length 1. Otherwise, the while loop above will repeat
# until a file is found.
if len(self.all_vehicle_ids) == 0:
self.controlled_vehicles = [self.scenario.getVehicles()[0]]
self.all_vehicle_ids = [
veh.getID() for veh in self.controlled_vehicles
]
# construct the observations and goal normalizers
obs_dict = {}
self.goal_dist_normalizers = {}
max_goal_dist = -100
for veh_obj in self.controlled_vehicles:
veh_id = veh_obj.getID()
# store normalizers for each vehicle
obj_pos = veh_obj.getPosition()
obj_pos = np.array([obj_pos.x, obj_pos.y])
goal_pos = veh_obj.getGoalPosition()
goal_pos = np.array([goal_pos.x, goal_pos.y])
dist = np.linalg.norm(obj_pos - goal_pos)
self.goal_dist_normalizers[veh_id] = dist
# compute the obs
self.context_dict[veh_id].append(self.get_observation(veh_obj))
if self.n_frames_stacked > 1:
veh_deque = self.context_dict[veh_id]
context_list = list(
islice(veh_deque,
len(veh_deque) - self.n_frames_stacked,
len(veh_deque)))
obs_dict[veh_id] = np.concatenate(context_list)
else:
obs_dict[veh_id] = self.context_dict[veh_id][-1]
# pick the vehicle that has to travel the furthest distance and use it for rendering
if dist > max_goal_dist:
# this attribute is just used for rendering of the view
# from the ego frame
self.render_vehicle = veh_obj
max_goal_dist = dist
self.done_ids = []
# we should return obs for the missing agents
if self.cfg['subscriber']['keep_inactive_agents']:
max_id = max([int(key) for key in obs_dict.keys()])
num_missing_agents = max(
0, self.cfg['max_num_vehicles'] - len(obs_dict))
for i in range(num_missing_agents):
obs_dict[max_id + i + 1] = self.dead_feat
self.dead_agent_ids = [
max_id + i + 1 for i in range(num_missing_agents)
]
self.all_vehicle_ids = list(obs_dict.keys())
else:
self.dead_agent_ids = []
return obs_dict
def get_observation(self, veh_obj):
"""Return the observation for a particular vehicle."""
ego_obs = self.scenario.ego_state(veh_obj)
if self.cfg['subscriber']['use_ego_state'] and self.cfg['subscriber'][
'use_observations']:
obs = np.concatenate(
(ego_obs,
self.scenario.flattened_visible_state(
veh_obj,
view_dist=self.cfg['subscriber']['view_dist'],
view_angle=self.cfg['subscriber']['view_angle'],
head_angle=veh_obj.head_angle)))
elif self.cfg['subscriber']['use_ego_state'] and not self.cfg[
'subscriber']['use_observations']:
obs = ego_obs
else:
obs = self.scenario.flattened_visible_state(
veh_obj,
view_dist=self.cfg['subscriber']['view_dist'],
view_angle=self.cfg['subscriber']['view_angle'],
head_angle=veh_obj.head_angle)
return obs
def make_all_vehicles_experts(self):
"""Force all vehicles to be experts."""
for veh in self.scenario.getVehicles():
veh.expert_control = True
def get_vehicles(self):
"""Return the vehicles."""
return self.scenario.getVehicles()
def get_objects_that_moved(self):
"""Return the objects that moved."""
return self.scenario.getObjectsThatMoved()
def render(self, mode=None):
"""See superclass."""
return self.scenario.getImage(
img_width=1600,
img_height=1600,
draw_target_positions=True,
padding=50.0,
)
def render_ego(self, mode=None):
"""See superclass."""
if self.render_vehicle.getID() in self.done_ids:
return None
else:
return self.scenario.getConeImage(
source=self.render_vehicle,
view_dist=self.cfg['subscriber']['view_dist'],
view_angle=self.cfg['subscriber']['view_angle'],
head_angle=self.render_vehicle.head_angle,
img_width=1600,
img_height=1600,
padding=50.0,
draw_target_position=True,
)
def render_features(self, mode=None):
"""See superclass."""
if self.render_vehicle.getID() in self.done_ids:
return None
else:
return self.scenario.getFeaturesImage(
source=self.render_vehicle,
view_dist=self.cfg['subscriber']['view_dist'],
view_angle=self.cfg['subscriber']['view_angle'],
head_angle=self.render_vehicle.head_angle,
img_width=1600,
img_height=1600,
padding=50.0,
draw_target_position=True,
)
def seed(self, seed=None):
"""Ensure determinism."""
if seed is None:
np.random.seed(1)
else:
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
    def angle_sub(self, current_angle, target_angle) -> float:
"""Subtract two angles to find the minimum angle between them."""
# Subtract the angles, constraining the value to [0, 2 * np.pi)
diff = (target_angle - current_angle) % (2 * np.pi)
# If we are more than np.pi we're taking the long way around.
# Let's instead go in the shorter, negative direction
if diff > np.pi:
diff = -(2 * np.pi - diff)
return diff
| 26,180 | 46.088129 | 113 | py |
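When `discretize_actions` is enabled, the environment maps a single integer per vehicle to an (acceleration, steering, head angle) triple through `idx_to_actions`. Below is a hedged sketch of a minimal rollout along that discrete-action path; `cfg` and its field values are assumptions for illustration.
# Illustrative rollout with discrete actions; `cfg` is a hypothetical config dict
# with discretize_actions=True and the usual scenario_path / rew_cfg entries.
env = BaseEnv(cfg)
obs_dict = env.reset()
for _ in range(cfg['episode_length']):
    # pick a random discrete index per controlled vehicle; the env decodes it
    # through env.idx_to_actions[idx] -> [accel, steer, head_angle]
    actions = {veh_id: env.action_space.sample() for veh_id in obs_dict}
    obs_dict, rew_dict, done_dict, info_dict = env.step(actions)
    if done_dict['__all__']:
        break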
nocturne | nocturne-main/nocturne/envs/wrappers.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Wrappers and env constructors for the environments."""
from gym.spaces import Box
import numpy as np
from nocturne.envs import BaseEnv
class OnPolicyPPOWrapper(object):
"""Wrapper to make env compatible with On-Policy code."""
def __init__(self, env, use_images=False):
"""Wrap with appropriate observation spaces and make fixed length.
Args
----
        env (BaseEnv): environment to wrap.
        use_images (bool, optional): whether the wrapped environment uses image observations.
"""
self._env = env
self.use_images = use_images
self.n = self.cfg.max_num_vehicles
obs_dict = self.reset()
# tracker used to match observations to actions
self.agent_ids = []
self.feature_shape = obs_dict[0].shape
self.share_observation_space = [
Box(low=-np.inf,
high=+np.inf,
shape=self.feature_shape,
dtype=np.float32) for _ in range(self.n)
]
@property
def observation_space(self):
"""See superclass."""
return [self._env.observation_space for _ in range(self.n)]
@property
def action_space(self):
"""See superclass."""
return [self._env.action_space for _ in range(self.n)]
def step(self, actions):
"""Convert returned dicts to lists."""
agent_actions = {}
for action_vec, agent_id in zip(actions, self.agent_ids):
agent_actions[agent_id] = action_vec
next_obses, rew, done, info = self._env.step(agent_actions)
obs_n = []
rew_n = []
done_n = []
info_n = []
for key in self.agent_ids:
if isinstance(next_obses[key], dict):
obs_n.append(next_obses[key]['features'])
else:
obs_n.append(next_obses[key])
rew_n.append([rew[key]])
done_n.append(done[key])
agent_info = info[key]
agent_info['individual_reward'] = rew[key]
info_n.append(agent_info)
return obs_n, rew_n, done_n, info_n
def reset(self):
"""Convert observation dict to list."""
obses = self._env.reset()
obs_n = []
self.agent_ids = []
for key in obses.keys():
self.agent_ids.append(key)
if not hasattr(self, 'agent_key'):
self.agent_key = key
if isinstance(obses[key], dict):
obs_n.append(obses[key]['features'])
else:
obs_n.append(obses[key])
return obs_n
def render(self, mode=None):
"""See superclass."""
return self._env.render(mode)
def seed(self, seed=None):
"""See superclass."""
self._env.seed(seed)
def __getattr__(self, name):
"""See superclass."""
return getattr(self._env, name)
def create_env(cfg):
"""Return the base environment."""
env = BaseEnv(cfg)
return env
def create_ppo_env(cfg, rank=0):
"""Return a PPO wrapped environment."""
env = BaseEnv(cfg, rank=rank)
return OnPolicyPPOWrapper(env, use_images=cfg.img_as_state)
| 3,370 | 30.212963 | 96 | py |
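A short sketch of how the list-based wrapper is meant to be driven: observations, rewards and dones come back as per-agent lists ordered by `agent_ids`, so actions must be passed back in that same order. `cfg` is again a hypothetical config object.
# Illustrative only: drive the PPO wrapper with one random action per active agent.
env = create_ppo_env(cfg)
obs_n = env.reset()                                  # list of per-agent feature vectors
actions = [env.action_space[i].sample() for i in range(len(env.agent_ids))]
obs_n, rew_n, done_n, info_n = env.step(actions)     # lists aligned with env.agent_ids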
nocturne | nocturne-main/nocturne/envs/__init__.py | """Import file for tests."""
from nocturne.envs.base_env import BaseEnv
__all__ = [
"BaseEnv",
]
| 102 | 13.714286 | 42 | py |
nocturne | nocturne-main/nocturne/utils/eval/average_displacement.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Average displacement error computation."""
from collections import defaultdict
from itertools import repeat
import json
from multiprocessing import Pool
import os
import random
import numpy as np
import torch
from cfgs.config import PROCESSED_VALID_NO_TL, ERR_VAL
from nocturne import Simulation
SIM_N_STEPS = 90 # number of steps per trajectory
GOAL_TOLERANCE = 0.5
def _average_displacement_impl(arg):
trajectory_path, model, configs = arg
print(trajectory_path)
scenario_config = configs['scenario_cfg']
view_dist = configs['dataloader_cfg']['view_dist']
view_angle = configs['dataloader_cfg']['view_angle']
state_normalization = configs['dataloader_cfg']['state_normalization']
dt = configs['dataloader_cfg']['dt']
n_stacked_states = configs['dataloader_cfg']['n_stacked_states']
state_size = configs['model_cfg']['n_inputs'] // n_stacked_states
state_dict = defaultdict(lambda: np.zeros(state_size * n_stacked_states))
# create expert simulation
sim_expert = Simulation(str(trajectory_path), scenario_config)
scenario_expert = sim_expert.getScenario()
vehicles_expert = scenario_expert.getVehicles()
objects_expert = scenario_expert.getObjectsThatMoved()
id2veh_expert = {veh.id: veh for veh in vehicles_expert}
# create model simulation
sim_model = Simulation(str(trajectory_path), scenario_config)
scenario_model = sim_model.getScenario()
vehicles_model = scenario_model.getVehicles()
objects_model = scenario_model.getObjectsThatMoved()
# set all objects to be expert-controlled
for obj in objects_expert:
obj.expert_control = True
for obj in objects_model:
obj.expert_control = True
# in model sim, model will control vehicles that moved
controlled_vehicles = [
veh for veh in vehicles_model if veh in objects_model
]
random.shuffle(controlled_vehicles)
# controlled_vehicles = controlled_vehicles[:2]
# warmup to build up state stacking
for i in range(n_stacked_states - 1):
for veh in controlled_vehicles:
ego_state = scenario_model.ego_state(veh)
visible_state = scenario_model.flattened_visible_state(
veh, view_dist=view_dist, view_angle=view_angle)
state = np.concatenate(
(ego_state, visible_state)) / state_normalization
state_dict[veh.getID()] = np.roll(state_dict[veh.getID()],
len(state))
state_dict[veh.getID()][:len(state)] = state
sim_model.step(dt)
sim_expert.step(dt)
for veh in controlled_vehicles:
veh.expert_control = False
avg_displacements = []
final_displacements = [0 for _ in controlled_vehicles]
collisions = [False for _ in controlled_vehicles]
goal_achieved = [False for _ in controlled_vehicles]
for i in range(SIM_N_STEPS - n_stacked_states):
for veh in controlled_vehicles:
if np.isclose(veh.position.x, ERR_VAL):
veh.expert_control = True
else:
veh.expert_control = False
# set model actions
all_states = []
for veh in controlled_vehicles:
# get vehicle state
state = np.concatenate(
(scenario_model.ego_state(veh),
scenario_model.flattened_visible_state(
veh, view_dist=view_dist,
view_angle=view_angle))) / state_normalization
# stack state
state_dict[veh.getID()] = np.roll(state_dict[veh.getID()],
len(state))
state_dict[veh.getID()][:len(state)] = state
all_states.append(state_dict[veh.getID()])
all_states = torch.as_tensor(np.array(all_states), dtype=torch.float32)
# compute vehicle actions
all_actions = model(all_states, deterministic=True
) # /!\ this returns an array (2,n) and not (n,2)
accel_actions = all_actions[0].cpu().numpy()
steering_actions = all_actions[1].cpu().numpy()
# set vehicles actions
for veh, accel_action, steering_action in zip(controlled_vehicles,
accel_actions,
steering_actions):
veh.acceleration = accel_action
veh.steering = steering_action
# step simulations
sim_expert.step(dt)
sim_model.step(dt)
# compute displacements over non-collided vehicles
displacements = []
for i, veh in enumerate(controlled_vehicles):
# get corresponding vehicle in expert simulation
expert_veh = id2veh_expert[veh.id]
# make sure it is valid
if np.isclose(expert_veh.position.x,
ERR_VAL) or expert_veh.collided:
continue
# print(expert_veh.position, veh.position)
# compute displacement
expert_pos = id2veh_expert[veh.id].position
model_pos = veh.position
pos_diff = (model_pos - expert_pos).norm()
displacements.append(pos_diff)
final_displacements[i] = pos_diff
if veh.collided:
collisions[i] = True
if (veh.position - veh.target_position).norm() < GOAL_TOLERANCE:
goal_achieved[i] = True
# average displacements over all vehicles
if len(displacements) > 0:
avg_displacements.append(np.mean(displacements))
# print(displacements, np.mean(displacements))
# average displacements over all time steps
avg_displacement = np.mean(
avg_displacements) if len(avg_displacements) > 0 else np.nan
final_displacement = np.mean(
final_displacements) if len(final_displacements) > 0 else np.nan
avg_collisions = np.mean(collisions) if len(collisions) > 0 else np.nan
avg_goals = np.mean(goal_achieved) if len(goal_achieved) > 0 else np.nan
print('displacements', avg_displacement)
print('final_displacement', final_displacement)
print('collisions', avg_collisions)
print('goal_rate', avg_goals)
return avg_displacement, final_displacement, avg_collisions, avg_goals
def compute_average_displacement(trajectories_dir, model, configs):
"""Compute average displacement error between a model and the ground truth."""
NUM_FILES = 200
# get trajectories paths
with open(os.path.join(trajectories_dir, 'valid_files.json')) as file:
valid_veh_dict = json.load(file)
files = list(valid_veh_dict.keys())
# sort the files so that we have a consistent order
np.random.seed(0)
np.random.shuffle(files)
# compute average displacement over each individual trajectory file
trajectories_paths = files[:NUM_FILES]
for i, trajectory in enumerate(trajectories_paths):
trajectories_paths[i] = os.path.join(trajectories_dir, trajectory)
with Pool(processes=14) as pool:
result = list(
pool.map(_average_displacement_impl,
zip(trajectories_paths, repeat(model), repeat(configs))))
average_displacements = np.array(result)[:, 0]
final_displacements = np.array(result)[:, 1]
average_collisions = np.array(result)[:, 2]
average_goals = np.array(result)[:, 3]
print(average_displacements, final_displacements, average_collisions,
average_goals)
return [
np.mean(average_displacements[~np.isnan(average_displacements)]),
np.std(average_displacements[~np.isnan(average_displacements)])
], [
np.mean(final_displacements[~np.isnan(final_displacements)]),
np.std(final_displacements[~np.isnan(final_displacements)])
], [
np.mean(average_collisions[~np.isnan(average_collisions)]),
        np.std(average_collisions[~np.isnan(average_collisions)])
], [
np.mean(average_goals[~np.isnan(average_goals)]),
np.std(average_goals[~np.isnan(average_goals)])
]
if __name__ == '__main__':
from examples.imitation_learning.model import ImitationAgent # noqa: F401
model = torch.load(
'/checkpoint/eugenevinitsky/nocturne/test/2022.06.05/test/14.23.17/\
++device=cuda,++file_limit=1000/train_logs/2022_06_05_14_23_23/model_600.pth'
).to('cpu')
model.actions_grids = [x.to('cpu') for x in model.actions_grids]
model.eval()
model.nn[0].eval()
with open(
'/checkpoint/eugenevinitsky/nocturne/test/2022.06.05/test/14.23.17/\
++device=cuda,++file_limit=1000/train_logs/2022_06_05_14_23_23/configs.json',
'r') as fp:
configs = json.load(fp)
configs['device'] = 'cpu'
with torch.no_grad():
ade, fde, collisions, goals = compute_average_displacement(
PROCESSED_VALID_NO_TL, model=model, configs=configs)
print(f'Average Displacement Error: {ade[0]:.3f} ± {ade[1]:.3f} meters')
print(f'Final Displacement Error: {fde[0]:.3f} ± {fde[1]:.3f} meters')
print(f'Average Collisions: {collisions[0]:.3f} ± {collisions[1]:.3f}%')
print(
f'Average Success at getting to goal: {goals[0]:.3f} ± {goals[1]:.3f}%'
)
| 9,552 | 41.0837 | 93 | py |
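The displacement numbers reported above reduce, per scenario file, to an average over time of the mean per-vehicle distance between the policy rollout and the logged expert rollout. A compact restatement of that metric, with hypothetical position arrays:
# Illustrative only: ADE/FDE over fake trajectories of shape (T, num_vehicles, 2).
import numpy as np
model_pos = np.random.rand(80, 3, 2)
expert_pos = np.random.rand(80, 3, 2)
per_step_error = np.linalg.norm(model_pos - expert_pos, axis=-1)   # (T, num_vehicles)
ade = per_step_error.mean()           # average displacement error
fde = per_step_error[-1].mean()       # final displacement error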
nocturne | nocturne-main/nocturne/utils/eval/goal_reaching_rate.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Goal reaching rate computation."""
from pathlib import Path
import numpy as np
import torch
from nocturne import Simulation
SIM_N_STEPS = 90 # number of steps per trajectory
SIM_STEP_TIME = 0.1 # dt (in seconds)
def _goal_reaching_rate_impl(trajectory_path,
model=None,
sim_allow_non_vehicles=True,
check_vehicles_only=True):
# create expert simulation
sim = Simulation(scenario_path=str(trajectory_path),
start_time=0,
allow_non_vehicles=sim_allow_non_vehicles)
scenario = sim.getScenario()
vehicles = scenario.getVehicles()
objects_that_moved = scenario.getObjectsThatMoved()
vehicles_that_moved = [
veh for veh in vehicles if veh in objects_that_moved
]
# set all objects to be expert-controlled
for obj in objects_that_moved:
obj.expert_control = True
for obj in vehicles:
obj.expert_control = True
# if a model is given, model will control vehicles that moved
if model is not None:
controlled_vehicles = vehicles_that_moved
for veh in controlled_vehicles:
veh.expert_control = False
else:
controlled_vehicles = []
    # vehicles to check goal reaching for
objects_to_check = vehicles_that_moved if check_vehicles_only else objects_that_moved
    # step sim until the end and check whether each object reaches its goal
reached_goal = {obj.id: False for obj in objects_to_check}
for i in range(SIM_N_STEPS):
# set model actions
for veh in controlled_vehicles:
# get vehicle state
state = torch.as_tensor(np.expand_dims(np.concatenate(
(scenario.ego_state(veh),
scenario.flattened_visible_state(veh,
view_dist=120,
view_angle=3.14))),
axis=0),
dtype=torch.float32)
# compute vehicle action
action = model(state)[0]
# set vehicle action
veh.acceleration = action[0]
veh.steering = action[1]
# step simulation
sim.step(SIM_STEP_TIME)
        # check which objects have reached their goal
for obj in objects_to_check:
if (obj.target_position - obj.position).norm() < 0.5:
reached_goal[obj.id] = True
    # compute goal reaching rate
reached_goal_values = list(reached_goal.values())
reached_goal_rate = reached_goal_values.count(True) / len(
reached_goal_values)
return reached_goal_rate
def compute_average_goal_reaching_rate(trajectories_dir, model=None, **kwargs):
"""Compute average goal reaching rate for a model."""
# get trajectories paths
if isinstance(trajectories_dir, str):
# if trajectories_dir is a string, treat it as the path to a directory of trajectories
trajectories_dir = Path(trajectories_dir)
trajectories_paths = list(trajectories_dir.glob('*tfrecord*.json'))
elif isinstance(trajectories_dir, list):
# if trajectories_dir is a list, treat it as a list of paths to trajectory files
trajectories_paths = [Path(path) for path in trajectories_dir]
    # compute average goal reaching rate over each individual trajectory file
average_goal_reaching_rates = np.array(
list(
map(lambda path: _goal_reaching_rate_impl(path, model, **kwargs),
trajectories_paths)))
return np.mean(average_goal_reaching_rates)
if __name__ == '__main__':
from nocturne.utils.imitation_learning.waymo_data_loader import ImitationAgent # noqa: F401
model = torch.load('model.pth')
goal_reaching_rate = compute_average_goal_reaching_rate(
'dataset/json_files', model=None)
print(f'Average Goal Reaching Rate: {100*goal_reaching_rate:.2f}%')
| 4,169 | 37.611111 | 96 | py |
nocturne | nocturne-main/nocturne/utils/eval/collision_rate.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Collision rate computation."""
from pathlib import Path
import numpy as np
import torch
from nocturne import Simulation
from cfgs.config import ERR_VAL as INVALID_POSITION
SIM_N_STEPS = 90 # number of steps per trajectory
SIM_STEP_TIME = 0.1 # dt (in seconds)
def _collision_rate_impl(trajectory_path, model=None, sim_allow_non_vehicles=True, check_vehicles_only=True):
# create expert simulation
sim = Simulation(scenario_path=str(trajectory_path), start_time=0, allow_non_vehicles=sim_allow_non_vehicles)
scenario = sim.getScenario()
vehicles = scenario.getVehicles()
objects_that_moved = scenario.getObjectsThatMoved()
vehicles_that_moved = [veh for veh in vehicles if veh in objects_that_moved]
# set all objects to be expert-controlled
for obj in objects_that_moved:
obj.expert_control = True
for obj in vehicles:
obj.expert_control = True
# if a model is given, model will control vehicles that moved
if model is not None:
controlled_vehicles = vehicles_that_moved
for veh in controlled_vehicles:
veh.expert_control = False
else:
controlled_vehicles = []
# vehicles to check for collisions on
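    # objects whose goal is within 0.5 m of their starting position are treated
    # as effectively stationary (e.g. parked) and are excluded from the check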
objects_to_check = [
obj for obj in (vehicles_that_moved if check_vehicles_only else objects_that_moved)
if (obj.target_position - obj.position).norm() > 0.5
]
# step sim until the end and check for collisions
collided_with_vehicle = {obj.id: False for obj in objects_to_check}
collided_with_edge = {obj.id: False for obj in objects_to_check}
for i in range(SIM_N_STEPS):
# set model actions
for veh in controlled_vehicles:
# get vehicle state
state = torch.as_tensor(np.expand_dims(np.concatenate(
(scenario.ego_state(veh),
scenario.flattened_visible_state(veh, view_dist=120, view_angle=3.14))
), axis=0), dtype=torch.float32)
# compute vehicle action
action = model(state)[0]
# set vehicle action
veh.acceleration = action[0]
veh.steering = action[1]
# step simulation
sim.step(SIM_STEP_TIME)
# check for collisions
for obj in objects_to_check:
if not np.isclose(obj.position.x, INVALID_POSITION) and obj.collided:
if int(obj.collision_type) == 1:
collided_with_vehicle[obj.id] = True
if int(obj.collision_type) == 2:
collided_with_edge[obj.id] = True
# compute collision rate
collisions_with_vehicles = list(collided_with_vehicle.values())
collisions_with_edges = list(collided_with_edge.values())
collision_rate_vehicles = collisions_with_vehicles.count(True) / len(collisions_with_vehicles)
collision_rate_edges = collisions_with_edges.count(True) / len(collisions_with_edges)
return collision_rate_vehicles, collision_rate_edges
def compute_average_collision_rate(trajectories_dir, model=None, **kwargs):
"""Compute average collision rate for a model."""
# get trajectories paths
if isinstance(trajectories_dir, str):
# if trajectories_dir is a string, treat it as the path to a directory of trajectories
trajectories_dir = Path(trajectories_dir)
trajectories_paths = list(trajectories_dir.glob('*tfrecord*.json'))
elif isinstance(trajectories_dir, list):
# if trajectories_dir is a list, treat it as a list of paths to trajectory files
trajectories_paths = [Path(path) for path in trajectories_dir]
# compute average collision rate over each individual trajectory file
average_collision_rates = np.array(list(map(
lambda path: _collision_rate_impl(path, model, **kwargs),
trajectories_paths
)))
return np.mean(average_collision_rates, axis=0)
if __name__ == '__main__':
from nocturne.utils.imitation_learning.waymo_data_loader import ImitationAgent # noqa: F401
model = torch.load('model.pth')
collisions_with_vehicles, collisions_with_road_lines = \
compute_average_collision_rate('dataset/json_files', model=None)
print(f'Average Collision Rate: {100*collisions_with_vehicles:.2f}% with vehicles, '
f'{100*collisions_with_road_lines:.2f}% with road lines')
| 4,539 | 40.651376 | 113 | py |
nocturne | nocturne-main/nocturne/utils/eval/goal_by_intersection.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Goal reaching rate and collision rate computation as a function of number of intersections in expert trajectory."""
from pathlib import Path
import numpy as np
import torch
from collections import defaultdict
import random
import json
from nocturne import Simulation
from cfgs.config import ERR_VAL as INVALID_POSITION
from multiprocessing import Pool
from itertools import repeat, combinations
SIM_N_STEPS = 90 # number of steps per trajectory
GOAL_TOLERANCE = 0.5
def _compute_expert_intersections(trajectory_path):
with open(trajectory_path, 'r') as fp:
data = json.load(fp)
segments = defaultdict(list)
for veh_id, veh in enumerate(data['objects']):
        # note: veh_id was verified to be consistent with how vehicles are indexed when loaded in the simulation
for i in range(len(veh['position']) - 1):
# compute polyline (might not be continuous since we have invalid positions)
segment = np.array([
[veh['position'][i]['x'], veh['position'][i]['y']],
[veh['position'][i + 1]['x'], veh['position'][i + 1]['y']],
])
            # if the segment doesn't contain an invalid position, append it to the trajectory
if np.isclose(segment, INVALID_POSITION).any():
continue
segments[veh_id].append(segment)
# go over pair of vehicles and check if their segments intersect
n_collisions = defaultdict(int)
for veh1, veh2 in combinations(segments.keys(), 2):
# get corresponding segments
segments1 = np.array(segments[veh1])
segments2 = np.array(segments[veh2])
# check bounding rectangle intersection - O(n)
xmin1, ymin1 = np.min(np.min(segments1, axis=0), axis=0)
xmax1, ymax1 = np.max(np.max(segments1, axis=0), axis=0)
xmin2, ymin2 = np.min(np.min(segments2, axis=0), axis=0)
xmax2, ymax2 = np.max(np.max(segments2, axis=0), axis=0)
if xmax1 <= xmin2 or xmax2 <= xmin1 or ymax1 <= ymin2 or ymax2 <= ymin1:
            # segments can't intersect since their bounding rectangles don't intersect
continue
# check intersection over pairs of segments - O(n^2)
# construct numpy array of shape (N = len(segments1) * len(segments2), 4, 2)
# where each element contain 4 points ABCD (segment AB of segments1 and segment CD of segments2)
idx1 = np.repeat(
np.arange(len(segments1)),
len(segments2)) # build indexes 1 1 1 2 2 2 3 3 3 4 4 4
idx2 = np.tile(np.arange(len(segments2)),
len(segments1)) # build indexes 1 2 3 1 2 3 1 2 3 1 2 3
segment_pairs = np.concatenate(
(segments1[idx1], segments2[idx2]),
axis=1) # concatenate to create all pairs
# now we need to check if at least one element ABCD contains an intersection between segment AB and segment CD
def ccw(A, B, C):
return (C[:, 1] - A[:, 1]) * (B[:, 0] - A[:, 0]) > (
B[:, 1] - A[:, 1]) * (C[:, 0] - A[:, 0])
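        # ccw(A, B, C) is True when the triangle A -> B -> C turns counter-clockwise.
        # Segments AB and CD (properly) intersect when C and D lie on opposite
        # sides of AB and A and B lie on opposite sides of CD, which is what the
        # two ccw comparisons below test in a vectorized way.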
# ABCD are each arrays of N points (shape (N, 2))
A = segment_pairs[:, 0]
B = segment_pairs[:, 1]
C = segment_pairs[:, 2]
D = segment_pairs[:, 3]
if np.logical_and(
ccw(A, C, D) != ccw(B, C, D),
ccw(A, B, C) != ccw(A, B, D)).any():
n_collisions[veh1] += 1
n_collisions[veh2] += 1
return n_collisions
def _intersection_metrics_impl(trajectory_path, model, configs):
print(trajectory_path)
scenario_config = configs['scenario_cfg']
view_dist = configs['dataloader_cfg']['view_dist']
view_angle = configs['dataloader_cfg']['view_angle']
state_normalization = configs['dataloader_cfg']['state_normalization']
dt = configs['dataloader_cfg']['dt']
n_stacked_states = configs['dataloader_cfg']['n_stacked_states']
state_size = configs['model_cfg']['n_inputs'] // n_stacked_states
state_dict = defaultdict(lambda: np.zeros(state_size * n_stacked_states))
# create model simulation
sim = Simulation(str(trajectory_path), scenario_config)
scenario = sim.getScenario()
vehicles = scenario.getVehicles()
objects = scenario.getObjectsThatMoved()
# set all objects to be expert-controlled
for obj in objects:
obj.expert_control = True
# in model sim, model will control vehicles that moved
controlled_vehicles = [veh for veh in vehicles if veh in objects]
    # shuffle the controlled vehicles (uncomment the line below to restrict control to 2 random vehicles)
random.shuffle(controlled_vehicles)
# controlled_vehicles = controlled_vehicles[:2]
# warmup to build up state stacking
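    # each vehicle's entry in state_dict is a rolling buffer of its
    # n_stacked_states most recent observations: np.roll shifts the older
    # frames back by len(state) and the newest frame is written into the
    # first len(state) slots, so the newest observation always comes first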
for i in range(n_stacked_states - 1):
for veh in controlled_vehicles:
ego_state = scenario.ego_state(veh)
visible_state = scenario.flattened_visible_state(
veh, view_dist=view_dist, view_angle=view_angle)
state = np.concatenate(
(ego_state, visible_state)) / state_normalization
state_dict[veh.getID()] = np.roll(state_dict[veh.getID()],
len(state))
state_dict[veh.getID()][:len(state)] = state
sim.step(dt)
for veh in controlled_vehicles:
veh.expert_control = False
collisions = [False] * len(controlled_vehicles)
goal_achieved = [False] * len(controlled_vehicles)
for i in range(SIM_N_STEPS - n_stacked_states):
for veh in controlled_vehicles:
if np.isclose(veh.position.x, INVALID_POSITION):
veh.expert_control = True
else:
veh.expert_control = False
# set model actions
# get all actions at once
all_states = []
for veh in controlled_vehicles:
# get vehicle state
state = np.concatenate(
(scenario.ego_state(veh),
scenario.flattened_visible_state(
veh, view_dist=view_dist,
view_angle=view_angle))) / state_normalization
# stack state
state_dict[veh.getID()] = np.roll(state_dict[veh.getID()],
len(state))
state_dict[veh.getID()][:len(state)] = state
all_states.append(state_dict[veh.getID()])
all_states = torch.as_tensor(np.array(all_states), dtype=torch.float32)
# compute vehicle actions
all_actions = model(all_states, deterministic=True
) # /!\ this returns an array (2,n) and not (n,2)
accel_actions = all_actions[0].cpu().numpy()
steering_actions = all_actions[1].cpu().numpy()
# set vehicles actions
for veh, accel_action, steering_action in zip(controlled_vehicles,
accel_actions,
steering_actions):
veh.acceleration = accel_action
veh.steering = steering_action
# step simulation
sim.step(dt)
        # check collisions and goal achievement for each valid controlled vehicle
for i, veh in enumerate(controlled_vehicles):
# make sure it is valid
if np.isclose(veh.position.x, INVALID_POSITION):
continue
            # a collision with another vehicle
if veh.collided and int(veh.collision_type) == 1:
collisions[i] = True
if (veh.position - veh.target_position).norm() < GOAL_TOLERANCE:
goal_achieved[i] = True
# compute expert intersections for all vehicles (mapping veh_id -> nb of intersections in expert traj)
intersection_data = _compute_expert_intersections(trajectory_path)
# compute metrics as a function of number of intersections
collision_rates = np.zeros(4)
goal_rates = np.zeros(4)
counts = np.zeros(4)
for i, veh in enumerate(controlled_vehicles):
n_intersections = min(intersection_data[veh.getID()], 3)
counts[n_intersections] += 1
if collisions[i]:
collision_rates[n_intersections] += 1
if goal_achieved[i]:
goal_rates[n_intersections] += 1
collision_rates /= counts
goal_rates /= counts
# note: returned values can contain NaN
return collision_rates, goal_rates
def compute_metrics_by_intersection(trajectories_dir, model, configs):
"""Compute metrics as a function of number of intesections in a vehicle's expert trajectory."""
NUM_FILES = 200
NUM_CPUS = 14
# get trajectories paths
trajectories_dir = Path(trajectories_dir)
trajectories_paths = list(trajectories_dir.glob('*tfrecord*.json'))
trajectories_paths.sort()
trajectories_paths = trajectories_paths[:NUM_FILES]
# parallel metric computation
with Pool(processes=NUM_CPUS) as pool:
result = np.array(
list(
pool.starmap(
                    _intersection_metrics_impl,
zip(trajectories_paths, repeat(model), repeat(configs)))))
assert result.shape == (len(trajectories_paths), 2, 4
) # collision rates, goal rates (in 4 bins)
avg_result = np.nanmean(result, axis=0) # nanmean ignores NaN values
print(avg_result)
return avg_result
if __name__ == '__main__':
from examples.imitation_learning.model import ImitationAgent # noqa: F401
model = torch.load(
'/checkpoint/eugenevinitsky/nocturne/test/2022.06.05/test/14.23.17/\
++device=cuda,++file_limit=1000/train_logs/2022_06_05_14_23_23/model_600.pth'
).to('cpu')
model.actions_grids = [x.to('cpu') for x in model.actions_grids]
model.eval()
model.nn[0].eval()
with open(
'/checkpoint/eugenevinitsky/nocturne/test/2022.06.05/test/14.23.17\
/++device=cuda,++file_limit=1000/train_logs/2022_06_05_14_23_23/configs.json',
'r') as fp:
configs = json.load(fp)
configs['device'] = 'cpu'
with torch.no_grad():
result = compute_metrics_by_intersection(
'/checkpoint/eugenevinitsky/waymo_open/motion_v1p1/\
uncompressed/scenario/formatted_json_v2_no_tl_valid',
model=model,
configs=configs)
print('collision rates', result[0])
print('goal rates', result[1])
| 10,583 | 39.707692 | 118 | py |
nocturne | nocturne-main/scripts/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Storage for SLURM running utilities."""
class Overrides(object):
"""Utility class used to convert commands into a bash runnable string."""
def __init__(self):
"""Initialize class."""
self.kvs = dict()
def add(self, key, values):
"""Add each of the desired key value pairs into a dict."""
value = ','.join(str(v) for v in values)
assert key not in self.kvs
self.kvs[key] = value
def cmd(self):
"""Append the keys together into a command that can be run."""
cmd = []
for k, v in self.kvs.items():
cmd.append(f'{k}={v}')
return cmd
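# Illustrative usage sketch:
#   overrides = Overrides()
#   overrides.add('algo', ['ppo'])
#   overrides.add('seed', [0, 1, 2])
#   overrides.cmd()  # -> ['algo=ppo', 'seed=0,1,2']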
| 843 | 30.259259 | 77 | py |
nocturne | nocturne-main/scripts/visualization/visualize_waymo_map.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Plot the text file representation of a protobuf."""
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import pprint
pp = pprint.PrettyPrinter()
data = {}
current = data
file = 'output.txt'
show_tracks = True
parent_keys = []
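# parse the textproto dump into nested dicts: "key: value" lines append a
# value, "key {" descends into a new child dict (tracked via parent_keys),
# and "}" pops back up to the parent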
with open(file, 'r') as f:
lines = f.read().split('\n')
for line in lines:
# print(line)
if ":" in line:
k, v = [x.strip() for x in line.split(':')]
if k in current:
current[k].append(v)
else:
current[k] = [v]
elif "{" in line:
k = line[:-1].strip()
if k not in current:
current[k] = []
parent_keys.append(k)
current[k].append({})
current = current[k][-1]
elif "}" in line:
current = data
for k in parent_keys[:-1]:
current = current[k][-1]
parent_keys = parent_keys[:-1]
else:
pass
# message Scenario:
# https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/protos/scenario.proto
print('\nScenario')
print(data.keys())
# message Track, message ObjectState:
# https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/protos/scenario.proto
print('\nObjects (vehicles, pedestrians, cyclists..)')
print(len(data['tracks']))
print(data['tracks'][0].keys())
print(len(data['tracks'][0]['states']))
print(data['tracks'][0]['states'][0].keys())
# message MapFeature:
# https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/protos/map.proto
print('\nMap (roads, lanes..)')
print(len(data['map_features']))
print(data['map_features'][0].keys())
# supported values are '-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted'
fig = plt.figure(figsize=(20, 20))
for mf in data['map_features']:
k = list(mf.keys())[1]
assert len(mf[k]) == 1
v = mf[k][0]
if k == 'lane':
xs = []
ys = []
for pt in v['polyline']:
xs.append(float(pt['x'][0]))
ys.append(float(pt['y'][0]))
plt.plot(xs, ys, color='cyan', linewidth=1)
elif k == 'road_line':
edge_type = v['type'][0]
# linestyle = 'solid' if edge_type == 'TYPE_ROAD_EDGE_BOUNDARY' else 'dashdot'
# print(edge_type)
xs = []
ys = []
for pt in v['polyline']:
xs.append(float(pt['x'][0]))
ys.append(float(pt['y'][0]))
plt.plot(xs, ys, color='orange')
elif k == 'road_edge':
edge_type = v['type'][0]
linestyle = 'solid' if edge_type == 'TYPE_ROAD_EDGE_BOUNDARY' else 'dashdot'
xs = []
ys = []
for pt in v['polyline']:
xs.append(float(pt['x'][0]))
ys.append(float(pt['y'][0]))
plt.plot(xs, ys, color='black', linestyle=linestyle)
elif k == 'stop_sign':
pos = v['position'][0]
plt.plot(float(pos['x'][0]), float(pos['y'][0]), 'ro')
elif k == 'crosswalk':
xs = []
ys = []
for pt in v['polygon']:
xs.append(float(pt['x'][0]))
ys.append(float(pt['y'][0]))
        plt.plot(xs, ys, color='purple', linestyle='solid')
elif k == 'speed_bump':
xs = []
ys = []
for pt in v['polygon']:
xs.append(float(pt['x'][0]))
ys.append(float(pt['y'][0]))
        plt.plot(xs, ys, color='green', linestyle='solid')
else:
print('Error with key', k)
if show_tracks:
img_arr = []
from celluloid import Camera
camera = Camera(plt.gcf())
ax = plt.gca()
    # use range(len(data['tracks'][0]['states'])) below to render every timestep
for i in range(20):
for object in data['tracks']:
if object['states'][i]['valid'][0] != 'false':
plt.scatter(float(object['states'][i]['center_x'][0]),
float(object['states'][i]['center_y'][0]),
c='blue',
s=40)
# TODO(eugenevinitsky) this is a horrible way of copying over the figure
lines = list(ax.get_lines())
for obj in lines:
plt.plot(obj.get_data()[0], obj.get_data()[1])
camera.snap()
animation = camera.animate()
animation.save('animation.mp4')
patches = []
patches.append(mpatches.Patch(color='cyan', label='lane_center'))
patches.append(mpatches.Patch(color='orange', label='road_line'))
patches.append(mpatches.Patch(color='black', label='road_edge'))
patches.append(mpatches.Patch(color='red', label='stop_sign'))
patches.append(mpatches.Patch(color='purple', label='crosswalk'))
patches.append(mpatches.Patch(color='green', label='speedbump'))
plt.legend(handles=patches)
plt.savefig(file.split('.')[0] + '.png')
| 5,023 | 31.205128 | 107 | py |
nocturne | nocturne-main/scripts/visualization/waymo_movie.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Make a movie from a random file."""
import os
import hydra
import imageio
import matplotlib.pyplot as plt
import numpy as np
from cfgs.config import PROCESSED_TRAIN_NO_TL, get_scenario_dict, set_display_window
from nocturne import Simulation
@hydra.main(config_path="../../cfgs/", config_name="config")
def main(cfg):
"""See file docstring."""
set_display_window()
_ = plt.figure()
files = os.listdir(PROCESSED_TRAIN_NO_TL)
file = os.path.join(PROCESSED_TRAIN_NO_TL,
files[np.random.randint(len(files))])
sim = Simulation(file, get_scenario_dict(cfg))
frames = []
scenario = sim.getScenario()
for veh in scenario.getVehicles():
veh.expert_control = True
for i in range(90):
img = scenario.getImage(
img_width=1600,
img_height=1600,
draw_target_positions=False,
padding=50.0,
)
frames.append(img)
sim.step(0.1)
movie_frames = np.array(frames)
output_path = f'{os.path.basename(file)}.mp4'
imageio.mimwrite(output_path, movie_frames, fps=30)
print('>', output_path)
if __name__ == '__main__':
main()
| 1,378 | 27.729167 | 84 | py |
nocturne | nocturne-main/scripts/cluster_scripts/run_sample_factory_cluster.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run sample factory experiments on a SLURM cluster."""
import argparse
import os
import pathlib
import shutil
from datetime import datetime
from subprocess import Popen
from cfgs.config import PROJECT_PATH
from scripts.cluster_scripts.utils import Overrides
def make_code_snap(experiment, code_path, str_time):
"""Copy code to directory to ensure that the run launches with correct commit.
Args:
experiment (str): Name of experiment
code_path (str): Path to where we are saving the code.
str_time (str): Unique time identifier used to distinguish
            experiments with the same name.
Returns
-------
snap_dir (str): path to where the code has been copied.
"""
if len(code_path) > 0:
snap_dir = pathlib.Path(code_path)
else:
snap_dir = pathlib.Path.cwd()
snap_dir /= str_time
snap_dir /= f'{experiment}'
snap_dir.mkdir(exist_ok=True, parents=True)
def copy_dir(dir, pat):
dst_dir = snap_dir / 'code' / dir
dst_dir.mkdir(exist_ok=True, parents=True)
for f in (src_dir / dir).glob(pat):
shutil.copy(f, dst_dir / f.name)
dirs_to_copy = [
'.', './cfgs/', './examples/', './examples/sample_factory_files',
'./cfgs/algorithm', './nocturne/envs/', './nocturne_utils/',
'./nocturne/python/', './scenarios/', './build'
]
src_dir = pathlib.Path(PROJECT_PATH)
for dir in dirs_to_copy:
copy_dir(dir, '*.py')
copy_dir(dir, '*.yaml')
return snap_dir
def main():
"""Launch experiments on SLURM cluster by overriding Hydra config."""
parser = argparse.ArgumentParser()
parser.add_argument('experiment', type=str)
parser.add_argument(
'--code_path',
default='/checkpoint/eugenevinitsky/nocturne/sample_factory_runs')
parser.add_argument('--dry', action='store_true')
args = parser.parse_args()
now = datetime.now()
str_time = now.strftime('%Y.%m.%d_%H%M%S')
snap_dir = make_code_snap(args.experiment, args.code_path, str_time)
overrides = Overrides()
overrides.add('hydra/launcher', ['submitit_slurm'])
overrides.add('hydra.launcher.partition', ['learnlab'])
overrides.add('experiment', [args.experiment])
overrides.add('num_files', [10000])
overrides.add('seed', [0, 1, 2, 3, 4])
overrides.add('scenario.max_visible_road_points', [500])
overrides.add('rew_cfg.collision_penalty', [0, -80.0])
cmd = [
'python',
str(snap_dir / 'code' / 'examples' / 'sample_factory_files' /
'run_sample_factory.py'), '-m', 'algorithm=APPO'
]
print(cmd)
cmd += overrides.cmd()
if args.dry:
print(' '.join(cmd))
else:
env = os.environ.copy()
env['PYTHONPATH'] = str(snap_dir / 'code')
p = Popen(cmd, env=env)
p.communicate()
if __name__ == '__main__':
main()
| 3,119 | 30.836735 | 82 | py |
nocturne | nocturne-main/scripts/cluster_scripts/run_rllib_cluster.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run rllib experiments on a SLURM cluster."""
import argparse
import os
import pathlib
import shutil
from datetime import datetime
from subprocess import Popen
from cfgs.config import PROJECT_PATH
from scripts.utils import Overrides
def make_code_snap(experiment, code_path, str_time):
"""Copy code to directory to ensure that the run launches with correct commit.
Args:
experiment (str): Name of experiment
code_path (str): Path to where we are saving the code.
str_time (str): Unique time identifier used to distinguish
            experiments with the same name.
Returns
-------
snap_dir (str): path to where the code has been copied.
"""
if len(code_path) > 0:
snap_dir = pathlib.Path(code_path)
else:
snap_dir = pathlib.Path.cwd()
snap_dir /= str_time
snap_dir /= f'{experiment}'
snap_dir.mkdir(exist_ok=True, parents=True)
def copy_dir(dir, pat):
dst_dir = snap_dir / 'code' / dir
dst_dir.mkdir(exist_ok=True, parents=True)
for f in (src_dir / dir).glob(pat):
shutil.copy(f, dst_dir / f.name)
dirs_to_copy = [
'.', './cfgs/', './examples/', './cfgs/algorithm', './envs/',
'./nocturne_utils/', './python/', './scenarios/', './build'
]
src_dir = pathlib.Path(PROJECT_PATH)
for dir in dirs_to_copy:
copy_dir(dir, '*.py')
copy_dir(dir, '*.yaml')
return snap_dir
def main():
"""Launch experiments on SLURM cluster by overriding Hydra config."""
username = os.environ["USER"]
parser = argparse.ArgumentParser()
parser.add_argument('experiment', type=str)
parser.add_argument(
'--code_path',
default=f'/checkpoint/{username}/nocturne/sample_factory_runs')
parser.add_argument('--dry', action='store_true')
args = parser.parse_args()
now = datetime.now()
str_time = now.strftime('%Y.%m.%d_%H%M%S')
snap_dir = make_code_snap(args.experiment, args.code_path, str_time)
overrides = Overrides()
overrides.add('hydra/launcher', ['ray'])
overrides.add('hydra.launcher.partition', ['learnlab'])
cmd = [
'python',
str(snap_dir / 'code' / 'examples' / 'run_rllib.py'), '-m'
]
cmd += overrides.cmd()
print(cmd)
if args.dry:
print(' '.join(cmd))
else:
env = os.environ.copy()
env['PYTHONPATH'] = str(snap_dir / 'code')
p = Popen(cmd, env=env)
p.communicate()
if __name__ == '__main__':
main()
| 2,735 | 28.73913 | 82 | py |
nocturne | nocturne-main/scripts/cluster_scripts/utils.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Storage for SLURM running utilities."""
class Overrides(object):
"""Utility class used to convert commands into a bash runnable string."""
def __init__(self):
"""Initialize class."""
self.kvs = dict()
def add(self, key, values):
"""Add each of the desired key value pairs into a dict."""
value = ','.join(str(v) for v in values)
assert key not in self.kvs
self.kvs[key] = value
def cmd(self):
"""Append the keys together into a command that can be run."""
cmd = []
for k, v in self.kvs.items():
cmd.append(f'{k}={v}')
return cmd
| 843 | 30.259259 | 77 | py |
nocturne | nocturne-main/scripts/cluster_scripts/run_ppo_cluster.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run on-policy PPO experiments on a SLURM cluster."""
import argparse
import os
import pathlib
import shutil
from datetime import datetime
from subprocess import Popen
from cfgs.config import PROJECT_PATH
from scripts.cluster_scripts.utils import Overrides
def make_code_snap(experiment, code_path, slurm_dir='exp'):
"""Copy code to directory to ensure that the run launches with correct commit.
Args:
experiment (str): Name of experiment
code_path (str): Path to where we are saving the code.
str_time (str): Unique time identifier used to distinguish
experiments with same name.
Returns
-------
snap_dir (str): path to where the code has been copied.
"""
now = datetime.now()
if len(code_path) > 0:
snap_dir = pathlib.Path(code_path) / slurm_dir
else:
snap_dir = pathlib.Path.cwd() / slurm_dir
snap_dir /= now.strftime('%Y.%m.%d')
snap_dir /= now.strftime('%H%M%S') + f'_{experiment}'
snap_dir.mkdir(exist_ok=True, parents=True)
def copy_dir(dir, pat):
dst_dir = snap_dir / 'code' / dir
dst_dir.mkdir(exist_ok=True, parents=True)
for f in (src_dir / dir).glob(pat):
shutil.copy(f, dst_dir / f.name)
dirs_to_copy = [
'.', './cfgs/', './cfgs/algo', './algos/', './algos/ppo/',
'./algos/ppo/ppo_utils', './algos/ppo/r_mappo',
'./algos/ppo/r_mappo/algorithm', './algos/ppo/utils',
        './nocturne/envs/', './nocturne_utils/', './nocturne/python/', './build'
]
src_dir = pathlib.Path(os.path.dirname(os.getcwd()))
for dir in dirs_to_copy:
copy_dir(dir, '*.py')
copy_dir(dir, '*.yaml')
return snap_dir
def main():
"""Launch experiments on SLURM cluster by overriding Hydra config."""
parser = argparse.ArgumentParser()
parser.add_argument('experiment', type=str)
parser.add_argument('--code_path',
default='/checkpoint/eugenevinitsky/nocturne')
parser.add_argument('--dry', action='store_true')
args = parser.parse_args()
snap_dir = make_code_snap(args.experiment, args.code_path)
print(str(snap_dir))
overrides = Overrides()
overrides.add('hydra/launcher', ['submitit_slurm'])
overrides.add('hydra.launcher.partition', ['learnlab'])
overrides.add('experiment', [args.experiment])
# experiment parameters
overrides.add('episode_length', [200])
# algo
overrides.add('algo', ['ppo'])
overrides.add('algo.entropy_coef', [-0.001, 0.0, 0.001])
overrides.add('algo.n_rollout_threads', [128])
# rewards
overrides.add('rew_cfg.goal_achieved_bonus', [10, 50])
# misc
overrides.add('scenario_path',
[PROJECT_PATH / 'scenarios/twenty_car_intersection.json'])
cmd = [
'python',
str(snap_dir / 'code' / 'algos' / 'ppo' / 'nocturne_runner.py'), '-m'
]
print(cmd)
cmd += overrides.cmd()
if args.dry:
print(' '.join(cmd))
else:
env = os.environ.copy()
env['PYTHONPATH'] = str(snap_dir / 'code')
p = Popen(cmd, env=env)
p.communicate()
if __name__ == '__main__':
main()
| 3,396 | 31.663462 | 82 | py |
nocturne | nocturne-main/scripts/cluster_scripts/run_imitation_cluster.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run sample factory experiments on a SLURM cluster."""
import argparse
import os
import pathlib
import shutil
from datetime import datetime
from subprocess import Popen
from cfgs.config import PROJECT_PATH
from scripts.cluster_scripts.utils import Overrides
def make_code_snap(experiment, code_path, str_time):
"""Copy code to directory to ensure that the run launches with correct commit.
Args:
experiment (str): Name of experiment
code_path (str): Path to where we are saving the code.
str_time (str): Unique time identifier used to distinguish
            experiments with the same name.
Returns
-------
snap_dir (str): path to where the code has been copied.
"""
if len(code_path) > 0:
snap_dir = pathlib.Path(code_path)
else:
snap_dir = pathlib.Path.cwd()
snap_dir /= str_time
snap_dir /= f'{experiment}'
snap_dir.mkdir(exist_ok=True, parents=True)
def copy_dir(dir, pat):
dst_dir = snap_dir / 'code' / dir
dst_dir.mkdir(exist_ok=True, parents=True)
for f in (src_dir / dir).glob(pat):
shutil.copy(f, dst_dir / f.name)
dirs_to_copy = [
'.', './cfgs/', './cfgs/algorithm', './cfgs/imitation',
'./nocturne/envs/', './nocturne/pybind11',
'.examples/imitation_learning', './build'
]
src_dir = pathlib.Path(PROJECT_PATH)
for dir in dirs_to_copy:
copy_dir(dir, '*.py')
copy_dir(dir, '*.yaml')
return snap_dir
def main():
"""Launch experiments on SLURM cluster by overriding Hydra config."""
username = os.environ["USER"]
parser = argparse.ArgumentParser()
parser.add_argument('experiment', type=str)
parser.add_argument('--code_path',
default=f'/checkpoint/{username}/nocturne/il_runs')
parser.add_argument('--dry', action='store_true')
args = parser.parse_args()
now = datetime.now()
str_time = now.strftime('%Y.%m.%d_%H%M%S')
snap_dir = make_code_snap(args.experiment, args.code_path, str_time)
overrides = Overrides()
overrides.add('hydra/launcher', ['submitit_slurm'])
overrides.add('hydra.launcher.partition', ['learnlab'])
overrides.add('experiment', [args.experiment])
overrides.add('num_files', [1000])
overrides.add('epochs', [1400])
overrides.add('seed', [0, 1, 2, 3, 4])
cmd = [
'python',
str(snap_dir / 'code' / 'nocturne' / 'utils' / 'imitation_learning' /
'train.py'), '-m'
]
print(cmd)
cmd += overrides.cmd()
if args.dry:
print(' '.join(cmd))
else:
env = os.environ.copy()
env['PYTHONPATH'] = str(snap_dir / 'code')
p = Popen(cmd, env=env)
p.communicate()
if __name__ == '__main__':
main()
| 3,004 | 29.663265 | 82 | py |
nocturne | nocturne-main/scripts/data_analysis/data_analysis.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utils that we use to understand the datasets we are working with."""
import os
import hydra
import matplotlib.pyplot as plt
import numpy as np
from cfgs.config import PROCESSED_TRAIN_NO_TL, PROJECT_PATH, get_scenario_dict
from nocturne import Simulation
def run_analysis(cfg, files):
"""Compute the expert accelerations and number of vehicles across the dataset.
    Args:
        cfg: Hydra config used to construct the simulation scenarios.
        files ([str]): List of files to analyze
Returns
-------
[np.float], [np.float]: List of expert accels, list of number
of moving vehicles in file
"""
observed_accels = []
num_vehicles = []
cfg['start_time'] = 0
cfg['allow_non_vehicles'] = False
for file_idx, file in enumerate(files):
sim = Simulation(os.path.join(PROCESSED_TRAIN_NO_TL, file),
get_scenario_dict(cfg))
vehs = sim.scenario().getObjectsThatMoved()
        # this checks whether the vehicles have actually moved any distance at all
valid_vehs = []
prev_speeds = []
for veh in vehs:
veh.expert_control = True
obj_pos = veh.position
goal_pos = veh.target_position
if (obj_pos - goal_pos).norm() > 0.5:
valid_vehs.append(veh)
if veh in valid_vehs:
veh_speed = sim.scenario().getExpertSpeeds(0, veh.id)
veh_speed = np.linalg.norm([veh_speed.x, veh_speed.y])
if not np.isclose(veh.position.x, -10000.0):
prev_speeds.append(
(veh_speed, True, [veh.position.x, veh.position.y], 0))
else:
prev_speeds.append(
(veh_speed, False, [veh.position.x,
veh.position.y], 0))
num_vehicles.append(len(valid_vehs))
sim.step(0.1)
for i in range(1, 90):
for veh_index, veh in enumerate(valid_vehs):
# check if the vehicle is actually valid
veh_speed = sim.scenario().getExpertSpeeds(i, veh.id)
veh_speed = veh_speed.norm()
if np.isclose(veh.position.x, -10000.0):
prev_speeds[veh_index] = (veh_speed, False,
[veh.position.x,
veh.position.y], i)
else:
# approximate the accel using an euler step but only
# if the prior step was a step where the agent
# was valid
if prev_speeds[veh_index][1]:
accel = (veh_speed - prev_speeds[veh_index][0]) / 0.1
observed_accels.append(accel)
prev_speeds[veh_index] = (veh_speed, True,
[veh.position.x,
veh.position.y], i)
sim.step(0.1)
if file_idx > 300:
break
return observed_accels, num_vehicles
@hydra.main(config_path="../../cfgs/", config_name="config")
def analyze_accels(cfg):
"""Plot the expert accels and number of observed moving vehicles."""
f_path = PROCESSED_TRAIN_NO_TL
with open(os.path.join(f_path, 'valid_files.txt')) as file:
files = [line.strip() for line in file]
observed_accels_valid, num_vehicles_valid = run_analysis(cfg, files)
with open(os.path.join(f_path, 'invalid_files.txt')) as file:
files = [line.strip() for line in file]
_, num_vehicles_invalid = run_analysis(cfg, files)
output_path = os.path.join(PROJECT_PATH, 'nocturne_utils/data_analysis')
if not os.path.exists(output_path):
os.makedirs(output_path)
observed_accels = np.array(observed_accels_valid)
print(np.max(observed_accels))
print(np.min(observed_accels))
observed_accels = observed_accels[np.abs(observed_accels) < 5]
plt.figure()
plt.hist(observed_accels)
plt.savefig(os.path.join(output_path, 'observed_accels.png'))
plt.figure()
plt.hist(
num_vehicles_valid,
bins=30,
density=True,
histtype='step',
cumulative=True,
)
plt.hist(
num_vehicles_invalid,
bins=30,
density=True,
histtype='step',
cumulative=True,
)
plt.legend(['valid', 'invalid'])
plt.savefig(os.path.join(output_path, 'num_vehs_cdf.png'))
plt.figure()
plt.hist(num_vehicles_valid, bins=30, alpha=0.5, color='b')
plt.axvline(np.mean(num_vehicles_valid), color='b', label='_nolegend_')
plt.hist(num_vehicles_invalid, bins=30, alpha=0.5, color='r')
plt.axvline(np.mean(num_vehicles_invalid), color='r', label='_nolegend_')
plt.legend(['valid', 'invalid'])
plt.savefig(os.path.join(output_path, 'num_vehs_hist.png'))
if __name__ == '__main__':
analyze_accels()
| 5,126 | 38.137405 | 82 | py |
nocturne | nocturne-main/scripts/data_analysis/speed_test.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utils that we use to understand the datasets we are working with."""
import json
import os
import time
import hydra
import numpy as np
from cfgs.config import PROCESSED_TRAIN_NO_TL, get_scenario_dict, set_display_window
from nocturne import Simulation, Action
def run_speed_test(files, cfg):
"""Compute the expert accelerations and number of vehicles across the dataset.
Args:
files ([str]): List of files to analyze
Returns
-------
[np.float], [np.float]: List of expert accels, list of number
of moving vehicles in file
"""
times_list = []
for file in files:
sim = Simulation(os.path.join(PROCESSED_TRAIN_NO_TL, file),
get_scenario_dict(cfg))
vehs = sim.scenario().getObjectsThatMoved()
scenario = sim.getScenario()
veh = vehs[np.random.randint(len(vehs))]
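        # time how long it takes to build the flattened observation, apply an
        # action and advance the simulation by one step for a random vehicle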
t = time.perf_counter()
_ = scenario.flattened_visible_state(veh, 80, (180 / 180) * np.pi)
veh.apply_action(Action(1.0, 1.0, 1.0))
sim.step(0.1)
times_list.append(time.perf_counter() - t)
print('avg, std. time to get obs is {}, {}'.format(np.mean(times_list),
np.std(times_list)))
@hydra.main(config_path="../../cfgs/", config_name="config")
def analyze_accels(cfg):
"""Plot the expert accels and number of observed moving vehicles."""
f_path = PROCESSED_TRAIN_NO_TL
with open(os.path.join(f_path, 'valid_files.json')) as file:
valid_veh_dict = json.load(file)
files = list(valid_veh_dict.keys())
run_speed_test(files[0:10], cfg)
if __name__ == '__main__':
set_display_window()
analyze_accels()
| 1,937 | 33 | 84 | py |
nocturne | nocturne-main/scripts/data_analysis/corner_case_search.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run through the data to look for cases where there are undesirable corner cases.
The cases we currently check for are:
1) is a vehicle initialized in a colliding state with another vehicle
2) is a vehicle initialized in a colliding state with a road edge?
"""
from copy import deepcopy
from pathlib import Path
import os
import sys
import hydra
import imageio
import matplotlib.pyplot as plt
import numpy as np
from cfgs.config import PROCESSED_TRAIN_NO_TL, PROJECT_PATH, \
get_scenario_dict, set_display_window
from nocturne import Simulation
@hydra.main(config_path="../../cfgs/", config_name="config")
def main(cfg):
"""See file docstring."""
set_display_window()
SAVE_IMAGES = False
MAKE_MOVIES = False
output_folder = 'corner_case_vis'
output_path = Path(PROJECT_PATH) / f'nocturne_utils/{output_folder}'
output_path.mkdir(exist_ok=True)
files = list(os.listdir(PROCESSED_TRAIN_NO_TL))
files = [file for file in files if 'tfrecord' in file]
# track the number of collisions at each time-step
collide_counter = np.zeros((2, 90))
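    # row 0 counts vehicle-vehicle collisions, row 1 counts vehicle-road-edge
    # collisions, with one column per simulation step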
file_has_veh_collision_counter = 0
file_has_edge_collision_counter = 0
total_edge_collision_counter = 0
total_veh_collision_counter = 0
initialized_collision_counter = 0
total_veh_counter = 0
start_cfg = deepcopy(cfg)
start_cfg['scenario']['start_time'] = 0
start_cfg['scenario']['allow_non_vehicles'] = False
for file_idx, file in enumerate(files):
found_collision = False
edge_collision = False
sim = Simulation(os.path.join(PROCESSED_TRAIN_NO_TL, file),
get_scenario_dict(cfg))
vehs = sim.getScenario().getObjectsThatMoved()
        # this checks whether the vehicles have actually moved any distance at all
valid_vehs = []
for veh in vehs:
veh.expert_control = True
obj_pos = veh.getPosition()
obj_pos = np.array([obj_pos.x, obj_pos.y])
goal_pos = veh.getGoalPosition()
goal_pos = np.array([goal_pos.x, goal_pos.y])
if np.linalg.norm(obj_pos - goal_pos) > 0.5:
valid_vehs.append(veh)
veh_edge_collided = [False for _ in vehs]
veh_veh_collided = [False for _ in vehs]
initialized_collided = [False for _ in vehs]
for time_index in range(90):
for veh_index, veh in enumerate(valid_vehs):
collided = veh.getCollided()
if collided and not np.isclose(veh.getPosition().x, -10000.0):
collide_counter[int(veh.collision_type) - 1,
time_index] += 1
if int(veh.collision_type) == 2:
veh_edge_collided[veh_index] = True
if int(veh.collision_type) == 1:
veh_veh_collided[veh_index] = True
if time_index == 0:
initialized_collided[veh_index] = True
if np.isclose(veh.getPosition().x, -10000.0):
collided = False
if time_index == 0 and not found_collision and collided and SAVE_IMAGES:
img = sim.getScenario().getImage(
img_width=1600,
img_height=1600,
draw_target_positions=True,
padding=50.0,
)
fig = plt.figure()
plt.imshow(img)
plt.savefig(f'{output_folder}/{file}.png')
plt.close(fig)
if not found_collision and collided:
found_collision = True
if int(veh.collision_type) == 1:
file_has_veh_collision_counter += 1
else:
file_has_edge_collision_counter += 1
edge_collision = True
sim.step(0.1)
total_veh_counter += len(valid_vehs)
total_edge_collision_counter += np.sum(veh_edge_collided)
total_veh_collision_counter += np.sum(veh_veh_collided)
initialized_collision_counter += np.sum(initialized_collided)
print(f'at file {file_idx} we have {collide_counter} collisions for a\
ratio of {collide_counter / (file_idx + 1)}')
print(f'the number of files that have a veh collision at all is\
{file_has_veh_collision_counter / (file_idx + 1)}')
print(f'the number of files that have a edge collision at all is\
{file_has_edge_collision_counter / (file_idx + 1)}')
print(f'the fraction of vehicles that have had an edge collision\
is {total_edge_collision_counter / total_veh_counter}')
print(f'the fraction of vehicles that have had a collision at all\
is {(total_edge_collision_counter + total_veh_collision_counter) / total_veh_counter}'
)
print(
f'the fraction of vehicles that are initialized in collision are \
{initialized_collision_counter / total_veh_counter}')
if found_collision and edge_collision and MAKE_MOVIES:
movie_frames = []
fig = plt.figure()
sim = Simulation(os.path.join(PROCESSED_TRAIN_NO_TL, file),
get_scenario_dict(start_cfg))
vehs = sim.getScenario().getObjectsThatMoved()
for veh in vehs:
veh.expert_control = True
for time_index in range(89):
movie_frames.append(sim.getScenario().getImage(
img_width=1600, img_height=1600))
sim.step(0.1)
movie_frames = np.array(movie_frames)
imageio.mimwrite(f'{output_path}/{os.path.basename(file)}.mp4',
movie_frames,
fps=10)
if file_has_edge_collision_counter + file_has_veh_collision_counter > 10:
sys.exit()
if __name__ == '__main__':
main()
| 6,273 | 43.496454 | 102 | py |
nocturne | nocturne-main/scripts/json_generation/make_solvable_files.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Find all cases where collisions are required to achieve the goal.
Due to errors in Waymo labeling, some space that is crossable is mistakenly
labeled as a road edge. This file finds most of those cases.
"""
import argparse
import json
import multiprocessing
from multiprocessing import Process, Lock
import os
import numpy as np
from cfgs.config import PROCESSED_TRAIN_NO_TL, PROCESSED_VALID_NO_TL, \
get_default_scenario_dict, set_display_window
from nocturne import Simulation
def is_file_valid(file_list, output_file, output_file_invalid, lock=None):
"""Test if file requires an agent to collide with a road edge to get to goal.
We test for this by making the agent have very thin width. If an agent
is in collision with a road edge despite this thin width, it was crossing
that road edge because that road edge was on the way to its goal. We also
shrink the length to avoid the cases where the vehicle is initialized
in collision with a road edge.
    If more than 80% of the agents in a file need to collide with a road edge
    to reach their goal, we store it in output_file_invalid instead.
Args
----
file_list ([str]): list of file paths.
output_file (str): file to store valid json names.
    output_file_invalid (str): file to store invalid json names.
lock (Lock, optional): Lock used for safe file writing.
"""
file_valid_dict = {}
file_invalid_dict = {}
cfg = get_default_scenario_dict()
cfg['start_time'] = 0
cfg['allow_non_vehicles'] = False
for i, file in enumerate(file_list):
sim = Simulation(str(file), cfg)
vehs = sim.scenario().getObjectsThatMoved()
for veh in vehs:
# we shrink the vehicle width and length to tiny values.
# then, if a vehicle collides with a road edge, we know it had to
# cross that road edge to actually get to its goal
veh._scale_shape(length_scale=0.3, width_scale=0.1)
veh.expert_control = True
# dict tracking which vehicles were forced to collide with
# an edge on their way to goal
veh_edge_collided = {veh.id: False for veh in vehs}
for _ in range(90):
for veh in vehs:
collided = veh.collided
# the second conditions check whether the
# the vehicle has "collided", but only because
# it was invalid at the same time as another
# vehicle was invalid
if collided and not np.isclose(veh.position.x, -10000.0):
if int(veh.collision_type) == 2:
veh_edge_collided[veh.id] = True
sim.step(0.1)
# write all the vehicle ids that had a collision to a file
# so that we know which vehicles should be set to be experts
# if more than 80% of the vehicles are experts, we throw the file
# away
if np.sum(list(
veh_edge_collided.values())) / len(veh_edge_collided) < 0.8:
storage = file_valid_dict
else:
storage = file_invalid_dict
storage[str(file).split('/')[-1]] = [
key for key, val in veh_edge_collided.items() if val
]
for file, return_dict in zip([output_file, output_file_invalid],
[file_valid_dict, file_invalid_dict]):
if lock is not None:
lock.acquire()
with open(file, 'r') as fp:
temp_dict = json.load(fp)
with open(file, 'w') as fp:
temp_dict.update(return_dict)
json.dump(temp_dict, fp, indent=4)
if lock is not None:
lock.release()
def main():
"""See file docstring."""
set_display_window()
parser = argparse.ArgumentParser(
description="Load and show waymo scenario data.")
parser.add_argument(
"--parallel",
action='store_true',
help="If true, split the conversion up over multiple processes")
parser.add_argument(
"--n_processes",
type=int,
default=40,
help="Number of processes over which to split file generation")
parser.add_argument("--datatype",
default='train',
type=str,
choices=['train', 'valid'],
nargs='+',
help="Whether to convert, train or valid data")
args = parser.parse_args()
# TODO(eugenevinitsky) this currently assumes that we have
# constructed the scenes without traffic lights and not
# other scenes
folders_to_convert = []
if 'train' in args.datatype:
folders_to_convert.append(PROCESSED_TRAIN_NO_TL)
if 'valid' in args.datatype:
folders_to_convert.append(PROCESSED_VALID_NO_TL)
lock = Lock()
for folder_path in folders_to_convert:
files = os.listdir(folder_path)
files = [
os.path.join(folder_path, file) for file in files
if 'tfrecord' in file
]
output_file = os.path.join(folder_path, 'valid_files.json')
with open(output_file, 'w') as fp:
json.dump({}, fp)
output_file_invalid = os.path.join(folder_path, 'invalid_files.json')
with open(output_file_invalid, 'w') as fp:
json.dump({}, fp)
if args.parallel:
# leave some cpus free but have at least one and don't use more than n_processes
num_cpus = min(max(multiprocessing.cpu_count() - 2, 1),
args.n_processes)
num_files = len(files)
process_list = []
for i in range(num_cpus):
p = Process(target=is_file_valid,
args=[
files[i * num_files // num_cpus:(i + 1) *
num_files // num_cpus], output_file,
output_file_invalid, lock
])
p.start()
process_list.append(p)
for process in process_list:
process.join()
else:
is_file_valid(files, output_file, output_file_invalid, lock=None)
if __name__ == '__main__':
main()
| 6,495 | 37.898204 | 92 | py |
nocturne | nocturne-main/scripts/json_generation/waymo_scenario_construction.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Construct a scenarios.json file from a waymos protobuf."""
from collections import defaultdict
import math
import json
from typing import Any, Dict, Iterator, Optional
import tensorflow as tf
from waymo_open_dataset.protos import map_pb2, scenario_pb2
from cfgs.config import ERR_VAL
_WAYMO_OBJECT_STR = {
scenario_pb2.Track.TYPE_UNSET: "unset",
scenario_pb2.Track.TYPE_VEHICLE: "vehicle",
scenario_pb2.Track.TYPE_PEDESTRIAN: "pedestrian",
scenario_pb2.Track.TYPE_CYCLIST: "cyclist",
scenario_pb2.Track.TYPE_OTHER: "other",
}
_WAYMO_ROAD_STR = {
map_pb2.TrafficSignalLaneState.LANE_STATE_UNKNOWN: "unknown",
map_pb2.TrafficSignalLaneState.LANE_STATE_ARROW_STOP: "arrow_stop",
map_pb2.TrafficSignalLaneState.LANE_STATE_ARROW_CAUTION: "arrow_caution",
map_pb2.TrafficSignalLaneState.LANE_STATE_ARROW_GO: "arrow_go",
map_pb2.TrafficSignalLaneState.LANE_STATE_STOP: "stop",
map_pb2.TrafficSignalLaneState.LANE_STATE_CAUTION: "caution",
map_pb2.TrafficSignalLaneState.LANE_STATE_GO: "go",
map_pb2.TrafficSignalLaneState.LANE_STATE_FLASHING_STOP: "flashing_stop",
map_pb2.TrafficSignalLaneState.LANE_STATE_FLASHING_CAUTION:
"flashing_caution",
}
def _parse_object_state(
states: scenario_pb2.ObjectState,
final_state: scenario_pb2.ObjectState) -> Dict[str, Any]:
"""Construct a dict representing the trajectory and goals of an object.
Args:
states (scenario_pb2.ObjectState): Protobuf of object state
final_state (scenario_pb2.ObjectState): Protobuf of last valid object state.
Returns
-------
Dict[str, Any]: Dict representing an object.
"""
return {
"position": [{
"x": state.center_x,
"y": state.center_y
} if state.valid else {
"x": ERR_VAL,
"y": ERR_VAL
} for state in states],
"width":
final_state.width,
"length":
final_state.length,
"heading": [
math.degrees(state.heading) if state.valid else ERR_VAL
for state in states
], # Use rad here?
"velocity": [{
"x": state.velocity_x,
"y": state.velocity_y
} if state.valid else {
"x": ERR_VAL,
"y": ERR_VAL
} for state in states],
"valid": [state.valid for state in states],
"goalPosition": {
"x": final_state.center_x,
"y": final_state.center_y
}
}
def _init_tl_object(track):
"""Construct a dict representing the traffic light states."""
returned_dict = {}
for lane_state in track.lane_states:
returned_dict[lane_state.lane] = {
'state': _WAYMO_ROAD_STR[lane_state.state],
'x': lane_state.stop_point.x,
'y': lane_state.stop_point.y
}
return returned_dict
def _init_object(track: scenario_pb2.Track) -> Optional[Dict[str, Any]]:
"""Construct a dict representing the state of the object (vehicle, cyclist, pedestrian).
Args:
track (scenario_pb2.Track): protobuf representing the scenario
Returns
-------
Optional[Dict[str, Any]]: dict representing the trajectory and velocity of an object.
"""
final_valid_index = 0
for i, state in enumerate(track.states):
if state.valid:
final_valid_index = i
obj = _parse_object_state(track.states, track.states[final_valid_index])
obj["type"] = _WAYMO_OBJECT_STR[track.object_type]
return obj
def _init_road(map_feature: map_pb2.MapFeature) -> Optional[Dict[str, Any]]:
"""Convert an element of the map protobuf to a dict representing its coordinates and type."""
feature = map_feature.WhichOneof("feature_data")
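    # stop signs contribute a single point, crosswalks and speed bumps are
    # closed polygons, and every other feature (lanes, road lines, road
    # edges) is stored as a polyline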
if feature == 'stop_sign':
p = getattr(map_feature,
map_feature.WhichOneof("feature_data")).position
geometry = [{"x": p.x, "y": p.y}]
elif feature != 'crosswalk' and feature != 'speed_bump':
geometry = [{
"x": p.x,
"y": p.y
} for p in getattr(map_feature, map_feature.WhichOneof(
"feature_data")).polyline]
else:
geometry = [{
"x": p.x,
"y": p.y
} for p in getattr(map_feature, map_feature.WhichOneof(
"feature_data")).polygon]
return {
"geometry": geometry,
"type": map_feature.WhichOneof("feature_data"),
}
def load_protobuf(protobuf_path: str) -> Iterator[scenario_pb2.Scenario]:
"""Yield the sharded protobufs from the TFRecord."""
dataset = tf.data.TFRecordDataset(protobuf_path, compression_type="")
for data in dataset:
scenario = scenario_pb2.Scenario()
scenario.ParseFromString(bytearray(data.numpy()))
yield scenario
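# Illustrative usage sketch:
#   for i, scenario in enumerate(load_protobuf('some_shard.tfrecord')):
#       waymo_to_scenario(f'scenario_{i}.json', scenario, no_tl=True)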
def waymo_to_scenario(scenario_path: str,
protobuf: scenario_pb2.Scenario,
no_tl: bool = False) -> None:
"""Dump a JSON File containing the protobuf parsed into the right format.
Args
----
scenario_path (str): path to dump the json file
protobuf (scenario_pb2.Scenario): the protobuf we are converting
no_tl (bool, optional): If true, environments with traffic lights are not dumped.
"""
# read the protobuf file to get the right state
# write the json file
# construct the road geometries
# place the initial position of the vehicles
# Construct the traffic light states
tl_dict = defaultdict(lambda: {
'state': [],
'x': [],
'y': [],
'time_index': []
})
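    # tl_dict maps each traffic light lane id to parallel lists of its state,
    # stop point coordinates and the time index at which they were observed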
all_keys = ['state', 'x', 'y']
i = 0
for dynamic_map_state in protobuf.dynamic_map_states:
traffic_light_dict = _init_tl_object(dynamic_map_state)
# there is a traffic light but we don't want traffic light scenes so just return
if (no_tl and len(traffic_light_dict) > 0):
return
for id, value in traffic_light_dict.items():
for state_key in all_keys:
tl_dict[id][state_key].append(value[state_key])
tl_dict[id]['time_index'].append(i)
i += 1
# Construct the object states
objects = []
for track in protobuf.tracks:
obj = _init_object(track)
if obj is not None:
objects.append(obj)
# Construct the map states
roads = []
for map_feature in protobuf.map_features:
road = _init_road(map_feature)
if road is not None:
roads.append(road)
scenario = {
"name": scenario_path.split('/')[-1],
"objects": objects,
"roads": roads,
"tl_states": tl_dict
}
with open(scenario_path, "w") as f:
json.dump(scenario, f)
| 6,947 | 32.403846 | 97 | py |
nocturne | nocturne-main/scripts/json_generation/run_waymo_constructor.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utils for converting TFRecords into Nocturne compatible JSON."""
import argparse
from pathlib import Path
import os
import multiprocessing
from cfgs.config import TRAIN_DATA_PATH, VALID_DATA_PATH, PROCESSED_TRAIN_NO_TL, \
PROCESSED_VALID_NO_TL, PROCESSED_TRAIN, PROCESSED_VALID
import waymo_scenario_construction as waymo
def convert_files(args, files, output_dir, rank):
"""Convert the list of files into nocturne compatible JSON.
Args
----
args (NameSpace): args from the argument parser.
files ([str]): list of file paths for TFRecords that we should convert
output_dir (str): output path in which we should store the JSON
rank (int): rank of the process.
"""
cnt = 0
for file in files:
inner_count = 0
for data in waymo.load_protobuf(str(file)):
file_name = os.path.basename(file).split(
'.')[1] + f'_{inner_count}.json'
# this file is useful for debugging
if args.output_txt and cnt == 0 and rank == 0:
with open(os.path.basename(file).split('.')[1] + '.txt',
'w') as f:
f.write(str(data))
waymo.waymo_to_scenario(os.path.join(output_dir, file_name), data,
args.no_tl)
inner_count += 1
cnt += 1
if cnt >= args.num and not args.all_files:
break
print(inner_count)
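# For reference, the naming convention above turns a shard such as
# 'training.tfrecord-00995-of-01000' into JSON files named
# 'tfrecord-00995-of-01000_0.json', 'tfrecord-00995-of-01000_1.json', ...,
# one per scenario contained in the shard.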
def main():
"""Run the json generators."""
parser = argparse.ArgumentParser(
description="Load and show waymo scenario data.")
parser.add_argument("--file",
type=str,
default=os.path.join(
TRAIN_DATA_PATH,
'training.tfrecord-00995-of-01000'))
parser.add_argument("--num", type=int, default=1)
parser.add_argument("--output_txt",
action='store_true',
help='output a txt version of one of the protobufs')
parser.add_argument("--all_files",
action='store_true',
help='If true, iterate through the whole dataset')
parser.add_argument("--no_tl",
action='store_true',
help="If true, do not generate JSON files\
that have a traffic light in them")
parser.add_argument(
"--parallel",
action='store_true',
help="If true, split the conversion up over multiple processes")
parser.add_argument("--datatype",
default='train',
type=str,
choices=['train', 'valid'],
nargs='+',
help="Whether to convert, train or valid data")
args = parser.parse_args()
folders_to_convert = []
if 'train' in args.datatype:
folders_to_convert.append(
(TRAIN_DATA_PATH,
PROCESSED_TRAIN_NO_TL if args.no_tl else PROCESSED_TRAIN))
if 'valid' in args.datatype:
folders_to_convert.append(
(VALID_DATA_PATH,
PROCESSED_VALID_NO_TL if args.no_tl else PROCESSED_VALID))
for folder_path, output_dir in folders_to_convert:
if args.num > 1 or args.all_files:
files = list(Path(folder_path).glob('*tfrecord*'))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not args.all_files:
files = files[0:args.num]
else:
output_dir = os.getcwd()
files = [args.file]
if args.parallel:
# leave some cpus free but have at least one and don't use more than 40
num_cpus = min(max(multiprocessing.cpu_count() - 2, 1), 40)
num_files = len(files)
process_list = []
for i in range(num_cpus):
p = multiprocessing.Process(
target=convert_files,
args=[
args, files[i * num_files // num_cpus:(i + 1) *
num_files // num_cpus], output_dir, i
])
p.start()
process_list.append(p)
for process in process_list:
process.join()
else:
convert_files(args, files, output_dir, rank=0)
if __name__ == "__main__":
main()
| 4,662 | 36.910569 | 83 | py |
nocturne | nocturne-main/scripts/paper_plots/eval_sample_factory.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run a policy over the entire train set.
TODO(ev) refactor, this is wildly similar to visualize_sample_factory
"""
from copy import deepcopy
from collections import deque, defaultdict
import itertools
from itertools import repeat
import json
import multiprocessing as mp
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from sample_factory.algorithms.appo.actor_worker import transform_dict_observations
from sample_factory.algorithms.appo.learner import LearnerWorker
from sample_factory.algorithms.appo.model import create_actor_critic
from sample_factory.algorithms.appo.model_utils import get_hidden_size
from sample_factory.algorithms.utils.action_distributions import ContinuousActionDistribution, \
CategoricalActionDistribution
from sample_factory.algorithms.utils.arguments import load_from_checkpoint
from sample_factory.algorithms.utils.multi_agent_wrapper import MultiAgentWrapper, is_multiagent_env
from sample_factory.envs.create_env import create_env
from sample_factory.utils.utils import log, AttrDict
from examples.sample_factory_files.run_sample_factory import register_custom_components
from cfgs.config import PROCESSED_VALID_NO_TL, PROCESSED_TRAIN_NO_TL, \
ERR_VAL, set_display_window
CB_color_cycle = [
'#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00'
]
class Bunch(object):
"""Converts a dict into an object with the keys as attributes."""
def __init__(self, adict):
self.__dict__.update(adict)
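# Minimal sketch of how Bunch is used further below: it lets a plain config
# dict be handed to sample-factory helpers that expect attribute access, e.g.
#
#     cfg = Bunch({'policy_index': 0, 'num_envs': 1})
#     cfg.policy_index  # -> 0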
def ccw(A, B, C):
"""Blah."""
return (C[1] - A[1]) * (B[0] - A[0]) > (B[1] - A[1]) * (C[0] - A[0])
def intersect(A, B, C, D):
"""Check if two line segments AB and CD intersect."""
return ccw(A, C, D) != ccw(B, C, D) and ccw(A, B, C) != ccw(A, B, D)
def poly_intersection(poly1, poly2):
"""Compute if two polylines intersect."""
for i, p1_first_point in enumerate(poly1[:-1]):
p1_second_point = poly1[i + 1]
for j, p2_first_point in enumerate(poly2[:-1]):
p2_second_point = poly2[j + 1]
if intersect(p1_first_point, p1_second_point, p2_first_point,
p2_second_point):
return True
return False
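# Minimal illustration of the two checks above (coordinates invented purely for
# illustration; each point is an (x, y) pair):
#
#     intersect((0, 0), (2, 2), (0, 2), (2, 0))              # True: diagonals cross
#     poly_intersection([(0, 0), (2, 2)], [(0, 2), (2, 0)])  # True
#     poly_intersection([(0, 0), (1, 0)], [(0, 1), (1, 1)])  # False: parallel segments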
def run_rollouts(env,
cfg,
device,
expert_trajectory_dict,
distance_bins,
intersection_bins,
veh_intersection_dict,
actor_1,
actor_2=None):
"""Run a single rollout.
Args:
env (_type_): Env we are running.
cfg (dict): dictionary configuring the environment.
device (str): device you want to run the model on
expert_trajectory_dict (dict[str]: np.array): expert trajectories
keyed by ID
distance_bins (np.array): bins used to compute the goal
rate as a function of the starting distance from goal
intersection_bins (np.array): bins used to compute the
goal rate as a function of the number of intersections
between paths in the expert trajectories
veh_intersection_dict (dict[str]: np.array): dict mapping
a vehicle ID to the number of intersections it
experienced
actor_1: SampleFactory agent
        actor_2: SampleFactory agent. Will be None unless we're testing for
ZSC
Returns
-------
avg_goal: average goal rate of agents
avg_collisions: average collision rate of agents
avg_veh_edge_collisions: average veh-edge collision rate
avg_veh_veh_collisions: average veh-veh collision rate
success_rate_by_distance: np.array(number of distance bins, 4)
where the row indexes how far the vehicle was from goal
at initialization and where the column index is
[goal rate, collision rate, veh-veh collision rate, counter of
number of vehicles in this bin]
success_rate_by_num_agents: np.array(maximum number of vehicles, 4)
where the row index is how many vehicles were in this episode
where the column index is [goal rate, collision rate,
veh-veh collision rate, counter of
number of vehicles in this bin]
success_rate_by_intersections: np.array(number of intersections, 4)
where the row index is how many intersections that vehicle
had and where the column index is [goal rate, collision rate,
veh-veh collision rate, counter of
number of vehicles in this bin]
np.mean(ades): mean average displacement error of all vehicles in the
episode
np.mean(fdes): mean final displacement error of all vehicles in the
episode
veh_counter(int): how many vehicles were in that episode
"""
episode_rewards = [deque([], maxlen=100) for _ in range(env.num_agents)]
true_rewards = [deque([], maxlen=100) for _ in range(env.num_agents)]
obs = env.reset()
rollout_traj_dict = defaultdict(lambda: np.zeros((80, 2)))
# some key information for tracking statistics
goal_dist = env.goal_dist_normalizers
valid_indices = env.valid_indices
agent_id_to_env_id_map = env.agent_id_to_env_id_map
env_id_to_agent_id_map = env.env_id_to_agent_id_map
success_rate_by_num_agents = np.zeros((cfg.max_num_vehicles, 4))
success_rate_by_distance = np.zeros((distance_bins.shape[-1], 4))
success_rate_by_intersections = np.zeros((intersection_bins.shape[-1], 4))
if actor_2 is not None:
# pick which valid indices go to which policy
val = np.random.uniform()
if val < 0.5:
num_choice = int(np.floor(len(valid_indices) / 2.0))
else:
num_choice = int(np.ceil(len(valid_indices) / 2.0))
indices_1 = list(
np.random.choice(valid_indices, num_choice, replace=False))
indices_2 = [val for val in valid_indices if val not in indices_1]
rnn_states = torch.zeros(
[env.num_agents, get_hidden_size(cfg)],
dtype=torch.float32,
device=device)
rnn_states_2 = torch.zeros(
[env.num_agents, get_hidden_size(cfg)],
dtype=torch.float32,
device=device)
else:
rnn_states = torch.zeros(
[env.num_agents, get_hidden_size(cfg)],
dtype=torch.float32,
device=device)
episode_reward = np.zeros(env.num_agents)
finished_episode = [False] * env.num_agents
goal_achieved = [False] * len(valid_indices)
collision_observed = [False] * len(valid_indices)
veh_veh_collision_observed = [False] * len(valid_indices)
veh_counter = 0
while not all(finished_episode):
with torch.no_grad():
obs_torch = AttrDict(transform_dict_observations(obs))
for key, x in obs_torch.items():
obs_torch[key] = torch.from_numpy(x).to(device).float()
# we have to make a copy before doing the pass
# because (for some reason), sample factory is making
# some changes to the obs in the forwards pass
# TBD what it is
if actor_2 is not None:
obs_torch_2 = deepcopy(obs_torch)
policy_outputs_2 = actor_2(obs_torch_2,
rnn_states_2,
with_action_distribution=True)
policy_outputs = actor_1(obs_torch,
rnn_states,
with_action_distribution=True)
# sample actions from the distribution by default
# also update the indices that should be drawn from the second policy
# with its outputs
actions = policy_outputs.actions
if actor_2 is not None:
actions[indices_2] = policy_outputs_2.actions[indices_2]
action_distribution = policy_outputs.action_distribution
if isinstance(action_distribution, ContinuousActionDistribution):
if not cfg.continuous_actions_sample: # TODO: add similar option for discrete actions
actions = action_distribution.means
if actor_2 is not None:
actions[
indices_2] = policy_outputs_2.action_distribution.means[
indices_2]
if isinstance(action_distribution, CategoricalActionDistribution):
if not cfg.discrete_actions_sample:
actions = policy_outputs['action_logits'].argmax(axis=1)
if actor_2 is not None:
actions[indices_2] = policy_outputs_2[
'action_logits'].argmax(axis=1)[indices_2]
actions = actions.cpu().numpy()
for veh in env.unwrapped.get_objects_that_moved():
# only check vehicles we are actually controlling
if veh.expert_control is False:
rollout_traj_dict[veh.id][
env.step_num] = veh.position.numpy()
if int(veh.collision_type) == 1:
if veh.getID() in env_id_to_agent_id_map.keys():
agent_id = env_id_to_agent_id_map[veh.getID()]
idx = valid_indices.index(agent_id)
veh_veh_collision_observed[idx] = 1
rnn_states = policy_outputs.rnn_states
if actor_2 is not None:
rnn_states_2 = policy_outputs_2.rnn_states
obs, rew, done, infos = env.step(actions)
episode_reward += rew
for i, index in enumerate(valid_indices):
goal_achieved[
i] = infos[index]['goal_achieved'] or goal_achieved[i]
collision_observed[
i] = infos[index]['collided'] or collision_observed[i]
for agent_i, done_flag in enumerate(done):
if done_flag:
finished_episode[agent_i] = True
episode_rewards[agent_i].append(episode_reward[agent_i])
true_rewards[agent_i].append(infos[agent_i].get(
'true_reward', episode_reward[agent_i]))
log.info(
'Episode finished for agent %d. Reward: %.3f, true_reward: %.3f',
agent_i, episode_reward[agent_i],
true_rewards[agent_i][-1])
rnn_states[agent_i] = torch.zeros([get_hidden_size(cfg)],
dtype=torch.float32,
device=device)
episode_reward[agent_i] = 0
if all(finished_episode):
avg_episode_rewards_str, avg_true_reward_str = '', ''
for agent_i in range(env.num_agents):
avg_rew = np.mean(episode_rewards[agent_i])
avg_true_rew = np.mean(true_rewards[agent_i])
if not np.isnan(avg_rew):
if avg_episode_rewards_str:
avg_episode_rewards_str += ', '
avg_episode_rewards_str += f'#{agent_i}: {avg_rew:.3f}'
if not np.isnan(avg_true_rew):
if avg_true_reward_str:
avg_true_reward_str += ', '
avg_true_reward_str += f'#{agent_i}: {avg_true_rew:.3f}'
avg_goal = infos[0]['episode_extra_stats']['goal_achieved']
avg_collisions = infos[0]['episode_extra_stats']['collided']
avg_veh_edge_collisions = infos[0]['episode_extra_stats'][
'veh_edge_collision']
avg_veh_veh_collisions = infos[0]['episode_extra_stats'][
'veh_veh_collision']
success_rate_by_num_agents[len(valid_indices) - 1,
0] += avg_goal
success_rate_by_num_agents[len(valid_indices) - 1,
1] += avg_collisions
success_rate_by_num_agents[len(valid_indices) - 1,
2] += np.mean(
veh_veh_collision_observed)
success_rate_by_num_agents[len(valid_indices) - 1, 3] += 1
# track how well we do as a function of distance
for i, index in enumerate(valid_indices):
env_id = agent_id_to_env_id_map[index]
bin = np.searchsorted(distance_bins, goal_dist[env_id])
success_rate_by_distance[bin - 1, :] += [
goal_achieved[i], collision_observed[i],
veh_veh_collision_observed[i], 1
]
# track how well we do as number of intersections
for i, index in enumerate(valid_indices):
env_id = agent_id_to_env_id_map[index]
                bin = min(veh_intersection_dict[env_id],
                          intersection_bins.shape[-1] - 1)
success_rate_by_intersections[bin, :] += [
goal_achieved[i], collision_observed[i],
veh_veh_collision_observed[i], 1
]
# compute ADE and FDE
ades = []
fdes = []
for agent_id, traj in rollout_traj_dict.items():
masking_arr = traj.sum(axis=1)
mask = (masking_arr != 0.0) * (masking_arr !=
traj.shape[1] * ERR_VAL)
expert_mask_arr = expert_trajectory_dict[agent_id].sum(
axis=1)
expert_mask = (expert_mask_arr != 0.0) * (
expert_mask_arr != traj.shape[1] * ERR_VAL)
ade = np.linalg.norm(traj -
expert_trajectory_dict[agent_id],
axis=-1)[mask * expert_mask]
ades.append(ade.mean())
fde = np.linalg.norm(
traj - expert_trajectory_dict[agent_id],
axis=-1)[np.max(np.argwhere(mask * expert_mask))]
fdes.append(fde)
veh_counter += 1
log.info('Avg episode rewards: %s, true rewards: %s',
avg_episode_rewards_str, avg_true_reward_str)
log.info(
'Avg episode reward: %.3f, avg true_reward: %.3f',
np.mean([
np.mean(episode_rewards[i])
for i in range(env.num_agents)
]),
np.mean([
np.mean(true_rewards[i]) for i in range(env.num_agents)
]))
return (avg_goal, avg_collisions, avg_veh_edge_collisions,
avg_veh_veh_collisions, success_rate_by_distance,
success_rate_by_num_agents,
success_rate_by_intersections, np.mean(ades),
np.mean(fdes), veh_counter)
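# To turn the per-bin arrays returned above into rates, divide the goal and
# collision columns by the counter column, e.g. (a sketch):
#
#     goal_rate_by_distance = success_rate_by_distance[:, 0] / success_rate_by_distance[:, 3]
#
# which is what the plotting code below does after accumulating over files.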
def run_eval(cfgs,
test_zsc,
output_path,
scenario_dir,
files,
file_type,
device='cuda'):
"""Eval a stored agent over all files in validation set.
Args:
cfg (dict): configuration file for instantiating the agents and environment.
test_zsc (bool): if true, we play all agents against all agents
num_file_loops (int): how many times to loop over the file set
Returns
-------
None: None
"""
actor_critics = []
if not isinstance(cfgs, list):
cfgs = [cfgs]
for i, cfg in enumerate(cfgs):
if not isinstance(cfg, Bunch):
cfg = Bunch(cfg)
cfg = load_from_checkpoint(cfg)
render_action_repeat = cfg.render_action_repeat if cfg.render_action_repeat is not None else cfg.env_frameskip
if render_action_repeat is None:
log.warning('Not using action repeat!')
render_action_repeat = 1
log.debug('Using action repeat %d during evaluation',
render_action_repeat)
cfg.env_frameskip = 1 # for evaluation
cfg.num_envs = 1
# this config is used for computing displacement errors
ade_cfg = deepcopy(cfg)
ade_cfg['remove_at_goal'] = False
ade_cfg['remove_at_collide'] = False
def make_env_func(env_config):
return create_env(cfg.env, cfg=cfg, env_config=env_config)
env = make_env_func(AttrDict({'worker_index': 0, 'vector_index': 0}))
env.seed(0)
is_multiagent = is_multiagent_env(env)
if not is_multiagent:
env = MultiAgentWrapper(env)
if hasattr(env.unwrapped, 'reset_on_init'):
# reset call ruins the demo recording for VizDoom
env.unwrapped.reset_on_init = False
actor_critic = create_actor_critic(cfg, env.observation_space,
env.action_space)
device = torch.device(device)
actor_critic.model_to_device(device)
policy_id = cfg.policy_index
checkpoints = LearnerWorker.get_checkpoints(
LearnerWorker.checkpoint_dir(cfg, policy_id))
checkpoint_dict = LearnerWorker.load_checkpoint(checkpoints, device)
actor_critic.load_state_dict(checkpoint_dict['model'])
actor_critics.append([i, actor_critic])
# we bin the success rate into bins of 10 meters between 0 and 400
# the second dimension is the counts
distance_bins = np.linspace(0, 400, 40)
intersections_bins = np.linspace(0, 7, 7)
num_files = cfg['num_eval_files']
num_file_loops = cfg['num_file_loops']
# TODO(eugenevinitsky) horrifying copy and paste
if test_zsc:
goal_array = np.zeros((len(actor_critics), len(actor_critics),
num_file_loops * num_files))
collision_array = np.zeros((len(actor_critics), len(actor_critics),
num_files * num_file_loops))
success_rate_by_num_agents = np.zeros(
(len(actor_critics), len(actor_critics), cfg.max_num_vehicles, 4))
success_rate_by_distance = np.zeros(
(len(actor_critics), len(actor_critics), distance_bins.shape[-1],
4))
success_rate_by_intersections = np.zeros(
(len(actor_critics), len(actor_critics),
intersections_bins.shape[-1], 4))
ade_array = np.zeros((len(actor_critics), len(actor_critics),
num_file_loops * num_files))
fde_array = np.zeros((len(actor_critics), len(actor_critics),
num_file_loops * num_files))
veh_veh_collision_array = np.zeros(
(len(actor_critics), len(actor_critics),
num_file_loops * num_files))
veh_edge_collision_array = np.zeros(
(len(actor_critics), len(actor_critics),
num_file_loops * num_files))
else:
goal_array = np.zeros((len(actor_critics), num_file_loops * num_files))
collision_array = np.zeros(
(len(actor_critics), num_file_loops * num_files))
veh_veh_collision_array = np.zeros(
(len(actor_critics), num_file_loops * num_files))
veh_edge_collision_array = np.zeros(
(len(actor_critics), num_file_loops * num_files))
success_rate_by_num_agents = np.zeros(
(len(actor_critics), cfg.max_num_vehicles, 4))
success_rate_by_distance = np.zeros(
(len(actor_critics), distance_bins.shape[-1], 4))
success_rate_by_intersections = np.zeros(
(len(actor_critics), intersections_bins.shape[-1], 4))
ade_array = np.zeros((len(actor_critics), num_file_loops * num_files))
fde_array = np.zeros((len(actor_critics), num_file_loops * num_files))
if test_zsc:
output_generator = itertools.product(actor_critics, actor_critics)
else:
output_generator = actor_critics
for output in output_generator:
if test_zsc:
(index_1, actor_1), (index_2, actor_2) = output
else:
(index_1, actor_1) = output
goal_frac = []
collision_frac = []
veh_veh_collision_frac = []
veh_edge_collision_frac = []
average_displacement_error = []
final_displacement_error = []
veh_counter = 0
for loop_num in range(num_file_loops):
for file_num, file in enumerate(files[0:cfg['num_eval_files']]):
print(loop_num * cfg['num_eval_files'] + file_num)
print('file is {}'.format(os.path.join(scenario_dir, file)))
env.unwrapped.files = [os.path.join(scenario_dir, file)]
# step the env to its conclusion to generate the expert trajectories we compare against
env.cfg = ade_cfg
env.reset()
expert_trajectory_dict = defaultdict(lambda: np.zeros((80, 2)))
env.unwrapped.make_all_vehicles_experts()
for i in range(80):
for veh in env.unwrapped.get_objects_that_moved():
expert_trajectory_dict[
veh.id][i] = veh.position.numpy()
env.unwrapped.simulation.step(0.1)
# compute the number of expert trajectories that intersect
# while filtering out the bits of the trajectory
# that were invalid
vehs_with_intersecting_ids = defaultdict(int)
for veh_id in expert_trajectory_dict.keys():
for veh_id2 in expert_trajectory_dict.keys():
if veh_id == veh_id2:
continue
trajectory = expert_trajectory_dict[veh_id]
trajectory2 = expert_trajectory_dict[veh_id2]
expert_mask_arr = trajectory.sum(axis=1)
expert_mask = (expert_mask_arr != 0.0) * (
expert_mask_arr != trajectory.shape[1] * ERR_VAL)
trajectory = trajectory[expert_mask]
expert_mask_arr = trajectory2.sum(axis=1)
expert_mask = (expert_mask_arr != 0.0) * (
expert_mask_arr != trajectory2.shape[1] * ERR_VAL)
trajectory2 = trajectory2[expert_mask]
if poly_intersection(trajectory, trajectory2):
vehs_with_intersecting_ids[
veh_id] += poly_intersection(
trajectory, trajectory2)
env.cfg = cfg
if test_zsc:
output = run_rollouts(env, cfg, device,
expert_trajectory_dict,
distance_bins, intersections_bins,
vehs_with_intersecting_ids, actor_1,
actor_2)
else:
output = run_rollouts(env, cfg, device,
expert_trajectory_dict,
distance_bins, intersections_bins,
vehs_with_intersecting_ids, actor_1)
avg_goal, avg_collisions, avg_veh_edge_collisions, avg_veh_veh_collisions, \
success_rate_by_distance_return, success_rate_by_num_agents_return, \
success_rate_by_intersections_return, \
_, _, _ = output
# TODO(eugenevinitsky) hideous copy and pasting
goal_frac.append(avg_goal)
collision_frac.append(avg_collisions)
veh_veh_collision_frac.append(avg_veh_veh_collisions)
veh_edge_collision_frac.append(avg_veh_edge_collisions)
if test_zsc:
success_rate_by_distance[
index_1, index_2] += success_rate_by_distance_return
success_rate_by_num_agents[
index_1, index_2] += success_rate_by_num_agents_return
success_rate_by_intersections[
index_1,
index_2] += success_rate_by_intersections_return
else:
success_rate_by_distance[
index_1] += success_rate_by_distance_return
success_rate_by_num_agents[
index_1] += success_rate_by_num_agents_return
success_rate_by_intersections[
index_1] += success_rate_by_intersections_return
# do some logging
log.info(
            f'Avg goal achieved {np.mean(goal_frac)}±{np.std(goal_frac) / np.sqrt(len(goal_frac))}'
)
log.info(
f'Avg veh-veh collisions {np.mean(veh_veh_collision_frac)}±\
{np.std(veh_veh_collision_frac) / np.sqrt(len(veh_veh_collision_frac))}'
)
log.info(
f'Avg veh-edge collisions {np.mean(veh_edge_collision_frac)}±\
{np.std(veh_edge_collision_frac) / np.sqrt(len(veh_edge_collision_frac))}'
)
log.info(f'Avg num collisions {np.mean(collision_frac)}±\
                {np.std(collision_frac) / np.sqrt(len(collision_frac))}')
env.cfg = ade_cfg
# okay, now run the rollout one more time but this time set
# remove_at_goal and remove_at_collide to be false so we can do the ADE computations
if test_zsc:
output = run_rollouts(env, cfg, device,
expert_trajectory_dict,
distance_bins, intersections_bins,
vehs_with_intersecting_ids, actor_1,
actor_2)
else:
output = run_rollouts(env, cfg, device,
expert_trajectory_dict,
distance_bins, intersections_bins,
vehs_with_intersecting_ids, actor_1)
_, _, _, _, _, _, _, ade, fde, veh_counter = output
average_displacement_error.append(ade)
final_displacement_error.append(fde)
log.info(f'Avg ADE {np.mean(average_displacement_error)}±\
{np.std(average_displacement_error) / np.sqrt(len(average_displacement_error))}'
)
log.info(f'Avg FDE {np.mean(final_displacement_error)}±\
{np.std(final_displacement_error) / np.sqrt(len(final_displacement_error))}'
)
if test_zsc:
goal_array[index_1, index_2] = goal_frac
collision_array[index_1, index_2] = collision_frac
veh_veh_collision_array[index_1, index_2] = veh_veh_collision_frac
veh_edge_collision_array[index_1,
index_2] = veh_edge_collision_frac
ade_array[index_1, index_2] = average_displacement_error
fde_array[index_1, index_2] = final_displacement_error
else:
goal_array[index_1] = goal_frac
collision_array[index_1] = collision_frac
veh_veh_collision_array[index_1] = veh_veh_collision_frac
veh_edge_collision_array[index_1] = veh_edge_collision_frac
ade_array[index_1] = average_displacement_error
fde_array[index_1] = final_displacement_error
if test_zsc:
file_type += '_zsc'
np.save(os.path.join(output_path, '{}_goal.npy'.format(file_type)),
goal_array)
np.save(os.path.join(output_path, '{}_collision.npy'.format(file_type)),
collision_array)
np.save(
os.path.join(output_path,
'{}_veh_veh_collision.npy'.format(file_type)),
veh_veh_collision_array)
np.save(
os.path.join(output_path,
'{}_veh_edge_collision.npy'.format(file_type)),
veh_edge_collision_array)
np.save(os.path.join(output_path, '{}_ade.npy'.format(file_type)),
ade_array)
np.save(os.path.join(output_path, '{}_fde.npy'.format(file_type)),
fde_array)
with open(
os.path.join(output_path,
'{}_success_by_veh_number.npy'.format(file_type)),
'wb') as f:
np.save(f, success_rate_by_num_agents)
with open(
os.path.join(output_path,
'{}_success_by_dist.npy'.format(file_type)),
'wb') as f:
np.save(f, success_rate_by_distance)
with open(
os.path.join(
output_path,
'{}_success_by_num_intersections.npy'.format(file_type)),
'wb') as f:
np.save(f, success_rate_by_intersections)
env.close()
return
def load_wandb(experiment_name, cfg_filter, force_reload=False):
"""Pull the results from the wandb server.
Args:
----
experiment_name (str): name of the wandb group.
cfg_filter (function): use the config dict to filter
which runs are actually kept
force_reload (bool, optional): if true we overwrite
the wandb csv
even if it exists.
"""
if not os.path.exists(
'wandb_{}.csv'.format(experiment_name)) or force_reload:
import wandb
api = wandb.Api()
entity, project = "eugenevinitsky", "nocturne4" # set to your entity and project
runs = api.runs(entity + "/" + project)
history_list = []
for run in runs:
if run.name == experiment_name:
# # .config contains the hyperparameters.
# # We remove special values that start with _.
config = {
k: v
for k, v in run.config.items() if not k.startswith('_')
}
if cfg_filter(config):
history_df = run.history()
history_df['seed'] = config['seed']
history_df['num_files'] = config['num_files']
history_list.append(history_df)
runs_df = pd.concat(history_list)
runs_df.to_csv('wandb_{}.csv'.format(experiment_name))
def plot_goal_achieved(experiment_name, global_step_cutoff=3e9):
"""Use the WANDB CSV to plot number of train steps v. goal achieved."""
plt.figure(dpi=300)
df = pd.read_csv("wandb_{}.csv".format(experiment_name))
df["timestamp"] = pd.to_datetime(df["_timestamp"] * 1e9)
# technically not correct if the number of seeds varies by num_files
# but in this case we're alright
num_seeds = len(np.unique(df.seed.values))
values_num_files = np.unique(df.num_files.values)
column = "0_aux/avg_goal_achieved"
dfs = []
stdevs = []
for num_files in values_num_files:
if num_files == 1:
continue
df_n = df[(df.num_files == num_files)
& (df.global_step < global_step_cutoff)].set_index(
'global_step').sort_index()
if num_files == -1:
col_name = 134453
else:
col_name = num_files
dfs.append((df_n[column] * 100).ewm(
halflife=500,
min_periods=10).mean().rename(f"num_files={col_name}"))
stdevs.append((df_n[column] * 100).ewm(halflife=500,
min_periods=10).std())
values_num_files = [
val if val != -1 else 134453 for val in values_num_files
]
temp = list(zip(values_num_files, dfs, stdevs))
temp = sorted(temp, key=lambda x: x[0])
values_num_files, dfs, stdevs = zip(*temp)
ax = plt.gca()
for i in range(len(dfs)):
x = dfs[i].index.values
y = dfs[i].values
yerr = stdevs[i].replace(np.nan, 0) / np.sqrt(num_seeds)
ax.plot(x,
y,
label=f'Training Files: {values_num_files[i]}',
color=CB_color_cycle[i])
ax.fill_between(x,
y - 2 * yerr,
y + 2 * yerr,
color=CB_color_cycle[i],
alpha=0.3)
plt.grid(ls='--', color='#ccc')
plt.legend()
plt.xlabel("Environment step")
plt.ylabel("% Goals Achieved")
plt.savefig('goal_achieved_v_step', bbox_inches='tight', pad_inches=0.1)
def eval_generalization(output_folder,
num_eval_files,
files,
file_type,
scenario_dir,
num_file_loops,
test_zsc=False,
cfg_filter=None):
"""Evaluate generalization for all agent checkpoints in output_folder.
Args:
----
output_folder (str): path to folder containing agent checkpoints
num_eval_files (int): how many files to use for eval
files (list[str]): list of scenario files to use for eval
file_type (str): 'train' or 'test' used to indicate if we are
testing in or out of distribution
scenario_dir (str): path to directory where `files` are stored
num_file_loops (int): how many times to iterate over the files.
Used for in-distribution testing if
in-distribution we trained on M files
but we want to test over N files where
N > M.
        test_zsc (bool, optional): If true we pair up every
agent in the folder and compute
all the cross-play scores. Defaults to False.
cfg_filter (_type_, optional): function used to filter over
whether eval should actually be done on that
agent. Filters using the agent config dict.
"""
file_paths = []
cfg_dicts = []
for (dirpath, dirnames, filenames) in os.walk(output_folder):
if 'cfg.json' in filenames:
with open(os.path.join(dirpath, 'cfg.json'), 'r') as file:
cfg_dict = json.load(file)
if cfg_filter is not None and not cfg_filter(cfg_dict):
continue
file_paths.append(dirpath)
cfg_dict['cli_args'] = {}
cfg_dict['fps'] = 0
cfg_dict['render_action_repeat'] = None
cfg_dict['no_render'] = None
cfg_dict['policy_index'] = 0
cfg_dict['record_to'] = os.path.join(os.getcwd(), '..', 'recs')
cfg_dict['continuous_actions_sample'] = False
cfg_dict['discrete_actions_sample'] = False
# for the train set, we don't want to loop over
# files we didn't train on
# also watch out for -1 which means "train on all files"
if cfg_dict[
'num_files'] < num_eval_files and 'train' in file_type and cfg_dict[
'num_files'] != -1:
cfg_dict['num_eval_files'] = cfg_dict['num_files']
cfg_dict['num_file_loops'] = num_file_loops * int(
max(num_eval_files // cfg_dict['num_files'], 1))
else:
cfg_dict['num_eval_files'] = num_eval_files
cfg_dict['num_file_loops'] = num_file_loops
cfg_dicts.append(cfg_dict)
if test_zsc:
# TODO(eugenevinitsky) we're currently storing the ZSC result in a random
# folder which seems bad.
run_eval([Bunch(cfg_dict) for cfg_dict in cfg_dicts],
test_zsc=test_zsc,
output_path=file_paths[0],
scenario_dir=scenario_dir,
files=files,
file_type=file_type)
print('stored ZSC result in {}'.format(file_paths[0]))
else:
# why 13? because a 16 GB GPU can do a forwards pass on 13 copies of the model
# for 20 vehicles at once. More than that and you'll run out of memory
num_cpus = min(13, mp.cpu_count() - 2)
device = 'cuda'
# if torch.cuda.is_available():
# device = 'cuda'
# else:
# device = 'cpu'
with mp.Pool(processes=num_cpus) as pool:
list(
pool.starmap(
run_eval,
zip(cfg_dicts, repeat(test_zsc), file_paths,
repeat(scenario_dir), repeat(files), repeat(file_type),
repeat(device))))
print(file_paths)
def main():
"""Script entry point."""
set_display_window()
register_custom_components()
RUN_EVAL = False
TEST_ZSC = False
PLOT_RESULTS = True
RELOAD_WANDB = False
VERSION = 5
NUM_EVAL_FILES = 200
NUM_FILE_LOOPS = 1 # the number of times to loop over a fixed set of files
experiment_names = ['srt_v27']
# output_folder = '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.20/new_road_sample/18.32.35'
# output_folder = [
# '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.23/srt_v10/17.02.40/'
# ]
# 10 files
# output_folder = [
# '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.28/srt_12/16.43.16/'
# ]
# SRT submission results
output_folder = [
'/checkpoint/eugenevinitsky/nocturne/sweep/2022.06.01/srt_v27/17.35.33'
]
generalization_dfs = []
cfg_filter = None
if TEST_ZSC:
def cfg_filter(cfg_dict):
if cfg_dict['scenario']['road_edge_first'] is False and cfg_dict[
'scenario']['max_visible_road_points'] == 500 and cfg_dict[
'algorithm']['encoder_hidden_size'] == 256 and cfg_dict[
'num_files'] == 10000:
return True
else:
return False
else:
def cfg_filter(cfg_dict):
if cfg_dict['scenario']['road_edge_first'] is False and cfg_dict[
'scenario']['max_visible_road_points'] == 500 and cfg_dict[
'algorithm']['encoder_hidden_size'] == 256:
return True
else:
return False
'''
###############################################################################
######### Build the generalization dataframes ######################
##############################################################################
'''
if RUN_EVAL:
if TEST_ZSC:
output_generator = [(PROCESSED_VALID_NO_TL,
'test_{}'.format(VERSION))]
else:
output_generator = [
(PROCESSED_TRAIN_NO_TL, 'train_{}'.format(VERSION)),
(PROCESSED_VALID_NO_TL, 'test_{}'.format(VERSION))
]
for file_path, file_type in output_generator:
with open(os.path.join(file_path, 'valid_files.json')) as file:
valid_veh_dict = json.load(file)
files = list(valid_veh_dict.keys())
if file_type == 'test_{}'.format(VERSION):
# sort the files so that we have a consistent order
np.random.seed(0)
np.random.shuffle(files)
if file_type == 'train_{}'.format(VERSION):
# for train make sure we use the same ordering
# that is used in base_env
# TODO(eugenevinitsky) this is dangerous and could
# break easily
files = sorted(files)
for folder in output_folder:
eval_generalization(folder,
NUM_EVAL_FILES,
files,
file_type=file_type,
scenario_dir=file_path,
num_file_loops=NUM_FILE_LOOPS,
test_zsc=TEST_ZSC,
cfg_filter=cfg_filter)
if PLOT_RESULTS:
# okay, now build a pandas dataframe of the results that we will use for plotting
# the generalization results
for folder in output_folder:
for file_type in [
'train_{}'.format(VERSION), 'test_{}'.format(VERSION)
# 'train',
# 'test'
]:
file_paths = []
data_dicts = []
for (dirpath, dirnames, filenames) in os.walk(folder):
if 'cfg.json' in filenames:
file_paths.append(dirpath)
with open(os.path.join(dirpath, 'cfg.json'),
'r') as file:
cfg_dict = json.load(file)
if cfg_filter(cfg_dict):
# TODO(eugenevinitsky) why do they not all have this?
goal = np.mean(
np.load(
os.path.join(
dirpath,
'{}_goal.npy'.format(file_type))))
collide = np.mean(
np.load(
os.path.join(
dirpath,
'{}_collision.npy'.format(file_type))))
ade = np.mean(
np.load(
os.path.join(
dirpath,
'{}_ade.npy'.format(file_type))))
fde = np.mean(
np.load(
os.path.join(
dirpath,
'{}_fde.npy'.format(file_type))))
veh_veh_collision = np.mean(
np.load(
os.path.join(
dirpath,
'{}_veh_veh_collision.npy'.format(
file_type))))
veh_edge_collision = np.mean(
np.load(
os.path.join(
dirpath,
'{}_veh_edge_collision.npy'.format(
file_type))))
success_by_num_intersections = np.load(
os.path.join(
dirpath,
'{}_success_by_num_intersections.npy'.
format(file_type)))
# there aren't a lot of data points past 3
# so just bundle them in
success_by_num_intersections[:,
3, :] = success_by_num_intersections[:, 3:, :].sum(
axis=1)
success_by_num_intersections = success_by_num_intersections[:,
0:
4, :]
success_by_veh_num = np.load(
os.path.join(
dirpath,
'{}_success_by_veh_number.npy'.format(
file_type)))
success_by_distance = np.load(
os.path.join(
dirpath, '{}_success_by_dist.npy'.format(
file_type)))
num_files = cfg_dict['num_files']
if int(num_files) == -1:
num_files = 134453
if int(num_files) == 1:
continue
data_dicts.append({
'num_files':
num_files,
'goal_rate':
goal * 100,
'collide_rate':
collide * 100,
'ade':
ade,
'fde':
fde,
'veh_veh_collision':
veh_veh_collision,
'veh_edge_collision':
veh_edge_collision,
'goal_by_intersections':
np.nan_to_num(
success_by_num_intersections[0, :, 0] /
success_by_num_intersections[0, :, 3]),
'collide_by_intersections':
np.nan_to_num(
success_by_num_intersections[0, :, 1] /
success_by_num_intersections[0, :, 3]),
'goal_by_vehicle_num':
np.nan_to_num(success_by_veh_num[0, :, 0] /
success_by_veh_num[0, :, 3]),
'collide_by_vehicle_num':
np.nan_to_num(success_by_veh_num[0, :, 1] /
success_by_veh_num[0, :, 3]),
'goal_by_distance':
np.nan_to_num(success_by_distance[0, :, 0] /
success_by_distance[0, :, 3]),
'collide_by_distance':
np.nan_to_num(success_by_distance[0, :, 1] /
success_by_distance[0, :, 3]),
})
if cfg_dict['num_files'] == 10000:
print('goal ',
success_by_num_intersections[0, :, 0])
print('num vehicles in bin',
success_by_num_intersections[0, :, 3])
df = pd.DataFrame(data_dicts)
new_dict = {}
for key in data_dicts[0].keys():
if key == 'num_files':
continue
new_dict[key] = df.groupby(['num_files'
])[key].mean().reset_index()
try:
new_dict[key + '_std'] = df.groupby(
['num_files'])[key].std().reset_index().rename(
columns={key: key + '_std'})
except ValueError:
# TODO(eugenevinitsky) learn to use pandas dawg
# what even is this
temp_dict = {}
for name, group in df.groupby(['num_files'])[key]:
temp = []
for arr in group:
temp.append(arr)
np_arr = np.vstack(temp)
std_err = np.std(np_arr, axis=0) / np.sqrt(
np_arr.shape[0])
temp_dict[name] = std_err
new_dict[key + '_stderr'] = pd.Series(
data=temp_dict).reset_index().rename(
columns={
'index': 'num_files',
0: key + '_stderr'
})
first_elem_key = 'goal_rate'
first_elem = new_dict[first_elem_key]
for key, value in new_dict.items():
if key == first_elem_key:
continue
first_elem = first_elem.merge(value,
how='inner',
on='num_files')
generalization_dfs.append(first_elem)
'''
###############################################################################
######### load the training dataframes from wandb ######################
##############################################################################
'''
global_step_cutoff = 3e9
training_dfs = []
for experiment_name in experiment_names:
load_wandb(experiment_name, cfg_filter, force_reload=RELOAD_WANDB)
training_dfs.append(
pd.read_csv('wandb_{}.csv'.format(experiment_name)))
num_seeds = len(np.unique(training_dfs[0].seed))
# create the goal plot
plt.figure(dpi=300)
for i, (df, file_type) in enumerate(
zip(generalization_dfs, ['Train', 'Test'])):
plt.plot(np.log10(df.num_files),
df.goal_rate,
color=CB_color_cycle[i],
label=file_type)
ax = plt.gca()
yerr = df.goal_rate_std.replace(np.nan, 0) / np.sqrt(num_seeds)
ax.fill_between(np.log10(df.num_files),
df.goal_rate - 2 * yerr,
df.goal_rate + 2 * yerr,
color=CB_color_cycle[i],
alpha=0.3)
print(f'{file_type} goal rate', df.goal_rate, yerr)
plt.ylim([0, 100])
plt.xlabel(' Number of Training Files (Logarithmic Scale)')
plt.ylabel('% Goals Achieved')
plt.legend()
plt.savefig('goal_achieved.png', bbox_inches='tight', pad_inches=0.1)
# create the collide plot
plt.figure(dpi=300)
for i, (df, file_type) in enumerate(
zip(generalization_dfs, ['Train', 'Test'])):
plt.plot(np.log10(df.num_files),
df.collide_rate,
color=CB_color_cycle[i],
label=file_type)
ax = plt.gca()
yerr = df.collide_rate_std.replace(np.nan, 0) / np.sqrt(num_seeds)
ax.fill_between(np.log10(df.num_files),
df.collide_rate - 2 * yerr,
df.collide_rate + 2 * yerr,
color=CB_color_cycle[i],
alpha=0.3)
print(f'{file_type} collide rate', df.collide_rate, yerr)
plt.ylim([0, 50])
plt.xlabel(' Number of Training Files (Logarithmic Scale)')
plt.ylabel('% Vehicles Collided')
plt.legend()
plt.savefig('collide_rate.png', bbox_inches='tight', pad_inches=0.1)
# create ADE and FDE plots
plt.figure(dpi=300)
for i, (df, file_type) in enumerate(
zip(generalization_dfs, ['Train', 'Test'])):
yerr = df.ade_std.replace(np.nan, 0) / np.sqrt(num_seeds)
plt.plot(np.log10(df.num_files),
df.ade,
label=file_type,
color=CB_color_cycle[i])
ax = plt.gca()
ax.fill_between(np.log10(df.num_files),
df.ade - 2 * yerr,
df.ade + 2 * yerr,
color=CB_color_cycle[i],
alpha=0.3)
print(f'{file_type} ade', df.ade, yerr)
plt.xlabel(' Number of Training Files (Logarithmic Scale)')
plt.ylabel('Average Displacement Error (m)')
plt.ylim([0, 5])
plt.legend()
plt.savefig('ade.png', bbox_inches='tight', pad_inches=0.1)
plt.figure(dpi=300)
for i, (df, file_type) in enumerate(
zip(generalization_dfs, ['Train', 'Test'])):
yerr = df.fde_std.replace(np.nan, 0) / np.sqrt(num_seeds)
plt.plot(np.log10(df.num_files),
df.fde,
label=file_type,
color=CB_color_cycle[i])
ax = plt.gca()
ax.fill_between(np.log10(df.num_files),
df.fde - 2 * yerr,
df.fde + 2 * yerr,
color=CB_color_cycle[i],
alpha=0.3)
print(f'{file_type} fde', df.fde, yerr)
plt.ylim([4, 10])
plt.xlabel(' Number of Training Files (Logarithmic Scale)')
plt.ylabel('Final Displacement Error (m)')
plt.legend()
plt.savefig('fde.png', bbox_inches='tight', pad_inches=0.1)
plot_goal_achieved(experiment_names[0], global_step_cutoff)
# create error by number of expert intersections plots
plt.figure(dpi=300)
for i, (df, file_type) in enumerate(
zip(generalization_dfs, ['Train', 'Test'])):
values_num_files = np.unique(df.num_files.values)
print(values_num_files)
for value in values_num_files:
if value != 10000:
continue
numpy_arr = df[df.num_files ==
value]['goal_by_intersections'].to_numpy()[0]
temp_df = pd.DataFrame(numpy_arr).melt()
plt.plot(temp_df.index,
temp_df.value * 100,
label=file_type,
color=CB_color_cycle[i])
numpy_arr = df[df.num_files == value][
'goal_by_intersections_stderr'].to_numpy()[0]
std_err_df = pd.DataFrame(numpy_arr).melt()
ax = plt.gca()
ax.fill_between(temp_df.index,
100 * (temp_df.value - 2 * std_err_df.value),
100 * (temp_df.value + 2 * std_err_df.value),
color=CB_color_cycle[i],
alpha=0.3)
plt.xlabel('Number of intersecting paths')
plt.ylabel('Percent Goals Achieved')
ax.set_xticks([i for i in range(numpy_arr.shape[-1])])
plt.legend()
plt.savefig('goal_v_intersection.png',
bbox_inches='tight',
pad_inches=0.1)
# create error by number of expert intersections plots
plt.figure(dpi=300)
for i, (df, file_type) in enumerate(
zip(generalization_dfs, ['Train', 'Test'])):
values_num_files = np.unique(df.num_files.values)
for value in values_num_files:
if value != 10000:
continue
numpy_arr = df[df.num_files ==
value]['collide_by_intersections'].to_numpy()[0]
temp_df = pd.DataFrame(numpy_arr).melt()
plt.plot(temp_df.index,
temp_df.value * 100,
color=CB_color_cycle[i],
label=file_type)
numpy_arr = df[df.num_files == value][
'collide_by_intersections_stderr'].to_numpy()[0]
std_err_df = pd.DataFrame(numpy_arr).melt()
ax = plt.gca()
ax.fill_between(temp_df.index,
100 * (temp_df.value - 2 * std_err_df.value),
100 * (temp_df.value + 2 * std_err_df.value),
color=CB_color_cycle[i],
alpha=0.3)
plt.xlabel('Number of Intersecting Paths')
plt.ylabel('Percent Collisions')
ax.set_xticks([i for i in range(numpy_arr.shape[-1])])
plt.legend()
plt.savefig('collide_v_intersection.png',
bbox_inches='tight',
pad_inches=0.1)
# create error by number of vehicles plots
plt.figure(dpi=300)
for i, (df, file_type) in enumerate(
zip(generalization_dfs, ['Train', 'Test'])):
values_num_files = np.unique(df.num_files.values)
print(values_num_files)
for value in values_num_files:
if value != 10000:
continue
numpy_arr = df[df.num_files ==
value]['goal_by_vehicle_num'].to_numpy()[0]
temp_df = pd.DataFrame(numpy_arr).melt()
plt.plot(temp_df.index,
temp_df.value * 100,
label=file_type,
color=CB_color_cycle[i])
numpy_arr = df[df.num_files == value][
'goal_by_vehicle_num_stderr'].to_numpy()[0]
std_err_df = pd.DataFrame(numpy_arr).melt()
ax = plt.gca()
ax.fill_between(temp_df.index,
100 * (temp_df.value - 2 * std_err_df.value),
100 * (temp_df.value + 2 * std_err_df.value),
color=CB_color_cycle[i],
alpha=0.3)
# sns.lineplot(x=temp_df.index, y=temp_df.value * 100)
plt.xlabel('Number of Controlled Vehicles')
plt.ylabel('Percent Goals Achieved')
ax.set_xticks([i for i in range(numpy_arr.shape[-1])])
plt.legend()
plt.savefig('goal_v_vehicle_num.png',
bbox_inches='tight',
pad_inches=0.1)
# create error by distance plots
plt.figure(dpi=300)
for i, (df, file_type) in enumerate(
zip(generalization_dfs, ['Train', 'Test'])):
values_num_files = np.unique(df.num_files.values)
print(values_num_files)
for value in values_num_files:
if value != 10000:
continue
numpy_arr = df[df.num_files ==
value]['goal_by_distance'].to_numpy()[0]
temp_df = pd.DataFrame(numpy_arr).melt()
plt.plot(temp_df.index,
temp_df.value * 100,
label=file_type,
color=CB_color_cycle[i])
numpy_arr = df[df.num_files ==
value]['goal_by_distance_stderr'].to_numpy()[0]
std_err_df = pd.DataFrame(numpy_arr).melt()
ax = plt.gca()
ax.fill_between(temp_df.index,
100 * (temp_df.value - 2 * std_err_df.value),
100 * (temp_df.value + 2 * std_err_df.value),
color=CB_color_cycle[i],
alpha=0.3)
# sns.lineplot(x=temp_df.index, y=temp_df.value * 100)
plt.xlabel('Starting Distance to Goal')
plt.ylabel('Percent Goals Achieved')
ax.set_xticks([i for i in range(numpy_arr.shape[-1])])
plt.legend()
plt.savefig('goal_v_distance.png', bbox_inches='tight', pad_inches=0.1)
if __name__ == '__main__':
sys.exit(main())
| 61,047 | 45.318665 | 118 | py |
nocturne | nocturne-main/scripts/paper_plots/eval_il_agents.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Run script that generates summary statistics for a folder of IL agents."""
import json
import os
import numpy as np
import torch
from nocturne.utils.eval.average_displacement import compute_average_displacement
from cfgs.config import PROCESSED_VALID_NO_TL, PROJECT_PATH
if __name__ == '__main__':
outer_model_folder = '/checkpoint/eugenevinitsky/nocturne/sweep/imitation/2022.06.13/arxiv_il_v4_1kf/18.49.39'
models = []
cfg_dicts = []
for (dirpath, dirnames, filenames) in os.walk(outer_model_folder):
if 'configs.json' in filenames:
with open(os.path.join(dirpath, 'configs.json'), 'r') as file:
cfg_dict = json.load(file)
# now snag the model with the largest checkpoint
max_val = -100
cur_model_name = None
for file in filenames:
if '.pth' in file:
checkpoint_val = int(file.split('.')[0].split('_')[-1])
if checkpoint_val > max_val:
max_val = checkpoint_val
cur_model_name = file
cfg_dicts.append(cfg_dict)
model = torch.load(os.path.join(dirpath, cur_model_name)).to('cpu')
model.actions_grids = [x.to('cpu') for x in model.actions_grids]
model.eval()
model.nn[0].eval()
models.append(model)
results = np.zeros((len(cfg_dicts), 8))
for i, (cfg_dict, model) in enumerate(zip(cfg_dicts, models)):
ade, fde, collisions, goals = compute_average_displacement(
PROCESSED_VALID_NO_TL, model=model, configs=cfg_dict)
results[i, 0] = ade[0]
results[i, 1] = ade[1]
results[i, 2] = fde[0]
results[i, 3] = fde[1]
results[i, 4] = collisions[0]
results[i, 5] = collisions[1]
results[i, 6] = goals[0]
results[i, 7] = goals[1]
np.save(os.path.join(PROJECT_PATH, 'scripts/paper_plots/il_results.npy'),
results)
print(
f'ade {np.mean(results[:, 0])} ± {np.std(results[:, 0]) / np.sqrt(results[:, 0].shape[0])}'
)
print(
f'fde {np.mean(results[:, 2])} ± {np.std(results[:, 2]) / np.sqrt(results[:, 0].shape[0])}'
)
print(
f'collisions {np.mean(results[:, 4])} ± {np.std(results[:, 4]) / np.sqrt(results[:, 0].shape[0])}'
)
print(
f'goals {np.mean(results[:, 6])} ± {np.std(results[:, 6]) / np.sqrt(results[:, 0].shape[0])}'
)
| 2,665 | 40.65625 | 114 | py |
nocturne | nocturne-main/scripts/paper_plots/create_zsc_plot.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Utilities for plotting ZSC results."""
import os
import matplotlib.pyplot as plt
import numpy as np
def create_heat_map(file, title, save_path, white_switch):
"""Construct a heatmap of the ZSC results.
Args:
----
file (str): file path to zsc results
title (str): title of the plot
save_path (str): path to save it at
white_switch (float): if the value is greater than white_switch
we write the cell text as black. This is just to make
the plots more readable.
"""
np_arr = np.load(os.path.join(zsc_path, file))
np_arr_mean = np.mean(np_arr, axis=-1)
agent_indices = [f'Agent {i}' for i in range(np_arr.shape[0])]
fig, ax = plt.subplots()
ax.imshow(np_arr_mean)
# Show all ticks and label them with the respective list entries
ax.set_xticks(np.arange(len(agent_indices)), labels=agent_indices)
ax.set_yticks(np.arange(len(agent_indices)), labels=agent_indices)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(),
rotation=45,
ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
for i in range(len(agent_indices)):
for j in range(len(agent_indices)):
if np_arr_mean[i, j] > white_switch:
color = 'black'
else:
color = 'w'
ax.text(j,
i,
f'{np.round(np_arr_mean[i, j], decimals=2)}',
ha="center",
va="center",
color=color)
ax.set_title(title)
fig.tight_layout()
plt.savefig(save_path)
def compute_average_change(file):
"""Compare cross play to self play."""
np_arr = np.load(os.path.join(zsc_path, file))
np_arr_mean = np.mean(np_arr, axis=-1)
self_play = np.mean(np.diag(np_arr_mean))
cross_play = np.mean(
np_arr_mean[np.where(~np.eye(np_arr_mean.shape[0], dtype=bool))])
self_play_std = np.std(np.diag(np_arr_mean)) / np.sqrt(
np_arr_mean.shape[0])
cross_play_std = np.std(
np_arr_mean[np.where(~np.eye(np_arr_mean.shape[0], dtype=bool))]
) / np.sqrt(np_arr_mean.shape[0]**2 - np_arr_mean.shape[0])
print(
f'self play: {self_play} ± {self_play_std}, cross play: {cross_play} ± {cross_play_std}'
)
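# Illustrative sketch (numbers invented): for a 2x2 goal-rate matrix
#
#     [[0.90, 0.70],
#      [0.65, 0.88]]
#
# the self-play score is mean(0.90, 0.88) = 0.89 and the cross-play score is
# mean(0.70, 0.65) = 0.675; the gap between the two is what
# compute_average_change reports.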
if __name__ == '__main__':
# zsc_path = '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.23/srt_v10/17.02.40/23/srt_v10'
# zsc_path = '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.28/srt_12/16.43.16/4/srt_12'
# zsc_path = '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.28/srt_12/16.43.16/4/srt_12'
# zsc_path = '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.28/srt_12/16.43.16/4/srt_12'
# 10000 on valid
# zsc_path = '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.28/srt_12/16.43.16/4/srt_12'
# 10000 on train
# zsc_path = '/checkpoint/eugenevinitsky/nocturne/sweep/2022.05.28/srt_12/16.43.16/4/srt_12'
zsc_path = '/checkpoint/eugenevinitsky/nocturne/sweep/2022.06.01/srt_v27/17.35.33/123/srt_v27'
create_heat_map('train_zsc_goal.npy',
"Cross-play Goal Rate",
'cross_play_heat_map.png',
white_switch=.8)
create_heat_map('train_zsc_collision.npy',
"Cross-play Collision Rate",
'cross_play_collision_map.png',
white_switch=0.18)
compute_average_change('train_zsc_goal.npy')
compute_average_change('train_zsc_collision.npy')
| 3,813 | 37.918367 | 99 | py |
nocturne | nocturne-main/scripts/paper_plots/generate_scenes.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Example of how to make movies of Nocturne scenarios."""
import hydra
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
from cfgs.config import PROCESSED_TRAIN_NO_TL, PROJECT_PATH, \
get_scenario_dict, set_display_window
from nocturne import Simulation
def get_sim(scenario_file, cfg):
"""Initialize the scenario."""
# load scenario, set vehicles to be expert-controlled
cfg['scenario']['allow_non_vehicles'] = False
sim = Simulation(scenario_path=str(scenario_file),
config=get_scenario_dict(cfg))
for obj in sim.getScenario().getObjectsThatMoved():
obj.expert_control = True
return sim
def make_movie(sim,
scenario_fn,
output_path='./vid.mp4',
dt=0.1,
steps=90,
fps=10):
"""Make a movie from the scenario."""
scenario = sim.getScenario()
movie_frames = []
timestep = 0
movie_frames.append(scenario_fn(scenario, timestep))
for i in range(steps):
sim.step(dt)
timestep += 1
movie_frames.append(scenario_fn(scenario, timestep))
movie_frames = np.stack(movie_frames, axis=0)
imageio.mimwrite(output_path, movie_frames, fps=fps)
print('>', output_path)
del sim
del movie_frames
def make_image(sim, scenario_file, scenario_fn, output_path='./img.png'):
"""Make a single image from the scenario."""
scenario = sim.getScenario()
img = scenario_fn(scenario)
dpi = 100
height, width, depth = img.shape
figsize = width / dpi, height / dpi
plt.figure(figsize=figsize, dpi=dpi)
plt.axis('off')
plt.imshow(img)
plt.savefig(output_path, bbox_inches='tight', pad_inches=0)
print('>', output_path)
@hydra.main(config_path="../../cfgs/", config_name="config")
def main(cfg):
"""See file docstring."""
set_display_window()
# files = ['tfrecord-00358-of-01000_{}.json'.format(i) for i in range(500)]
files = [
'tfrecord-00358-of-01000_60.json', # unprotected turn
'tfrecord-00358-of-01000_72.json', # four way stop
'tfrecord-00358-of-01000_257.json', # crowded four way stop
'tfrecord-00358-of-01000_332.json', # crowded merge road
'tfrecord-00358-of-01000_79.json', # crowded parking lot
]
for file in files:
file = os.path.join(PROCESSED_TRAIN_NO_TL, file)
sim = get_sim(file, cfg)
if os.path.exists(file):
# image of whole scenario
# make_image(
# sim,
# file,
# scenario_fn=lambda scenario: scenario.getImage(
# img_width=2000,
# img_height=2000,
# padding=50.0,
# draw_target_positions=True,
# ),
# output_path=PROJECT_PATH /
# 'scripts/paper_plots/figs/scene_{}.png'.format(
# os.path.basename(file)),
# )
veh_index = -3
make_image(
sim,
file,
scenario_fn=lambda scenario: scenario.getImage(
img_height=1600,
img_width=1600,
draw_target_positions=True,
padding=0.0,
source=scenario.getVehicles()[veh_index],
view_height=80,
view_width=80,
rotate_with_source=True,
),
output_path=PROJECT_PATH /
'scripts/paper_plots/figs/cone_original_{}.png'.format(
os.path.basename(file)),
)
make_image(
sim,
file,
scenario_fn=lambda scenario: scenario.getConeImage(
source=scenario.getVehicles()[veh_index],
view_dist=cfg['subscriber']['view_dist'],
view_angle=cfg['subscriber']['view_angle'],
head_angle=0.0,
img_height=1600,
img_width=1600,
padding=0.0,
draw_target_position=True,
),
output_path=PROJECT_PATH /
'scripts/paper_plots/figs/cone_{}.png'.format(
os.path.basename(file)),
)
make_image(
sim,
file,
scenario_fn=lambda scenario: scenario.getFeaturesImage(
source=scenario.getVehicles()[veh_index],
view_dist=cfg['subscriber']['view_dist'],
view_angle=cfg['subscriber']['view_angle'],
head_angle=0.0,
img_height=1600,
img_width=1600,
padding=0.0,
draw_target_position=True,
),
output_path=PROJECT_PATH /
'scripts/paper_plots/figs/feature_{}.png'.format(
os.path.basename(file)),
)
if __name__ == '__main__':
main()
| 5,323 | 33.797386 | 79 | py |
nocturne | nocturne-main/tests/test_dynamics.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test expert action computation from inverse dynamics."""
from hydra.core.global_hydra import GlobalHydra
from hydra import compose, initialize
import numpy as np
from cfgs.config import PROJECT_PATH, get_scenario_dict
from nocturne import Simulation
SIM_N_STEPS = 90 # number of steps per trajectory
SIM_STEP_TIME = 0.1 # dt (in seconds)
def _create_sim(file_path, expert_control):
# None in the config would cause a bug
GlobalHydra.instance().clear()
initialize(config_path="../cfgs/")
cfg = compose(config_name="config")
# create simulation
sim = Simulation(scenario_path=file_path, config=get_scenario_dict(cfg))
# get controlled objects
objects_that_moved = sim.getScenario().getObjectsThatMoved()
for obj in objects_that_moved:
obj.expert_control = expert_control
return sim, objects_that_moved
def test_inverse_dynamics():
"""Check that expert actions are computed correctly from inverse dynamics."""
file_path = str(PROJECT_PATH / 'tests/large_file_tfrecord.json')
# create a ground truth sim that will replay expert actions
sim_ground_truth, objects_ground_truth = _create_sim(file_path,
expert_control=True)
id2obj_ground_truth = {obj.id: obj for obj in objects_ground_truth}
# create a test sim that will replay actions from inverse dynamics
sim_test, objects_test = _create_sim(file_path, expert_control=False)
scenario_test = sim_test.getScenario()
# step simulation
for time in range(SIM_N_STEPS):
# set model actions
for obj in objects_test:
action = scenario_test.expert_action(obj, time)
if action is not None and not np.isnan(action.numpy()).any():
# set object action
obj.expert_control = False
obj.acceleration = action.acceleration
obj.steering = action.steering
else:
# set expert control for one time step
obj.expert_control = True
# step simulations
sim_ground_truth.step(SIM_STEP_TIME)
sim_test.step(SIM_STEP_TIME)
# check positions
for obj_test in objects_test:
# only consider objects that used inverse dynamics action
if obj_test.expert_control:
continue
# get corresponding ground truth object
obj_ground_truth = id2obj_ground_truth[obj_test.id]
# check that speeds and headings match
assert np.isclose(obj_test.speed, obj_ground_truth.speed)
assert np.isclose(obj_test.heading, obj_ground_truth.heading)
# reposition objects
obj_test.position = obj_ground_truth.position
obj_test.heading = obj_ground_truth.heading
obj_test.speed = obj_ground_truth.speed
if __name__ == '__main__':
test_inverse_dynamics()
| 3,137 | 37.740741 | 81 | py |
nocturne | nocturne-main/tests/test_config.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test configurations passed to the scenario."""
from hydra.core.global_hydra import GlobalHydra
from hydra import compose, initialize
from cfgs.config import PROJECT_PATH, get_scenario_dict
from nocturne import Simulation
def test_config_values():
"""Test that there are no invalid values in the default config."""
# None in the config would cause a bug
GlobalHydra.instance().clear()
initialize(config_path="../cfgs/")
cfg = compose(config_name="config")
assert None not in list(get_scenario_dict(cfg).values())
def test_custom_config():
"""Test that changes in the config are propagated to the scenario."""
GlobalHydra.instance().clear()
initialize(config_path="../cfgs/")
cfg = compose(config_name="config")
cfg['scenario'].update({
'max_visible_objects': 3,
'max_visible_road_points': 14,
'max_visible_traffic_lights': 15,
'max_visible_stop_signs': 92,
})
scenario_path = str(PROJECT_PATH / 'tests/large_file_tfrecord.json')
sim = Simulation(scenario_path=scenario_path,
config=get_scenario_dict(cfg))
scenario = sim.getScenario()
assert scenario.getMaxNumVisibleObjects() == 3
assert scenario.getMaxNumVisibleRoadPoints() == 14
assert scenario.getMaxNumVisibleTrafficLights() == 15
assert scenario.getMaxNumVisibleStopSigns() == 92
def main():
"""See file docstring."""
test_config_values()
test_custom_config()
if __name__ == '__main__':
main()
| 1,705 | 32.45098 | 73 | py |
nocturne | nocturne-main/tests/test_rl_env.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test step and rendering functions."""
import time
import os
from hydra import compose, initialize
from hydra.core.global_hydra import GlobalHydra
import numpy as np
from cfgs.config import PROJECT_PATH, set_display_window
from nocturne import Action
from nocturne.envs.wrappers import create_env
def test_rl_env():
"""Test step and rendering functions."""
set_display_window()
GlobalHydra.instance().clear()
initialize(config_path="../cfgs/")
cfg = compose(config_name="config")
cfg.scenario_path = os.path.join(PROJECT_PATH, 'tests')
cfg.max_num_vehicles = 50
env = create_env(cfg)
env.files = [str(PROJECT_PATH / "tests/large_file_tfrecord.json")]
times = []
_ = env.reset()
# quick check that rendering works
_ = env.scenario.getConeImage(env.scenario.getVehicles()[0],
80.0,
120 * (np.pi / 180),
0.0,
draw_target_position=False)
for _ in range(90):
vehs = env.scenario.getObjectsThatMoved()
prev_position = {
veh.getID(): [veh.position.x, veh.position.y]
for veh in vehs
}
t = time.perf_counter()
obs, rew, done, info = env.step(
{veh.id: Action(acceleration=2.0, steering=1.0)
for veh in vehs})
times.append(time.perf_counter() - t)
for veh in vehs:
if veh in env.scenario.getObjectsThatMoved():
new_position = [veh.position.x, veh.position.y]
assert prev_position[veh.getID(
)] != new_position, f'veh {veh.getID()} was in position \
{prev_position[veh.getID()]} which is the \
same as {new_position} but should have moved'
# temporarily disabled while we figure out
# how to make this machine independent
# assert 1 / np.mean(
# times
# ) > 1500, f'FPS should be greater than 1500 but is {1 / np.mean(times)}'
if __name__ == '__main__':
test_rl_env()
| 2,294 | 34.859375 | 78 | py |
nocturne | nocturne-main/tests/test_simulation_functions.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test that all available environment calls work + check collisions are recorded correctly."""
import os
from hydra.core.global_hydra import GlobalHydra
from hydra import initialize, compose
import matplotlib.pyplot as plt
import numpy as np
from cfgs.config import PROJECT_PATH, get_scenario_dict
from nocturne import Simulation
def test_scenario_functions():
"""Check that collisions are appropriately recorded and that functions don't error."""
GlobalHydra.instance().clear()
initialize(config_path="../cfgs/")
cfg = compose(config_name="config")
file_path = str(PROJECT_PATH / 'tests/large_file_tfrecord.json')
os.environ["DISPLAY"] = ":0.0"
################################
# Vehicle Collision checking
################################
# now lets test for collisions
# grab a vehicle and place it on top of another vehicle
sim = Simulation(scenario_path=file_path, config=get_scenario_dict(cfg))
scenario = sim.getScenario()
veh0 = scenario.getVehicles()[0]
veh1 = scenario.getVehicles()[1]
veh2 = scenario.getVehicles()[2]
    # TODO(ev): this fails unless the shift is non-zero
veh1.setPosition(veh0.getPosition().x + 0.001, veh0.getPosition().y)
sim.step(0.000001)
assert veh1.getCollided(
), 'vehicle1 should have collided after being placed on vehicle 0'
assert veh0.getCollided(
), 'vehicle0 should have collided after vehicle 0 was placed on it'
assert not veh2.getCollided(), 'vehicle2 should not have collided'
# confirm that this is still true a time-step later
sim.step(0.000001)
assert veh1.getCollided(
), 'vehicle1 should have collided after being placed on vehicle 0'
assert veh0.getCollided(
), 'vehicle0 should have collided after vehicle 0 was placed on it'
assert not veh2.getCollided(), 'vehicle2 should not have collided'
# now offset them slightly and do the same thing again
sim = Simulation(scenario_path=file_path, config=get_scenario_dict(cfg))
scenario = sim.getScenario()
veh0 = scenario.getVehicles()[0]
veh1 = scenario.getVehicles()[1]
veh2 = scenario.getVehicles()[2]
veh0 = scenario.getVehicles()[0]
veh1 = scenario.getVehicles()[1]
veh1.setPosition(veh0.getPosition().x + 0.2, veh0.getPosition().y + 0.2)
sim.step(0.000001)
assert veh1.getCollided(
), 'vehicle1 should have collided after being placed overlapping vehicle 0'
assert veh0.getCollided(
), 'vehicle0 should have collided after vehicle 1 was placed on it'
assert not veh2.getCollided(), 'vehicle2 should not have collided'
################################
# Road Collision checking
################################
    # check that placing the vehicle onto one of the road points produces a collision
print('entering road line - vehicle collision checking')
# find a road edge
colliding_road_line = None
for roadline in scenario.getRoadLines():
if roadline.canCollide():
colliding_road_line = roadline
break
roadpoints = colliding_road_line.getGeometry()
start_point = np.array([roadpoints[0].x, roadpoints[0].y])
road_segment_dir = np.array([roadpoints[1].x, roadpoints[1].y]) - np.array(
[roadpoints[0].x, roadpoints[0].y])
assert np.linalg.norm(
road_segment_dir) < 1 # it should be able to fit inside the vehicle
road_segment_angle = np.arctan2(
road_segment_dir[1], road_segment_dir[0]) # atan2 is (y, x) not (x,y)
veh0.setHeading(road_segment_angle)
# place the vehicle so that the segment is contained inside of it
new_center = start_point + 0.5 * road_segment_dir
veh0.setPosition(new_center[0], new_center[1])
sim.step(1e-6)
cone = scenario.getConeImage(veh0, view_angle=2 * np.pi, head_angle=0.0)
plt.figure()
plt.imshow(cone)
plt.savefig('line_veh_check.png')
assert veh0.getCollided(
), 'vehicle0 should have collided after a road edge is placed inside it'
# place the vehicle on one of the points so that the road segment intersects with a vehicle edge
sim.reset()
scenario = sim.getScenario()
veh0 = scenario.getVehicles()[0]
veh0.setHeading(road_segment_angle)
veh_length = veh0.length
new_center += veh_length / 2 * road_segment_dir
veh0.setPosition(new_center[0], new_center[1])
sim.step(1e-6)
assert veh0.getCollided(
), 'vehicle0 should have collided since a road edge intersects it'
######################
# Waymo Scene Construction
######################
# check that initializing things to a different time leads to a different
# image
cfg['scenario'].update({'start_time': 20})
sim = Simulation(scenario_path=file_path, config=get_scenario_dict(cfg))
scenario = sim.getScenario()
img1 = scenario.getConeImage(scenario.getVehicles()[4], 120.0, 2 * np.pi,
0.0)
# check that initializing things with and without pedestrians leads to a different
# image
cfg['scenario'].update({'start_time': 20, 'allow_non_vehicles': False})
sim = Simulation(scenario_path=file_path, config=get_scenario_dict(cfg))
scenario = sim.getScenario()
img2 = scenario.getConeImage(scenario.getVehicles()[4], 120.0, 2 * np.pi,
0.0)
assert not np.isclose(np.sum(img1 - img2),
0.0), 'adding pedestrians should change the image'
# check a variety of nocturne functions
_ = scenario.getPedestrians()
_ = scenario.getCyclists()
# check that the padding function for visible state is returning the right thing.
visible_dict = scenario.visible_state(object=scenario.getVehicles()[0],
view_dist=80,
view_angle=120 * (np.pi / 180),
padding=True)
scenario_cfg = cfg['scenario']
assert scenario_cfg['max_visible_objects'] == visible_dict['objects'].shape[0], \
'visible dict padding returned {} objects but should have been \
{}'.format(visible_dict['objects'].shape[0], scenario_cfg['max_visible_objects'])
assert scenario_cfg['max_visible_road_points'] == visible_dict['road_points'].shape[0], \
'visible dict padding returned {} objects but should have been \
{}'.format(visible_dict['road_points'].shape[0], scenario_cfg['max_visible_road_points'])
assert scenario_cfg['max_visible_traffic_lights'] == visible_dict['traffic_lights'].shape[0], \
'visible dict padding returned {} objects but should have been \
{}'.format(visible_dict['traffic_lights'].shape[0], scenario_cfg['max_visible_traffic_lights'])
assert scenario_cfg['max_visible_stop_signs'] == visible_dict['stop_signs'].shape[0], \
'visible dict padding returned {} objects but should have been \
{}'.format(visible_dict['stop_signs'].shape[0], scenario_cfg['max_visible_stop_signs'])
def main():
"""See file docstring."""
test_scenario_functions()
if __name__ == '__main__':
main()
| 7,339 | 43.484848 | 107 | py |
E2E-TBSA | E2E-TBSA-master/main.py | import argparse
from model import *
from utils import *
from evals import evaluate
import random
import os
separator = '========================================================================================'
def run(dataset, model, params):
"""
run the experiment
:param dataset: dataset
:param model: constructed neural model
:param params: settings of hyper-parameter
:return:
"""
train_set, val_set, test_set = dataset
n_train = len(train_set)
best_val_ote_score, best_val_ts_score = -999.0, -999.0
best_pred_ote, best_pred_ts = [], []
best_iter = -1
ote_tag_vocab = params.ote_tag_vocab
ts_tag_vocab = params.ts_tag_vocab
tagging_schema = params.tagging_schema
n_epoch = params.n_epoch
init_lr = model.optimizer.learning_rate
decay_rate = params.lr_decay
for n_iter in range(n_epoch):
cur_lr = init_lr / (1 + decay_rate * n_iter)
model.optimizer.learning_rate = cur_lr
total_train_loss = 0.0
train_pred_ote, train_pred_ts = [], []
print("In Epoch %s / %s (current lr: %.4f):" % (n_iter + 1, n_epoch, cur_lr))
# shuffle the training set in each epoch
random.shuffle(train_set)
train_gold_ote = [x['ote_tags'] for x in train_set]
train_gold_ts = [x['ts_tags'] for x in train_set]
if tagging_schema == 'BIO':
train_gold_ote, train_gold_ts = bio2ot_batch(
ote_tags=train_gold_ote, ts_tags=train_gold_ts)
train_gold_ote, train_gold_ts = ot2bieos_batch(
ote_tags=train_gold_ote, ts_tags=train_gold_ts)
elif tagging_schema == 'OT':
train_gold_ote, train_gold_ts = ot2bieos_batch(
ote_tags=train_gold_ote, ts_tags=train_gold_ts)
for i in range(n_train):
loss, pred_ote_labels, pred_ts_labels = model.forward(x=train_set[i], is_train=True)
total_train_loss += loss
if pred_ote_labels:
                # skip this example if the model returned no ote predictions
train_pred_ote.append(label2tag(label_sequence=pred_ote_labels, tag_vocab=ote_tag_vocab))
train_pred_ts.append(label2tag(label_sequence=pred_ts_labels, tag_vocab=ts_tag_vocab))
# before evaluation, transform the output tag sequence to BIEOS tag sequence
if tagging_schema == 'BIO':
if train_pred_ote:
train_pred_ote = bio2ot_ote_batch(ote_tag_seqs=train_pred_ote)
train_pred_ote = ot2bieos_ote_batch(ote_tag_seqs=train_pred_ote)
train_pred_ts = bio2ot_ts_batch(ts_tag_seqs=train_pred_ts)
train_pred_ts = ot2bieos_ts_batch(ts_tag_seqs=train_pred_ts)
elif tagging_schema == 'OT':
if train_pred_ote:
train_pred_ote = ot2bieos_ote_batch(ote_tag_seqs=train_pred_ote)
train_pred_ts = ot2bieos_ts_batch(ts_tag_seqs=train_pred_ts)
# evaluation
ts_scores = evaluate_ts(gold_ts=train_gold_ts, pred_ts=train_pred_ts)
ts_macro_f1, ts_micro_p, ts_micro_r, ts_micro_f1 = ts_scores
if train_pred_ote:
ote_scores = evaluate_ote(gold_ot=train_gold_ote, pred_ot=train_pred_ote)
ote_p, ote_r, ote_f1 = ote_scores
print("\ttrain loss: %.4f, ote: f1: %.4f, ts: precision: %.4f, recall: %.4f, "
"micro-f1: %.4f" % (total_train_loss / n_train, ote_f1, ts_micro_p, ts_micro_r, ts_micro_f1))
else:
print("\ttrain_loss: %.4f, ts: precision: %.4f, recall: %.4f, micro-f1: %.4f" %
(total_train_loss / n_train, ts_micro_p, ts_micro_r, ts_micro_f1))
val_outputs = model.predict(dataset=val_set)
val_ote_scores, val_ts_scores = val_outputs[0], val_outputs[1]
val_ts_macro_f1, val_ts_micro_p, val_ts_micro_r, val_ts_micro_f1 = val_ts_scores
if val_ote_scores:
val_ote_p, val_ote_r, val_ote_f1 = val_ote_scores
print("\tval performance: ote: f1: %.4f, ts: precision: %.4f, recall: %.4f, "
"micro-f1: %.4f" % (val_ote_f1, val_ts_micro_p, val_ts_micro_r, val_ts_micro_f1))
else:
print("\tval performance: ts: precision: %.4f, recall: %.4f, micro-f1: %.4f"
% (val_ts_micro_p, val_ts_micro_r, val_ts_micro_f1))
if val_ts_micro_f1 > best_val_ts_score:
best_val_ts_score = val_ts_micro_f1
test_outputs = model.predict(dataset=test_set)
test_ote_scores, test_ts_scores = test_outputs[0], test_outputs[1]
if len(test_outputs) > 2:
best_pred_ote, best_pred_ts = test_outputs[2], test_outputs[3]
best_iter = n_iter + 1
if test_ote_scores:
print("\tExceed: test performance: ote: f1: %.4f, ts: precision: %.4f, recall: %.4f, micro-f1: %.4f" % (test_ote_scores[2], test_ts_scores[1], test_ts_scores[2], test_ts_scores[3]))
else:
print("\tExceed: test performance: ts: precision: %.4f, recall: %.4f, micro-f1: %.4f"
% (test_ts_scores[1], test_ts_scores[2], test_ts_scores[3]))
model_path = './models/%s_%.6lf.model' % (params.ds_name, test_ts_scores[3])
print("Save the model to %s..." % model_path)
if not os.path.exists('./models'):
os.mkdir('./models')
model.pc.save(model_path)
if test_ote_scores:
final_res_string = "\nBest results obtained at %s: ote f1: %.4f, ts: precision: %.4f, recall: %.4f, " \
"ts micro-f1: %.4f" % (best_iter, test_ote_scores[2], test_ts_scores[1],
test_ts_scores[2], test_ts_scores[3])
else:
final_res_string = "\nBest results obtained at %s, ts: precision: %.4f, recall: %.4f, " \
"ts micro-f1: %.4f" % (best_iter, test_ts_scores[1],
test_ts_scores[2], test_ts_scores[3])
if best_pred_ote:
n_sample = len(test_set)
gold_ote = [x['ote_tags'] for x in test_set]
gold_ts = [x['ts_tags'] for x in test_set]
if model.tagging_schema == 'BIO':
gold_ote, gold_ts = bio2ot_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
gold_ote, gold_ts = ot2bieos_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
elif model.tagging_schema == 'OT':
gold_ote, gold_ts = ot2bieos_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
output_lines = ['Dataset: %s\n' % params.ds_name, 'Parameter settings: \n']
params_dict = vars(params)
for k in params_dict:
if k == 'char_vocab' or k == 'vocab':
continue
else:
v = params_dict[k]
output_lines.append('\t%s: %s\n' % (k, v))
output_lines.append("==============================================\n\n")
for i in range(n_sample):
ote_seq = best_pred_ote[i]
ts_seq = best_pred_ts[i]
w_seq = test_set[i]['words']
ote_seq_gold = gold_ote[i]
ts_seq_gold = gold_ts[i]
assert len(ote_seq) == len(ts_seq) == len(w_seq)
for j in range(len(ote_seq)):
word = w_seq[j]
ote_tag = ote_seq[j]
ote_tag_gold = ote_seq_gold[j]
ts_tag = ts_seq[j]
ts_tag_gold = ts_seq_gold[j]
output_lines.append('%s\t%s\t%s\t%s\t%s\n' % (word, ote_tag, ote_tag_gold, ts_tag, ts_tag_gold))
# use empty lines as the separator
output_lines.append('\n')
if not os.path.exists('./predictions'):
os.mkdir('./predictions')
model_path = './predictions/%s_%.6lf.txt' % (params.ds_name, test_ts_scores[3])
with open(model_path, 'w+') as fp:
fp.writelines(output_lines)
print(final_res_string)
return final_res_string, model_path
if __name__ == '__main__':
# random_seed = 1234
# random.seed(random_seed)
parser = argparse.ArgumentParser(description="Open Domain ABSA")
parser.add_argument("-ds_name", type=str, default='rest14', help="dataset name")
# dimension of LSTM hidden representations
parser.add_argument("-dim_char", type=int, default=30, help="dimension of char embeddings")
parser.add_argument("-dim_char_h", type=int, default=50, help="dimension of char hidden representations")
parser.add_argument("-dim_ote_h", type=int, default=50, help="hidden dimension for opinion target extraction")
parser.add_argument("-dim_ts_h", type=int, default=50, help="hidden dimension for targeted sentiment")
parser.add_argument("-input_win", type=int, default=3, help="window size of input")
parser.add_argument("-stm_win", type=int, default=3, help="window size of OE component")
parser.add_argument("-optimizer", type=str, default="sgd", help="optimizer (or, trainer)")
parser.add_argument("-n_epoch", type=int, default=40, help="number of training epoch")
parser.add_argument("-dropout", type=float, default=0.5, help="dropout rate for final representations")
parser.add_argument("-emb_name", type=str, default="glove_840B", help="name of word embedding")
# Note: tagging schema is OT in the original data record
parser.add_argument("-tagging_schema", type=str, default="BIEOS", help="tagging schema")
parser.add_argument("-rnn_type", type=str, default="LSTM",
help="type of rnn unit, currently only LSTM and GRU are supported")
parser.add_argument("-sgd_lr", type=float, default=0.1,
help="learning rate for sgd, only used when the optimizer is sgd")
parser.add_argument("-clip_grad", type=float, default=5.0, help="maximum gradients")
parser.add_argument("-lr_decay", type=float, default=0.05, help="decay rate of learning rate")
parser.add_argument("-use_char", type=int, default=0, help="if use character-level word embeddings")
parser.add_argument('-epsilon', type=float, default=0.5, help="maximum proportions of the boundary-based scores")
dy_seed = 1314159
random_seed = 1234
#random_seed = 1972
args = parser.parse_args()
if args.ds_name == 'laptop14':
random_seed = 13456
if args.ds_name.startswith("twitter"):
random_seed = 7788
args.dynet_seed = dy_seed
args.random_seed = random_seed
random.seed(random_seed)
emb_name = args.emb_name
emb2path = {
'glove_6B': '/projdata9/info_fil/lixin/Research/OTE/embeddings/glove_6B_300d.txt',
'glove_42B': '/projdata9/info_fil/lixin/Research/OTE/embeddings/glove_42B_300d.txt',
'glove_840B': '/projdata9/info_fil/lixin/Research/OTE/embeddings/glove_840B_300d.txt',
'glove_27B100d': '/projdata9/info_fil/lixin/Research/OTE/embeddings/glove_twitter_27B_100d.txt',
'glove_27B200d': '/projdata9/info_fil/lixin/Research/OTE/embeddings/glove_twitter_27B_200d.txt',
'yelp_rest1': '/projdata9/info_fil/lixin/Research/yelp/yelp_vec_200_2_win5_sent.txt',
'yelp_rest2': '/projdata9/info_fil/lixin/Research/yelp/yelp_vec_200_2_new.txt',
'amazon_laptop': '/projdata9/info_fil/lixin/Resources/amazon_full/vectors/amazon_laptop_vec_200_5.txt'
}
emb_path = emb2path[emb_name]
input_win = args.input_win
stm_win = args.stm_win
ds_name = args.ds_name
tagging_schema = args.tagging_schema
# build dataset
train, val, test, vocab, char_vocab, ote_tag_vocab, ts_tag_vocab = build_dataset(
ds_name=ds_name, input_win=input_win,
tagging_schema=tagging_schema, stm_win=stm_win
)
# obtain the pre-trained word embeddings
embeddings = load_embeddings(path=emb_path, vocab=vocab, ds_name=ds_name, emb_name=emb_name)
# obtain the pre-trained character embeddings
char_embeddings = None
# convert the datasets to the conll format and write them back to the specified folder
#to_conll(train=train, val=val, test=test, ds_name=ds_name, embeddings=embeddings, vocab=vocab)
args.dim_w = len(embeddings[0])
#args.dim_char = len(char_embeddings[0])
args.dim_char = 10
args.ote_tag_vocab = ote_tag_vocab
args.ts_tag_vocab = ts_tag_vocab
if ds_name.startswith("twitter"):
args.epsilon = 0.8
    # content to be written to the log file
log_lines = [separator+"\n"]
#print(args)
print(separator)
for arg in vars(args):
arg_string = "\t-%s: %s" % (arg, str(getattr(args, arg)))
print(arg_string)
log_lines.append(arg_string+"\n")
args.char_vocab = char_vocab
model = Model(params=args, vocab=vocab, embeddings=embeddings, char_embeddings=char_embeddings)
mode = 'train-test'
if mode == 'train-test':
final_res_string, model_path = run(dataset=[train, val, test], model=model, params=args)
log_lines.append(final_res_string + "\n")
log_lines.append("Best model is saved at: %s\n" % model_path)
log_lines.append(separator + "\n\n")
print(separator)
if not os.path.exists('log'):
os.mkdir('log')
with open('log/%s.txt' % ds_name, 'a') as fp:
fp.writelines(log_lines)
else:
model.decoding(dataset=test, model_name='lstm_cascade_laptop14_0.573138.model')
| 13,387 | 47.683636 | 197 | py |
E2E-TBSA | E2E-TBSA-master/utils.py | import string
from nltk import ngrams
import numpy as np
# DO NOT change the random seed, otherwise, the train-test split will be inconsistent with those in the baselines
np.random.seed(7894)
import os
import pickle
def ot2bio_ote(ote_tag_sequence):
"""
ot2bio function for ote tag sequence
:param ote_tag_sequence:
:return:
"""
new_ote_sequence = []
n_tag = len(ote_tag_sequence)
prev_ote_tag = '$$$'
for i in range(n_tag):
cur_ote_tag = ote_tag_sequence[i]
assert cur_ote_tag == 'O' or cur_ote_tag == 'T'
if cur_ote_tag == 'O':
new_ote_sequence.append(cur_ote_tag)
else:
# cur_ote_tag is T
if prev_ote_tag == 'T':
new_ote_sequence.append('I')
else:
# cur tag is at the beginning of the opinion target
new_ote_sequence.append('B')
prev_ote_tag = cur_ote_tag
return new_ote_sequence
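# Illustrative usage (hypothetical tag sequence, not taken from the data):
#     >>> ot2bio_ote(['O', 'T', 'T', 'O'])
#     ['O', 'B', 'I', 'O']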
def ot2bio_ts(ts_tag_sequence):
"""
ot2bio function for ts tag sequence
:param ts_tag_sequence:
:return:
"""
new_ts_sequence = []
n_tag = len(ts_tag_sequence)
prev_pos = '$$$'
for i in range(n_tag):
cur_ts_tag = ts_tag_sequence[i]
if cur_ts_tag == 'O':
new_ts_sequence.append('O')
cur_pos = 'O'
else:
# current tag is subjective tag, i.e., cur_pos is T
# print(cur_ts_tag)
cur_pos, cur_sentiment = cur_ts_tag.split('-')
if cur_pos == prev_pos:
# prev_pos is T
new_ts_sequence.append('I-%s' % cur_sentiment)
else:
# prev_pos is O
new_ts_sequence.append('B-%s' % cur_sentiment)
prev_pos = cur_pos
return new_ts_sequence
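# Illustrative usage (hypothetical tag sequence):
#     >>> ot2bio_ts(['O', 'T-POS', 'T-POS', 'O'])
#     ['O', 'B-POS', 'I-POS', 'O']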
def ot2bio(ote_tag_sequence, ts_tag_sequence):
"""
perform ot--->bio for both ote tag sequence and ts tag sequence
:param ote_tag_sequence: input tag sequence of opinion target extraction
:param ts_tag_sequence: input tag sequence of targeted sentiment
:return:
"""
new_ote_sequence = ot2bio_ote(ote_tag_sequence=ote_tag_sequence)
new_ts_sequence = ot2bio_ts(ts_tag_sequence=ts_tag_sequence)
assert len(new_ts_sequence) == len(ts_tag_sequence)
assert len(new_ote_sequence) == len(ote_tag_sequence)
return new_ote_sequence, new_ts_sequence
def ot2bio_ote_batch(ote_tag_seqs):
"""
batch version of function ot2bio_ote
:param ote_tags:
:return:
"""
new_ote_tag_seqs = []
n_seqs = len(ote_tag_seqs)
for i in range(n_seqs):
new_ote_seq = ot2bio_ote(ote_tag_sequence=ote_tag_seqs[i])
new_ote_tag_seqs.append(new_ote_seq)
return new_ote_tag_seqs
def ot2bio_ts_batch(ts_tag_seqs):
"""
batch version of function ot2bio_ts
:param ts_tag_seqs:
:return:
"""
new_ts_tag_seqs = []
n_seqs = len(ts_tag_seqs)
for i in range(n_seqs):
new_ts_seq = ot2bio_ts(ts_tag_sequence=ts_tag_seqs[i])
new_ts_tag_seqs.append(new_ts_seq)
return new_ts_tag_seqs
def ot2bio_batch(ote_tags, ts_tags):
"""
batch version of function ot2bio
:param ote_tags: a batch of ote tag sequence
:param ts_tags: a batch of ts tag sequence
:return:
"""
new_ote_tags, new_ts_tags = [], []
assert len(ote_tags) == len(ts_tags)
n_seqs = len(ote_tags)
for i in range(n_seqs):
ote, ts = ot2bio(ote_tag_sequence=ote_tags[i], ts_tag_sequence=ts_tags[i])
new_ote_tags.append(ote)
new_ts_tags.append(ts)
return new_ote_tags, new_ts_tags
def ot2bieos_ote(ote_tag_sequence):
"""
ot2bieos function for ote task
:param ote_tag_sequence:
:return:
"""
n_tags = len(ote_tag_sequence)
new_ote_sequence = []
prev_ote_tag = '$$$'
for i in range(n_tags):
cur_ote_tag = ote_tag_sequence[i]
if cur_ote_tag == 'O':
new_ote_sequence.append('O')
else:
# cur_ote_tag is T
if prev_ote_tag != cur_ote_tag:
# prev_ote_tag is O, new_cur_tag can only be B or S
if i == n_tags - 1:
new_ote_sequence.append('S')
elif ote_tag_sequence[i + 1] == cur_ote_tag:
new_ote_sequence.append('B')
elif ote_tag_sequence[i + 1] != cur_ote_tag:
new_ote_sequence.append('S')
else:
raise Exception("Invalid ner tag value: %s" % cur_ote_tag)
else:
# prev_tag is T, new_cur_tag can only be I or E
if i == n_tags - 1:
new_ote_sequence.append('E')
elif ote_tag_sequence[i + 1] == cur_ote_tag:
# next_tag is T
new_ote_sequence.append('I')
elif ote_tag_sequence[i + 1] != cur_ote_tag:
# next_tag is O
new_ote_sequence.append('E')
else:
raise Exception("Invalid ner tag value: %s" % cur_ote_tag)
prev_ote_tag = cur_ote_tag
return new_ote_sequence
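# Illustrative usage (hypothetical tag sequences): multi-word targets become B..I..E,
# single-word targets become S:
#     >>> ot2bieos_ote(['O', 'T', 'T', 'T'])
#     ['O', 'B', 'I', 'E']
#     >>> ot2bieos_ote(['O', 'T', 'O'])
#     ['O', 'S', 'O']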
def ot2bieos_ts(ts_tag_sequence):
"""
ot2bieos function for ts task
:param ts_tag_sequence: tag sequence for targeted sentiment
:return:
"""
n_tags = len(ts_tag_sequence)
new_ts_sequence = []
prev_pos = '$$$'
for i in range(n_tags):
cur_ts_tag = ts_tag_sequence[i]
if cur_ts_tag == 'O':
new_ts_sequence.append('O')
cur_pos = 'O'
else:
cur_pos, cur_sentiment = cur_ts_tag.split('-')
# cur_pos is T
if cur_pos != prev_pos:
# prev_pos is O and new_cur_pos can only be B or S
if i == n_tags - 1:
new_ts_sequence.append('S-%s' % cur_sentiment)
else:
next_ts_tag = ts_tag_sequence[i + 1]
if next_ts_tag == 'O':
new_ts_sequence.append('S-%s' % cur_sentiment)
else:
new_ts_sequence.append('B-%s' % cur_sentiment)
else:
# prev_pos is T and new_cur_pos can only be I or E
if i == n_tags - 1:
new_ts_sequence.append('E-%s' % cur_sentiment)
else:
next_ts_tag = ts_tag_sequence[i + 1]
if next_ts_tag == 'O':
new_ts_sequence.append('E-%s' % cur_sentiment)
else:
new_ts_sequence.append('I-%s' % cur_sentiment)
prev_pos = cur_pos
return new_ts_sequence
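# Illustrative usage (hypothetical tag sequence):
#     >>> ot2bieos_ts(['O', 'T-POS', 'T-POS', 'O'])
#     ['O', 'B-POS', 'E-POS', 'O']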
def ot2bieos(ote_tag_sequence, ts_tag_sequence):
"""
perform ot-->bieos for both ote tag and ts tag sequence
:param ote_tag_sequence: input tag sequence of opinion target extraction
:param ts_tag_sequence: input tag sequence of targeted sentiment
:return:
"""
# new tag sequences of opinion target extraction and targeted sentiment
new_ote_sequence = ot2bieos_ote(ote_tag_sequence=ote_tag_sequence)
new_ts_sequence = ot2bieos_ts(ts_tag_sequence=ts_tag_sequence)
assert len(ote_tag_sequence) == len(new_ote_sequence)
assert len(ts_tag_sequence) == len(new_ts_sequence)
return new_ote_sequence, new_ts_sequence
def ot2bieos_ote_batch(ote_tag_seqs):
"""
batch version of function ot2bieos_ote
:param ote_tags:
:return:
"""
new_ote_tag_seqs = []
n_seqs = len(ote_tag_seqs)
for i in range(n_seqs):
new_ote_seq = ot2bieos_ote(ote_tag_sequence=ote_tag_seqs[i])
new_ote_tag_seqs.append(new_ote_seq)
return new_ote_tag_seqs
def ot2bieos_ts_batch(ts_tag_seqs):
"""
batch version of function ot2bieos_ts
:param ts_tag_seqs:
:return:
"""
new_ts_tag_seqs = []
n_seqs = len(ts_tag_seqs)
for i in range(n_seqs):
new_ts_seq = ot2bieos_ts(ts_tag_sequence=ts_tag_seqs[i])
new_ts_tag_seqs.append(new_ts_seq)
return new_ts_tag_seqs
def ot2bieos_batch(ote_tags, ts_tags):
"""
batch version of function ot2bieos
:param ote_tags: a batch of ote tag sequence
:param ts_tags: a batch of ts tag sequence
:return:
:param ote_tags:
:param ts_tags:
:return:
"""
new_ote_tags, new_ts_tags = [], []
assert len(ote_tags) == len(ts_tags)
n_seqs = len(ote_tags)
for i in range(n_seqs):
ote, ts = ot2bieos(ote_tag_sequence=ote_tags[i], ts_tag_sequence=ts_tags[i])
new_ote_tags.append(ote)
new_ts_tags.append(ts)
return new_ote_tags, new_ts_tags
def bio2ot_ote(ote_tag_sequence):
"""
perform bio-->ot for ote tag sequence
:param ote_tag_sequence:
:return:
"""
new_ote_sequence = []
n_tags = len(ote_tag_sequence)
for i in range(n_tags):
ote_tag = ote_tag_sequence[i]
if ote_tag == 'B' or ote_tag == 'I':
new_ote_sequence.append('T')
else:
            new_ote_sequence.append('O')
return new_ote_sequence
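# Illustrative usage (hypothetical tag sequence): B/I collapse to T, O stays O:
#     >>> bio2ot_ote(['B', 'I', 'O', 'B'])
#     ['T', 'T', 'O', 'T']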
def bio2ot_ts(ts_tag_sequence):
"""
perform bio-->ot for ts tag sequence
:param ts_tag_sequence:
:return:
"""
new_ts_sequence = []
n_tags = len(ts_tag_sequence)
for i in range(n_tags):
ts_tag = ts_tag_sequence[i]
if ts_tag == 'O':
new_ts_sequence.append('O')
else:
pos, sentiment = ts_tag.split('-')
new_ts_sequence.append('T-%s' % sentiment)
return new_ts_sequence
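# Illustrative usage (hypothetical tag sequence):
#     >>> bio2ot_ts(['O', 'B-NEG', 'I-NEG'])
#     ['O', 'T-NEG', 'T-NEG']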
def bio2ot(ote_tag_sequence, ts_tag_sequence):
"""
perform bio-->ot for both ote and ts tag sequence
:param ote_tag_sequence: tag sequence for opinion target extraction
:param ts_tag_sequence: tag sequence for targeted sentiment
:return:
"""
assert len(ote_tag_sequence) == len(ts_tag_sequence)
new_ote_sequence = bio2ot_ote(ote_tag_sequence=ote_tag_sequence)
new_ts_sequence = bio2ot_ts(ts_tag_sequence=ts_tag_sequence)
assert len(new_ote_sequence) == len(ote_tag_sequence)
assert len(new_ts_sequence) == len(ts_tag_sequence)
return new_ote_sequence, new_ts_sequence
def bio2ot_ote_batch(ote_tag_seqs):
"""
batch version of function bio2ot_ote
:param ote_tag_seqs: ote tag sequences
:return:
"""
new_ote_tag_seqs = []
n_seqs = len(ote_tag_seqs)
for i in range(n_seqs):
new_ote_seq = bio2ot_ote(ote_tag_sequence=ote_tag_seqs[i])
new_ote_tag_seqs.append(new_ote_seq)
return new_ote_tag_seqs
def bio2ot_ts_batch(ts_tag_seqs):
"""
batch version of function bio2ot_ts
:param ts_tag_seqs:
:return:
"""
new_ts_tag_seqs = []
n_seqs = len(ts_tag_seqs)
for i in range(n_seqs):
new_ts_seq = bio2ot_ts(ts_tag_sequence=ts_tag_seqs[i])
new_ts_tag_seqs.append(new_ts_seq)
return new_ts_tag_seqs
def bio2ot_batch(ote_tags, ts_tags):
"""
batch version of function bio2ot
:param ote_tags: a batch of ote tag sequence
:param ts_tags: a batch of ts tag sequence
:return:
"""
new_ote_tags, new_ts_tags = [], []
assert len(ote_tags) == len(ts_tags)
n_seqs = len(ote_tags)
for i in range(n_seqs):
ote, ts = bio2ot(ote_tag_sequence=ote_tags[i], ts_tag_sequence=ts_tags[i])
new_ote_tags.append(ote)
new_ts_tags.append(ts)
return new_ote_tags, new_ts_tags
# TODO
def bieos2ot(tag_sequence):
"""
transform BIEOS tag sequence to OT tag sequence
:param tag_sequence: input tag sequence
:return:
"""
new_sequence = []
for t in tag_sequence:
assert t == 'B' or t == 'I' or t == 'E' or t == 'O' or t == 'S'
if t == 'O':
new_sequence.append(t)
else:
new_sequence.append('T')
assert len(new_sequence) == len(tag_sequence)
return new_sequence
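# Illustrative usage (hypothetical tag sequence):
#     >>> bieos2ot(['B', 'I', 'E', 'O', 'S'])
#     ['T', 'T', 'T', 'O', 'T']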
def get_vocab(train_set, test_set):
"""
build the vocabulary of the whole dataset
:param train_set:
:param test_set:
:return:
"""
vocab = {'PUNCT': 0, 'PADDING': 1}
inv_vocab = {0: 'PUNCT', 1: 'PADDING'}
wid = 2
for record in train_set + test_set:
assert 'words' in record
words = record['words']
for w in words:
if w not in vocab:
vocab[w] = wid
inv_vocab[wid] = w
wid += 1
print("Find %s different words in the dataset" % len(vocab))
char_string = ''
for w in vocab:
char_string += w
chars = list(set(char_string))
cid, char_vocab = 0, {}
for ch in chars:
if ch not in char_vocab:
char_vocab[ch] = cid
cid += 1
print("Find %s different chars in the dataset" % len(char_vocab))
return vocab, char_vocab
def read_lexicon():
"""
read sentiment lexicon from the disk
:return:
"""
path = 'mpqa_full.txt'
sent_lexicon = {}
with open(path) as fp:
for line in fp:
word, polarity = line.strip().split('\t')
if word not in sent_lexicon:
sent_lexicon[word] = polarity
return sent_lexicon
def read_data(path):
"""
read data from the specified path
:param path: path of dataset
:return:
"""
dataset = []
with open(path, encoding='UTF-8') as fp:
for line in fp:
record = {}
sent, tag_string = line.strip().split('####')
record['sentence'] = sent
word_tag_pairs = tag_string.split(' ')
# tag sequence for targeted sentiment
ts_tags = []
# tag sequence for opinion target extraction
ote_tags = []
# word sequence
words = []
for item in word_tag_pairs:
# valid label is: O, T-POS, T-NEG, T-NEU
eles = item.split('=')
if len(eles) == 2:
word, tag = eles
elif len(eles) > 2:
tag = eles[-1]
word = (len(eles) - 2) * "="
if word not in string.punctuation:
# lowercase the words
words.append(word.lower())
else:
# replace punctuations with a special token
words.append('PUNCT')
if tag == 'O':
ote_tags.append('O')
ts_tags.append('O')
elif tag == 'T-POS':
ote_tags.append('T')
ts_tags.append('T-POS')
elif tag == 'T-NEG':
ote_tags.append('T')
ts_tags.append('T-NEG')
elif tag == 'T-NEU':
ote_tags.append('T')
ts_tags.append('T-NEU')
else:
raise Exception('Invalid tag %s!!!' % tag)
record['words'] = words.copy()
record['ote_raw_tags'] = ote_tags.copy()
record['ts_raw_tags'] = ts_tags.copy()
dataset.append(record)
print("Obtain %s records from %s" % (len(dataset), path))
return dataset
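# Expected line format (the sentence below is a made-up example, not from the data):
#     The staff was friendly####The=O staff=T-POS was=O friendly=O
# i.e., the raw sentence and the space-separated word=tag pairs are joined by '####'.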
def set_wid(dataset, vocab, win=1):
"""
set wid field for the dataset
:param dataset: dataset
:param vocab: vocabulary
:param win: context window size, for window-based input, should be an odd number
:return: dataset with field wid
"""
n_records = len(dataset)
for i in range(n_records):
words = dataset[i]['words']
lm_labels = []
# set labels for the auxiliary language modeling task
for w in words:
lm_labels.append(vocab[w])
dataset[i]['lm_labels'] = lm_labels.copy()
n_padded_words = win // 2
pad_left = ['PADDING' for _ in range(n_padded_words)]
pad_right = ['PADDING' for _ in range(n_padded_words)]
padded_words = pad_left + words + pad_right
# the window-based input
win_input = list(ngrams(padded_words, win))
assert len(win_input) == len(words)
n_grams = []
for t in win_input:
n_grams.append(t)
wids = [[vocab[w] for w in ngram] for ngram in n_grams]
dataset[i]['wids'] = wids.copy()
return dataset
def set_cid(dataset, char_vocab):
"""
set cid field for the records in the dataset
:param dataset: dataset
:param char_vocab: vocabulary of character
:return:
"""
n_records = len(dataset)
for i in range(n_records):
words = dataset[i]['words']
cids = []
for w in words:
cids.append([char_vocab[ch] for ch in list(w)])
dataset[i]['cids'] = cids.copy()
return dataset
def set_labels(dataset, tagging_schema='BIO'):
"""
set ote_label and ts_label for the dataset
:param dataset: dataset without ote_label and ts_label fields
:param tagging_schema: tagging schema of ote_tag and ts_tag
:return:
"""
if tagging_schema == 'OT':
ote_tag_vocab = {'O': 0, 'T': 1}
ts_tag_vocab = {'O': 0, 'T-POS': 1, 'T-NEG': 2, 'T-NEU': 3}
elif tagging_schema == 'BIO':
ote_tag_vocab = {'O': 0, 'B': 1, 'I': 2}
ts_tag_vocab = {'O': 0, 'B-POS': 1, 'I-POS': 2, 'B-NEG': 3, 'I-NEG': 4,
'B-NEU': 5, 'I-NEU': 6}
elif tagging_schema == 'BIEOS':
ote_tag_vocab = {'O': 0, 'B': 1, 'I': 2, 'E': 3, 'S': 4}
ts_tag_vocab = {'O': 0, 'B-POS': 1, 'I-POS': 2, 'E-POS': 3, 'S-POS': 4,
'B-NEG': 5, 'I-NEG': 6, 'E-NEG': 7, 'S-NEG': 8,
'B-NEU': 9, 'I-NEU': 10, 'E-NEU': 11, 'S-NEU': 12}
else:
raise Exception("Invalid tagging schema %s" % tagging_schema)
n_records = len(dataset)
for i in range(n_records):
ote_tags = dataset[i]['ote_raw_tags']
ts_tags = dataset[i]['ts_raw_tags']
if tagging_schema == 'OT':
pass
elif tagging_schema == 'BIO':
ote_tags, ts_tags = ot2bio(ote_tag_sequence=ote_tags, ts_tag_sequence=ts_tags)
elif tagging_schema == 'BIEOS':
ote_tags, ts_tags = ot2bieos(ote_tag_sequence=ote_tags, ts_tag_sequence=ts_tags)
else:
raise Exception("Invalid tagging schema %s" % tagging_schema)
ote_labels = [ote_tag_vocab[t] for t in ote_tags]
ts_labels = [ts_tag_vocab[t] for t in ts_tags]
dataset[i]['ote_tags'] = ote_tags.copy()
dataset[i]['ts_tags'] = ts_tags.copy()
dataset[i]['ote_labels'] = ote_labels.copy()
dataset[i]['ts_labels'] = ts_labels.copy()
return dataset, ote_tag_vocab, ts_tag_vocab
def set_lm_labels(dataset, vocab, stm_lex, stm_win=3):
"""
set labels of bi-directional language modeling and sentiment-aware language modeling
:param dataset: dataset
:param vocab: vocabulary
:param stm_lex: sentiment lexicon
:param stm_win: window size (i.e., length) of sentiment context
:return:
"""
n_records = len(dataset)
for i in range(n_records):
words = dataset[i]['words']
# labels of language modeling and sentiment aware language modeling
lm_labels_f, lm_labels_b = [], []
n_w = len(words)
# language modeling in forward direction
for j in range(n_w):
if j == n_w - 1:
next_word = 'PADDING'
else:
next_word = words[j+1]
lm_labels_f.append(vocab[next_word])
for j in range(n_w-1, -1, -1):
if j == 0:
next_word = 'PADDING'
else:
next_word = words[j-1]
lm_labels_b.append(vocab[next_word])
dataset[i]['lm_labels_f'] = lm_labels_f.copy()
dataset[i]['lm_labels_b'] = lm_labels_b.copy()
# sentiment aware language modeling
stm_lm_labels = []
for j in range(n_w):
# left boundary of sentimental context
stm_ctx_lb = j - stm_win
if stm_ctx_lb < 0:
stm_ctx_lb = 0
stm_ctx_rb = j + stm_win + 1
left_ctx = words[stm_ctx_lb:j]
right_ctx = words[j+1:stm_ctx_rb]
stm_ctx = left_ctx + right_ctx
flag = False
for w in stm_ctx:
if w in stm_lex:
flag = True
break
if flag:
stm_lm_labels.append(1)
else:
stm_lm_labels.append(0)
dataset[i]['stm_lm_labels'] = stm_lm_labels.copy()
return dataset
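# Note on the opinion-word indicator labels (illustrative; assumes 'delicious' is in
# the MPQA lexicon): with stm_win=3, the word 'pizza' in "the pizza was delicious"
# receives stm_lm_label 1 because a lexicon word falls inside its context window.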
def build_dataset(ds_name, input_win=1, tagging_schema='BIO', stm_win=1):
"""
build dataset for model training, development and inference
:param ds_name: dataset name
:param input_win: window size input
:param tagging_schema: tagging schema
:param stm_win: window size of context for the OE component
:return:
"""
# read mpqa sentiment lexicon
stm_lex = read_lexicon()
# paths of training and testing dataset
train_path = './data/%s_train.txt' % ds_name
test_path = './data/%s_test.txt' % ds_name
# loaded datasets
train_set = read_data(path=train_path)
test_set = read_data(path=test_path)
vocab, char_vocab = get_vocab(train_set=train_set, test_set=test_set)
train_set = set_wid(dataset=train_set, vocab=vocab, win=input_win)
test_set = set_wid(dataset=test_set, vocab=vocab, win=input_win)
train_set = set_cid(dataset=train_set, char_vocab=char_vocab)
test_set = set_cid(dataset=test_set, char_vocab=char_vocab)
train_set, ote_tag_vocab, ts_tag_vocab = set_labels(dataset=train_set, tagging_schema=tagging_schema)
test_set, _, _ = set_labels(dataset=test_set, tagging_schema=tagging_schema)
train_set = set_lm_labels(dataset=train_set, vocab=vocab, stm_lex=stm_lex, stm_win=stm_win)
test_set = set_lm_labels(dataset=test_set, vocab=vocab, stm_lex=stm_lex, stm_win=stm_win)
n_train = len(train_set)
# use 10% training data for dev experiment
n_val = int(n_train * 0.1)
    # generate a uniform random sample from np.arange(n_train) of size n_val
# This is equivalent to np.random.permutation(np.arange(n_train))[:n_val]
val_sample_ids = np.random.choice(n_train, n_val, replace=False)
print("The first 15 validation samples:", val_sample_ids[:15])
val_set, tmp_train_set = [], []
for i in range(n_train):
record = train_set[i]
if i in val_sample_ids:
val_set.append(record)
else:
tmp_train_set.append(record)
train_set = [r for r in tmp_train_set]
return train_set, val_set, test_set, vocab, char_vocab, ote_tag_vocab, ts_tag_vocab
def load_embeddings(path, vocab, ds_name, emb_name):
"""
load pre-trained word embeddings from the disk
:param path: absolute path of the embedding files
:param vocab: vocabulary
:param ds_name: name of dataset
:param emb_name: name of word embedding
:return:
"""
# by default, we employ GloVe 840B word embeddings
pkl = './embeddings/%s_%s.pkl' % (ds_name, emb_name)
if os.path.exists(pkl):
print("Load embeddings from existing pkl file %s..." % pkl)
        # word embedding weights have been loaded
embeddings = pickle.load(open(pkl, 'rb'))
else:
print("Load embedding from %s..." % path)
raw_embeddings = {}
with open(path) as fp:
for line in fp:
eles = line.strip().split(' ')
word = eles[0]
if word in vocab:
raw_embeddings[word] = eles[1:]
dim_w = len(raw_embeddings['the'])
n_words = len(vocab)
embeddings = np.zeros(shape=(n_words, dim_w))
for w in vocab:
wid = vocab[w]
if w in raw_embeddings:
embeddings[wid] = np.array([float(ele) for ele in raw_embeddings[w]])
else:
# for OOV words, add random initialization
embeddings[wid] = np.random.uniform(-0.25, 0.25, dim_w)
print("Find %s word embeddings..." % len(embeddings))
if not os.path.exists('./embeddings'):
os.mkdir('./embeddings')
emb_path = './embeddings/%s_%s.pkl' % (ds_name, emb_name)
# write the embedding weights back to the disk
pickle.dump(embeddings, open(emb_path, 'wb'))
embeddings = np.array(embeddings, dtype='float32')
return embeddings
def load_char_embeddings(char_vocab, ds_name):
"""
load pre-trained character-level embeddings
:param char_vocab: vocabulary of character
:param ds_name: name of dataset
:return:
"""
n_char = len(char_vocab)
pkl = './embeddings/%s_char.pkl' % ds_name
if os.path.exists(pkl):
print("Load character embeddings from %s..." % pkl)
embeddings = pickle.load(open(pkl, 'rb'))
else:
emb_path = './embeddings/char-embeddings.txt'
print("Load character embeddings from %s..." % emb_path)
raw_embeddings = {}
n_found = 0
with open(emb_path) as fp:
for line in fp:
eles = line.strip().split()
ch = eles[0]
vec = [float(ele) for ele in eles[1:]]
if ch not in raw_embeddings:
raw_embeddings[ch] = vec
dim_ch = len(raw_embeddings['A'])
embeddings = np.zeros(shape=(n_char, dim_ch))
for ch in char_vocab:
cid = char_vocab[ch]
if ch in raw_embeddings:
embeddings[cid] = np.array(raw_embeddings[ch])
n_found += 1
else:
embeddings[cid] = np.random.uniform(-0.25, 0.25, dim_ch)
print("Find %s chars in pre-trained character embeddings..." % n_found)
embeddings = np.array(embeddings, dtype='float32')
pickle.dump(embeddings, open(pkl, 'wb'))
return embeddings
def label2tag(label_sequence, tag_vocab):
"""
convert label sequence to tag sequence
:param label_sequence: label sequence
:param tag_vocab: tag vocabulary, i.e., mapping between tag and label
:return:
"""
inv_tag_vocab = {}
for tag in tag_vocab:
label = tag_vocab[tag]
inv_tag_vocab[label] = tag
tag_sequence = []
n_tag = len(tag_vocab)
for l in label_sequence:
if l in inv_tag_vocab:
tag_sequence.append(inv_tag_vocab[l])
elif l == n_tag or l == n_tag + 1:
tag_sequence.append("O")
else:
raise Exception("Invalid label %s" % l)
return tag_sequence
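# Illustrative usage (assuming the BIEOS ote tag vocabulary built in set_labels):
#     >>> label2tag([0, 1, 3, 0, 4], tag_vocab={'O': 0, 'B': 1, 'I': 2, 'E': 3, 'S': 4})
#     ['O', 'B', 'E', 'O', 'S']
# Labels equal to len(tag_vocab) or len(tag_vocab) + 1 also map to 'O'.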
def tag2predictions(ote_tag_sequence, ts_tag_sequence):
"""
transform BIEOS tag sequence to the list of aspects together with sentiment
:param ote_tag_sequence: tag sequence for opinion target extraction
:param ts_tag_sequence: tag sequence for targeted sentiment
:return: a list of aspects/entities
"""
n_tag = len(ote_tag_sequence)
# opinion target sequence and targeted sentiment sequence
ot_sequence, ts_sequence = [], []
beg, end = -1, -1
for i in range(n_tag):
tag = ote_tag_sequence[i]
if tag == 'S':
ot_sequence.append((i, i))
elif tag == 'B':
beg = i
elif tag == 'E':
end = i
if end > beg and beg != -1:
ot_sequence.append((beg, end))
beg, end = -1, -1
sentiments = []
beg, end = -1, -1
for i in range(n_tag):
ts_tag = ts_tag_sequence[i]
# current position and sentiment
eles = ts_tag.split('-')
if len(eles) == 2:
pos, sentiment = eles
else:
pos, sentiment = 'O', 'O'
if sentiment != 'O':
# current word is a subjective word
sentiments.append(sentiment)
if pos == 'S':
# singleton
ts_sequence.append((i, i, sentiments[0]))
sentiments = []
elif pos == 'B':
beg = i
elif pos == 'E':
end = i
# schema1: only the consistent sentiment tags are accepted
# that is, all of the sentiment tags are the same
if end > beg > -1 and len(set(sentiments)) == 1:
ts_sequence.append((beg, end, sentiment))
sentiments = []
beg, end = -1, -1
# schema2: only consider the sentiment at the beginning of the aspect span
# if end > beg > -1:
# ts_sequence.append((beg, end, sentiments[0]))
# sentiments = []
# beg, end = -1, -1
return ot_sequence, ts_sequence
def tag2ot(ote_tag_sequence):
"""
transform ote tag sequence to a sequence of opinion target
:param ote_tag_sequence: tag sequence for ote task
:return:
"""
n_tags = len(ote_tag_sequence)
ot_sequence = []
beg, end = -1, -1
for i in range(n_tags):
tag = ote_tag_sequence[i]
if tag == 'S':
ot_sequence.append((i, i))
elif tag == 'B':
beg = i
elif tag == 'E':
end = i
if end > beg > -1:
ot_sequence.append((beg, end))
beg, end = -1, -1
return ot_sequence
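# Illustrative usage (hypothetical tag sequence): spans are (begin, end) index pairs:
#     >>> tag2ot(['O', 'B', 'I', 'E', 'S'])
#     [(1, 3), (4, 4)]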
def tag2ts(ts_tag_sequence):
"""
transform ts tag sequence to targeted sentiment
:param ts_tag_sequence: tag sequence for ts task
:return:
"""
n_tags = len(ts_tag_sequence)
ts_sequence, sentiments = [], []
beg, end = -1, -1
for i in range(n_tags):
ts_tag = ts_tag_sequence[i]
# current position and sentiment
eles = ts_tag.split('-')
if len(eles) == 2:
pos, sentiment = eles
else:
pos, sentiment = 'O', 'O'
if sentiment != 'O':
# current word is a subjective word
sentiments.append(sentiment)
if pos == 'S':
# singleton
ts_sequence.append((i, i, sentiment))
sentiments = []
elif pos == 'B':
beg = i
elif pos == 'E':
end = i
# schema1: only the consistent sentiment tags are accepted
# that is, all of the sentiment tags are the same
if end > beg > -1 and len(set(sentiments)) == 1:
ts_sequence.append((beg, end, sentiment))
sentiments = []
beg, end = -1, -1
return ts_sequence
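# Illustrative usage (hypothetical tag sequence): spans are (begin, end, sentiment) triples:
#     >>> tag2ts(['O', 'B-POS', 'E-POS', 'S-NEG'])
#     [(1, 2, 'POS'), (3, 3, 'NEG')]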
def to_conll(train, val, test, embeddings, vocab, ds_name):
"""
:param train: training dataset
:param val: validation / development dataset
:param test: testing dataset
:param embeddings: pre-trained word embeddings
:param vocab: vocabulary
:return:
"""
inv_vocab = {}
for w in vocab:
wid = vocab[w]
inv_vocab[wid] = w
train_lines = semeval2conll(dataset=train)
dev_lines = semeval2conll(dataset=val)
test_lines = semeval2conll(dataset=test)
base_folder = '/projdata9/info_fil/lixin/Research/NCRFpp/sample_data'
with open('%s/%s_train.txt' % (base_folder, ds_name), 'w+') as fp:
fp.writelines(train_lines)
with open('%s/%s_dev.txt' % (base_folder, ds_name), 'w+') as fp:
fp.writelines(dev_lines)
with open('%s/%s_test.txt' % (base_folder, ds_name), 'w+') as fp:
fp.writelines(test_lines)
emb_lines = []
for i in range(len(embeddings)):
word = inv_vocab[i]
emb_vec = embeddings[i]
emb_lines.append('%s %s\n' % (word, ' '.join([str(ele) for ele in emb_vec])))
    # write the embeddings back to the NCRFpp folder
with open('%s/%s_emb.txt' % (base_folder, ds_name), 'w+') as fp:
fp.writelines(emb_lines)
def semeval2conll(dataset):
"""
transform the format of semeval datasets to conll form
:param dataset: input dataset
:return:
"""
conll_lines = []
for record in dataset:
ote_raw_tags = record['ote_raw_tags']
ts_raw_tags = record['ts_raw_tags']
words = record['words']
ote_tags, ts_tags = ot2bieos(ote_tag_sequence=ote_raw_tags, ts_tag_sequence=ts_raw_tags)
for (w, t) in zip(words, ts_tags):
conll_lines.append('%s %s\n' % (w, t))
        # use empty line to separate the samples
conll_lines.append('\n')
return conll_lines
| 32,288 | 32.810471 | 113 | py |
E2E-TBSA | E2E-TBSA-master/model.py | import dynet_config
dynet_config.set(mem='4096', random_seed=1314159)
import dynet as dy
import random
from utils import *
from evals import *
from nltk import word_tokenize
def norm_vec(vec):
"""
normalize a dynet vector expression
:param vec:
:return:
"""
sum_item = dy.sum_elems(vec)
norm_vec = vec / sum_item.value()
print(norm_vec.npvalue())
return norm_vec
def calculate_confidence(vec, proportions=0.5):
"""
calculate the value of alpha, the employed metric is GINI index
:param vec:
:return:
"""
square_sum = dy.sum_elems(dy.cmult(vec, vec)).value()
if not 0 <= square_sum <= 1:
raise Exception("Invalid square sum %.3lf" % square_sum)
return (1 - square_sum) * proportions
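# Worked example (illustrative numbers only): for a uniform distribution over the
# five OTE tags, square_sum = 5 * 0.2 ** 2 = 0.2, so with proportions = 0.5 the
# returned alpha is (1 - 0.2) * 0.5 = 0.4; a one-hot (fully confident) distribution
# gives square_sum = 1 and hence alpha = 0.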
class WDEmb:
def __init__(self, pc, n_words, dim_w, pretrained_embeddings=None):
"""
constructor of Word Embedding Layer
:param pc: parameter collection to hold the parameters
:param n_words: number of words in the vocabulary
:param dim_w: dimension of word embeddings
:param pretrained_embeddings:
"""
self.pc = pc.add_subcollection()
self.n_words = n_words
self.dim_w = dim_w
# add word embedding as lookup parameters
self.W = self.pc.add_lookup_parameters((self.n_words, self.dim_w))
if pretrained_embeddings is not None:
print("Use pre-trained word embeddings...")
self.W.init_from_array(pretrained_embeddings)
def parametrize(self):
"""
note: lookup parameters do not need parametrization
:return:
"""
pass
def __call__(self, xs):
"""
map input words (or ngrams) into the corresponding word embeddings
:param xs: a list of ngrams (or words if win is set to 1)
:return: embeddings looked from tables
"""
embeddings = [dy.concatenate([self.W[w] for w in ngram]) for ngram in xs]
return embeddings
class CharEmb:
# build character embedding layers from random initialization
def __init__(self, pc, n_chars, dim_char, pretrained_embeddings=None):
"""
:param pc: parameter collection
:param n_chars: number of distinct characters
:param dim_char: dimension of character embedding
"""
self.pc = pc.add_subcollection()
self.n_chars = n_chars
self.dim_char = dim_char
# network parameters
#self.W = self.pc.add_lookup_parameters((self.n_chars, self.dim_char),
# init='uniform', scale=np.sqrt(3.0 / self.dim_char))
self.W = self.pc.add_lookup_parameters((self.n_chars, self.dim_char),
init=dy.UniformInitializer(np.sqrt(3.0 / self.dim_char)))
if pretrained_embeddings is not None:
print("Use pre-trained character embeddings...")
self.W.init_from_array(pretrained_embeddings)
def __call__(self, xs):
"""
map input characters to low-dimensional character embeddings
:param xs: input chars
:return:
"""
char_embs = [self.W[cid] for cid in xs]
return char_embs
class Linear:
    # fully connected layer with optional bias and non-linear activation
def __init__(self, pc, n_in, n_out, use_bias=False, nonlinear=None):
"""
:param pc: parameter collection to hold the parameters
:param n_in: input dimension
:param n_out: output dimension
:param use_bias: if add bias or not, default NOT
:param nonlinear: non-linear activation function
"""
# create a sub-collection of the current parameters collection and returns it
# the returned sub-collection is simply a ParameterCollection object tied to a parent collection
self.pc = pc.add_subcollection()
self.n_in = n_in
self.n_out = n_out
self.use_bias = use_bias
self.nonlinear = nonlinear
# add a parameter to the ParameterCollection with a given initializer
self._W = self.pc.add_parameters((self.n_out, self.n_in), init=dy.UniformInitializer(0.2))
if self.use_bias:
self._b = self.pc.add_parameters((self.n_out,), init=dy.ConstInitializer(0.0))
def parametrize(self):
"""
put the parameters into the computational graph
:return:
"""
# add parameter to the computation graph (cg)
self.W = dy.parameter(self._W)
if self.use_bias:
self.b = dy.parameter(self._b)
def __call__(self, x):
"""
:param x: input feature vector
:return:
"""
Wx = self._W * x
if self.use_bias:
Wx = Wx + self._b
if self.nonlinear == 'sigmoid':
return dy.logistic(Wx)
elif self.nonlinear == 'tanh':
return dy.tanh(Wx)
elif not self.nonlinear:
return Wx
else:
raise Exception("Unimplemented non-linear activation function %s" % self.nonlinear)
class Model:
# cascaded LSTMs for joint aspect detection and sentiment prediction
def __init__(self, params, vocab, embeddings, char_embeddings):
"""
:param params:
:param vocab:
:param embeddings:
:param char_embeddings:
"""
self.params = params
self.name = 'lstm_cascade'
self.dim_char = params.dim_char
self.dim_w = params.dim_w
self.dim_char_h = params.dim_char_h
self.dim_ote_h = params.dim_ote_h
self.dim_ts_h = params.dim_ts_h
self.input_win = params.input_win
self.ds_name = params.ds_name
# tag vocabulary of opinion target extraction and targeted sentiment
self.ote_tag_vocab = params.ote_tag_vocab
self.ts_tag_vocab = params.ts_tag_vocab
self.dim_ote_y = len(self.ote_tag_vocab)
self.dim_ts_y = len(self.ts_tag_vocab)
self.n_epoch = params.n_epoch
self.dropout_rate = params.dropout
self.tagging_schema = params.tagging_schema
self.clip_grad = params.clip_grad
self.use_char = params.use_char
# name of word embeddings
self.emb_name = params.emb_name
self.embeddings = embeddings
self.vocab = vocab
# character vocabulary
self.char_vocab = params.char_vocab
#self.td_proportions = params.td_proportions
self.epsilon = params.epsilon
#self.tc_proportions = params.tc_proportions
self.pc = dy.ParameterCollection()
if self.use_char:
self.char_emb = CharEmb(pc=self.pc,
n_chars=len(self.char_vocab),
dim_char=self.dim_char,
pretrained_embeddings=char_embeddings)
self.lstm_char = dy.LSTMBuilder(1, self.dim_char, self.dim_char_h, self.pc)
dim_input = self.input_win * self.dim_w + 2 * self.dim_char_h
else:
dim_input = self.input_win * self.dim_w
# word embedding layer
self.emb = WDEmb(pc=self.pc, n_words=len(vocab), dim_w=self.dim_w, pretrained_embeddings=embeddings)
# lstm layers
self.lstm_ote = dy.LSTMBuilder(1, dim_input, self.dim_ote_h, self.pc)
self.lstm_ts = dy.LSTMBuilder(1, 2*self.dim_ote_h, self.dim_ts_h, self.pc)
# fully connected layer
self.fc_ote = Linear(pc=self.pc, n_in=2*self.dim_ote_h, n_out=self.dim_ote_y)
self.fc_ts = Linear(pc=self.pc, n_in=2 * self.dim_ts_h, n_out=self.dim_ts_y)
assert self.tagging_schema == 'BIEOS'
transition_path = {'B': ['B-POS', 'B-NEG', 'B-NEU'],
'I': ['I-POS', 'I-NEG', 'I-NEU'],
'E': ['E-POS', 'E-NEG', 'E-NEU'],
'S': ['S-POS', 'S-NEG', 'S-NEU'],
'O': ['O']}
self.transition_scores = np.zeros((self.dim_ote_y, self.dim_ts_y))
for t in transition_path:
next_tags = transition_path[t]
n_next_tag = len(next_tags)
ote_id = self.ote_tag_vocab[t]
for nt in next_tags:
ts_id = self.ts_tag_vocab[nt]
self.transition_scores[ote_id][ts_id] = 1.0 / n_next_tag
print(self.transition_scores)
self.transition_scores = np.array(self.transition_scores, dtype='float32').transpose()
# opinion target-opinion words co-occurrence modeling
self.stm_lm = Linear(pc=self.pc, n_in=2*self.dim_ote_h, n_out=2*self.dim_ote_h, nonlinear='tanh')
# fully connected layer for opinion-enhanced indicator prediction task
self.fc_stm = Linear(pc=self.pc, n_in=2 * self.dim_ote_h, n_out=2)
# gate for maintaining sentiment consistency
self.W_gate = self.pc.add_parameters((2*self.dim_ote_h, 2*self.dim_ote_h),
init=dy.UniformInitializer(0.2))
# determine the optimizer
if params.optimizer == 'sgd':
self.optimizer = dy.SimpleSGDTrainer(self.pc, params.sgd_lr)
elif params.optimizer == 'adam':
self.optimizer = dy.AdamTrainer(self.pc, 0.001, 0.9, 0.9)
elif params.optimizer == 'adadelta':
self.optimizer = dy.AdadeltaTrainer(self.pc)
elif params.optimizer == 'momentum':
self.optimizer = dy.MomentumSGDTrainer(self.pc, 0.01, 0.9)
else:
raise Exception("Unsupported optimizer type: %s" % params.optimizer)
def forward(self, x, is_train=True):
"""
feed the input x into the network
:param x: input example
:param is_train: model is in training stage or not, default yes
:return: loss value, predicted ote labels, predicted ts labels
"""
# renew computational graph
dy.renew_cg()
# push the parameters to the cg, no need to do this after v2.0.3
# self.parametrize()
wids = x['wids']
cids = x['cids']
gold_ote_labels = x['ote_labels']
gold_ts_labels = x['ts_labels']
gold_stm_labels = x['stm_lm_labels']
seq_len = len(wids)
if self.use_char:
# using both character-level word representations and word-level representations
ch_word_emb = []
for t in range(seq_len):
ch_seq = cids[t]
input_ch_emb = self.char_emb(xs=ch_seq)
ch_h0_f = self.lstm_char.initial_state()
ch_h0_b = self.lstm_char.initial_state()
ch_f = ch_h0_f.transduce(input_ch_emb)[-1]
ch_b = ch_h0_b.transduce(input_ch_emb[::-1])[-1]
ch_word_emb.append(dy.concatenate([ch_f, ch_b]))
word_emb = self.emb(xs=wids)
input_emb = [dy.concatenate([c, w]) for (c, w) in zip(ch_word_emb, word_emb)]
else:
# only using word-level representations
input_emb = self.emb(xs=wids)
# equivalent to applying partial dropout on the LSTM
if is_train:
input_emb = [dy.dropout(x, self.dropout_rate) for x in input_emb]
# obtain initial rnn states
ote_h0_f = self.lstm_ote.initial_state()
ote_h0_b = self.lstm_ote.initial_state()
ote_hs_f = ote_h0_f.transduce(input_emb)
ote_hs_b = ote_h0_b.transduce(input_emb[::-1])[::-1]
ote_hs = [dy.concatenate([f, b]) for (f, b) in zip(ote_hs_f, ote_hs_b)]
# hidden states for opinion-enhanced target prediction, we refer it as stm_lm here
stm_lm_hs = [self.stm_lm(h) for h in ote_hs]
ts_h0_f = self.lstm_ts.initial_state()
ts_h0_b = self.lstm_ts.initial_state()
ts_hs_f = ts_h0_f.transduce(ote_hs)
ts_hs_b = ts_h0_b.transduce(ote_hs[::-1])[::-1]
ts_hs = [dy.concatenate([f, b]) for (f, b) in zip(ts_hs_f, ts_hs_b)]
ts_hs_tilde = []
        h_tilde_tm1 = None  # placeholder; overwritten at the first time step before it is read
for t in range(seq_len):
if t == 0:
h_tilde_t = ts_hs[t]
else:
# t-th hidden state for the task targeted sentiment
ts_ht = ts_hs[t]
gt = dy.logistic(self.W_gate * ts_ht)
h_tilde_t = dy.cmult(gt, ts_ht) + dy.cmult(1 - gt, h_tilde_tm1)
ts_hs_tilde.append(h_tilde_t)
h_tilde_tm1 = h_tilde_t
if is_train:
# perform dropout during training
ote_hs = [dy.dropout(h, self.dropout_rate) for h in ote_hs]
stm_lm_hs = [dy.dropout(h, self.dropout_rate) for h in stm_lm_hs]
ts_hs_tilde = [dy.dropout(h, self.dropout_rate) for h in ts_hs_tilde]
# weight matrix for boundary-guided transition
self.W_trans_ote = dy.inputTensor(self.transition_scores.copy())
losses = []
pred_ote_labels, pred_ts_labels = [], []
for i in range(seq_len):
# probability distribution over ote tag
p_y_x_ote = self.fc_ote(x=ote_hs[i])
p_y_x_ote = dy.softmax(p_y_x_ote)
loss_ote = -dy.log(dy.pick(p_y_x_ote, gold_ote_labels[i]))
# probability distribution over ts tag
p_y_x_ts = self.fc_ts(x=ts_hs_tilde[i])
p_y_x_ts = dy.softmax(p_y_x_ts)
            # confidence of the boundary prediction, used to weight the transition component
alpha = calculate_confidence(vec=p_y_x_ote, proportions=self.epsilon)
# transition score from ote tag to sentiment tag
ote2ts = self.W_trans_ote * p_y_x_ote
p_y_x_ts_tilde = alpha * ote2ts + (1 - alpha) * p_y_x_ts
loss_ts = -dy.log(dy.pick(p_y_x_ts_tilde, gold_ts_labels[i]))
loss_i = loss_ote / seq_len + loss_ts / seq_len
# predict if the current word is a target word according to the opinion information
p_y_x_stm = self.fc_stm(x=stm_lm_hs[i])
loss_stm = dy.pickneglogsoftmax(p_y_x_stm, gold_stm_labels[i])
loss_i += (loss_stm / seq_len)
losses.append(loss_i)
pred_ote_labels.append(np.argmax(p_y_x_ote.npvalue()))
pred_ts_labels.append(np.argmax(p_y_x_ts_tilde.npvalue()))
# total loss of the sequence predictions
loss = dy.esum(losses)
if is_train:
# run the backward pass based on the expression
loss.backward()
# update the model parameters
self.optimizer.update()
return loss.value(), pred_ote_labels, pred_ts_labels
def predict(self, dataset):
"""
perform prediction
:param dataset: dataset
:return: ote scores, ts_scores, predicted ote labels, predicted ts labels
"""
n_sample = len(dataset)
gold_ote = [x['ote_tags'] for x in dataset]
gold_ts = [x['ts_tags'] for x in dataset]
if self.tagging_schema == 'BIO':
gold_ote, gold_ts = bio2ot_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
gold_ote, gold_ts = ot2bieos_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
elif self.tagging_schema == 'OT':
gold_ote, gold_ts = ot2bieos_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
pred_ote, pred_ts = [], []
for i in range(n_sample):
_, pred_ote_labels, pred_ts_labels = self.forward(x=dataset[i], is_train=False)
pred_ote.append(label2tag(label_sequence=pred_ote_labels, tag_vocab=self.ote_tag_vocab))
pred_ts.append(label2tag(label_sequence=pred_ts_labels, tag_vocab=self.ts_tag_vocab))
# transform the output tag sequence to BIEOS tag sequence before evaluation
if self.tagging_schema == 'BIO':
pred_ote, pred_ts = bio2ot_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
pred_ote, pred_ts = ot2bieos_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
elif self.tagging_schema == 'OT':
pred_ote, pred_ts = ot2bieos_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
# evaluation
ote_scores, ts_scores = evaluate(gold_ot=gold_ote, gold_ts=gold_ts,
pred_ot=pred_ote, pred_ts=pred_ts)
return ote_scores, ts_scores, pred_ote, pred_ts
def decoding(self, dataset, model_name=None):
"""
predict the tag sequence for the dataset
:param dataset: dataset
:param model_name: path of the model parameters
:return:
"""
model_path = './models/%s' % model_name
if not os.path.exists(model_path):
raise Exception("Invalid model path %s..." % model_path)
self.pc.populate(model_path)
n_sample = len(dataset)
gold_ote = [x['ote_tags'] for x in dataset]
gold_ts = [x['ts_tags'] for x in dataset]
if self.tagging_schema == 'BIO':
gold_ote, gold_ts = bio2ot_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
gold_ote, gold_ts = ot2bieos_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
elif self.tagging_schema == 'OT':
gold_ote, gold_ts = ot2bieos_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
# predicted tag sequences and the input words
pred_ote, pred_ts, words = [], [], []
for i in range(n_sample):
_, pred_ote_labels, pred_ts_labels = self.forward(x=dataset[i], is_train=False)
pred_ote.append(label2tag(label_sequence=pred_ote_labels, tag_vocab=self.ote_tag_vocab))
pred_ts.append(label2tag(label_sequence=pred_ts_labels, tag_vocab=self.ts_tag_vocab))
words.append(dataset[i]['words'])
# transform the output tag sequence to BIEOS tag sequence before evaluation
if self.tagging_schema == 'BIO':
pred_ote, pred_ts = bio2ot_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
pred_ote, pred_ts = ot2bieos_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
elif self.tagging_schema == 'OT':
pred_ote, pred_ts = ot2bieos_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
# evaluation
ote_scores, ts_scores = evaluate(gold_ot=gold_ote, gold_ts=gold_ts,
pred_ot=pred_ote, pred_ts=pred_ts)
print("Evaluation scores: ote: f1: %.4f, ts: precision: %.4f, recall: %.4f, micro-f1: %.4f" %
(ote_scores[2], ts_scores[1], ts_scores[2], ts_scores[3]))
output_lines = ['Dataset: %s\n' % self.ds_name, 'Model: %s\n' % model_path, 'Parameter settings: \n']
params_dict = vars(self.params)
for k in params_dict:
if k == 'char_vocab' or k == 'vocab':
continue
else:
v = params_dict[k]
output_lines.append('\t%s: %s\n' % (k, v))
output_lines.append("==============================================\n\n")
for i in range(n_sample):
ote_seq = pred_ote[i]
ts_seq = pred_ts[i]
w_seq = words[i]
assert len(ote_seq) == len(ts_seq) == len(w_seq)
for j in range(len(ote_seq)):
word = w_seq[j]
ote_tag = ote_seq[j]
ts_tag = ts_seq[j]
output_lines.append('%s\t%s\t%s\n' % (word, ote_tag, ts_tag))
# use empty lines as the separator
output_lines.append('\n')
class LSTM_CRF:
# LSTM CRF model for sequence tagging
# NOT USED in the experiments
def __init__(self, params, vocab, embeddings):
"""
:param params: parameters
:param vocab: vocabulary
:param embeddings: pretrained word embeddings
"""
self.params = params
self.name = 'lstm_crf'
self.dim_char = params.dim_char
self.dim_w = params.dim_w
self.dim_char_h = params.dim_char_h
self.dim_ote_h = params.dim_ote_h
self.dim_ts_h = params.dim_ts_h
self.input_win = params.input_win
self.ds_name = params.ds_name
# tag vocabulary of opinion target extraction and targeted sentiment
self.ote_tag_vocab = params.ote_tag_vocab
self.ts_tag_vocab = params.ts_tag_vocab
self.dim_ote_y = len(self.ote_tag_vocab)
self.dim_ts_y = len(self.ts_tag_vocab)
self.n_epoch = params.n_epoch
self.dropout_rate = params.dropout
self.tagging_schema = params.tagging_schema
self.clip_grad = params.clip_grad
self.use_char = params.use_char
# name of word embeddings
self.emb_name = params.emb_name
self.embeddings = embeddings
self.vocab = vocab
# character vocabulary
self.char_vocab = params.char_vocab
self.pc = dy.ParameterCollection()
# word embedding layer
self.emb = WDEmb(pc=self.pc, n_words=len(vocab), dim_w=self.dim_w, pretrained_embeddings=embeddings)
# input dimension
dim_input = self.input_win * self.dim_w
self.lstm_ts = dy.LSTMBuilder(1, dim_input, self.dim_ts_h, self.pc)
# hidden layer between LSTM and CRF decoding layer
self.hidden = Linear(pc=self.pc, n_in=2*self.dim_ts_h,
n_out=self.dim_ts_h, use_bias=True, nonlinear='tanh')
# map the word representation to the ts label space
# in the label space, both BEG and END tag are considered
self.fc_ts = Linear(pc=self.pc, n_in=self.dim_ts_h, n_out=self.dim_ts_y)
# transition matrix, [i, j] is the transition score from tag i to tag j
self.transitions = self.pc.add_lookup_parameters((self.dim_ts_y + 2, self.dim_ts_y + 2))
# determine the optimizer
if params.optimizer == 'sgd':
self.optimizer = dy.SimpleSGDTrainer(self.pc, params.sgd_lr)
elif params.optimizer == 'adam':
self.optimizer = dy.AdamTrainer(self.pc, 0.001, 0.9, 0.9)
elif params.optimizer == 'adadelta':
self.optimizer = dy.AdadeltaTrainer(self.pc)
elif params.optimizer == 'momentum':
self.optimizer = dy.MomentumSGDTrainer(self.pc, 0.01, 0.9)
else:
raise Exception("Unsupported optimizer type: %s" % params.optimizer)
def log_sum_exp(self, scores):
"""
:param scores: score vector with one entry per tag (including <BEG> and <END>)
:return: log(sum(exp(scores))), computed in a numerically stable way
"""
scores_val = scores.npvalue()
max_idx = np.argmax(scores_val)
# subtract the maximum score before exponentiating to avoid numerical overflow
# (otherwise the exponentials can blow up to INF)
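# log(sum_i exp(s_i)) = m + log(sum_i exp(s_i - m)) with m = max_i s_i,
# which is what the lines below compute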
# dynet expression of maximum scores
max_score = dy.pick(scores, max_idx)
max_score_broadcast = dy.concatenate([max_score] * (self.dim_ts_y + 2))
# shift the center of exponential sum to (scores - max)
return max_score + dy.log(dy.sum_elems(dy.transpose(dy.exp(scores - max_score_broadcast))))
def forward(self, x, is_train=True):
# renew computational graph
dy.renew_cg()
# push the parameters to the cg, no need to do this after v 2.0.3
# self.parametrize()
wids = x['wids']
gold_ts_labels = x['ts_labels']
input_emb = self.emb(xs=wids)
# add dropout on the embedding layer
if is_train:
input_emb = [dy.dropout(x, self.dropout_rate) for x in input_emb]
ts_h0_f = self.lstm_ts.initial_state()
ts_h0_b = self.lstm_ts.initial_state()
# bi-directional lstm
ts_hs_f = ts_h0_f.transduce(input_emb)
ts_hs_b = ts_h0_b.transduce(input_emb[::-1])[::-1]
ts_hs = [dy.concatenate([f, b]) for (f, b) in zip(ts_hs_f, ts_hs_b)]
ts_cs = [self.hidden(x=h) for h in ts_hs]
# tag scores output by the LSTM layer, shape: (n, dim_y)
label_scores = [self.fc_ts(x=c) for c in ts_cs]
min_val = -9999999
observations = [dy.concatenate([score, dy.inputVector([min_val, min_val])]) for score in label_scores]
assert len(observations) == len(gold_ts_labels)
# score generated from the gold standard sequence
gold_score = dy.scalarInput(0)
# sum of the observation scores
for t, score in enumerate(label_scores):
gold_score = gold_score + dy.pick(score, gold_ts_labels[t])
# <BEG> corresponds to dim_ts_y, <END> corresponds to dim_ts_y + 1
padded_gold_ts_labels = [self.dim_ts_y] + gold_ts_labels
# sum of the transition scores
for t in range(len(observations)):
# transition score A_{y_{t-1}, y_t}
gold_score = gold_score + dy.pick(self.transitions[padded_gold_ts_labels[t]], padded_gold_ts_labels[t+1])
# transition score from the last label to <END>
gold_score = gold_score + dy.pick(self.transitions[padded_gold_ts_labels[-1]], self.dim_ts_y + 1)
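# overall sequence score: S(x, y) = sum_t emission_t(y_t) + sum_t A[y_{t-1}, y_t],
# where the gold sequence is padded with the special <BEG> and <END> tags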
beg_obs = dy.inputVector([min_val] * self.dim_ts_y + [0, min_val])
end_obs = dy.inputVector([min_val] * self.dim_ts_y + [min_val, 0])
padded_observations = [beg_obs] + observations + [end_obs]
# observation scores at t = 0, i.e. the <BEG> position
init = padded_observations[0]
prev = init
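# forward algorithm over the padded observations:
#   alpha_t(y) = obs_t(y) + log sum_{y'} exp(alpha_{t-1}(y') + A[y', y])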
for t, obs in enumerate(padded_observations[1:]):
alphas_t = []
for next_y in range(self.dim_ts_y + 2):
# dy.pick(obs, next_y) gets the score of the tag next_y in the current observation vector (i.e., current word)
# transitions[:, next_y] is the transition scores ends in next_y
obs_broadcast = dy.concatenate([dy.pick(obs, next_y)] * (self.dim_ts_y + 2))
next_y_expr = prev + dy.transpose(self.transitions)[next_y] + obs_broadcast
alphas_t.append(self.log_sum_exp(scores=next_y_expr))
prev = dy.concatenate(alphas_t)
# dim_ts_y + 1 corresponds to the END tag
#final = prev + dy.transpose(self.transitions)[self.dim_ts_y + 1]
final = prev
all_path_score = self.log_sum_exp(scores=final)
loss = - (gold_score - all_path_score)
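# i.e. the negative log-likelihood: loss = log Z(x) - S(x, y_gold)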
if is_train:
loss.backward()
self.optimizer.update()
pred_ts_labels, _ = self.viterbi_decoding(observations=padded_observations)
return loss.value(), [], pred_ts_labels
def predict(self, dataset):
"""
:param dataset: dataset to evaluate on
:return: ote scores (None for this model) and ts scores
"""
n_sample = len(dataset)
gold_ote = [x['ote_tags'] for x in dataset]
gold_ts = [x['ts_tags'] for x in dataset]
if self.tagging_schema == 'BIO':
gold_ote, gold_ts = bio2ot_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
gold_ote, gold_ts = ot2bieos_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
elif self.tagging_schema == 'OT':
gold_ote, gold_ts = ot2bieos_batch(
ote_tags=gold_ote, ts_tags=gold_ts)
pred_ote, pred_ts = [], []
for i in range(n_sample):
_, _, pred_ts_labels = self.forward(x=dataset[i], is_train=False)
#pred_ote.append(label2tag(label_sequence=pred_ote_labels, tag_vocab=self.ote_tag_vocab))
pred_ts.append(label2tag(label_sequence=pred_ts_labels, tag_vocab=self.ts_tag_vocab))
# transform the output tag sequence to BIEOS tag sequence before evaluation
if self.tagging_schema == 'BIO':
pred_ote, pred_ts = bio2ot_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
pred_ote, pred_ts = ot2bieos_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
elif self.tagging_schema == 'OT':
pred_ote, pred_ts = ot2bieos_batch(
ote_tags=pred_ote, ts_tags=pred_ts)
# evaluation
ts_scores = evaluate_ts(gold_ts=gold_ts, pred_ts=pred_ts)
return None, ts_scores
def viterbi_decoding(self, observations):
"""
viterbi decoding for CRF decoding layer
:param observations: observation scores
:return: the best tag path and its score
"""
back_pointers = []
# observation score for BEG tag
init = observations[0]
prev = init
transition_T = dy.transpose(self.transitions)
trans_exprs = [transition_T[idx] for idx in range(self.dim_ts_y + 2)]
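# Viterbi recursion: delta_t(y) = obs_t(y) + max_{y'} (delta_{t-1}(y') + A[y', y]),
# with back pointers recording the argmax y' at every step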
for obs in observations[1:]:
bpts_t = []
vvars_t = []
for next_y in range(self.dim_ts_y + 2):
# trans_exprs[next_y]: transition scores for paths that end with next_y
next_y_expr = prev + trans_exprs[next_y]
next_y_arr = next_y_expr.npvalue()
best_y = np.argmax(next_y_arr)
bpts_t.append(best_y)
vvars_t.append(dy.pick(next_y_expr, best_y))
prev = dy.concatenate(vvars_t) + obs
back_pointers.append(bpts_t)
# end tags
#terminal_expr = prev + trans_exprs[self.dim_ts_y+1]
#terminal_arr = terminal_expr.npvalue()
final = prev
final_arr = final.npvalue()
best_y = np.argmax(final_arr)
assert best_y == (self.dim_ts_y + 1)
path_score = dy.pick(final, best_y)
# reverse over the backpointers to get the best path
# backtracking
best_path = []
for bpts_t in reversed(back_pointers):
best_y = bpts_t[best_y]
best_path.append(best_y)
# remove the beg label
BEG = best_path.pop()
best_path.reverse()
assert BEG == self.dim_ts_y
return best_path, path_score
| 29,635 | 39.990318 | 117 | py |
E2E-TBSA | E2E-TBSA-master/process_data.py | # coding: UTF-8
__author__ = 'lixin77'
from scrapy.selector import Selector
#import cPickle
import nltk
from nltk import word_tokenize
import sys
import string
def process_text(text):
"""
process the text and filter some special symbol
:param text: raw sentence text
:return: the cleaned text
"""
# string preprocessing; the aspect terms themselves are left unchanged
dot_exist = ('.' in text)
cur_text = text.replace('.', '')
#cur_text = cur_text.replace('-', ' ')
cur_text = cur_text.replace(' - ', ', ').strip()
cur_text = cur_text.replace('- ', ' ').strip()
# split words and punctuations
if '? ' not in cur_text:
cur_text = cur_text.replace('?', '? ').strip()
if '! ' not in cur_text:
cur_text = cur_text.replace('!', '! ').strip()
cur_text = cur_text.replace('(', '')
cur_text = cur_text.replace(')', '')
cur_text = cur_text.replace('...', ', ').strip('.').strip().strip(',')
# remove quote
cur_text = cur_text.replace('"', '')
cur_text = cur_text.replace(" '", " ")
cur_text = cur_text.replace("' ", " ")
cur_text = cur_text.replace(':', ', ')
if dot_exist:
cur_text += '.'
# correct some typos
# mainly for processing English texts
cur_text = cur_text.replace('cant', "can't")
cur_text = cur_text.replace('wouldnt', "wouldn't")
cur_text = cur_text.replace('dont', "don't")
cur_text = cur_text.replace('didnt', "didn't")
cur_text = cur_text.replace("you 're", "you're")
# replace some special symbol
cur_text = cur_text.replace(u' – ', ', ').strip()
cur_text = cur_text.replace(u"‘", "")
# filter out non-ASCII characters
cur_text = ''.join([ch if ord(ch) < 128 else ' ' for ch in cur_text])
return cur_text
def extract_aspect(aspects, text, dataset_name):
"""
extract aspects from xml tags
:param aspects: a list of aspect tags / selectors
:param text: corresponding sentence
:param dataset_name: name of dataset
:return:
"""
counter = 0
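# counter numbers the aspects within the sentence: each aspect span found in the
# text is temporarily replaced by a placeholder token 'ASPECT<k>' so that
# tokenization keeps it as a single unit; id2aspect and id2polarity later map the
# placeholder back to the original words and its polarity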
# mapping between aspect id and aspect name
id2aspect = {}
# mapping between aspect id and the sentiment polarity of the aspect
id2polarity = {}
# number of aspects, singletons, and multi-word aspects in the sentence, respectively
n_aspect, n_singleton, n_mult_word = 0, 0, 0
cur_text = text
from_to_pairs = []
for t in aspects:
_from = int(t.xpath('.//@from').extract()[0])
_to = int(t.xpath('.//@to').extract()[0])
if _from == 0 and _to == 0:
# NULL target
continue
if not '14' in dataset_name:
target = t.xpath('.//@target').extract()[0].replace(u'\xa0', ' ')
else:
target = t.xpath('.//@term').extract()[0].replace(u'\xa0', ' ')
if target == 'NULL':
# there is no aspect in the text
continue
# for SemEval challenge, polarity can be positive, negative or neutral
polarity = t.xpath('.//@polarity').extract()[0]
if polarity == 'positive':
pol_val = 'POS'
elif polarity == 'negative':
pol_val = 'NEG'
elif polarity == 'neutral':
pol_val = 'NEU'
elif polarity == 'conflict':
# ignore the conflict aspects
continue
else:
raise Exception("Invalid polarity value #%s#" % polarity)
flag = False
# remove special symbol in aspect term
#if 'english' in dataset_name:
target = target.replace(u'é', 'e')
target = target.replace(u'’', "'")
if text[_from:_to] == target:
flag = True
elif (_from - 1 >= 0) and text[(_from - 1):(_to - 1)] == target:
_from -= 1
_to -= 1
flag = True
elif (_to + 1 < len(text)) and text[(_from + 1):(_to + 1)] == target:
_from += 1
_to += 1
flag = True
# we can find the aspect in the raw text
assert flag
if (_from, _to) in from_to_pairs:
continue
aspect_temp_value = 'ASPECT%s' % counter
counter += 1
id2aspect[aspect_temp_value] = target
id2polarity[aspect_temp_value] = pol_val
cur_text = cur_text.replace(target, aspect_temp_value)
from_to_pairs.append((_from, _to))
n_aspect += 1
if len(target.split()) > 1:
n_mult_word += 1
else:
n_singleton += 1
return id2aspect, id2polarity, n_aspect, n_singleton, n_mult_word, cur_text
def format_output(x, y, text):
"""
format the dataset output
:param x: word sequence
:param y: tag sequence
:param text: raw text
:return:
"""
tag_sequence = ''
for i in range(len(x)):
if i == (len(x) - 1):
tag_sequence = '%s%s=%s' % (tag_sequence, x[i], y[i])
else:
tag_sequence = '%s%s=%s ' % (tag_sequence, x[i], y[i])
data_line = '%s####%s\n' % (text, tag_sequence)
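# each output line has the form "<raw sentence>####w1=t1 w2=t2 ...", e.g.
# (an illustrative example, not taken from the data):
#   The staff was friendly .####The=O staff=T-POS was=O friendly=O .=O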
#print(data_line)
return data_line
def extract_text(dataset_name):
"""
extract textual information from the xml file
:param dataset_name: dataset name
"""
delset = string.punctuation
fpath = './raw_data/%s.xml' % dataset_name
print("Process %s..." % fpath)
page_source = ''
with open(fpath) as fp:
for line in fp:
page_source = '%s%s' % (page_source, line.strip())
reviews = []
# regard one sentence as an example
sentences = Selector(text=page_source).xpath('//sentences/sentence')
reviews = [sentences]
n_sen = 0
n_word = 0
# number of aspects, singletons and multi-words in the dataset, respectively
n_aspect, n_singleton, n_mult_word = 0, 0, 0
n_sen_with_no_aspect = 0
lines = []
for sentences in reviews:
# scan all of the reviews
x, y, review_text = [], [], ''
for sid in range(len(sentences)):
sen = sentences[sid]
prev = ''
n_sen += 1
text = sen.xpath('.//text/text()').extract()[0]
text = text.replace(u'\xa0', ' ')
# note: preprocessing of the raw text should not change the character indices
# perform this only for English texts
# in spanish, it can be a normal word
text = text.replace(u'é', 'e')
text = text.replace(u'’', "'")
cur_text = text
assert isinstance(dataset_name, str)
if '14' in dataset_name:
aspects = sen.xpath('.//aspectterms/aspectterm')
else:
aspects = sen.xpath('.//opinions/opinion')
if not aspects:
# sentence with no aspect
n_sen_with_no_aspect += 1
else:
id2aspect, id2polarity, n_a, n_s, n_m, cur_text = extract_aspect(aspects=aspects, text=cur_text,
dataset_name=dataset_name)
n_aspect += n_a
n_singleton += n_s
n_mult_word += n_m
# flush output buffer every sentence
x, y = [], []
# process the text and filter the unnecessary characters
cur_text = process_text(text=cur_text)
tokens = word_tokenize(cur_text)
for t in tokens:
if t.startswith('ASPECT'):
# in this case, t is actually the id of aspect
# raw_string is the aspect word or aspect phrase
raw_string = id2aspect[t[:7]]
pol_val = id2polarity[t[:7]]
aspect_words = raw_string.split()
n_aw = len(aspect_words)
x.extend(aspect_words)
y.extend(['T-%s' % pol_val] * n_aw)
n_word += n_aw
else:
# t is the literal value
if not t.strip() == '':
# t is not blank space or empty string
x.append(t.strip())
y.append('O')
n_word += 1
# length check for every sentence
assert len(x) == len(y)
# write back after processing a sentence
lines.append(format_output(x=x, y=y, text=text))
with open('./data/%s.txt' % (dataset_name), 'w+') as fp:
fp.writelines(lines)
print("dataset:", dataset_name)
print("n_sen:", n_sen)
print("average length:", int(n_word / n_sen))
print("total aspects:", n_aspect)
print("n_singleton:", n_singleton)
print("n_mult_words:", n_mult_word)
print("n_without_aspect:", n_sen_with_no_aspect)
print("n_tokens:", n_word)
print("\n\n")
if __name__ == '__main__':
# this script is used for converting the original xml files into the formatted files
dataset_names = ['laptop14_train', 'laptop14_test',
'rest14_train', 'rest14_test',
'rest15_train', 'rest15_test', 'hotel15_test',
'rest16_train', 'rest16_test']
for ds_name in dataset_names:
extract_text(ds_name)
| 9,144 | 34.583658 | 112 | py |
E2E-TBSA | E2E-TBSA-master/config.py | laptop14 = {
"dim_ote_h": 50,
"dim_ts_h": 50,
"input_win": 1,
"stm_win": 3,
"optimizer": "adam",
"n_epoch": 30,
"dropout": 0.5,
"tagging_schema": "BIEOS",
"lr_decay": 0.05,
"use_char": 0,
"dynet_seed": 1314159,
"random_seem": 13456,
"epsilon": 0.5
}
rest_total = {
"dim_ote_h": 50,
"dim_ts_h": 50,
"input_win": 3,
"stm_win": 3,
"optimizer": "adam",
"n_epoch": 50,
"dropout": 0.5,
"tagging_schema": "BIEOS",
"lr_decay": 0.05,
"use_char": 0,
"dynet_seed": 1314159,
"random_seem": 1234,
"epsilon": 0.5
}
twitter = {
"dim_ote_h": 50,
"dim_ts_h": 50,
"input_win": 3,
"stm_win": 3,
"optimizer": "adam",
"n_epoch": 50,
"dropout": 0.5,
"tagging_schema": "BIEOS",
"lr_decay": 0.05,
"use_char": 0,
"dynet_seed": 1314159,
"random_seem": 7788,
"epsilon": 0.8
} | 906 | 18.297872 | 30 | py |
E2E-TBSA | E2E-TBSA-master/evals.py | from utils import *
import numpy as np
SMALL_POSITIVE_CONST = 1e-4
def evaluate_ote(gold_ot, pred_ot):
"""
evaluate the model performance for the ote task
:param gold_ot: gold standard ote tags
:param pred_ot: predicted ote tags
:return:
"""
assert len(gold_ot) == len(pred_ot)
n_samples = len(gold_ot)
# number of true positive, gold standard, predicted opinion targets
n_tp_ot, n_gold_ot, n_pred_ot = 0, 0, 0
for i in range(n_samples):
g_ot = gold_ot[i]
p_ot = pred_ot[i]
g_ot_sequence, p_ot_sequence = tag2ot(ote_tag_sequence=g_ot), tag2ot(ote_tag_sequence=p_ot)
# hit number
n_hit_ot = match_ot(gold_ote_sequence=g_ot_sequence, pred_ote_sequence=p_ot_sequence)
n_tp_ot += n_hit_ot
n_gold_ot += len(g_ot_sequence)
n_pred_ot += len(p_ot_sequence)
# add a small positive constant for smoothing
# calculate precision, recall and f1 for ote task
ot_precision = float(n_tp_ot) / float(n_pred_ot + SMALL_POSITIVE_CONST)
ot_recall = float(n_tp_ot) / float(n_gold_ot + SMALL_POSITIVE_CONST)
ot_f1 = 2 * ot_precision * ot_recall / (ot_precision + ot_recall + SMALL_POSITIVE_CONST)
ote_scores = (ot_precision, ot_recall, ot_f1)
return ote_scores
def evaluate_ts(gold_ts, pred_ts):
"""
evaluate the model performance for the ts task
:param gold_ts: gold standard ts tags
:param pred_ts: predicted ts tags
:return:
"""
assert len(gold_ts) == len(pred_ts)
n_samples = len(gold_ts)
# number of true positive, gold standard, and predicted targeted sentiments
n_tp_ts, n_gold_ts, n_pred_ts = np.zeros(3), np.zeros(3), np.zeros(3)
ts_precision, ts_recall, ts_f1 = np.zeros(3), np.zeros(3), np.zeros(3)
for i in range(n_samples):
g_ts = gold_ts[i]
p_ts = pred_ts[i]
g_ts_sequence, p_ts_sequence = tag2ts(ts_tag_sequence=g_ts), tag2ts(ts_tag_sequence=p_ts)
hit_ts_count, gold_ts_count, pred_ts_count = match_ts(gold_ts_sequence=g_ts_sequence,
pred_ts_sequence=p_ts_sequence)
n_tp_ts += hit_ts_count
n_gold_ts += gold_ts_count
n_pred_ts += pred_ts_count
# calculate macro-average scores for ts task
for i in range(3):
n_ts = n_tp_ts[i]
n_g_ts = n_gold_ts[i]
n_p_ts = n_pred_ts[i]
ts_precision[i] = float(n_ts) / float(n_p_ts + SMALL_POSITIVE_CONST)
ts_recall[i] = float(n_ts) / float(n_g_ts + SMALL_POSITIVE_CONST)
ts_f1[i] = 2 * ts_precision[i] * ts_recall[i] / (ts_precision[i] + ts_recall[i] + SMALL_POSITIVE_CONST)
ts_macro_f1 = ts_f1.mean()
# calculate micro-average scores for ts task
n_tp_total = sum(n_tp_ts)
# total sum of TP and FN
n_g_total = sum(n_gold_ts)
# total sum of TP and FP
n_p_total = sum(n_pred_ts)
ts_micro_p = float(n_tp_total) / (n_p_total + SMALL_POSITIVE_CONST)
ts_micro_r = float(n_tp_total) / (n_g_total + SMALL_POSITIVE_CONST)
ts_micro_f1 = 2 * ts_micro_p * ts_micro_r / (ts_micro_p + ts_micro_r + SMALL_POSITIVE_CONST)
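# micro-averaging pools the TP/FP/FN counts over the three sentiment classes before
# computing precision/recall, whereas macro-averaging takes the mean of per-class F1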
ts_scores = (ts_macro_f1, ts_micro_p, ts_micro_r, ts_micro_f1)
return ts_scores
def evaluate(gold_ot, gold_ts, pred_ot, pred_ts):
"""
evaluate the performance of the predictions
:param gold_ot: gold standard opinion target tags
:param gold_ts: gold standard targeted sentiment tags
:param pred_ot: predicted opinion target tags
:param pred_ts: predicted targeted sentiment tags
:return: metric scores of ner and sa
"""
assert len(gold_ot) == len(gold_ts) == len(pred_ot) == len(pred_ts)
ote_scores = evaluate_ote(gold_ot=gold_ot, pred_ot=pred_ot)
ts_scores = evaluate_ts(gold_ts=gold_ts, pred_ts=pred_ts)
return ote_scores, ts_scores
def match_ot(gold_ote_sequence, pred_ote_sequence):
"""
calculate the number of correctly predicted opinion target
:param gold_ote_sequence: gold standard opinion target sequence
:param pred_ote_sequence: predicted opinion target sequence
:return: matched number
"""
n_hit = 0
for t in pred_ote_sequence:
if t in gold_ote_sequence:
n_hit += 1
return n_hit
def match_ts(gold_ts_sequence, pred_ts_sequence):
"""
calculate the number of correctly predicted targeted sentiment
:param gold_ts_sequence: gold standard targeted sentiment sequence
:param pred_ts_sequence: predicted targeted sentiment sequence
:return:
"""
# positive, negative and neutral
tag2tagid = {'POS': 0, 'NEG': 1, 'NEU': 2}
hit_count, gold_count, pred_count = np.zeros(3), np.zeros(3), np.zeros(3)
for t in gold_ts_sequence:
#print(t)
ts_tag = t[2]
tid = tag2tagid[ts_tag]
gold_count[tid] += 1
for t in pred_ts_sequence:
ts_tag = t[2]
tid = tag2tagid[ts_tag]
if t in gold_ts_sequence:
hit_count[tid] += 1
pred_count[tid] += 1
return hit_count, gold_count, pred_count
| 5,028 | 36.251852 | 111 | py |
fork--wilds-public | fork--wilds-public-main/setup.py | import setuptools
import os
import sys
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(here, 'wilds'))
from version import __version__
print(f'Version {__version__}')
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="wilds",
version=__version__,
author="WILDS team",
author_email="[email protected]",
url="https://wilds.stanford.edu",
description="WILDS distribution shift benchmark",
long_description=long_description,
long_description_content_type="text/markdown",
install_requires = [
'numpy>=1.19.1',
'pandas>=1.1.0',
'scikit-learn>=0.20.0',
'pillow>=7.2.0',
'torch>=1.7.0',
'ogb>=1.2.6',
'tqdm>=4.53.0',
'outdated>=0.2.0',
'pytz>=2020.4',
],
license='MIT',
packages=setuptools.find_packages(exclude=['dataset_preprocessing', 'examples', 'examples.models', 'examples.models.bert']),
classifiers=[
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Intended Audience :: Science/Research',
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
python_requires='>=3.6',
)
| 1,281 | 28.136364 | 128 | py |
fork--wilds-public | fork--wilds-public-main/examples/losses.py | import torch.nn as nn
from wilds.common.metrics.loss import ElementwiseLoss, Loss, MultiTaskLoss
from wilds.common.metrics.all_metrics import MSE
def initialize_loss(config, d_out):
if config.loss_function == 'cross_entropy':
return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
elif config.loss_function == 'lm_cross_entropy':
return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
elif config.loss_function == 'mse':
return MSE(name='loss')
elif config.loss_function == 'multitask_bce':
return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))
elif config.loss_function == 'fasterrcnn_criterion':
from examples.models.detection.fasterrcnn import FasterRCNNLoss
return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.device))
else:
raise ValueError(f'config.loss_function {config.loss_function} not recognized')
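# minimal usage sketch (the attribute value below is an assumption chosen from the
# branches above; note that d_out is currently not used inside this function):
# config.loss_function = 'cross_entropy'
# loss = initialize_loss(config, d_out=None)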
| 939 | 38.166667 | 87 | py |
fork--wilds-public | fork--wilds-public-main/examples/evaluate.py | import argparse
import json
import os
import urllib.request
from ast import literal_eval
from typing import Dict, List
from urllib.parse import urlparse
import numpy as np
import torch
from wilds import benchmark_datasets
from wilds import get_dataset
from wilds.datasets.wilds_dataset import WILDSDataset, WILDSSubset
"""
Evaluate predictions for WILDS datasets.
Usage:
python examples/evaluate.py <Path to directory with predictions> <Path to output directory>
python examples/evaluate.py <Path to directory with predictions> <Path to output directory> --dataset <A WILDS dataset>
"""
def evaluate_all_benchmarks(predictions_dir: str, output_dir: str, root_dir: str):
"""
Evaluate predictions for all the WILDS benchmarks.
Parameters:
predictions_dir (str): Path to the directory with predictions. Can be a URL
output_dir (str): Output directory
root_dir (str): The directory where datasets can be found
"""
all_results: Dict[str, Dict[str, Dict[str, float]]] = dict()
for dataset in benchmark_datasets:
try:
all_results[dataset] = evaluate_benchmark(
dataset, os.path.join(predictions_dir, dataset), output_dir, root_dir
)
except Exception as e:
print(f"Could not evaluate predictions for {dataset}:\n{str(e)}")
# Write out aggregated results to output file
print(f"Writing complete results to {output_dir}...")
with open(os.path.join(output_dir, "all_results.json"), "w") as f:
json.dump(all_results, f, indent=4)
def evaluate_benchmark(
dataset_name: str, predictions_dir: str, output_dir: str, root_dir: str
) -> Dict[str, Dict[str, float]]:
"""
Evaluate across multiple replicates for a single benchmark.
Parameters:
dataset_name (str): Name of the dataset. See datasets.py for the complete list of datasets.
predictions_dir (str): Path to the directory with predictions. Can be a URL.
output_dir (str): Output directory
root_dir (str): The directory where datasets can be found
Returns:
Metrics as a dictionary with metrics as the keys and metric values as the values
"""
def get_replicates(dataset_name: str) -> List[str]:
if dataset_name == "poverty":
return [f"fold:{fold}" for fold in ["A", "B", "C", "D", "E"]]
else:
if dataset_name == "camelyon17":
seeds = range(0, 10)
elif dataset_name == "civilcomments":
seeds = range(0, 5)
else:
seeds = range(0, 3)
return [f"seed:{seed}" for seed in seeds]
def get_prediction_file(
predictions_dir: str, dataset_name: str, split: str, replicate: str
) -> str:
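# a prediction file is matched by prefix and extension: its name must start with
# "<dataset>_split:<split>_<replicate>" and end with ".csv" or ".pth", e.g.
# "camelyon17_split:test_seed:0_predictions.csv" (illustrative name only)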
run_id = f"{dataset_name}_split:{split}_{replicate}"
for file in os.listdir(predictions_dir):
if file.startswith(run_id) and (
file.endswith(".csv") or file.endswith(".pth")
):
return file
raise FileNotFoundError(
f"Could not find CSV or pth prediction file that starts with {run_id}."
)
def get_metrics(dataset_name: str) -> List[str]:
if "amazon" == dataset_name:
return ["10th_percentile_acc", "acc_avg"]
elif "camelyon17" == dataset_name:
return ["acc_avg"]
elif "civilcomments" == dataset_name:
return ["acc_wg", "acc_avg"]
elif "fmow" == dataset_name:
return ["acc_worst_region", "acc_avg"]
elif "iwildcam" == dataset_name:
return ["F1-macro_all", "acc_avg"]
elif "ogb-molpcba" == dataset_name:
return ["ap"]
elif "poverty" == dataset_name:
return ["r_wg", "r_all"]
elif "py150" == dataset_name:
return ["acc", "Acc (Overall)"]
elif "globalwheat" == dataset_name:
return ["detection_acc_avg_dom"]
elif "rxrx1" == dataset_name:
return ["acc_avg"]
else:
raise ValueError(f"Invalid dataset: {dataset_name}")
# Dataset will only be downloaded if it does not exist
wilds_dataset: WILDSDataset = get_dataset(
dataset=dataset_name, root_dir=root_dir, download=True
)
splits: List[str] = list(wilds_dataset.split_dict.keys())
if "train" in splits:
splits.remove("train")
replicates_results: Dict[str, Dict[str, List[float]]] = dict()
replicates: List[str] = get_replicates(dataset_name)
metrics: List[str] = get_metrics(dataset_name)
# Store the results for each replicate
for split in splits:
replicates_results[split] = {}
for metric in metrics:
replicates_results[split][metric] = []
for replicate in replicates:
predictions_file = get_prediction_file(
predictions_dir, dataset_name, split, replicate
)
print(
f"Processing split={split}, replicate={replicate}, predictions_file={predictions_file}..."
)
full_path = os.path.join(predictions_dir, predictions_file)
# GlobalWheat's predictions are a list of dictionaries, so it has to be handled separately
if dataset_name == "globalwheat":
metric_results: Dict[str, float] = evaluate_replicate_for_globalwheat(
wilds_dataset, split, full_path
)
else:
predicted_labels: torch.Tensor = get_predictions(full_path)
metric_results = evaluate_replicate(
wilds_dataset, split, predicted_labels
)
for metric in metrics:
replicates_results[split][metric].append(metric_results[metric])
aggregated_results: Dict[str, Dict[str, float]] = dict()
# Aggregate results of replicates
for split in splits:
aggregated_results[split] = {}
for metric in metrics:
replicates_metric_values: List[float] = replicates_results[split][metric]
aggregated_results[split][f"{metric}_std"] = np.std(
replicates_metric_values, ddof=1
)
aggregated_results[split][metric] = np.mean(replicates_metric_values)
# Write out aggregated results to output file
print(f"Writing aggregated results for {dataset_name} to {output_dir}...")
with open(os.path.join(output_dir, f"{dataset_name}_results.json"), "w") as f:
json.dump(aggregated_results, f, indent=4)
return aggregated_results
def evaluate_replicate(
dataset: WILDSDataset, split: str, predicted_labels: torch.Tensor
) -> Dict[str, float]:
"""
Evaluate the given predictions and return the appropriate metrics.
Parameters:
dataset (WILDSDataset): A WILDS Dataset
split (str): split we are evaluating on
predicted_labels (torch.Tensor): Predictions
Returns:
Metrics as a dictionary with metrics as the keys and metric values as the values
"""
# Dataset will only be downloaded if it does not exist
subset: WILDSSubset = dataset.get_subset(split)
metadata: torch.Tensor = subset.metadata_array
true_labels = subset.y_array
if predicted_labels.shape != true_labels.shape:
predicted_labels.unsqueeze_(-1)
return dataset.eval(predicted_labels, true_labels, metadata)[0]
def evaluate_replicate_for_globalwheat(
dataset: WILDSDataset, split: str, path_to_predictions: str
) -> Dict[str, float]:
predicted_labels = torch.load(path_to_predictions)
subset: WILDSSubset = dataset.get_subset(split)
metadata: torch.Tensor = subset.metadata_array
true_labels = [subset.dataset.y_array[idx] for idx in subset.indices]
return dataset.eval(predicted_labels, true_labels, metadata)[0]
def get_predictions(path: str) -> torch.Tensor:
"""
Extract out the predictions from the file at path.
Parameters:
path (str): Path to the file that has the predicted labels. Can be a URL.
Return:
Tensor representing predictions
"""
if is_path_url(path):
data = urllib.request.urlopen(path)
else:
file = open(path, mode="r")
data = file.readlines()
file.close()
predicted_labels = [literal_eval(line.rstrip()) for line in data if line.rstrip()]
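# each non-empty line of the file is parsed with literal_eval, so a predictions
# file is expected to hold one Python literal per line, e.g. (illustrative):
#   2
#   0
#   1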
return torch.from_numpy(np.array(predicted_labels))
def is_path_url(path: str) -> bool:
"""
Returns True if the path is a URL.
"""
try:
result = urlparse(path)
return all([result.scheme, result.netloc, result.path])
except:
return False
def main():
if args.dataset:
evaluate_benchmark(
args.dataset, args.predictions_dir, args.output_dir, args.root_dir
)
else:
print("A dataset was not specified. Evaluating for all WILDS datasets...")
evaluate_all_benchmarks(args.predictions_dir, args.output_dir, args.root_dir)
print("\nDone.")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Evaluate predictions for WILDS datasets."
)
parser.add_argument(
"predictions_dir",
type=str,
help="Path to prediction CSV or pth files.",
)
parser.add_argument(
"output_dir",
type=str,
help="Path to output directory.",
)
parser.add_argument(
"--dataset",
type=str,
choices=benchmark_datasets,
help="WILDS dataset to evaluate for.",
)
parser.add_argument(
"--root-dir",
type=str,
default="data",
help="The directory where the datasets can be found (or should be downloaded to, if they do not exist).",
)
# Parse args and run this script
args = parser.parse_args()
main()
| 9,843 | 33.784452 | 124 | py |