repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
ocp | ocp-main/ocpmodels/trainers/forces_trainer.py | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import pathlib
from collections import defaultdict
from pathlib import Path
from typing import Optional
import numpy as np
import torch
import torch_geometric
from tqdm import tqdm
from ocpmodels.common import distutils
from ocpmodels.common.registry import registry
from ocpmodels.common.relaxation.ml_relaxation import ml_relax
from ocpmodels.common.utils import check_traj_files
from ocpmodels.modules.evaluator import Evaluator
from ocpmodels.modules.normalizer import Normalizer
from ocpmodels.modules.scaling.util import ensure_fitted
from ocpmodels.trainers.base_trainer import BaseTrainer
@registry.register_trainer("forces")
class ForcesTrainer(BaseTrainer):
"""
Trainer class for the Structure to Energy & Force (S2EF) and Initial State to
Relaxed State (IS2RS) tasks.
.. note::
Examples of configurations for task, model, dataset and optimizer
can be found in `configs/ocp_s2ef <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2re/>`_
and `configs/ocp_is2rs <https://github.com/Open-Catalyst-Project/baselines/tree/master/configs/ocp_is2rs/>`_.
Args:
task (dict): Task configuration.
model (dict): Model configuration.
dataset (dict): Dataset configuration. The dataset needs to be a SinglePointLMDB dataset.
optimizer (dict): Optimizer configuration.
identifier (str): Experiment identifier that is appended to log directory.
run_dir (str, optional): Path to the run directory where logs are to be saved.
(default: :obj:`None`)
is_debug (bool, optional): Run in debug mode.
(default: :obj:`False`)
is_hpo (bool, optional): Run hyperparameter optimization with Ray Tune.
(default: :obj:`False`)
print_every (int, optional): Frequency of printing logs.
(default: :obj:`100`)
seed (int, optional): Random number seed.
(default: :obj:`None`)
logger (str, optional): Type of logger to be used.
(default: :obj:`tensorboard`)
local_rank (int, optional): Local rank of the process, only applicable for distributed training.
(default: :obj:`0`)
amp (bool, optional): Run using automatic mixed precision.
(default: :obj:`False`)
slurm (dict): Slurm configuration. Currently just for keeping track.
(default: :obj:`{}`)
"""
def __init__(
self,
task,
model,
dataset,
optimizer,
identifier,
normalizer=None,
timestamp_id: Optional[str] = None,
run_dir: Optional[str] = None,
is_debug: bool = False,
is_hpo: bool = False,
print_every: int = 100,
seed: Optional[int] = None,
logger: str = "tensorboard",
local_rank: int = 0,
amp: bool = False,
cpu: bool = False,
slurm={},
noddp: bool = False,
) -> None:
super().__init__(
task=task,
model=model,
dataset=dataset,
optimizer=optimizer,
identifier=identifier,
normalizer=normalizer,
timestamp_id=timestamp_id,
run_dir=run_dir,
is_debug=is_debug,
is_hpo=is_hpo,
print_every=print_every,
seed=seed,
logger=logger,
local_rank=local_rank,
amp=amp,
cpu=cpu,
name="s2ef",
slurm=slurm,
noddp=noddp,
)
def load_task(self) -> None:
logging.info(f"Loading dataset: {self.config['task']['dataset']}")
if "relax_dataset" in self.config["task"]:
self.relax_dataset = registry.get_dataset_class("lmdb")(
self.config["task"]["relax_dataset"]
)
self.relax_sampler = self.get_sampler(
self.relax_dataset,
self.config["optim"].get(
"eval_batch_size", self.config["optim"]["batch_size"]
),
shuffle=False,
)
self.relax_loader = self.get_dataloader(
self.relax_dataset,
self.relax_sampler,
)
self.num_targets = 1
# If we're computing gradients wrt input, set mean of normalizer to 0 --
# since it is lost when computing dy / dx -- and std to forward target std
if self.config["model_attributes"].get("regress_forces", True):
if self.normalizer.get("normalize_labels", False):
if "grad_target_mean" in self.normalizer:
self.normalizers["grad_target"] = Normalizer(
mean=self.normalizer["grad_target_mean"],
std=self.normalizer["grad_target_std"],
device=self.device,
)
else:
self.normalizers["grad_target"] = Normalizer(
tensor=self.train_loader.dataset.data.y[
self.train_loader.dataset.__indices__
],
device=self.device,
)
self.normalizers["grad_target"].mean.fill_(0)
# Takes in a new data source and generates predictions on it.
@torch.no_grad()
def predict(
self,
data_loader,
per_image: bool = True,
results_file=None,
disable_tqdm: bool = False,
):
ensure_fitted(self._unwrapped_model, warn=True)
if distutils.is_master() and not disable_tqdm:
logging.info("Predicting on test.")
assert isinstance(
data_loader,
(
torch.utils.data.dataloader.DataLoader,
torch_geometric.data.Batch,
),
)
rank = distutils.get_rank()
if isinstance(data_loader, torch_geometric.data.Batch):
data_loader = [[data_loader]]
self.model.eval()
if self.ema:
self.ema.store()
self.ema.copy_to()
if self.normalizers is not None and "target" in self.normalizers:
self.normalizers["target"].to(self.device)
self.normalizers["grad_target"].to(self.device)
predictions = {"id": [], "energy": [], "forces": [], "chunk_idx": []}
for i, batch_list in tqdm(
enumerate(data_loader),
total=len(data_loader),
position=rank,
desc="device {}".format(rank),
disable=disable_tqdm,
):
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
out = self._forward(batch_list)
if self.normalizers is not None and "target" in self.normalizers:
out["energy"] = self.normalizers["target"].denorm(
out["energy"]
)
out["forces"] = self.normalizers["grad_target"].denorm(
out["forces"]
)
if per_image:
systemids = [
str(i) + "_" + str(j)
for i, j in zip(
batch_list[0].sid.tolist(), batch_list[0].fid.tolist()
)
]
predictions["id"].extend(systemids)
batch_natoms = torch.cat(
[batch.natoms for batch in batch_list]
)
batch_fixed = torch.cat([batch.fixed for batch in batch_list])
# total energy target requires predictions to be saved in float32
# default is float16
if (
self.config["task"].get("prediction_dtype", "float16")
== "float32"
or self.config["task"]["dataset"] == "oc22_lmdb"
):
predictions["energy"].extend(
out["energy"].cpu().detach().to(torch.float32).numpy()
)
forces = out["forces"].cpu().detach().to(torch.float32)
else:
predictions["energy"].extend(
out["energy"].cpu().detach().to(torch.float16).numpy()
)
forces = out["forces"].cpu().detach().to(torch.float16)
per_image_forces = torch.split(forces, batch_natoms.tolist())
per_image_forces = [
force.numpy() for force in per_image_forces
]
# evalAI only requires forces on free atoms
if results_file is not None:
_per_image_fixed = torch.split(
batch_fixed, batch_natoms.tolist()
)
_per_image_free_forces = [
force[(fixed == 0).tolist()]
for force, fixed in zip(
per_image_forces, _per_image_fixed
)
]
_chunk_idx = np.array(
[
free_force.shape[0]
for free_force in _per_image_free_forces
]
)
per_image_forces = _per_image_free_forces
predictions["chunk_idx"].extend(_chunk_idx)
predictions["forces"].extend(per_image_forces)
else:
predictions["energy"] = out["energy"].detach()
predictions["forces"] = out["forces"].detach()
if self.ema:
self.ema.restore()
return predictions
predictions["forces"] = np.array(predictions["forces"])
predictions["chunk_idx"] = np.array(predictions["chunk_idx"])
predictions["energy"] = np.array(predictions["energy"])
predictions["id"] = np.array(predictions["id"])
self.save_results(
predictions, results_file, keys=["energy", "forces", "chunk_idx"]
)
if self.ema:
self.ema.restore()
return predictions
def update_best(
self,
primary_metric,
val_metrics,
disable_eval_tqdm: bool = True,
) -> None:
if (
"mae" in primary_metric
and val_metrics[primary_metric]["metric"] < self.best_val_metric
) or (
"mae" not in primary_metric
and val_metrics[primary_metric]["metric"] > self.best_val_metric
):
self.best_val_metric = val_metrics[primary_metric]["metric"]
self.save(
metrics=val_metrics,
checkpoint_file="best_checkpoint.pt",
training_state=False,
)
if self.test_loader is not None:
self.predict(
self.test_loader,
results_file="predictions",
disable_tqdm=disable_eval_tqdm,
)
def train(self, disable_eval_tqdm: bool = False) -> None:
ensure_fitted(self._unwrapped_model, warn=True)
eval_every = self.config["optim"].get(
"eval_every", len(self.train_loader)
)
checkpoint_every = self.config["optim"].get(
"checkpoint_every", eval_every
)
primary_metric = self.config["task"].get(
"primary_metric", self.evaluator.task_primary_metric[self.name]
)
if (
not hasattr(self, "primary_metric")
or self.primary_metric != primary_metric
):
self.best_val_metric = 1e9 if "mae" in primary_metric else -1.0
else:
primary_metric = self.primary_metric
self.metrics = {}
# Calculate start_epoch from step instead of loading the epoch number
# to prevent inconsistencies due to different batch size in checkpoint.
start_epoch = self.step // len(self.train_loader)
for epoch_int in range(
start_epoch, self.config["optim"]["max_epochs"]
):
self.train_sampler.set_epoch(epoch_int)
skip_steps = self.step % len(self.train_loader)
train_loader_iter = iter(self.train_loader)
for i in range(skip_steps, len(self.train_loader)):
self.epoch = epoch_int + (i + 1) / len(self.train_loader)
self.step = epoch_int * len(self.train_loader) + i + 1
self.model.train()
# Get a batch.
batch = next(train_loader_iter)
# Forward, loss, backward.
with torch.cuda.amp.autocast(enabled=self.scaler is not None):
out = self._forward(batch)
loss = self._compute_loss(out, batch)
loss = self.scaler.scale(loss) if self.scaler else loss
self._backward(loss)
scale = self.scaler.get_scale() if self.scaler else 1.0
# Compute metrics.
self.metrics = self._compute_metrics(
out,
batch,
self.evaluator,
self.metrics,
)
self.metrics = self.evaluator.update(
"loss", loss.item() / scale, self.metrics
)
# Log metrics.
log_dict = {k: self.metrics[k]["metric"] for k in self.metrics}
log_dict.update(
{
"lr": self.scheduler.get_lr(),
"epoch": self.epoch,
"step": self.step,
}
)
if (
self.step % self.config["cmd"]["print_every"] == 0
and distutils.is_master()
and not self.is_hpo
):
log_str = [
"{}: {:.2e}".format(k, v) for k, v in log_dict.items()
]
logging.info(", ".join(log_str))
self.metrics = {}
if self.logger is not None:
self.logger.log(
log_dict,
step=self.step,
split="train",
)
if (
checkpoint_every != -1
and self.step % checkpoint_every == 0
):
self.save(
checkpoint_file="checkpoint.pt", training_state=True
)
# Evaluate on val set every `eval_every` iterations.
if self.step % eval_every == 0:
if self.val_loader is not None:
val_metrics = self.validate(
split="val",
disable_tqdm=disable_eval_tqdm,
)
self.update_best(
primary_metric,
val_metrics,
disable_eval_tqdm=disable_eval_tqdm,
)
if self.is_hpo:
self.hpo_update(
self.epoch,
self.step,
self.metrics,
val_metrics,
)
if self.config["task"].get("eval_relaxations", False):
if "relax_dataset" not in self.config["task"]:
logging.warning(
"Cannot evaluate relaxations, relax_dataset not specified"
)
else:
self.run_relaxations()
if self.scheduler.scheduler_type == "ReduceLROnPlateau":
if self.step % eval_every == 0:
self.scheduler.step(
metrics=val_metrics[primary_metric]["metric"],
)
else:
self.scheduler.step()
torch.cuda.empty_cache()
if checkpoint_every == -1:
self.save(checkpoint_file="checkpoint.pt", training_state=True)
self.train_dataset.close_db()
if self.config.get("val_dataset", False):
self.val_dataset.close_db()
if self.config.get("test_dataset", False):
self.test_dataset.close_db()
def _forward(self, batch_list):
# forward pass.
if self.config["model_attributes"].get("regress_forces", True):
out_energy, out_forces = self.model(batch_list)
else:
out_energy = self.model(batch_list)
if out_energy.shape[-1] == 1:
out_energy = out_energy.view(-1)
out = {
"energy": out_energy,
}
if self.config["model_attributes"].get("regress_forces", True):
out["forces"] = out_forces
return out
def _compute_loss(self, out, batch_list) -> torch.Tensor:
loss = []
# Energy loss.
energy_target = torch.cat(
[batch.y.to(self.device) for batch in batch_list], dim=0
)
if self.normalizer.get("normalize_labels", False):
energy_target = self.normalizers["target"].norm(energy_target)
energy_mult = self.config["optim"].get("energy_coefficient", 1)
loss.append(
energy_mult * self.loss_fn["energy"](out["energy"], energy_target)
)
# Force loss.
if self.config["model_attributes"].get("regress_forces", True):
force_target = torch.cat(
[batch.force.to(self.device) for batch in batch_list], dim=0
)
if self.normalizer.get("normalize_labels", False):
force_target = self.normalizers["grad_target"].norm(
force_target
)
tag_specific_weights = self.config["task"].get(
"tag_specific_weights", []
)
if tag_specific_weights != []:
# handle tag specific weights as introduced in forcenet
assert len(tag_specific_weights) == 3
batch_tags = torch.cat(
[
batch.tags.float().to(self.device)
for batch in batch_list
],
dim=0,
)
weight = torch.zeros_like(batch_tags)
weight[batch_tags == 0] = tag_specific_weights[0]
weight[batch_tags == 1] = tag_specific_weights[1]
weight[batch_tags == 2] = tag_specific_weights[2]
if self.config["optim"].get("loss_force", "l2mae") == "l2mae":
# zero out nans, if any
found_nans_or_infs = not torch.all(
out["forces"].isfinite()
)
if found_nans_or_infs is True:
logging.warning("Found nans while computing loss")
out["forces"] = torch.nan_to_num(
out["forces"], nan=0.0
)
dists = torch.norm(
out["forces"] - force_target, p=2, dim=-1
)
weighted_dists_sum = (dists * weight).sum()
num_samples = out["forces"].shape[0]
num_samples = distutils.all_reduce(
num_samples, device=self.device
)
weighted_dists_sum = (
weighted_dists_sum
* distutils.get_world_size()
/ num_samples
)
force_mult = self.config["optim"].get(
"force_coefficient", 30
)
loss.append(force_mult * weighted_dists_sum)
else:
raise NotImplementedError
else:
# Force coefficient = 30 has been working well for us.
force_mult = self.config["optim"].get("force_coefficient", 30)
if self.config["task"].get("train_on_free_atoms", False):
fixed = torch.cat(
[batch.fixed.to(self.device) for batch in batch_list]
)
mask = fixed == 0
if (
self.config["optim"]
.get("loss_force", "mae")
.startswith("atomwise")
):
force_mult = self.config["optim"].get(
"force_coefficient", 1
)
natoms = torch.cat(
[
batch.natoms.to(self.device)
for batch in batch_list
]
)
natoms = torch.repeat_interleave(natoms, natoms)
force_loss = force_mult * self.loss_fn["force"](
out["forces"][mask],
force_target[mask],
natoms=natoms[mask],
batch_size=batch_list[0].natoms.shape[0],
)
loss.append(force_loss)
else:
loss.append(
force_mult
* self.loss_fn["force"](
out["forces"][mask], force_target[mask]
)
)
else:
loss.append(
force_mult
* self.loss_fn["force"](out["forces"], force_target)
)
# Sanity check to make sure the compute graph is correct.
for lc in loss:
assert hasattr(lc, "grad_fn")
loss = sum(loss)
return loss
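# Summary of the objective assembled above:
#   loss = energy_coefficient * L_energy + force_coefficient * L_force
# with energy_coefficient defaulting to 1 and force_coefficient to 30 (1 for
# the atomwise force losses); the force term may be restricted to free atoms
# or reweighted per tag, as handled in the branches above.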
def _compute_metrics(self, out, batch_list, evaluator, metrics={}):
natoms = torch.cat(
[batch.natoms.to(self.device) for batch in batch_list], dim=0
)
target = {
"energy": torch.cat(
[batch.y.to(self.device) for batch in batch_list], dim=0
),
"forces": torch.cat(
[batch.force.to(self.device) for batch in batch_list], dim=0
),
"natoms": natoms,
}
out["natoms"] = natoms
if self.config["task"].get("eval_on_free_atoms", True):
fixed = torch.cat(
[batch.fixed.to(self.device) for batch in batch_list]
)
mask = fixed == 0
out["forces"] = out["forces"][mask]
target["forces"] = target["forces"][mask]
s_idx = 0
natoms_free = []
for natoms in target["natoms"]:
natoms_free.append(
torch.sum(mask[s_idx : s_idx + natoms]).item()
)
s_idx += natoms
target["natoms"] = torch.LongTensor(natoms_free).to(self.device)
out["natoms"] = torch.LongTensor(natoms_free).to(self.device)
if self.normalizer.get("normalize_labels", False):
out["energy"] = self.normalizers["target"].denorm(out["energy"])
out["forces"] = self.normalizers["grad_target"].denorm(
out["forces"]
)
metrics = evaluator.eval(out, target, prev_metrics=metrics)
return metrics
def run_relaxations(self, split: str = "val") -> None:
ensure_fitted(self._unwrapped_model)
# When set to true, uses deterministic CUDA scatter ops, if available.
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html#torch.use_deterministic_algorithms
# Only implemented for GemNet-OC currently.
registry.register(
"set_deterministic_scatter",
self.config["task"].get("set_deterministic_scatter", False),
)
logging.info("Running ML-relaxations")
self.model.eval()
if self.ema:
self.ema.store()
self.ema.copy_to()
evaluator_is2rs, metrics_is2rs = Evaluator(task="is2rs"), {}
evaluator_is2re, metrics_is2re = Evaluator(task="is2re"), {}
# Need both `pos_relaxed` and `y_relaxed` to compute val IS2R* metrics.
# Else just generate predictions.
if (
hasattr(self.relax_dataset[0], "pos_relaxed")
and self.relax_dataset[0].pos_relaxed is not None
) and (
hasattr(self.relax_dataset[0], "y_relaxed")
and self.relax_dataset[0].y_relaxed is not None
):
split = "val"
else:
split = "test"
ids = []
relaxed_positions = []
chunk_idx = []
for i, batch in tqdm(
enumerate(self.relax_loader), total=len(self.relax_loader)
):
if i >= self.config["task"].get("num_relaxation_batches", 1e9):
break
# If all traj files already exist, then skip this batch
if check_traj_files(
batch, self.config["task"]["relax_opt"].get("traj_dir", None)
):
logging.info(f"Skipping batch: {batch[0].sid.tolist()}")
continue
relaxed_batch = ml_relax(
batch=batch,
model=self,
steps=self.config["task"].get("relaxation_steps", 200),
fmax=self.config["task"].get("relaxation_fmax", 0.0),
relax_opt=self.config["task"]["relax_opt"],
save_full_traj=self.config["task"].get("save_full_traj", True),
device=self.device,
transform=None,
)
if self.config["task"].get("write_pos", False):
systemids = [str(i) for i in relaxed_batch.sid.tolist()]
natoms = relaxed_batch.natoms.tolist()
positions = torch.split(relaxed_batch.pos, natoms)
batch_relaxed_positions = [pos.tolist() for pos in positions]
relaxed_positions += batch_relaxed_positions
chunk_idx += natoms
ids += systemids
if split == "val":
mask = relaxed_batch.fixed == 0
s_idx = 0
natoms_free = []
for natoms in relaxed_batch.natoms:
natoms_free.append(
torch.sum(mask[s_idx : s_idx + natoms]).item()
)
s_idx += natoms
target = {
"energy": relaxed_batch.y_relaxed,
"positions": relaxed_batch.pos_relaxed[mask],
"cell": relaxed_batch.cell,
"pbc": torch.tensor([True, True, True]),
"natoms": torch.LongTensor(natoms_free),
}
prediction = {
"energy": relaxed_batch.y,
"positions": relaxed_batch.pos[mask],
"cell": relaxed_batch.cell,
"pbc": torch.tensor([True, True, True]),
"natoms": torch.LongTensor(natoms_free),
}
metrics_is2rs = evaluator_is2rs.eval(
prediction,
target,
metrics_is2rs,
)
metrics_is2re = evaluator_is2re.eval(
{"energy": prediction["energy"]},
{"energy": target["energy"]},
metrics_is2re,
)
if self.config["task"].get("write_pos", False):
rank = distutils.get_rank()
pos_filename = os.path.join(
self.config["cmd"]["results_dir"], f"relaxed_pos_{rank}.npz"
)
np.savez_compressed(
pos_filename,
ids=ids,
pos=np.array(relaxed_positions, dtype=object),
chunk_idx=chunk_idx,
)
distutils.synchronize()
if distutils.is_master():
gather_results = defaultdict(list)
full_path = os.path.join(
self.config["cmd"]["results_dir"],
"relaxed_positions.npz",
)
for i in range(distutils.get_world_size()):
rank_path = os.path.join(
self.config["cmd"]["results_dir"],
f"relaxed_pos_{i}.npz",
)
rank_results = np.load(rank_path, allow_pickle=True)
gather_results["ids"].extend(rank_results["ids"])
gather_results["pos"].extend(rank_results["pos"])
gather_results["chunk_idx"].extend(
rank_results["chunk_idx"]
)
os.remove(rank_path)
# Because of how distributed sampler works, some system ids
# might be repeated to make no. of samples even across GPUs.
_, idx = np.unique(gather_results["ids"], return_index=True)
gather_results["ids"] = np.array(gather_results["ids"])[idx]
gather_results["pos"] = np.concatenate(
np.array(gather_results["pos"])[idx]
)
gather_results["chunk_idx"] = np.cumsum(
np.array(gather_results["chunk_idx"])[idx]
)[
:-1
] # np.split does not need the last index; the final chunk runs to the end
logging.info(f"Writing results to {full_path}")
np.savez_compressed(full_path, **gather_results)
if split == "val":
for task in ["is2rs", "is2re"]:
metrics = eval(f"metrics_{task}")
aggregated_metrics = {}
for k in metrics:
aggregated_metrics[k] = {
"total": distutils.all_reduce(
metrics[k]["total"],
average=False,
device=self.device,
),
"numel": distutils.all_reduce(
metrics[k]["numel"],
average=False,
device=self.device,
),
}
aggregated_metrics[k]["metric"] = (
aggregated_metrics[k]["total"]
/ aggregated_metrics[k]["numel"]
)
metrics = aggregated_metrics
# Make plots.
log_dict = {
f"{task}_{k}": metrics[k]["metric"] for k in metrics
}
if self.logger is not None:
self.logger.log(
log_dict,
step=self.step,
split=split,
)
if distutils.is_master():
logging.info(metrics)
if self.ema:
self.ema.restore()
registry.unregister("set_deterministic_scatter")
| 31,922 | 37.554348 | 127 | py |
msvi | msvi-main/experiments/rmnist/val.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.rmnist as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("val")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(val_loader, total=len(val_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
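# y_pd is now a Monte Carlo estimate of the mean prediction, averaged over
# param.n_mc_samples draws of the initial latent state x0.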
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_val_loss": mean_loss}) # type: ignore
| 1,789 | 25.323529 | 121 | py |
msvi | msvi-main/experiments/rmnist/test.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.rmnist as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("test")
# Load data.
train_dataset, val_dataset, test_dataset = data_utils.create_datasets(param)
train_loader, val_loader, test_loader = data_utils.create_dataloaders(param, train_dataset, val_dataset, test_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(test_loader, total=len(test_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_test_loss": mean_loss}) # type: ignore
| 1,815 | 25.705882 | 121 | py |
msvi | msvi-main/experiments/rmnist/utils.py | import os
from collections import deque
import numpy as np
import torch
import msvi.posterior
from einops import rearrange
ndarray = np.ndarray
Tensor = torch.Tensor
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
def save_model(model, path, name):
if not os.path.isdir(path):
os.makedirs(path)
torch.save(model.state_dict(), path+name+".pt")
def load_model(model, path, name, device):
model.load_state_dict(torch.load(path+name+".pt", map_location=device), strict=False)
def get_inference_data(t: Tensor, y: Tensor, delta_inf: float) -> tuple[list[Tensor], list[Tensor]]:
t_inf, y_inf = [], []
for i in range(t.shape[0]):
inf_inds = torch.argwhere(t[[i]] <= delta_inf)[:, 1]
t_inf.append(t[[i]][:, inf_inds, :])
y_inf.append(y[[i]][:, inf_inds, :, :])
return t_inf, y_inf
def get_x0(elbo, t: list[Tensor], y: list[Tensor]) -> Tensor:
x0 = []
for ti, yi in zip(t, y):
elbo.q.rec_net.update_time_grids(ti)
gamma, tau = elbo.q.rec_net(yi)
x0.append(gamma[:, [0], :] + tau[:, [0], :] * torch.randn_like(tau[:, [0], :]))
return torch.cat(x0)
def _pred_full_traj(elbo, t: Tensor, x0: Tensor) -> Tensor:
elbo.p.set_theta(elbo.q.sample_theta())
S, M, K = x0.shape[0], t.shape[1], x0.shape[2]
x = torch.zeros((S, M, K), dtype=x0.dtype, device=x0.device)
x[:, [0], :] = x0
for i in range(1, M):
x[:, [i], :] = elbo.p.F(x[:, [i-1], :], t=msvi.posterior.extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
return elbo.p._sample_lik(x)
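# _pred_full_traj rolls the latent state forward autoregressively: starting
# from x0, the transition function F is applied one step at a time over the
# time grid t, and observations are then sampled from the decoder likelihood.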
def pred_full_traj(param, elbo, t: Tensor, y: Tensor) -> Tensor:
t_inf, y_inf = get_inference_data(t, y, param.delta_inf)
x0 = get_x0(elbo, t_inf, y_inf)
y_full_traj = _pred_full_traj(elbo, t, x0)
return y_full_traj
class BatchMovingAverage():
"""Computes moving average over the last `k` mini-batches
and stores the smallest recorded moving average in `min_avg`."""
def __init__(self, k: int) -> None:
self.values = deque([], maxlen=k)
self.min_avg = np.inf
def add_value(self, value: float) -> None:
self.values.append(value)
def get_average(self) -> float:
if len(self.values) == 0:
avg = np.nan
else:
avg = sum(self.values) / len(self.values)
if avg < self.min_avg:
self.min_avg = avg
return avg
def get_min_average(self):
return self.min_avg
def kl_norm_norm(mu0: Tensor, mu1: Tensor, sig0: Tensor, sig1: Tensor) -> Tensor:
"""Calculates KL divergence between two K-dimensional Normal
distributions with diagonal covariance matrices.
Args:
mu0: Mean of the first distribution. Has shape (*, K).
mu1: Mean of the second distribution. Has shape (*, K).
sig0: Diagonal of the covariance matrix of the first distribution. Has shape (*, K).
sig1: Diagonal of the covariance matrix of the second distribution. Has shape (*, K).
Returns:
KL divergence between the distributions. Has shape (*, 1).
"""
assert mu0.shape == mu1.shape == sig0.shape == sig1.shape, (f"{mu0.shape=} {mu1.shape=} {sig0.shape=} {sig1.shape=}")
a = (sig0 / sig1).pow(2).sum(-1, keepdim=True)
b = ((mu1 - mu0).pow(2) / sig1**2).sum(-1, keepdim=True)
c = 2 * (torch.log(sig1) - torch.log(sig0)).sum(-1, keepdim=True)
kl = 0.5 * (a + b + c - mu0.shape[-1])
return kl
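# The code above is the standard closed form
#   KL(N(mu0, diag(sig0^2)) || N(mu1, diag(sig1^2)))
#       = 0.5 * sum_k[(sig0_k / sig1_k)^2 + (mu1_k - mu0_k)^2 / sig1_k^2
#                     + 2 * (log sig1_k - log sig0_k)] - K / 2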
def create_mask(x: Tensor) -> Tensor:
"""Masks the 'velocity' part of the latent space since we want to use
only the 'position' to reconstruct the observations."""
K = x.shape[2]
mask = torch.ones_like(x)
mask[:, :, K//2:] = 0.0
return mask
def param_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.data.norm(2).item()
return total_norm
def grad_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.grad.data.norm(2).item()
return total_norm
def split_trajectories(t, y, new_traj_len, batch_size):
s, m, n, d = y.shape
t_new = torch.empty((s, m-new_traj_len+1, new_traj_len, 1), dtype=t.dtype, device=t.device)
y_new = torch.empty((s, m-new_traj_len+1, new_traj_len, n, d), dtype=y.dtype, device=y.device)
for i in range(m - new_traj_len + 1):
t_new[:, i] = t[:, i:i+new_traj_len]
y_new[:, i] = y[:, i:i+new_traj_len]
t_new = rearrange(t_new, "a b c () -> (a b) c ()")
t_new -= torch.min(t_new, dim=1, keepdim=True)[0]
y_new = rearrange(y_new, "a b c n d -> (a b) c n d")
inds = np.random.choice(t_new.shape[0], size=batch_size, replace=False)
t_new = t_new[inds]
y_new = y_new[inds]
return t_new, y_new
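# Shape summary: given y of shape (s, m, n, d), split_trajectories forms all
# m - new_traj_len + 1 sliding windows per trajectory, shifts each window's
# time grid to start at 0, and returns a random subset of batch_size windows
# with shapes (batch_size, new_traj_len, 1) and (batch_size, new_traj_len, n, d).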
| 4,823 | 30.122581 | 121 | py |
msvi | msvi-main/experiments/rmnist/train.py | from types import SimpleNamespace
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from tqdm import tqdm
import msvi.utils.rmnist as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("train")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
# Training.
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = data_utils.get_scheduler(optimizer, param.n_iters, param.lr)
bma = utils.BatchMovingAverage(k=10)
data_transform = data_utils.get_data_transform()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
utils.set_seed(param.seed)
for i in tqdm(range(param.n_iters), total=param.n_iters):
elbo.train()
t, y, traj_inds = [bi.to(device) for bi in next(iter(train_loader))]
# t = t + (torch.rand_like(t) - 0.5) * 2 * param.sigT
y = data_transform(y)
L1, L2, L3, x, s = elbo(t, y, traj_inds, param.block_size, scaler=1.0)
L1 *= len(train_dataset) / param.batch_size
L2 *= len(train_dataset) / param.batch_size
loss = -(L1 - L2 - L3)
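# L1 and L2 are rescaled by len(train_dataset) / batch_size above so the
# mini-batch estimate matches the full-dataset objective; L3 is left unscaled
# (presumably a dataset-level regularization term). The negative ELBO
# -(L1 - L2 - L3) is then minimized.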
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# Validation on full trajectory predictions.
if i % int(0.00333 * param.n_iters) == 0 or i == param.n_iters - 1:
with torch.no_grad():
elbo.eval()
t_val, y_val, _ = [bi.to(device) for bi in next(iter(val_loader))]
y_full_traj = utils.pred_full_traj(param, elbo, t, y)
y_val_full_traj = utils.pred_full_traj(param, elbo, t_val, y_val)
train_full_traj_mse = nn.MSELoss()(y_full_traj, y).item()
val_full_traj_mse = nn.MSELoss()(y_val_full_traj, y_val).item()
bma.add_value(val_full_traj_mse)
if bma.get_average() <= bma.get_min_average():
utils.save_model(elbo, param.model_folder, param.name)
wandb.log(
{
"-L1": -L1.item(),
"L2": L2.item(),
"L3": L3.item(),
"-ELBO": loss.item(),
"train_full_traj_mse": train_full_traj_mse,
"val_full_traj_mse": val_full_traj_mse,
"lr": optimizer.param_groups[0]["lr"],
"scaler": 1.0,
},
step=i
)
if param.visualize == 1:
data_utils.visualize_trajectories(
traj=[
y[[0]].detach().cpu().numpy(),
y_full_traj[[0]].detach().cpu().numpy(),
y_val[[0]].detach().cpu().numpy(),
y_val_full_traj[[0]].detach().cpu().numpy(),
],
vis_inds=list(range(y.shape[1]))[:-1:max(1, int(0.09*y.shape[1]))],
title=f"Iteration {i}",
path=f"./img/{param.name}/",
img_name=f"iter_{i}.png",
)
| 3,556 | 29.663793 | 107 | py |
msvi | msvi-main/experiments/tests/lv_vms.py | from types import SimpleNamespace
import torch.optim as optim
from tqdm import tqdm
import utils
param = {
"T": 50, # terminal time
"M": 250, # number of observations in [0, T]
"sigY": 0.001, # observation noise
"seed": 1400, # random seed
"max_len": 201, # truncation length for the trajectories
"batch_size": 3,
"lr": 0.01, # learning rate
"n_iters": 5000, # number of optimization iterations
"solver_kwargs": {"method": "rk4", "rtol": 1e-5, "atol": 1e-5, "adjoint": False},
}
param = SimpleNamespace(**param)
train_dataset = utils.create_datasets(param)
train_loader = utils.create_dataloaders(train_dataset, param)
utils.set_seed(param.seed)
g, F, _ = utils.get_model_components(param, construct_h=False)
elbo = utils.create_vms_elbo(g, F, param, S=len(train_dataset))
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[4500], gamma=0.1)
for i in tqdm(range(param.n_iters), total=param.n_iters):
t, y, traj_inds = next(iter(train_loader))
L1, L2, L3, _, _ = elbo(t, y, traj_inds, block_size=10, scaler=1)
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
print("Inferred parameter values =", elbo.q.posterior_param["mu_theta_F"][0:4])
print(f"True parameter values = {utils.LV_PARAM}")
| 1,390 | 27.979167 | 85 | py |
msvi | msvi-main/experiments/tests/lv_avms.py | from types import SimpleNamespace
import torch.optim as optim
from tqdm import tqdm
import utils
param = {
"T": 50, # terminal time
"M": 250, # number of observations in [0, T]
"sigY": 0.001, # observation noise
"max_len": 201, # truncation length for the trajectories
"seed": 1400, # random seed
"batch_size": 3,
"lr": 0.01, # learning rate
"n_iters": 5000, # number of optimization iterations
"solver_kwargs": {"method": "rk4", "rtol": 1e-5, "atol": 1e-5, "adjoint": False},
# Parameters for recognition network.
"h_agg_attn": "tdp",
"h_agg_pos_enc": "rpeNN",
"h_agg_stat_layers": 2,
"K": 2,
"m_h": 16,
"h_agg_max_tokens": 500,
"h_agg_max_time": 100,
"h_agg_delta_r": 10,
"h_agg_p": -1,
"n": 1,
"drop_prob": 0,
"block_size": 1,
}
param = SimpleNamespace(**param)
train_dataset = utils.create_datasets(param)
train_loader = utils.create_dataloaders(train_dataset, param)
utils.set_seed(param.seed)
g, F, h = utils.get_model_components(param, construct_h=True)
elbo = utils.create_avms_elbo(g, F, h, param)
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[4500], gamma=0.1)
for i in tqdm(range(param.n_iters), total=param.n_iters):
t, y, traj_inds = next(iter(train_loader))
elbo.q.rec_net.update_time_grids(t)
L1, L2, L3, _, _ = elbo(t, y, traj_inds, block_size=param.block_size, scaler=1)
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
print("Inferred parameter values =", elbo.q.posterior_param["mu_theta_F"][0:4])
print(f"True parameter values = {utils.LV_PARAM}")
| 1,733 | 25.676923 | 85 | py |
msvi | msvi-main/experiments/tests/lv_vss.py | from types import SimpleNamespace
import torch.optim as optim
from tqdm import tqdm
import utils
param = {
"T": 50, # terminal time
"M": 250, # number of observations in [0, T]
"sigY": 0.001, # observation noise
"seed": 1400, # random seed
"max_len": 10, # truncation length for the trajectories
"batch_size": 3,
"lr": 0.01, # learning rate
"n_iters": 5000, # number of optimization iterations
"solver_kwargs": {"method": "rk4", "rtol": 1e-5, "atol": 1e-5, "adjoint": False},
}
param = SimpleNamespace(**param)
train_dataset = utils.create_datasets(param)
train_loader = utils.create_dataloaders(train_dataset, param)
utils.set_seed(param.seed)
g, F, _ = utils.get_model_components(param, construct_h=False)
elbo = utils.create_vss_elbo(g, F, param, S=len(train_dataset))
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[4500], gamma=0.1)
for i in tqdm(range(param.n_iters), total=param.n_iters):
t, y, traj_inds = next(iter(train_loader))
L1, L2, L3, _, _ = elbo(t, y, traj_inds, block_size=1, scaler=1)
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
print("Inferred parameter values =", elbo.q.posterior_param["mu_theta_F"][0:4])
print(f"True parameter values = {utils.LV_PARAM}")
| 1,388 | 27.9375 | 85 | py |
msvi | msvi-main/experiments/tests/utils.py | import numpy as np
import scipy.integrate
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
from einops import rearrange
from einops.layers.torch import Rearrange
import msvi.decoder
import msvi.trans_func
import msvi.rec_net
import msvi.model
import msvi.posterior
import msvi.elbo
import msvi.utils.utils
from msvi.dataset import TrajectoryDataset
# Use Lotka-Volterra for a sanity check.
ndarray = np.ndarray
LV_PARAM = [2.0/3, 4.0/3, 1.0, 1.0] # parameters of the system
LV_IC = np.array(
[
[0.9, 1.8],
[1.9, 0.9],
[0.45, 0.9]
]
) # initial conditions
def generate_irregular_time_grid(T, intensity, min_dist):
"""Generates irregular time grid on the interval [0, T].
Args:
T (float): Terminal time.
intensity (float): Intensity of the observations (per second).
min_dist (float): Smallest distance between time points.
Returns:
t (ndarray): 1D array with time points.
"""
t = [0.0]
while t[-1] < T:
t.append(t[-1] + np.random.exponential(1.0/intensity))
t.pop(-1)
t[-1] = T
leave_mask = [True] * len(t)
for i in range(0, len(t)):
if leave_mask[i] is True:
for j in range(i+1, len(t)):
dist = t[j] - t[i]
if dist < min_dist:
leave_mask[j] = False
return np.array(t)[leave_mask]
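# Example usage (illustrative values): roughly 5 observations per second on
# [0, 10], with consecutive kept time points at least 0.02 apart.
#   t = generate_irregular_time_grid(T=10.0, intensity=5.0, min_dist=0.02)
#   # t is a 1D ndarray starting at 0.0 with np.diff(t) >= 0.02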
def lv_dynamics(t, x):
alpha, beta, gamma, delta = LV_PARAM
dzdt = np.array(
[
alpha * x[0] - beta * x[0] * x[1],
delta * x[0] * x[1] - gamma * x[1],
]
)
return dzdt
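# lv_dynamics implements the Lotka-Volterra system with
# (alpha, beta, gamma, delta) = LV_PARAM:
#   dx1/dt = alpha * x1 - beta * x1 * x2
#   dx2/dt = delta * x1 * x2 - gamma * x2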
def generate_data(T: float, M: int, sigY: float, seed: int) -> tuple[ndarray, ...]:
np.random.seed(seed)
t = np.empty(len(LV_IC), dtype=object)
x = np.empty(len(LV_IC), dtype=object)
y = np.empty(len(LV_IC), dtype=object)
for i in range(len(LV_IC)):
# ti = np.linspace(0, LV_T, LV_M)
ti = generate_irregular_time_grid(T, M/T, min_dist=0.02)
xi = scipy.integrate.solve_ivp(lv_dynamics, ti[[0, -1]], LV_IC[i], method="RK45", rtol=1e-5, atol=1e-5, t_eval=ti).y.T
t[i] = rearrange(ti, "m -> m ()")
x[i] = rearrange(xi, "m d -> m () d")
y[i] = x[i] + sigY * np.random.randn(*x[i].shape)
return t, x, y
def create_datasets(param) -> TrajectoryDataset:
t, _, y = generate_data(param.T, param.M, param.sigY, param.seed)
t = [torch.tensor(ti, dtype=torch.float64) for ti in t]
y = [torch.tensor(yi, dtype=torch.float32) for yi in y]
train_dataset = TrajectoryDataset(t, y, max_len=param.max_len)
return train_dataset
def create_dataloaders(dataset: TrajectoryDataset, param) -> DataLoader:
dataloader = DataLoader(dataset, batch_size=param.batch_size, shuffle=True)
return dataloader
def get_model_components(param, construct_h: bool):
g = Decoder(param.sigY)
F = msvi.trans_func.ODETransitionFunction(
f=nn.Sequential(TrueDynamicsFunction()),
layers_to_count=[TrueDynamicsFunction],
solver_kwargs=param.solver_kwargs
)
if construct_h is True:
phi_enc = nn.Sequential(Rearrange("s m () d -> s m d"), nn.Linear(2, param.m_h*param.K))
phi_agg = msvi.utils.utils.create_agg_net(param, "static")
phi_gamma = nn.Linear(param.m_h*param.K, 2)
phi_tau = nn.Linear(param.m_h*param.K, 2)
h = msvi.rec_net.RecognitionNet(phi_enc, phi_agg, phi_gamma, phi_tau, 0)
else:
h = None
return g, F, h
def create_vss_elbo(g, F, param, S):
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([2]), False),
"sig0": Parameter(1.0 * torch.ones([2]), False),
"sigXi": Parameter(0.001 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(0.0 * torch.ones(g.param_count())),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(0.0 * torch.ones(F.param_count())),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
"gamma": Parameter(0.0 * torch.ones([S, 1, 2])),
"log_tau": Parameter(-7.0 * torch.ones([S, param.max_len-1, 2])),
})
p = msvi.model.ModelNormal(prior_param_dict, g, F)
q = msvi.posterior.SingleShootingPosterior(posterior_param_dict, F)
elbo = msvi.elbo.SingleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def create_vms_elbo(g, F, param, S):
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([2]), False),
"sig0": Parameter(1.0 * torch.ones([2]), False),
"sigXi": Parameter(0.001 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(0.0 * torch.ones(g.param_count())),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(0.0 * torch.ones(F.param_count())),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
"gamma": Parameter(0.0 * torch.ones([S, param.max_len-1, 2])),
"log_tau": Parameter(-7.0 * torch.ones([S, param.max_len-1, 2])),
})
p = msvi.model.ModelNormal(prior_param_dict, g, F)
q = msvi.posterior.MultipleShootingPosterior(posterior_param_dict, F)
elbo = msvi.elbo.MultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def create_avms_elbo(g, F, h, param):
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([2]), False),
"sig0": Parameter(1.0 * torch.ones([2]), False),
"sigXi": Parameter(0.001 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(0.0 * torch.ones(g.param_count())),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(0.0 * torch.ones(F.param_count())),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
})
p = msvi.model.ModelNormal(prior_param_dict, g, F)
q = msvi.posterior.AmortizedMultipleShootingPosterior(posterior_param_dict, F, h)
elbo = msvi.elbo.AmortizedMultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
class TrueDynamicsFunction(nn.Module):
def __init__(self):
super().__init__()
self.weight = Parameter(torch.zeros(4)) # alpha, beta, gamma, delta
self.bias = Parameter(torch.zeros(1)) # dummy parameter required for compatibility with msvi.trans_func
def forward(self, x):
alpha, beta, gamma, delta = self.weight
x1, x2 = x[..., [0]], x[..., [1]]
dxdt = torch.zeros_like(x)
dxdt[..., [0]] = alpha * x1 - beta * x1 * x2
dxdt[..., [1]] = delta * x1 * x2 - gamma * x2
return dxdt
class Decoder(msvi.decoder.IDecoder):
def __init__(self, sigY: float) -> None:
super().__init__()
self.sigY = sigY
def forward(self, x: torch.Tensor) -> torch.Tensor:
S, M, D = x.shape
p = torch.empty((S, M, 1, D, 2), device=x.device)
p[:, :, 0, :, 0] = x
p[:, :, 0, :, 1] = self.sigY
return p
def set_param(self, param: torch.Tensor) -> None:
return None
def param_count(self) -> int:
return 0
| 7,855 | 31.733333 | 126 | py |
msvi | msvi-main/experiments/pendulum/val.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.pendulum as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("val")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(val_loader, total=len(val_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_val_loss": mean_loss}) # type: ignore
| 1,791 | 25.352941 | 121 | py |
msvi | msvi-main/experiments/pendulum/test.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.pendulum as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("test")
# Load data.
train_dataset, val_dataset, test_dataset = data_utils.create_datasets(param)
train_loader, val_loader, test_loader = data_utils.create_dataloaders(param, train_dataset, val_dataset, test_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(test_loader, total=len(test_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_test_loss": mean_loss}) # type: ignore
| 1,817 | 25.735294 | 121 | py |
msvi | msvi-main/experiments/pendulum/utils.py | import os
from collections import deque
import numpy as np
import torch
import msvi.posterior
from einops import rearrange
ndarray = np.ndarray
Tensor = torch.Tensor
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
def save_model(model, path, name):
if not os.path.isdir(path):
os.makedirs(path)
torch.save(model.state_dict(), path+name+".pt")
def load_model(model, path, name, device):
model.load_state_dict(torch.load(path+name+".pt", map_location=device), strict=False)
def get_inference_data(t: Tensor, y: Tensor, delta_inf: float) -> tuple[list[Tensor], list[Tensor]]:
t_inf, y_inf = [], []
for i in range(t.shape[0]):
inf_inds = torch.argwhere(t[[i]] <= delta_inf)[:, 1]
t_inf.append(t[[i]][:, inf_inds, :])
y_inf.append(y[[i]][:, inf_inds, :, :])
return t_inf, y_inf
def get_x0(elbo, t: list[Tensor], y: list[Tensor]) -> Tensor:
x0 = []
for ti, yi in zip(t, y):
elbo.q.rec_net.update_time_grids(ti)
gamma, tau = elbo.q.rec_net(yi)
x0.append(gamma[:, [0], :] + tau[:, [0], :] * torch.randn_like(tau[:, [0], :]))
return torch.cat(x0)
def _pred_full_traj(elbo, t: Tensor, x0: Tensor) -> Tensor:
elbo.p.set_theta(elbo.q.sample_theta())
S, M, K = x0.shape[0], t.shape[1], x0.shape[2]
x = torch.zeros((S, M, K), dtype=x0.dtype, device=x0.device)
x[:, [0], :] = x0
for i in range(1, M):
x[:, [i], :] = elbo.p.F(x[:, [i-1], :], t=msvi.posterior.extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
return elbo.p._sample_lik(x)
def pred_full_traj(param, elbo, t: Tensor, y: Tensor) -> Tensor:
t_inf, y_inf = get_inference_data(t, y, param.delta_inf)
x0 = get_x0(elbo, t_inf, y_inf)
y_full_traj = _pred_full_traj(elbo, t, x0)
return y_full_traj
class BatchMovingAverage():
"""Computes moving average over the last `k` mini-batches
and stores the smallest recorded moving average in `min_avg`."""
def __init__(self, k: int) -> None:
self.values = deque([], maxlen=k)
self.min_avg = np.inf
def add_value(self, value: float) -> None:
self.values.append(value)
def get_average(self) -> float:
if len(self.values) == 0:
avg = np.nan
else:
avg = sum(self.values) / len(self.values)
if avg < self.min_avg:
self.min_avg = avg
return avg
def get_min_average(self):
return self.min_avg
def kl_norm_norm(mu0: Tensor, mu1: Tensor, sig0: Tensor, sig1: Tensor) -> Tensor:
"""Calculates KL divergence between two K-dimensional Normal
distributions with diagonal covariance matrices.
Args:
mu0: Mean of the first distribution. Has shape (*, K).
mu1: Mean of the second distribution. Has shape (*, K).
sig0: Diagonal of the covariance matrix of the first distribution. Has shape (*, K).
sig1: Diagonal of the covariance matrix of the second distribution. Has shape (*, K).
Returns:
KL divergence between the distributions. Has shape (*, 1).
"""
assert mu0.shape == mu1.shape == sig0.shape == sig1.shape, (f"{mu0.shape=} {mu1.shape=} {sig0.shape=} {sig1.shape=}")
a = (sig0 / sig1).pow(2).sum(-1, keepdim=True)
b = ((mu1 - mu0).pow(2) / sig1**2).sum(-1, keepdim=True)
c = 2 * (torch.log(sig1) - torch.log(sig0)).sum(-1, keepdim=True)
kl = 0.5 * (a + b + c - mu0.shape[-1])
return kl
def create_mask(x: Tensor) -> Tensor:
"""Masks the 'velocity' part of the latent space since we want to use
only the 'position' to reconstruct the observsations."""
K = x.shape[2]
mask = torch.ones_like(x)
mask[:, :, K//2:] = 0.0
return mask
def param_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.data.norm(2).item()
return total_norm
def grad_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.grad.data.norm(2).item()
return total_norm
def split_trajectories(t, y, new_traj_len, batch_size):
s, m, n, d = y.shape
t_new = torch.empty((s, m-new_traj_len+1, new_traj_len, 1), dtype=t.dtype, device=t.device)
y_new = torch.empty((s, m-new_traj_len+1, new_traj_len, n, d), dtype=y.dtype, device=y.device)
for i in range(m - new_traj_len + 1):
t_new[:, i] = t[:, i:i+new_traj_len]
y_new[:, i] = y[:, i:i+new_traj_len]
t_new = rearrange(t_new, "a b c () -> (a b) c ()")
t_new -= torch.min(t_new, dim=1, keepdim=True)[0]
y_new = rearrange(y_new, "a b c n d -> (a b) c n d")
inds = np.random.choice(t_new.shape[0], size=batch_size, replace=False)
t_new = t_new[inds]
y_new = y_new[inds]
return t_new, y_new
| 4,823 | 30.122581 | 121 | py |
msvi | msvi-main/experiments/pendulum/train.py | from types import SimpleNamespace
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from tqdm import tqdm
import msvi.utils.pendulum as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("train")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
# Training.
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = data_utils.get_scheduler(optimizer, param.n_iters, param.lr)
bma = utils.BatchMovingAverage(k=10)
data_transform = data_utils.get_data_transform()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
utils.set_seed(param.seed)
for i in tqdm(range(param.n_iters), total=param.n_iters):
elbo.train()
t, y, traj_inds = [bi.to(device) for bi in next(iter(train_loader))]
# t = t + (torch.rand_like(t) - 0.5) * 2 * param.sigT
y = data_transform(y)
L1, L2, L3, x, s = elbo(t, y, traj_inds, param.block_size, scaler=1.0)
L1 *= len(train_dataset) / param.batch_size
L2 *= len(train_dataset) / param.batch_size
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# Validation on full trajectory predictions.
if i % int(0.00333 * param.n_iters) == 0 or i == param.n_iters - 1:
with torch.no_grad():
elbo.eval()
t_val, y_val, _ = [bi.to(device) for bi in next(iter(val_loader))]
y_full_traj = utils.pred_full_traj(param, elbo, t, y)
y_val_full_traj = utils.pred_full_traj(param, elbo, t_val, y_val)
train_full_traj_mse = nn.MSELoss()(y_full_traj, y).item()
val_full_traj_mse = nn.MSELoss()(y_val_full_traj, y_val).item()
bma.add_value(val_full_traj_mse)
if bma.get_average() <= bma.get_min_average():
utils.save_model(elbo, param.model_folder, param.name)
wandb.log(
{
"-L1": -L1.item(),
"L2": L2.item(),
"L3": L3.item(),
"-ELBO": loss.item(),
"train_full_traj_mse": train_full_traj_mse,
"val_full_traj_mse": val_full_traj_mse,
"lr": optimizer.param_groups[0]["lr"],
"scaler": 1.0,
},
step=i
)
if param.visualize == 1:
data_utils.visualize_trajectories(
traj=[
y[[0]].detach().cpu().numpy(),
y_full_traj[[0]].detach().cpu().numpy(),
y_val[[0]].detach().cpu().numpy(),
y_val_full_traj[[0]].detach().cpu().numpy(),
],
vis_inds=list(range(y.shape[1]))[:-1:max(1, int(0.09*y.shape[1]))],
title=f"Iteration {i}",
path=f"./img/{param.name}/",
img_name=f"iter_{i}.png",
)
| 3,558 | 29.681034 | 107 | py |
msvi | msvi-main/experiments/bballs/val.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.bballs as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("val")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(val_loader, total=len(val_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_val_loss": mean_loss}) # type: ignore
| 1,789 | 25.323529 | 121 | py |
msvi | msvi-main/experiments/bballs/test.py | from types import SimpleNamespace
import torch
import wandb
from tqdm import tqdm
from einops import reduce
import msvi.utils.bballs as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("test")
# Load data.
train_dataset, val_dataset, test_dataset = data_utils.create_datasets(param)
train_loader, val_loader, test_loader = data_utils.create_dataloaders(param, train_dataset, val_dataset, test_dataset)
# Create and load model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
utils.load_model(elbo, param.model_folder, param.name, device)
elbo.eval()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
loss_fn = torch.nn.MSELoss(reduction="none")
with torch.no_grad():
losses = []
for batch in tqdm(test_loader, total=len(test_loader)):
t, y, traj_inds = [bi.to(device) for bi in batch]
t_inf, y_inf = utils.get_inference_data(t, y, param.delta_inf)
y_pd = torch.zeros_like(y)
for i in range(param.n_mc_samples):
x0 = utils.get_x0(elbo, t_inf, y_inf)
y_pd += utils._pred_full_traj(elbo, t, x0)
y_pd /= param.n_mc_samples
loss_per_traj = reduce(loss_fn(y_pd, y), "s m n d -> s () () ()", "mean").view(-1).detach().cpu().numpy().ravel()
losses.extend(loss_per_traj)
mean_loss = sum(losses) / len(losses)
print(mean_loss)
wandb.run.summary.update({"mean_test_loss": mean_loss}) # type: ignore
| 1,815 | 25.705882 | 121 | py |
msvi | msvi-main/experiments/bballs/utils.py | import os
from collections import deque
import numpy as np
import torch
import msvi.posterior
from einops import rearrange
ndarray = np.ndarray
Tensor = torch.Tensor
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
def save_model(model, path, name):
if not os.path.isdir(path):
os.makedirs(path)
torch.save(model.state_dict(), path+name+".pt")
def load_model(model, path, name, device):
model.load_state_dict(torch.load(path+name+".pt", map_location=device), strict=False)
def get_inference_data(t: Tensor, y: Tensor, delta_inf: float) -> tuple[list[Tensor], list[Tensor]]:
t_inf, y_inf = [], []
for i in range(t.shape[0]):
inf_inds = torch.argwhere(t[[i]] <= delta_inf)[:, 1]
t_inf.append(t[[i]][:, inf_inds, :])
y_inf.append(y[[i]][:, inf_inds, :, :])
return t_inf, y_inf
def get_x0(elbo, t: list[Tensor], y: list[Tensor]) -> Tensor:
x0 = []
for ti, yi in zip(t, y):
elbo.q.rec_net.update_time_grids(ti)
gamma, tau = elbo.q.rec_net(yi)
x0.append(gamma[:, [0], :] + tau[:, [0], :] * torch.randn_like(tau[:, [0], :]))
return torch.cat(x0)
def _pred_full_traj(elbo, t: Tensor, x0: Tensor) -> Tensor:
elbo.p.set_theta(elbo.q.sample_theta())
S, M, K = x0.shape[0], t.shape[1], x0.shape[2]
x = torch.zeros((S, M, K), dtype=x0.dtype, device=x0.device)
x[:, [0], :] = x0
for i in range(1, M):
x[:, [i], :] = elbo.p.F(x[:, [i-1], :], t=msvi.posterior.extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
return elbo.p._sample_lik(x)
def pred_full_traj(param, elbo, t: Tensor, y: Tensor) -> Tensor:
t_inf, y_inf = get_inference_data(t, y, param.delta_inf)
x0 = get_x0(elbo, t_inf, y_inf)
y_full_traj = _pred_full_traj(elbo, t, x0)
return y_full_traj
class BatchMovingAverage():
"""Computes moving average over the last `k` mini-batches
and stores the smallest recorded moving average in `min_avg`."""
def __init__(self, k: int) -> None:
self.values = deque([], maxlen=k)
self.min_avg = np.inf
def add_value(self, value: float) -> None:
self.values.append(value)
def get_average(self) -> float:
if len(self.values) == 0:
avg = np.nan
else:
avg = sum(self.values) / len(self.values)
if avg < self.min_avg:
self.min_avg = avg
return avg
def get_min_average(self):
return self.min_avg
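# Usage sketch (illustrative values, not part of the original file): track a smoothed
# validation metric and detect new best values, as done in the training scripts.
# bma = BatchMovingAverage(k=3)
# for mse in [0.9, 0.7, 0.8, 0.4]:
#     bma.add_value(mse)
#     if bma.get_average() <= bma.get_min_average():  # get_average() also refreshes min_avg
#         pass  # e.g. save a checkpoint here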
def kl_norm_norm(mu0: Tensor, mu1: Tensor, sig0: Tensor, sig1: Tensor) -> Tensor:
"""Calculates KL divergence between two K-dimensional Normal
distributions with diagonal covariance matrices.
Args:
mu0: Mean of the first distribution. Has shape (*, K).
mu1: Mean of the second distribution. Has shape (*, K).
        sig0: Diagonal of the covariance matrix of the first distribution. Has shape (*, K).
        sig1: Diagonal of the covariance matrix of the second distribution. Has shape (*, K).
Returns:
KL divergence between the distributions. Has shape (*, 1).
"""
assert mu0.shape == mu1.shape == sig0.shape == sig1.shape, (f"{mu0.shape=} {mu1.shape=} {sig0.shape=} {sig1.shape=}")
a = (sig0 / sig1).pow(2).sum(-1, keepdim=True)
b = ((mu1 - mu0).pow(2) / sig1**2).sum(-1, keepdim=True)
c = 2 * (torch.log(sig1) - torch.log(sig0)).sum(-1, keepdim=True)
kl = 0.5 * (a + b + c - mu0.shape[-1])
return kl
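# Quick numerical check (illustrative, not part of the original file): for K=1,
# KL(N(0, 1) || N(1, 2)) = log(2) + (1 + 1) / (2 * 2**2) - 1/2 ≈ 0.4431.
# _mu0, _mu1 = torch.zeros(1, 1), torch.ones(1, 1)
# _sig0, _sig1 = torch.ones(1, 1), 2 * torch.ones(1, 1)
# kl_norm_norm(_mu0, _mu1, _sig0, _sig1)  # -> tensor([[0.4431]])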
def create_mask(x: Tensor) -> Tensor:
"""Masks the 'velocity' part of the latent space since we want to use
    only the 'position' to reconstruct the observations."""
K = x.shape[2]
mask = torch.ones_like(x)
mask[:, :, K//2:] = 0.0
return mask
def param_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.data.norm(2).item()
return total_norm
def grad_norm(module):
total_norm = 0.0
for p in module.parameters():
if p.requires_grad:
total_norm += p.grad.data.norm(2).item()
return total_norm
def split_trajectories(t, y, new_traj_len, batch_size):
s, m, n, d = y.shape
t_new = torch.empty((s, m-new_traj_len+1, new_traj_len, 1), dtype=t.dtype, device=t.device)
y_new = torch.empty((s, m-new_traj_len+1, new_traj_len, n, d), dtype=y.dtype, device=y.device)
for i in range(m - new_traj_len + 1):
t_new[:, i] = t[:, i:i+new_traj_len]
y_new[:, i] = y[:, i:i+new_traj_len]
t_new = rearrange(t_new, "a b c () -> (a b) c ()")
t_new -= torch.min(t_new, dim=1, keepdim=True)[0]
y_new = rearrange(y_new, "a b c n d -> (a b) c n d")
inds = np.random.choice(t_new.shape[0], size=batch_size, replace=False)
t_new = t_new[inds]
y_new = y_new[inds]
return t_new, y_new
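# Shape sketch (hypothetical sizes, not part of the original file): from S=2 trajectories of
# length M=10, all overlapping windows of length 4 are carved out (7 per trajectory) and a
# random batch of 8 windows is returned with time grids shifted to start at 0.
# t = torch.linspace(0, 1, 10).reshape(1, 10, 1).repeat(2, 1, 1)
# y = torch.randn(2, 10, 16, 1)
# t_sub, y_sub = split_trajectories(t, y, new_traj_len=4, batch_size=8)
# t_sub.shape, y_sub.shape  # -> torch.Size([8, 4, 1]), torch.Size([8, 4, 16, 1])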
| 4,823 | 30.122581 | 121 | py |
msvi | msvi-main/experiments/bballs/train.py | from types import SimpleNamespace
import torch
import torch.nn as nn
import torch.optim as optim
import wandb
from tqdm import tqdm
import msvi.utils.bballs as data_utils
import utils
torch.backends.cudnn.benchmark = True # type: ignore
# Read parameters.
argparser = data_utils.create_argparser()
param = SimpleNamespace(**vars(argparser.parse_args()))
param.tags.append("train")
# Load data.
train_dataset, val_dataset, _ = data_utils.create_datasets(param)
train_loader, val_loader, _ = data_utils.create_dataloaders(param, train_dataset, val_dataset, val_dataset)
# Create model.
utils.set_seed(param.seed)
device = torch.device(param.device)
g, F, h = data_utils.get_model_components(param)
elbo = data_utils.create_elbo(g, F, h, param).to(device)
# Training.
optimizer = optim.Adam(elbo.parameters(), lr=param.lr)
scheduler = data_utils.get_scheduler(optimizer, param.n_iters, param.lr)
bma = utils.BatchMovingAverage(k=10)
data_transform = data_utils.get_data_transform()
wandb.init(
mode="disabled", # online/disabled
project="AVMS",
group=param.group,
tags=param.tags,
name=param.name,
config=vars(param),
save_code=True,
)
utils.set_seed(param.seed)
for i in tqdm(range(param.n_iters), total=param.n_iters):
elbo.train()
t, y, traj_inds = [bi.to(device) for bi in next(iter(train_loader))]
# t = t + (torch.rand_like(t) - 0.5) * 2 * param.sigT
y = data_transform(y)
L1, L2, L3, x, s = elbo(t, y, traj_inds, param.block_size, scaler=1.0)
L1 *= len(train_dataset) / param.batch_size
L2 *= len(train_dataset) / param.batch_size
loss = -(L1 - L2 - L3)
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# Validation on full trajectory predictions.
if i % int(0.00333 * param.n_iters) == 0 or i == param.n_iters - 1:
with torch.no_grad():
elbo.eval()
t_val, y_val, _ = [bi.to(device) for bi in next(iter(val_loader))]
y_full_traj = utils.pred_full_traj(param, elbo, t, y)
y_val_full_traj = utils.pred_full_traj(param, elbo, t_val, y_val)
train_full_traj_mse = nn.MSELoss()(y_full_traj, y).item()
val_full_traj_mse = nn.MSELoss()(y_val_full_traj, y_val).item()
bma.add_value(val_full_traj_mse)
if bma.get_average() <= bma.get_min_average():
utils.save_model(elbo, param.model_folder, param.name)
wandb.log(
{
"-L1": -L1.item(),
"L2": L2.item(),
"L3": L3.item(),
"-ELBO": loss.item(),
"train_full_traj_mse": train_full_traj_mse,
"val_full_traj_mse": val_full_traj_mse,
"lr": optimizer.param_groups[0]["lr"],
"scaler": 1.0,
},
step=i
)
if param.visualize == 1:
data_utils.visualize_trajectories(
traj=[
y[[0]].detach().cpu().numpy(),
y_full_traj[[0]].detach().cpu().numpy(),
y_val[[0]].detach().cpu().numpy(),
y_val_full_traj[[0]].detach().cpu().numpy(),
],
vis_inds=list(range(y.shape[1]))[:-1:max(1, int(0.09*y.shape[1]))],
title=f"Iteration {i}",
path=f"./img/{param.name}/",
img_name=f"iter_{i}.png",
)
| 3,556 | 29.663793 | 107 | py |
msvi | msvi-main/msvi/model.py | from typing import Union
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.bernoulli import Bernoulli
from torch.distributions.continuous_bernoulli import ContinuousBernoulli
from einops import reduce
from msvi.decoder import IDecoder
from msvi.trans_func import ITransitionFunction
from msvi.posterior import extract_time_grids
Tensor = torch.Tensor
ParameterDict = nn.ParameterDict
class IModel(ABC, nn.Module):
@property
@abstractmethod
def g(self) -> IDecoder:
"""Returns the decoder."""
pass
@property
@abstractmethod
def F(self) -> ITransitionFunction:
"""Returns the transition function."""
pass
@property
@abstractmethod
def prior_param(self) -> ParameterDict:
"""Returns parameters of prior distributions."""
pass
@abstractmethod
def sample(self, t: Tensor, x0: Tensor) -> Tensor:
"""Samples a trajectory from the model. If x0=None, uses the prior to
sample the initial condition.
Args:
t: Time points at which to evaluate the trajectory. Has shape (M, ).
x0: Initial condition. Has shape (K, ).
Returns:
Trajectory sampled from the model. Has shape (1, M, N, D).
"""
pass
@abstractmethod
def loglik(self, y: Tensor, x: Tensor) -> Tensor:
"""Evaluates log likelihood p(y|x) for each snapshot.
Args:
y: Observations. Has shape (S, M, N, D).
x: Latent states. Has shape (S, M, K).
Returns:
Log likelihood for each snapshot. Has shape (S, M, 1).
"""
pass
@abstractmethod
def set_theta(self, theta: dict[str, Tensor]) -> None:
"""Sets parameters of g and F to theta["theta_g"] and theta["theta_F"] respectively.
Args:
theta: Dictionary with new parameter values. Must contain keys
theta_g and theta_F.
"""
pass
class ModelBase(IModel):
def __init__(
self,
prior_param_dict: ParameterDict,
g: IDecoder,
F: ITransitionFunction,
) -> None:
super().__init__()
self._check_param_shapes(prior_param_dict)
self._prior_param = prior_param_dict
self._g = g
self._F = F
@property
def g(self) -> IDecoder:
return self._g
@property
def F(self) -> ITransitionFunction:
return self._F
@property
def prior_param(self) -> ParameterDict:
return self._prior_param
def sample(self, t: Tensor, x0: Tensor) -> Tensor:
x = self._sample_x(t, x0)
y = self._sample_lik(x)
return y
def loglik(self, y: Tensor, x: Tensor) -> Tensor:
return self._eval_loglik(y, x)
def set_theta(self, theta: dict[str, Tensor]) -> None:
self.g.set_param(theta["theta_g"])
self.F.set_param(theta["theta_F"])
def _sample_x(self, t: Tensor, x0: Union[None, Tensor] = None) -> Tensor:
if x0 is None:
x0 = self._sample_ic()
x = self._sample_traj(t, x0)
return x
def _sample_ic(self):
mu0, sig0 = self.prior_param["mu0"], self.prior_param["sig0"]
x0 = mu0 + sig0 * torch.randn_like(sig0)
return x0
def _sample_traj(self, t, x0):
x = torch.empty((1, t.shape[0], x0.shape[0]), device=x0.device)
x[0, 0, :] = x0
s_curr = x0
for i in range(1, t.shape[0]):
x[:, [i], :] = self.F(s_curr, t=extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
eps = self.prior_param["sigXi"] * torch.randn_like(x[:, [i], :])
s_curr = x[:, [i], :] + eps
return x
def _check_param_shapes(self, d: ParameterDict) -> None:
scalar_param_names = ["sigXi", "mu_theta", "sig_theta"]
for param_name in scalar_param_names:
assert d[param_name].shape == torch.Size([1]), f"{param_name} must have shape (1, ) but has {d[param_name].shape}."
assert len(d["mu0"].shape) == 1, f"mu0 must have shape (K, ) but has {d['mu0'].shape}."
assert len(d["sig0"].shape) == 1, f"sig0 must have shape (K, ) but has {d['sig0'].shape}."
def _sample_lik(self, x: Tensor) -> Tensor:
raise NotImplementedError()
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
raise NotImplementedError()
class ModelNormal(ModelBase):
def _sample_lik(self, x: Tensor) -> Tensor:
param = self.g(x)
mu, sig = param[..., 0], param[..., 1]
y = Normal(mu, sig).rsample()
return y
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
param = self.g(x)
mu, sig = param[..., 0], param[..., 1]
loglik = Normal(mu, sig).log_prob(y)
loglik = reduce(loglik, "s m n d -> s m ()", "sum")
return loglik
class ModelNormalSecondOrder(ModelNormal):
def _sample_lik(self, x: Tensor) -> Tensor:
mask = self.create_mask(x)
return super()._sample_lik(x * mask)
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
mask = self.create_mask(x)
return super()._eval_loglik(y, x * mask)
def create_mask(self, x: Tensor) -> Tensor:
"""Masks the 'velocity' part of the latent space since we want to use
        only the 'position' to reconstruct the observations."""
K = x.shape[2]
mask = torch.ones_like(x)
mask[:, :, K//2:] = 0.0
return mask
class ModelBernoulli(ModelBase):
def _sample_lik(self, x: Tensor) -> Tensor:
p = self.g(x)[..., 0]
        y = Bernoulli(p).sample()  # Bernoulli is not reparameterizable; .rsample() would raise NotImplementedError
return y
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
p = self.g(x)[..., 0]
loglik = Bernoulli(p).log_prob(y)
loglik = reduce(loglik, "s m n d -> s m ()", "sum")
return loglik
class ModelContinuousBernoulli(ModelBase):
def _sample_lik(self, x: Tensor) -> Tensor:
p = self.g(x)[..., 0]
y = ContinuousBernoulli(p).rsample()
return y
def _eval_loglik(self, y: Tensor, x: Tensor) -> Tensor:
p = self.g(x)[..., 0]
loglik = ContinuousBernoulli(p).log_prob(y)
loglik = reduce(loglik, "s m n d -> s m ()", "sum")
return loglik
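# Construction sketch (hypothetical components, not part of the original file): ModelBase and
# its subclasses expect a prior ParameterDict with mu0/sig0 of shape (K,) and scalar (shape (1,))
# sigXi, mu_theta, sig_theta; `some_decoder` and `some_transition_function` stand in for real
# IDecoder/ITransitionFunction instances.
# prior_param = nn.ParameterDict({
#     "mu0": nn.Parameter(torch.zeros(K), requires_grad=False),
#     "sig0": nn.Parameter(torch.ones(K), requires_grad=False),
#     "sigXi": nn.Parameter(1e-2 * torch.ones(1), requires_grad=False),
#     "mu_theta": nn.Parameter(torch.zeros(1), requires_grad=False),
#     "sig_theta": nn.Parameter(torch.ones(1), requires_grad=False),
# })
# p = ModelNormal(prior_param, some_decoder, some_transition_function)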
| 6,335 | 29.315789 | 127 | py |
msvi | msvi-main/msvi/dataset.py | from typing import Union
import torch
from torch.utils.data import Dataset
Tensor = torch.Tensor
class TrajectoryDataset(Dataset):
"""Stores trajectories and time grids.
Used to store trajectories `y` and the corresponding time grids `t`.
Each trajectory is assumed to have three dimensions:
(time points, observation points, observation dim.).
    Each time grid is assumed to have two dimensions: (time points, 1).
If `max_len` is not None, a subtrajectory of length `max_len` is
selected from each trajectory and time grid.
Args:
t: Contains time grids of various lengths M.
The shape of each time grid t[i] must be (M_i, 1).
        y: Contains trajectories of various lengths.
The shape of each trajectory y[i] must be (M_i, N, D).
max_len: Length of subtrajectories selected from each trajectory and time grid.
"""
def __init__(self, t: list[Tensor], y: list[Tensor], max_len: Union[None, int] = None) -> None:
self.t = t
self.y = y
self.max_len = max_len
def __len__(self) -> int:
return len(self.t)
def __getitem__(self, idx: int) -> tuple[Tensor, Tensor, Tensor]:
t = self.t[idx]
y = self.y[idx]
traj_ind = torch.tensor(idx, dtype=torch.long)
if self.max_len is not None:
t = t[:self.max_len]
y = y[:self.max_len]
return t, y, traj_ind
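# Minimal usage sketch (synthetic shapes, not part of the original file): S=3 trajectories,
# each with M=25 time points, N=16 observation points and D=1 observation dimensions.
# t = [torch.linspace(0, 1, 25).reshape(25, 1) for _ in range(3)]
# y = [torch.randn(25, 16, 1) for _ in range(3)]
# ds = TrajectoryDataset(t, y, max_len=20)
# ti, yi, idx = ds[0]  # ti: (20, 1), yi: (20, 16, 1), idx: tensor(0)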
| 1,444 | 31.840909 | 99 | py |
msvi | msvi-main/msvi/decoder.py | from abc import ABC, abstractmethod
import torch
import torch.nn as nn
Tensor = torch.Tensor
Module = nn.Module
Parameter = nn.parameter.Parameter
class IDecoder(Module, ABC):
@abstractmethod
def forward(self, x: Tensor) -> Tensor:
"""Maps latent state to parameters of p(y|x).
Args:
x: Latent state. Has shape (S, M, K).
Returns:
param: Parameters of p(y|x). Has shape (S, M, N, D, num. of param. groups in p(y|x)).
                For example, the number of parameter groups in a Normal p(y|x) is 2 (mean and standard deviation).
"""
pass
@abstractmethod
def set_param(self, param: Tensor) -> None:
"""Sets parameters to `param`.
Args:
param: New parameter values.
"""
pass
@abstractmethod
def param_count(self) -> int:
"""Calculates the number of parameters.
Returns:
The number of parameters.
"""
pass
class NeuralDecoder(IDecoder):
"""Neural-network-based decoder."""
def __init__(self, decoder: Module, layers_to_count: list = []) -> None:
super().__init__()
self.decoder = decoder
self.layers_to_count = [
nn.Linear,
nn.Conv2d, nn.ConvTranspose2d,
nn.LayerNorm, nn.BatchNorm1d, nn.BatchNorm2d,
] # default
self.layers_to_count.extend(layers_to_count) # user-specified (must contain weight and bias)
def forward(self, x: Tensor) -> Tensor:
return self.decoder(x)
def set_param(self, param: Tensor) -> None:
# Note: after calling set_param() weight and bias of each layer will become tensors,
# so calling .parameters() will not show them.
assert self.param_count() == param.numel(), (
f"The size of param ({param.numel()}) must be the same as self.param_count()"
f"({self.param_count()})"
)
layers = self._get_layers(self.layers_to_count)
self._set_layer_param_to_vec(layers, param)
def param_count(self) -> int:
param_count = 0
layers = self._get_layers(self.layers_to_count)
for layer in layers:
self._check_weight_and_bias_of_layer(layer)
layer_param_count = layer.weight.numel() + layer.bias.numel()
param_count += layer_param_count
return param_count
def _get_layers(self, layer_types: list) -> list:
"""Returns all layers in `self.decoder` whose type is present in `layer_types`.
Args:
            layer_types: A list with the required layer types (e.g. nn.Linear).
Returns:
Layers of `self.decoder` whose type is in `layer_types`.
"""
return_layers = []
for layer in self.decoder.modules():
if type(layer) in layer_types:
return_layers.append(layer)
return return_layers
def _set_layer_param_to_vec(self, layers: list[Module], vec: Tensor) -> None:
"""Sets parameters of Modules in `layers` to elements of `vec`.
Args:
layers: List of Modules whose parameters need to be set.
vec: 1D Tensor with parameters.
"""
pointer = 0
for layer in layers:
self._check_weight_and_bias_of_layer(layer)
layer_param_count = layer.weight.numel() + layer.bias.numel() # type: ignore
layer_weight_count = layer.weight.numel() # type: ignore
layer_param = vec[pointer:pointer + layer_param_count]
layer_weight = layer_param[:layer_weight_count].view_as(layer.weight) # type: ignore
layer_bias = layer_param[layer_weight_count:].view_as(layer.bias) # type: ignore
self._del_set_layer_attr(layer, "weight", layer_weight)
self._del_set_layer_attr(layer, "bias", layer_bias)
pointer += layer_param_count
def _del_set_layer_attr(self, layer, attr_name, attr_val):
delattr(layer, attr_name)
setattr(layer, attr_name, attr_val)
def _check_weight_and_bias_of_layer(self, layer: Module) -> None:
assert (type(layer.weight) is Tensor or type(layer.weight) is Parameter), (
f"weight of layer {layer} must be Tensor or Parameter.")
assert (type(layer.bias) is Tensor or type(layer.bias) is Parameter), (
f"bias of layer {layer} must be Tensor or Parameter.")
| 4,429 | 34.15873 | 104 | py |
msvi | msvi-main/msvi/tf_encoder.py | import torch
import torch.nn as nn
Tensor = torch.Tensor
Module = nn.Module
class TFEncoder(nn.Module):
# Modified https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
def __init__(
self,
d_model: int,
self_attn: Module,
t: Tensor,
dim_feedforward: int = 2048,
dropout: float = 0.0,
layer_norm_eps: float = 1e-5,
**kwargs,
) -> None:
super().__init__()
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm([d_model], eps=layer_norm_eps)
self.norm2 = nn.LayerNorm([d_model], eps=layer_norm_eps)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = torch.nn.functional.relu # type: ignore
self.self_attn = self_attn
def forward(self, x: Tensor) -> Tensor:
x = self.norm1(x + self._sa_block(x))
x = self.norm2(x + self._ff_block(x))
return x
# self-attention block
def _sa_block(self, x: Tensor) -> Tensor:
x = self.self_attn(x, return_weights=False)[0]
return self.dropout1(x)
# feed forward block
def _ff_block(self, x: Tensor) -> Tensor:
x = self.linear2(self.dropout(self.activation(self.linear1(x))))
return self.dropout2(x)
| 1,465 | 27.745098 | 113 | py |
msvi | msvi-main/msvi/elbo.py | from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from msvi.model import IModel
from msvi.posterior import IVariationalPosterior, AmortizedMultipleShootingPosterior
from einops import repeat
Tensor = torch.Tensor
class IELBO(nn.Module, ABC):
@abstractmethod
def forward(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
scaler: float = 1.0,
) -> tuple[Tensor, ...]:
"""Evaluates ELBO for the observations y.
Args:
t: Time grid for the observations. Has shape (S, M, 1).
y: A batch of observations. Has shape (S, M, N, D).
batch_ids: Global indices of trajectories in the batch. Has shape (S, ).
block_size: Block size.
scaler: Scaler for KL(q(s_i)||p(s_i|s_i-1)) terms.
Returns:
Parts of the ELBO (L1, L2, L3), states (x), and shooting variables (s).
"""
pass
class ELBOBase(IELBO):
def __init__(
self,
p: IModel,
q: IVariationalPosterior,
) -> None:
super().__init__()
self.p = p
self.q = q
def forward(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
scaler: float = 1.0,
) -> tuple[Tensor, ...]:
# Sample approximate posterior.
self.p.set_theta(self.q.sample_theta())
s, x = self.q.sample_s(t, y, batch_ids, block_size)
# Calculate parts of ELBO.
L1 = self.calc_L1(x, y)
L2 = self.calc_L2(x, batch_ids, block_size, scaler)
L3 = self.calc_L3()
return L1, L2, L3, x, s
def calc_L1(self, x: Tensor, y: Tensor) -> Tensor:
return self.p.loglik(y, x).sum()
def calc_L2(self, x: Tensor, batch_ids: Tensor, block_size: int, scaler: float) -> Tensor:
raise NotImplementedError()
def calc_L3(self) -> Tensor:
n = self.q.posterior_param["mu_theta_g"].numel()
L3_0 = self.kl_norm_norm(
self.q.posterior_param["mu_theta_g"],
self.p.prior_param["mu_theta"].expand(n),
torch.exp(self.q.posterior_param["log_sig_theta_g"]),
self.p.prior_param["sig_theta"].expand(n),
).sum()
n = self.q.posterior_param["mu_theta_F"].numel()
L3_1 = self.kl_norm_norm(
self.q.posterior_param["mu_theta_F"],
self.p.prior_param["mu_theta"].expand(n),
torch.exp(self.q.posterior_param["log_sig_theta_F"]),
self.p.prior_param["sig_theta"].expand(n),
).sum()
return L3_0 + L3_1
def kl_norm_norm(self, mu0: Tensor, mu1: Tensor, sig0: Tensor, sig1: Tensor) -> Tensor:
"""Calculates KL divergence between two K-dimensional Normal
distributions with diagonal covariance matrices.
Args:
mu0: Mean of the first distribution. Has shape (*, K).
mu1: Mean of the second distribution. Has shape (*, K).
            sig0: Diagonal of the covariance matrix of the first distribution. Has shape (*, K).
            sig1: Diagonal of the covariance matrix of the second distribution. Has shape (*, K).
Returns:
KL divergence between the distributions. Has shape (*, 1).
"""
assert mu0.shape == mu1.shape == sig0.shape == sig1.shape, (f"{mu0.shape=} {mu1.shape=} {sig0.shape=} {sig1.shape=}")
a = (sig0 / sig1).pow(2).sum(-1, keepdim=True)
b = ((mu1 - mu0).pow(2) / sig1**2).sum(-1, keepdim=True)
c = 2 * (torch.log(sig1) - torch.log(sig0)).sum(-1, keepdim=True)
kl = 0.5 * (a + b + c - mu0.shape[-1])
return kl
class SingleShootingELBO(ELBOBase):
def calc_L2(self, x: Tensor, batch_ids: Tensor, block_size: int, scaler: float) -> Tensor:
S, M, K = x.shape
gamma = self.q.posterior_param["gamma"][batch_ids]
tau = torch.exp(self.q.posterior_param["log_tau"][batch_ids])
L2_0 = self.kl_norm_norm(
gamma[:, 0, :],
repeat(self.p.prior_param["mu0"], "k -> s k", s=S, k=K),
tau[:, 0, :],
repeat(self.p.prior_param["sig0"], "k -> s k", s=S, k=K)
).sum()
L2_1 = self.kl_norm_norm(
x[:, 1:-1, :],
x[:, 1:-1, :],
tau[:, 1:, :],
repeat(self.p.prior_param["sigXi"], "() -> s m k", s=S, m=M-2, k=K)
).sum()
return L2_0 + scaler * L2_1
class MultipleShootingELBO(ELBOBase):
def calc_L2(self, x: Tensor, batch_ids: Tensor, block_size: int, scaler: float) -> Tensor:
gamma = self.q.posterior_param["gamma"][batch_ids, ::block_size, :]
tau = torch.exp(self.q.posterior_param["log_tau"][batch_ids, ::block_size, :])
x_sub = x[:, 0:-1:block_size, :]
S, B, K = x_sub.shape
L2_0 = self.kl_norm_norm(
gamma[:, 0, :],
repeat(self.p.prior_param["mu0"], "k -> s k", s=S, k=K),
tau[:, 0, :],
repeat(self.p.prior_param["sig0"], "k -> s k", s=S, k=K)
).sum()
L2_1 = self.kl_norm_norm(
gamma[:, 1:, :],
x_sub[:, 1:, :],
tau[:, 1:, :],
repeat(self.p.prior_param["sigXi"], "() -> s b k", s=S, b=B-1, k=K)
).sum()
return L2_0 + scaler * L2_1
class AmortizedMultipleShootingELBO(ELBOBase):
def __init__(self, p: IModel, q: AmortizedMultipleShootingPosterior) -> None:
super().__init__(p, q)
self.q = q
def forward(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
scaler: float = 1.0,
) -> tuple[Tensor, ...]:
self.q.rec_net.update_time_grids(t) # update recognition network before sampling s
return super().forward(t, y, batch_ids, block_size, scaler)
def calc_L2(self, x: Tensor, batch_ids: Tensor, block_size: int, scaler: float) -> Tensor:
gamma = self.q.gamma[:, ::block_size, :]
tau = self.q.tau[:, ::block_size, :]
x_sub = x[:, 0:-1:block_size, :]
S, B, K = x_sub.shape
L2_0 = self.kl_norm_norm(
gamma[:, 0, :],
repeat(self.p.prior_param["mu0"], "k -> s k", s=S, k=K),
tau[:, 0, :],
repeat(self.p.prior_param["sig0"], "k -> s k", s=S, k=K)
).sum()
L2_1 = self.kl_norm_norm(
gamma[:, 1:, :],
x_sub[:, 1:, :],
tau[:, 1:, :],
repeat(self.p.prior_param["sigXi"], "() -> s b k", s=S, b=B-1, k=K)
).sum()
return L2_0 + scaler * L2_1
| 6,622 | 31.465686 | 125 | py |
msvi | msvi-main/msvi/attention.py | from abc import ABC, abstractmethod
from typing import Union
import numpy as np
import torch
import torch.nn as nn
Tensor = torch.Tensor
Module = nn.Module
class IAttention(Module, ABC):
@abstractmethod
def forward(
self,
x: Tensor,
return_weights: bool = True
) -> Union[tuple[Tensor, Tensor], tuple[Tensor, None]]:
"""Maps input sequence x to output sequence.
Args:
x: Input sequence. Has shape (S, M, K).
return_weights: If True, returns attention weights. Otherwise, returns None.
Returns:
y: Output sequence. Has shape (S, M, K).
attn_weights: Attention weights. Has shape (S, M, M).
None is returned if `return_weights` is False.
"""
pass
@abstractmethod
def update_time_grid(self, t: Tensor) -> None:
"""Updates all parts of the class that depend on time grids (except submodules
        which might also depend on time grids; those must be updated separately
(see msvi.rec_net)).
Args:
t: New time grids. Has shape (S, M, 1).
"""
pass
class AttentionBase(IAttention):
def __init__(self, d_model: int, rpe: Union[Module, None] = None, drop_prob: float = 0.0):
super().__init__()
self.d_model = d_model
self.rpe = rpe
self.drop_prob = drop_prob
def forward(self, x: Tensor, return_weights: bool = True) -> Union[tuple[Tensor, Tensor], tuple[Tensor, None]]:
attn_weights = self._eval_attn_weights(x)
output = self._eval_output(attn_weights, x)
if return_weights:
return output, attn_weights
else:
return output, None
def drop(self, w: Tensor) -> Tensor:
"""Sets an element of w to -inf with probability self.drop_prob.
        The diagonal and one of the neighboring elements are never dropped."""
dont_drop = torch.eye(w.shape[1], dtype=w.dtype, device=w.device) # leave the diagonal
inds = torch.arange(0, w.shape[1], 1, device=w.device)
shift = torch.randint(low=0, high=2, size=(w.shape[1],), device=w.device)
shift[0] = 1 # leave right neighbor for y1
shift[-1] = -1 # leave left neighbor for yM
shift[shift == 0] = -1 # randomly leave left or right neighbor for y2,...yM-1
dont_drop[inds, inds+shift] = 1
prob = torch.ones_like(w) * (1.0 - self.drop_prob)
prob = torch.clip(prob + dont_drop, 0, 1)
mask = torch.bernoulli(prob) # 1 - don't drop, 0 - drop
mask[mask == 0] = torch.inf
mask[mask == 1] = 0
return w - mask
def update_time_grid(self, t: Tensor) -> None:
pass
def _eval_attn_weights(self, x: Tensor) -> Tensor:
raise NotImplementedError()
def _eval_output(self, attn_weights: Tensor, x: Tensor) -> Tensor:
raise NotImplementedError()
class DotProductAttention(AttentionBase):
def __init__(self, d_model: int, rpe: Union[Module, None] = None, **kwargs):
super().__init__(d_model, rpe)
self.W_k = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_v = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_q = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_out = nn.Linear(self.d_model, self.d_model, bias=False)
def _eval_attn_weights(self, x: Tensor) -> Tensor:
Q, K = self.W_q(x), self.W_k(x)
unnorm_attn_weights = torch.bmm(Q, torch.transpose(K, 1, 2)) / self.d_model**0.5
attn_weights = nn.Softmax(-1)(unnorm_attn_weights)
return attn_weights
def _eval_output(self, attn_weights: Tensor, x: Tensor) -> Tensor:
V = self.W_v(x)
if self.rpe is None:
output = torch.bmm(attn_weights, V)
else:
output = torch.bmm(attn_weights, V) + (attn_weights.unsqueeze(-1) * self.rpe()).sum(2)
return self.W_out(output)
class TemporalAttention(AttentionBase):
def __init__(
self,
d_model: int,
t: Tensor,
eps: float,
delta_r: float,
p: float,
rpe: Union[Module, None] = None,
drop_prob: float = 0.0,
**kwargs
) -> None:
super().__init__(d_model, rpe, drop_prob)
self.eps = eps
self.delta_r = delta_r
self.p = p if p != -1 else torch.inf
self.W_v = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_out = nn.Linear(self.d_model, self.d_model, bias=False)
self.attn_weights: Tensor
self.update_time_grid(t)
def _eval_attn_weights(self, x: Tensor) -> Tensor:
if self.training:
attn_weights = nn.Softmax(-1)(self.drop(self.unnorm_temporal_attn_weights))
else:
attn_weights = nn.Softmax(-1)(self.unnorm_temporal_attn_weights)
return attn_weights
def _eval_output(self, attn_weights: Tensor, x: Tensor) -> Tensor:
assert x.shape[0:2] == attn_weights.shape[0:2], (
"Batch size and number of time points in `x` and `attn_weights` must be the same. "
f"Currently {x.shape=} and {attn_weights.shape=}."
)
V = self.W_v(x)
if self.rpe is None:
output = torch.bmm(attn_weights, V)
else:
output = torch.bmm(attn_weights, V) + (attn_weights.unsqueeze(-1) * self.rpe()).sum(2)
return self.W_out(output)
@torch.no_grad()
def update_time_grid(self, t: Tensor) -> None:
dt = torch.cdist(t, t, p=1).float()
self.unnorm_temporal_attn_weights = np.log(self.eps) * torch.pow(dt/self.delta_r, self.p)
class TemporalDotProductAttention(AttentionBase):
def __init__(
self,
d_model: int,
t: Tensor,
eps: float,
delta_r: float,
p: float,
rpe: Union[Module, None] = None,
drop_prob: float = 0.0,
**kwargs
) -> None:
super().__init__(d_model, rpe, drop_prob)
self.eps = eps
self.delta_r = delta_r
self.p = p if p != -1 else torch.inf
self.W_k = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_v = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_q = nn.Linear(self.d_model, self.d_model, bias=False)
self.W_out = nn.Linear(self.d_model, self.d_model, bias=False)
self.unnorm_temporal_attn_weights: Tensor
self.update_time_grid(t)
def _eval_attn_weights(self, x: Tensor) -> Tensor:
Q, K = self.W_q(x), self.W_k(x)
unnorm_dotprod_attn_weights = torch.bmm(Q, torch.transpose(K, 1, 2)) / self.d_model**0.5
if self.training:
attn_weights = nn.Softmax(-1)(self.drop(unnorm_dotprod_attn_weights + self.unnorm_temporal_attn_weights))
else:
attn_weights = nn.Softmax(-1)(unnorm_dotprod_attn_weights + self.unnorm_temporal_attn_weights)
return attn_weights
def _eval_output(self, attn_weights: Tensor, x: Tensor) -> Tensor:
assert x.shape[0:2] == attn_weights.shape[0:2], (
"Batch size and number of time points in `x` and `attn_weights` must be the same. "
f"Currently {x.shape=} and {attn_weights.shape=}."
)
V = self.W_v(x)
if self.rpe is None:
output = torch.bmm(attn_weights, V)
else:
output = torch.bmm(attn_weights, V) + (attn_weights.unsqueeze(-1) * self.rpe()).sum(2)
return self.W_out(output)
@torch.no_grad()
def update_time_grid(self, t: Tensor) -> None:
dt = torch.cdist(t, t, p=1).float()
self.unnorm_temporal_attn_weights = np.log(self.eps) * torch.pow(dt/self.delta_r, self.p)
class TemporalDotProductAttentionBaseline(TemporalDotProductAttention):
def __init__(
self,
d_model: int,
t: Tensor,
eps: float,
delta_r: float,
p: float,
n: int,
rpe: Union[Module, None] = None,
drop_prob: float = 0.0,
**kwargs
) -> None:
self.n = n
super().__init__(d_model, t, eps, delta_r, p, rpe, drop_prob, **kwargs)
@torch.no_grad()
def update_time_grid(self, t: Tensor) -> None:
super().update_time_grid(t)
self.unnorm_temporal_attn_weights += self._create_mask()
def _create_mask(self) -> Tensor:
M = self.unnorm_temporal_attn_weights.shape[1]
device = self.unnorm_temporal_attn_weights.device
ones = torch.ones((M, M), device=device).triu(self.n+1)
mask = ones + ones.T
mask[mask == 1] = -torch.inf
return mask.unsqueeze(0)
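# Illustration (not part of the original file): for n=1 and M=4 the additive mask keeps only
# pairs with |i - j| <= 1, so each time point attends to itself and its immediate neighbours:
# ones = torch.ones((4, 4)).triu(1 + 1)
# mask = ones + ones.T
# mask[mask == 1] = -torch.inf
# mask  # [[0, 0, -inf, -inf], [0, 0, 0, -inf], [-inf, 0, 0, 0], [-inf, -inf, 0, 0]]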
| 8,637 | 33.690763 | 117 | py |
msvi | msvi-main/msvi/trans_func.py | from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from torchdiffeq import odeint
from torchdiffeq import odeint_adjoint
from einops import rearrange
Tensor = torch.Tensor
Module = nn.Module
Parameter = nn.parameter.Parameter
class ITransitionFunction(Module, ABC):
@abstractmethod
def forward(self, s: Tensor, t: Tensor) -> Tensor:
"""Moves latent states forward in time.
Args:
s: Latent states. Has shape (S, B, K).
            t: Time grids over which the latent states are propagated. The first time point
                of each grid must be the temporal position of the corresponding latent state;
                new states are returned for the remaining time points. Has shape (S, B, block_size+1).
Returns:
s_new: New latent states. Has shape (S, B*block_size, K).
"""
pass
@abstractmethod
def set_param(self, param: Tensor) -> None:
"""Sets parameters of the module to `params`.
Args:
param: New parameter values.
"""
pass
@abstractmethod
def param_count(self) -> int:
"""Calculates the number of parameters over which the posterior is to be evaluated.
Returns:
The number of parameters.
"""
pass
class NeuralTransitionFunctionBase(ITransitionFunction):
def __init__(self, f: Module, layers_to_count: list = []):
super().__init__()
self.f = f
self.layers_to_count = [nn.Linear, nn.LayerNorm, nn.BatchNorm1d] # default
self.layers_to_count.extend(layers_to_count) # user-specified (must contain weight and bias)
def forward(self, s: Tensor, t: Tensor) -> Tensor:
raise NotImplementedError()
def set_param(self, param: Tensor) -> None:
assert self.param_count() == param.numel(), (
f"The size of param ({param.numel()}) must be the same as self.param_count()"
f"({self.param_count()})"
)
layers = self._get_layers(self.f, self.layers_to_count)
self._set_layer_param_to_vec(layers, param)
def param_count(self) -> int:
"""Each layer must contain weight and bias variables."""
param_count = 0
layers = self._get_layers(self.f, self.layers_to_count)
for layer in layers:
self._check_weight_and_bias_of_layer(layer)
layer_param_count = layer.weight.numel() + layer.bias.numel()
param_count += layer_param_count
return param_count
def _get_layers(self, f, layer_types: list) -> list:
"""Returns all layers in `f` whose type is present in `layer_types`.
Args:
            layer_types: A list with the required layer types (e.g. [nn.Linear]).
Returns:
A list of layers in `f` whose types are in `layer_types`
"""
return_layers = []
for fi in f.modules():
if type(fi) in layer_types:
return_layers.append(fi)
return return_layers
def _set_layer_param_to_vec(self, layers: list[Module], vec: torch.Tensor) -> None:
"""Sets parameters of Modules in `layers` to elements of `vec`.
Args:
layers: A list of Modules whose parameters need to be set.
vec: A 1D Tensor with the parameters.
"""
pointer = 0
for layer in layers:
self._check_weight_and_bias_of_layer(layer)
layer_param_count = layer.weight.numel() + layer.bias.numel() # type: ignore
layer_weight_count = layer.weight.numel() # type: ignore
layer_param = vec[pointer:pointer + layer_param_count]
layer_weight = layer_param[:layer_weight_count].view_as(layer.weight) # type: ignore
layer_bias = layer_param[layer_weight_count:].view_as(layer.bias) # type: ignore
self._del_set_layer_attr(layer, "weight", layer_weight)
self._del_set_layer_attr(layer, "bias", layer_bias)
pointer += layer_param_count
def _del_set_layer_attr(self, layer, attr_name, attr_val):
delattr(layer, attr_name)
setattr(layer, attr_name, attr_val)
def _check_weight_and_bias_of_layer(self, layer: Module) -> None:
assert (type(layer.weight) is Tensor or type(layer.weight) is Parameter), (
f"weight of layer {layer} must be Tensor or Parameter.")
assert (type(layer.bias) is Tensor or type(layer.bias) is Parameter), (
f"bias of layer {layer} must be Tensor or Parameter.")
class MLPTransitionFunction(NeuralTransitionFunctionBase):
"""Time steps must be uniform and the number of blocks must be M-1."""
def forward(self, s: Tensor, t: Tensor) -> Tensor:
return self.f(s)
class ODETransitionFunction(NeuralTransitionFunctionBase):
def __init__(self, f: Module, layers_to_count: list = [], solver_kwargs: dict = {}):
super().__init__(f, layers_to_count=layers_to_count)
if "adjoint" in solver_kwargs.keys():
self.adjoint = solver_kwargs["adjoint"] == 1
del solver_kwargs["adjoint"]
else:
self.adjoint = False
self.solver_kwargs = solver_kwargs
def forward(self, s: Tensor, t: Tensor) -> Tensor:
S, B, K, block_size = *s.shape, t.shape[2] - 1
s_new = torch.zeros((S, B, block_size, K), dtype=s.dtype, device=s.device)
delta = torch.diff(t, dim=2).to(s.dtype)
t_sim = torch.tensor([0., 1.], dtype=t.dtype, device=t.device)
for i in range(block_size):
f = self.get_scaled_dynamics_function(delta[:, :, [i]])
# Squeeze-unsqueeze to avoid in-place modification which causes error during backward pass.
if i == 0:
s0 = s.unsqueeze(-2)
else:
s0 = s_new[:, :, [i-1], :]
if self.adjoint is True:
s_new[:, :, i, :] = odeint_adjoint(f, s0.squeeze(-2), t_sim, **self.solver_kwargs)[-1] # type: ignore
else:
s_new[:, :, i, :] = odeint(f, s0.squeeze(-2), t_sim, **self.solver_kwargs)[-1] # type: ignore
return rearrange(s_new, "S B block_size K -> S (B block_size) K")
def get_dynamics_function(self):
def dynamics_function(t, x):
return self.f(x)
return dynamics_function
def get_scaled_dynamics_function(self, delta):
f = self.get_dynamics_function()
def scaled_dynamics_function(t, x):
return f(t, x) * delta
return scaled_dynamics_function
class ODETransitionFunctionSecondOrder(ODETransitionFunction):
def get_dynamics_function(self):
"""Assumes that x = (x^s || x^d), then returns dxdt=(x^d || f(x^s||x^d))."""
def dynamics_function(t, x):
K = x.shape[2]
dxdt = torch.cat((x[:, :, K//2:], self.f(x)), dim=2)
return dxdt
return dynamics_function
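# Note (clarification, not part of the original file): with the latent state split into
# position x^s = x[..., :K//2] and velocity x^d = x[..., K//2:], the ODE above is
# d(x^s)/dt = x^d and d(x^d)/dt = f(x), so `self.f` is expected to map K dimensions to K//2.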
| 6,931 | 34.917098 | 118 | py |
msvi | msvi-main/msvi/pos_enc.py | from typing import Union
import numpy as np
import torch
import torch.nn as nn
Tensor = torch.Tensor
Module = nn.Module
Sequential = nn.Sequential
class DiscreteSinCosPositionalEncoding(Module):
# Modified https://pytorch.org/tutorials/beginner/transformer_tutorial.html
def __init__(self, d_model: int, t: Tensor, max_tokens: int, dropout: float = 0.0, **kwargs):
assert d_model % 2 == 0, "d_model must be even."
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.d_model = d_model
self.max_tokens = max_tokens
self.update_time_grid(t)
def forward(self, x: Tensor) -> Tensor:
# x: Tensor, shape (S, M, K).
x = x + self.pe
return self.dropout(x)
def update_time_grid(self, t: Tensor) -> None:
# t: Tensor, shape (S, M, 1).
# assert torch.all((t - t[0]) < 1e-7).item() is True, "All time grids must be the same."
_, M, _ = t.shape
position = torch.arange(M, device=t.device).unsqueeze(1)
div_term = torch.exp(torch.arange(0, self.d_model, 2, device=t.device) * (-np.log(self.max_tokens) / self.d_model))
pe = torch.zeros(1, M, self.d_model, device=t.device)
pe[0, :, 0::2] = torch.sin(position * div_term)
pe[0, :, 1::2] = torch.cos(position * div_term)
self.pe = pe
class ContinuousSinCosPositionalEncoding(Module):
# Modified https://pytorch.org/tutorials/beginner/transformer_tutorial.html
def __init__(self, d_model: int, t: Tensor, max_tokens: int, max_time: float, dropout: float = 0.0, **kwargs):
assert d_model % 2 == 0, "d_model must be even."
super().__init__()
self.dropout = nn.Dropout(p=dropout)
self.d_model = d_model
self.max_tokens = max_tokens
self.max_time = max_time
self.update_time_grid(t)
def forward(self, x: Tensor) -> Tensor:
# x: Tensor, shape (S, M, K).
x = x + self.pe
return self.dropout(x)
def update_time_grid(self, t: Tensor) -> None:
# t: Tensor, shape (S, M, 1).
S, M, _ = t.shape
position = t / self.max_time * (self.max_tokens - 1)
div_term = torch.exp(torch.arange(0, self.d_model, 2, device=t.device) * (-np.log(self.max_tokens) / self.d_model)) # (K/2,)
pe = torch.zeros(S, M, self.d_model, device=t.device)
pe[:, :, 0::2] = torch.sin(position * div_term)
pe[:, :, 1::2] = torch.cos(position * div_term)
self.pe = pe
class RelativePositionalEncodingNN(Module):
def __init__(self, f: Union[Module, Sequential], t: Tensor, delta_r: float, **kwargs):
super().__init__()
self.f = f
self.delta_r = delta_r
self.squish_fn = nn.Hardtanh()
self.update_time_grid(t)
def forward(self) -> Tensor:
rpe = self.f(self.dt_prime_mat)
return rpe
def update_time_grid(self, t: Tensor) -> None:
# t: Tensor, shape (S, M, 1).
dt_mat = self._get_dt_matrix(t)
self.dt_prime_mat = self.squish_fn(dt_mat / self.delta_r).float()
def _get_dt_matrix(self, t: Tensor) -> Tensor:
"""Calculates the matrix of relative distances between all time points in `t`."""
dist_mat = torch.cdist(t, t, p=1) # (S, M, M)
dir_mat = torch.ones_like(dist_mat).triu() - torch.ones_like(dist_mat).tril() # (S, M, M)
dt_mat = (dir_mat * dist_mat).unsqueeze(-1) # (S, M, M, 1)
return dt_mat
class RelativePositionalEncodingInterp(Module):
def __init__(self, d_model: int, t: Tensor, delta_r: float, **kwargs):
super().__init__()
self.delta_r = delta_r
self.squish_fn = nn.Hardtanh()
self._set_random_vectors(d_model)
self.update_time_grid(t)
def forward(self) -> Tensor:
return self.pe
def update_time_grid(self, t: Tensor) -> None:
# t: Tensor, shape (S, M, 1).
dt_mat = self._get_dt_matrix(t)
dt_prime_mat = self.squish_fn(dt_mat / self.delta_r).float()
self.lm = (dt_prime_mat + 1) / 2
pe = ((1 - self.lm) * self.va + self.lm * self.vb)
self.pe = pe
def _set_random_vectors(self, d_model: int) -> None:
va_ = (torch.rand((1, d_model)) - 0.5) * 2
va = va_ / torch.linalg.norm(va_, ord=np.inf)
vb = -va
self.register_buffer("va", va)
self.register_buffer("vb", vb)
def _get_dt_matrix(self, t: Tensor) -> Tensor:
"""Calculates the matrix of relative distances between all time points in `t`."""
dist_mat = torch.cdist(t, t, p=1) # (S, M, M)
dir_mat = torch.ones_like(dist_mat).triu() - torch.ones_like(dist_mat).tril() # (S, M, M)
dt_mat = (dir_mat * dist_mat).unsqueeze(-1) # (S, M, M, 1)
return dt_mat
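# Illustration (not part of the original file): for a sorted grid t = (0.0, 0.1, 0.3) the signed
# distance matrix returned by _get_dt_matrix (before squishing by delta_r) has entries t_j - t_i:
# [[ 0.0,  0.1,  0.3],
#  [-0.1,  0.0,  0.2],
#  [-0.3, -0.2,  0.0]]
# positive entries point to future time points and negative entries to past ones.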
| 4,793 | 35.045113 | 133 | py |
msvi | msvi-main/msvi/posterior.py | from abc import ABC, abstractmethod
import torch
import torch.nn as nn
from msvi.trans_func import ITransitionFunction
from msvi.rec_net import RecognitionNet
Tensor = torch.Tensor
Module = nn.Module
ParameterDict = nn.ParameterDict
class IVariationalPosterior(ABC, Module):
@property
@abstractmethod
def posterior_param(self) -> nn.ParameterDict:
"""Returns parameters of the posterior distribution."""
pass
@abstractmethod
def sample_s(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
) -> tuple[Tensor, Tensor]:
"""Samples shooting variables (s_1, ..., s_B) from the posterior q(s|y).
Also returns states (x_1, ..., x_M).
Args:
t: Time points at which to evaluate the latent states. Has shape (S, M, 1).
y: Observations corresponding to the latent states. Used only for
amortized variational inference. Has shape (S, M, N, D).
batch_ids: Indices of the trajectories for which to sample the shooting variables.
Has shape (S, ).
block_size: Size of the blocks.
Returns:
A sample of the shooting variables with shape (S, B, K)
and the corresponding latent states with shape (S, M, K).
"""
pass
@abstractmethod
def sample_theta(self) -> dict[str, Tensor]:
"""Samples parameters of g and F from the posterior.
Returns:
Dictionary with a sample of the parameters.
"""
pass
class VariationalPosteriorBase(IVariationalPosterior):
def __init__(self, posterior_param_dict: ParameterDict):
super().__init__()
self._check_param_shapes(posterior_param_dict)
self._posterior_param = posterior_param_dict
@property
def posterior_param(self):
return self._posterior_param
def sample_theta(self):
mu_g, sig_g = self.posterior_param["mu_theta_g"], torch.exp(self.posterior_param["log_sig_theta_g"])
mu_F, sig_F = self.posterior_param["mu_theta_F"], torch.exp(self.posterior_param["log_sig_theta_F"])
theta = {
"theta_g": mu_g + sig_g * torch.randn_like(sig_g),
"theta_F": mu_F + sig_F * torch.randn_like(sig_F),
}
return theta
def _check_param_shapes(self, p: ParameterDict) -> None:
raise NotImplementedError()
def sample_s(self, t: Tensor, y: Tensor, batch_ids: Tensor, block_size: int) -> tuple[Tensor, Tensor]:
raise NotImplementedError()
class SingleShootingPosterior(VariationalPosteriorBase):
def __init__(
self,
posterior_param_dict: ParameterDict,
F: ITransitionFunction,
) -> None:
super().__init__(posterior_param_dict)
self.F = F
def sample_s(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
) -> tuple[Tensor, Tensor]:
gamma_0 = self.posterior_param["gamma"][batch_ids]
tau = torch.exp(self.posterior_param["log_tau"][batch_ids])
S, M, K = batch_ids.shape[0], t.shape[1], gamma_0.shape[2]
s = torch.zeros((S, M-1, K), device=tau.device)
x = torch.zeros((S, M, K), device=tau.device)
s[:, [0], :] = gamma_0 + tau[:, [0], :] * torch.randn((S, 1, K), device=tau.device)
x[:, [0], :] = s[:, [0], :]
for i in range(1, M):
x_i = self.F(s[:, [i-1], :], t=extract_time_grids(t[:, i-1:i+1, :], n_blocks=1))
x[:, [i], :] = x_i
if i < (M - 1):
s[:, [i], :] = x_i + tau[:, [i], :] * torch.randn((S, 1, K), device=tau.device)
return s, x
def _check_param_shapes(self, p: dict[str, Tensor]) -> None:
model_parameter_names = ["mu_theta_g", "mu_theta_F", "log_sig_theta_g", "log_sig_theta_F"]
for param_name in model_parameter_names:
assert len(p[param_name].shape) == 1, f"{param_name} must have shape (num_parameters, ) but has {p[param_name].shape}."
assert len(p["gamma"].shape) == 3 and p["gamma"].shape[1] == 1, f"gamma must have shape (S, 1, K) but has {p['gamma'].shape}."
assert len(p["log_tau"].shape) == 3, f"log_tau must have shape (S, M-1, K) but has {p['log_tau'].shape}."
class MultipleShootingPosterior(VariationalPosteriorBase):
def __init__(
self,
posterior_param_dict: ParameterDict,
F: ITransitionFunction
) -> None:
super().__init__(posterior_param_dict)
self.F = F
def sample_s(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
) -> tuple[Tensor, Tensor]:
gamma = self.posterior_param["gamma"][batch_ids, ::block_size, :]
tau = torch.exp(self.posterior_param["log_tau"][batch_ids, ::block_size, :])
s = gamma + tau * torch.randn_like(gamma)
S, M, B, K = batch_ids.shape[0], t.shape[1], gamma.shape[1], gamma.shape[2]
x = torch.zeros((S, M, K), device=tau.device)
x[:, [0], :] = s[:, [0], :]
x[:, 1:, :] = self.F(s, t=extract_time_grids(t, n_blocks=B))
return s, x
def _check_param_shapes(self, p: dict[str, Tensor]) -> None:
model_parameter_names = ["mu_theta_g", "mu_theta_F", "log_sig_theta_g", "log_sig_theta_F"]
for param_name in model_parameter_names:
assert len(p[param_name].shape) == 1, f"{param_name} must have shape (num_parameters, ) but has {p[param_name].shape}."
assert len(p["gamma"].shape) == 3, f"gamma must have shape (S, M, K) but has {p['gamma'].shape}."
assert p["gamma"].shape == p["log_tau"].shape, f"shapes of gamma ({p['gamma'].shape}) and log_tau ({p['log_tau'].shape}) must be the same."
class AmortizedMultipleShootingPosterior(VariationalPosteriorBase):
def __init__(
self,
posterior_param_dict: ParameterDict,
F: ITransitionFunction,
rec_net: RecognitionNet,
) -> None:
super().__init__(posterior_param_dict)
self.F = F
self.rec_net = rec_net
self.gamma: Tensor
self.tau: Tensor
def sample_s(
self,
t: Tensor,
y: Tensor,
batch_ids: Tensor,
block_size: int,
) -> tuple[Tensor, Tensor]:
assert y is not None, "Amortized posterior requires data y."
gamma, tau = self.rec_net(y)
self.gamma, self.tau = gamma[:, :-1, :], tau[:, :-1, :]
gamma = self.gamma[:, ::block_size, :]
tau = self.tau[:, ::block_size, :]
s = gamma + tau * torch.randn_like(tau)
S, M, B, K = batch_ids.shape[0], t.shape[1], gamma.shape[1], gamma.shape[2]
x = torch.zeros((S, M, K), device=tau.device)
x[:, [0], :] = s[:, [0], :]
x[:, 1:, :] = self.F(s, t=extract_time_grids(t, n_blocks=B))
return s, x
def _check_param_shapes(self, p: dict[str, Tensor]) -> None:
model_parameter_names = ["mu_theta_g", "mu_theta_F", "log_sig_theta_g", "log_sig_theta_F"]
for param_name in model_parameter_names:
assert len(p[param_name].shape) == 1, f"{param_name} must have shape (num_parameters, ) but has {p[param_name].shape}."
def extract_time_grids(t: Tensor, n_blocks: int) -> Tensor:
"""Extracts overlapping sub-grids from `t` for the given number of blocks.
Args:
t: Full time grids. Has shape (S, M, 1).
n_blocks: Number of blocks.
Returns:
sub_t: Overlapping sub-grids. Has shape (S, n_blocks, grid_size).
Simplified example:
        For t=(t1, t2, t3, t4, t5) and n_blocks=2 returns (t1, t2, t3), (t3, t4, t5).
"""
S, M = t.shape[0:2]
assert (M - 1) % n_blocks == 0, "All blocks must be of equal size."
grid_size = int((M - 1) / n_blocks) + 1
sub_t = torch.empty((S, n_blocks, grid_size), dtype=t.dtype, device=t.device)
for b, i in enumerate(range(0, M-grid_size+1, grid_size-1)):
sub_t[:, [b], :] = torch.transpose(t[:, i:i+grid_size, :], 1, 2)
return sub_t
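# Quick check of the docstring example (not part of the original file):
# t = torch.arange(1., 6.).reshape(1, 5, 1)  # t1..t5 for a single trajectory
# extract_time_grids(t, n_blocks=2)  # -> tensor([[[1., 2., 3.], [3., 4., 5.]]])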
| 8,083 | 33.4 | 147 | py |
msvi | msvi-main/msvi/rec_net.py | import torch
import torch.nn as nn
from einops import rearrange
Tensor = torch.Tensor
Module = nn.Module
class RecognitionNet(Module):
def __init__(
self,
phi_enc: Module,
phi_agg: Module,
phi_gamma: Module,
phi_tau: Module,
tau_min: float,
) -> None:
"""This class is used to convert observations to variational parameters.
There are four main components:
phi_enc: a point-wise encoder which maps y:(S, M, N, D) to a:(S, M, K').
phi_agg: a sequence to sequence function which maps a:(S, M, K') to b:(S, M, K').
phi_gamma/phi_tau: a point-wise function which maps b:(S, M, K') to gamma/tau:(S, M, K).
First, observations `y` are converted to a lower-dimensional form `a` by the encoder `phi_enc`.
Then, sequence `a` is aggregated into another sequence `b` by `phi_agg`.
Finally, variational parameters are extracted from `b` by `phi_gamma` and `phi_tau`.
"""
super().__init__()
self.phi_enc = phi_enc
self.phi_agg = phi_agg
self.phi_gamma = phi_gamma
self.phi_tau = phi_tau
self.tau_min = tau_min
def forward(self, y: Tensor) -> tuple[Tensor, Tensor]:
"""Converts observations to variational parameters.
Args:
y: Observations. Has shape (S, M, N, D).
Returns:
gamma: Variational parameters. Has shape (S, M, K).
tau: Variational parameters. Has shape (S, M, K).
"""
a = self.phi_enc(y)
b = self.phi_agg(a)
gamma = self.phi_gamma(b)
tau = torch.exp(self.phi_tau(b)) + self.tau_min
return gamma, tau
def apply_batch_norm(self, gamma, bn):
S, M, _ = gamma.shape
gamma = rearrange(gamma, "s m k -> (s m) k")
gamma = bn(gamma)
gamma = rearrange(gamma, "(s m) k -> s m k", s=S, m=M)
return gamma
def update_time_grids(self, t: Tensor) -> None:
"""Updates all parts of aggregation net that depend on time grids."""
for module in self.phi_agg.modules():
if not hasattr(module, "update_time_grid"):
continue
if callable(getattr(module, "update_time_grid")):
module.update_time_grid(t) # type: ignore
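# Shape summary (clarification, not part of the original file): for observations y of shape
# (S, M, N, D) the pipeline is y -> phi_enc -> a: (S, M, K') -> phi_agg -> b: (S, M, K')
# -> phi_gamma/phi_tau -> gamma, tau: (S, M, K); update_time_grids(t) must be called whenever
# the time grids change so that time-dependent layers inside phi_agg (e.g. temporal attention
# and positional encodings) are refreshed before the forward pass.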
class RecognitionNetSecondOrder(RecognitionNet):
"""Same as RecognitionNet but splits variational parameters into two groups."""
def __init__(
self,
phi_enc: Module,
phi_agg: Module,
phi_gamma: Module,
phi_tau: Module,
phi_agg_dyn: Module,
phi_gamma_dyn: Module,
phi_tau_dyn: Module,
tau_min: float,
) -> None:
super().__init__(phi_enc, phi_agg, phi_gamma, phi_tau, tau_min)
self.phi_agg_dyn = phi_agg_dyn
self.phi_gamma_dyn = phi_gamma_dyn
self.phi_tau_dyn = phi_tau_dyn
def forward(self, y: Tensor) -> tuple[Tensor, Tensor]:
a = self.phi_enc(y)
b_stat = self.phi_agg(a)
b_dyn = self.phi_agg_dyn(a)
gamma_stat = self.phi_gamma(b_stat)
tau_stat = torch.exp(self.phi_tau(b_stat)) + self.tau_min
gamma_dyn = self.phi_gamma_dyn(b_dyn)
tau_dyn = torch.exp(self.phi_tau_dyn(b_dyn))
gamma = torch.cat((gamma_stat, gamma_dyn), dim=2)
tau = torch.cat((tau_stat, tau_dyn), dim=2)
return gamma, tau
def update_time_grids(self, t: Tensor) -> None:
for agg_net in [self.phi_agg, self.phi_agg_dyn]:
for module in agg_net.modules():
if not hasattr(module, "update_time_grid"):
continue
if callable(getattr(module, "update_time_grid")):
module.update_time_grid(t) # type: ignore
def set_module_requires_grad(m: Module, value: bool):
for p in m.parameters():
p.requires_grad = value
| 3,937 | 31.01626 | 103 | py |
msvi | msvi-main/msvi/utils/utils.py | from types import SimpleNamespace
import torch
import torch.nn as nn
from einops import rearrange
from msvi.pos_enc import (
DiscreteSinCosPositionalEncoding,
ContinuousSinCosPositionalEncoding,
RelativePositionalEncodingInterp,
RelativePositionalEncodingNN
)
from msvi.attention import (
DotProductAttention,
TemporalAttention,
TemporalDotProductAttention,
TemporalDotProductAttentionBaseline,
)
from msvi.tf_encoder import TFEncoder
Tensor = torch.Tensor
Module = torch.nn.Module
Sequential = torch.nn.Sequential
def create_agg_net(param: SimpleNamespace, net_type: str) -> Sequential:
"""Constructs aggregation network."""
pos_enc_layers = {
"dsc": DiscreteSinCosPositionalEncoding,
"csc": ContinuousSinCosPositionalEncoding,
"rpeNN": RelativePositionalEncodingNN,
"rpeInterp": RelativePositionalEncodingInterp,
"none": None,
}
attn_layers = {
"dp": DotProductAttention,
"t": TemporalAttention,
"tdp": TemporalDotProductAttention,
"tdp_b": TemporalDotProductAttentionBaseline,
}
attn_key, pos_enc_key = param.h_agg_attn, param.h_agg_pos_enc
assert pos_enc_key in pos_enc_layers.keys(), f"Wrong position encoding name: {pos_enc_key}."
assert attn_key in attn_layers.keys(), f"Wrong attention layer name: {attn_key}."
t_init = torch.linspace(0, 1, 3).view(1, -1, 1) # update it later
pos_enc_args = {
"d_model": param.m_h*param.K,
"t": t_init,
"max_tokens": param.h_agg_max_tokens,
"max_time": param.h_agg_max_time,
"delta_r": param.h_agg_delta_r,
"f": nn.Linear(1, param.m_h*param.K, bias=False),
}
attn_args = {
"d_model": param.m_h*param.K,
"t": t_init,
"eps": 1e-2,
"delta_r": param.h_agg_delta_r,
"p": param.h_agg_p,
"n": param.n,
"drop_prob": param.drop_prob,
}
if net_type == "static":
param.h_agg_layers = param.h_agg_stat_layers
elif net_type == "dynamic":
param.h_agg_layers = param.h_agg_dyn_layers
modules = []
if pos_enc_key in ["dsc", "csc"]: # absolute positional encodings
pos_enc = pos_enc_layers[pos_enc_key](**pos_enc_args)
tf_enc_blocks = []
for _ in range(param.h_agg_layers):
tf_enc_block = TFEncoder(
d_model=param.m_h*param.K,
self_attn=attn_layers[attn_key](**attn_args),
t=t_init,
dim_feedforward=2*param.m_h*param.K,
)
tf_enc_blocks.append(tf_enc_block)
modules.extend([pos_enc, *tf_enc_blocks])
else: # relative positional encodings
if pos_enc_key == "none":
print("Using no positional encodings!")
pos_enc = None
else:
pos_enc = pos_enc_layers[pos_enc_key](**pos_enc_args)
tf_enc_blocks = []
for i in range(param.h_agg_layers):
if i == 0:
self_attn = attn_layers["t"](rpe=pos_enc, **attn_args)
else:
self_attn = attn_layers[attn_key](rpe=pos_enc, **attn_args)
tf_enc_block = TFEncoder(
d_model=param.m_h*param.K,
self_attn=self_attn,
t=t_init,
dim_feedforward=2*param.m_h*param.K,
)
tf_enc_blocks.append(tf_enc_block)
modules.extend(tf_enc_blocks)
return nn.Sequential(*modules)
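# Note (summary, not part of the original file): `param` is a SimpleNamespace expected to provide
# at least m_h, K, h_agg_attn, h_agg_pos_enc, h_agg_max_tokens, h_agg_max_time, h_agg_delta_r,
# h_agg_p, n, drop_prob and h_agg_stat_layers/h_agg_dyn_layers; `net_type` selects between the
# "static" and "dynamic" layer counts. The returned nn.Sequential maps sequences of shape
# (S, M, m_h*K) to sequences of the same shape.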
class CNNEncoder(Module):
"""Mapping from R^{NxD} to R^K."""
def __init__(self, K: int, N: int, D: int, n_channels: int) -> None:
super().__init__()
self.K = K
self.N = N
self.D = D
self.n_channels = n_channels
self.img_size = int(N**0.5)
self.n_feat = (self.img_size//16)**2 * (8 * n_channels)
self.f = nn.Sequential(
nn.Conv2d(D, n_channels, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
nn.BatchNorm2d(n_channels), # img_size/2
nn.Conv2d(n_channels, 2*n_channels, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
nn.BatchNorm2d(2*n_channels), # img_size/4
nn.Conv2d(2*n_channels, 4*n_channels, kernel_size=5, stride=2, padding=2),
nn.ReLU(),
nn.BatchNorm2d(4*n_channels), # img_size/8
nn.Conv2d(4*n_channels, 8*n_channels, kernel_size=2, stride=2, padding=0),
nn.ReLU(),
nn.BatchNorm2d(8*n_channels), # img_size/16
nn.Flatten(),
nn.Linear(self.n_feat, K),
)
def forward(self, x: Tensor) -> Tensor:
# x: Tensor, shape (S, M, N, D)
S, M, _, _ = x.shape
x = rearrange(x, "s m (h w) d -> (s m) d h w", h=self.img_size, w=self.img_size)
x = self.f(x)
x = rearrange(x, "(s m) k -> s m k", s=S, m=M)
return x
class CNNDecoder(Module):
"""Mapping from R^K to R^{NxDxn_param}."""
def __init__(self, K: int, N: int, D: int, n_param: int, n_channels: int) -> None:
super().__init__()
self.K = K
self.N = N
self.D = D
self.n_param = n_param
self.n_channels = n_channels
self.img_size = int(N**0.5)
self.n_feat = (self.img_size//16)**2 * (8 * n_channels)
self.lin_layer = nn.Linear(K, self.n_feat)
self.f = nn.Sequential(
nn.ConvTranspose2d(8*n_channels, 4*n_channels, kernel_size=2, stride=2),
nn.ReLU(),
nn.BatchNorm2d(4*n_channels), # img_size/8
nn.ConvTranspose2d(4*n_channels, 2*n_channels, kernel_size=2, stride=2),
nn.ReLU(),
nn.BatchNorm2d(2*n_channels), # img_size/4
nn.ConvTranspose2d(2*n_channels, n_channels, kernel_size=2, stride=2),
nn.ReLU(),
nn.BatchNorm2d(n_channels), # img_size/2
nn.ConvTranspose2d(n_channels, n_channels, kernel_size=2, stride=2),
nn.ReLU(),
nn.BatchNorm2d(n_channels), # img_size
nn.Conv2d(n_channels, D*n_param, kernel_size=5, padding=2),
)
def forward(self, x: Tensor) -> Tensor:
# x: Tensor, shape (S, M, K)
S, M, _ = x.shape
nc, h = 8*self.n_channels, self.img_size//16
x = rearrange(self.lin_layer(x), "s m (nc h w) -> (s m) nc h w", nc=nc, h=h, w=h)
x = self.f(x)
x = rearrange(x, "(s m) (d npar) h w -> s m (h w) d npar", s=S, m=M, d=self.D, npar=self.n_param)
return x
class Sine(nn.Module):
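    """Sine activation sin(w * x) with a learnable frequency w (`bias` is defined but not used in `forward`)."""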
def __init__(self, w=1.0):
super().__init__()
self.weight = nn.parameter.Parameter(torch.tensor(w), True)
self.bias = nn.parameter.Parameter(torch.tensor(0.0), False)
def forward(self, x):
return torch.sin(self.weight * x)
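# Hedged shape-check example for the encoder/decoder pair above (values are illustrative;
# N must be a perfect square whose image side is divisible by 16):
# enc = CNNEncoder(K=32, N=1024, D=1, n_channels=8)
# dec = CNNDecoder(K=32, N=1024, D=1, n_param=2, n_channels=8)
# y = torch.rand(2, 5, 1024, 1)   # (S, M, N, D): 2 sequences, 5 frames, 32x32 pixels, 1 channel
# z = enc(y)                      # (S, M, K) = (2, 5, 32)
# y_par = dec(z)                  # (S, M, N, D, n_param) = (2, 5, 1024, 1, 2)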
| 6,830 | 31.221698 | 105 | py |
msvi | msvi-main/msvi/utils/pendulum.py | import os
import pickle
import argparse
from typing import Union
from types import SimpleNamespace
import torch
import torch.nn as nn
import torchvision.transforms
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from einops import rearrange
from msvi.model import ModelNormal, ModelNormalSecondOrder
from msvi.posterior import AmortizedMultipleShootingPosterior
from msvi.elbo import AmortizedMultipleShootingELBO
from msvi.decoder import NeuralDecoder
from msvi.trans_func import ODETransitionFunction, ODETransitionFunctionSecondOrder
from msvi.rec_net import RecognitionNet, RecognitionNetSecondOrder
from msvi.dataset import TrajectoryDataset
from msvi.utils.utils import create_agg_net, Sine, CNNEncoder, CNNDecoder
plt.style.use("seaborn") # type: ignore
sns.set_style("whitegrid")
ndarray = np.ndarray
Tensor = torch.Tensor
Sequential = nn.Sequential
DataDict = dict[str, dict[str, list]]
TensorDataDict = dict[str, dict[str, list[Tensor]]]
Module = nn.Module
DATASET_NAME = "PENDULUM"
def create_argparser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# Data.
parser.add_argument("--data_folder", type=str, default="./experiments/data/datasets/pendulum/", help="Path to the dataset.")
parser.add_argument("--N", type=int, default=1024, help="Number of observation points.")
parser.add_argument("--D", type=int, default=1, help="Dimensionality of observation.")
parser.add_argument("--max_len", type=int, default=None, help="Truncation length for trajectories.")
parser.add_argument("--sigY", type=float, default=1e-3, help="Observation noise.")
# Model (common).
parser.add_argument("--K", type=int, default=32, help="Latent space dimension.")
parser.add_argument("--Xi", type=float, default=1e-4, help="Used to set variance for the continuity prior.")
parser.add_argument("--block_size", type=int, default=1, help="Number of time points in each block.")
# Model (g).
parser.add_argument("--g_cnn_channels", type=int, default=8, help="Channels in CNNDecoder.")
# Model (F).
parser.add_argument("--m_F", type=int, default=8, help="Dimensionality scaler for F.")
parser.add_argument("--F_nonlin", type=str, default="relu", help="Nonlinearity for F.")
parser.add_argument("--dyn_order", type=int, default=2, help="Order of the dynamcis function, must be 1 or 2.")
# Model (h).
parser.add_argument("--m_h", type=int, default=4, help="Dimensionality scaler for h.")
parser.add_argument("--h_enc_cnn_channels", type=int, default=8, help="Channels in CNNEncoder.")
parser.add_argument("--h_agg_attn", type=str, default="tdp", help="Attention type (dp, t, tdp, tdp_b).")
parser.add_argument("--h_agg_pos_enc", type=str, default="rpeNN", help="Position encoding type (csc, rpeNN, rpeInterp).")
parser.add_argument("--h_agg_stat_layers", type=int, default=4, help="Number of TFEncoder layers in static aggregation net.")
parser.add_argument("--h_agg_dyn_layers", type=int, default=8, help="Number of TFEncoder layers in dynamic aggregation net.")
parser.add_argument("--h_agg_max_tokens", type=int, default=51, help="Maximum expected number of tokens.")
parser.add_argument("--h_agg_max_time", type=float, default=3.0, help="Maximum expected observation time.")
parser.add_argument("--h_agg_delta_r", type=float, default=0.45, help="Attention time span at training time.")
parser.add_argument("--h_agg_p", type=float, default=-1, help="Exponent for temporal attention (use -1 for p=inf).")
parser.add_argument("--n", type=int, default=1, help="Number of nearest neighbors used for baseline aggregation net.")
parser.add_argument("--drop_prob", type=float, default=0.1, help="Attention dropout probability.") # 0.1
parser.add_argument("--tau_min", type=float, default=2e-2, help="Lower bound on the variance of q(s_i).") # 2e-2
parser.add_argument("--sigT", type=float, default=0.0, help="Scale of the noise added to the time grids for temporal neighborhood adjustment.") # 0.00025
# Training/validation/testing.
parser.add_argument("--scaler", type=float, default=1, help="Scaler for ELBO L2 term.")
parser.add_argument("--n_iters", type=int, default=300000, help="Number of training iterations.")
parser.add_argument("--lr", type=float, default=3e-4, help="Learning rate.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size.")
parser.add_argument("--solver", type=str, default="dopri5", help="Name of the ODE solver (see torchdiffeq).")
parser.add_argument("--rtol", type=float, default=1e-5, help="Relative tolerance for ODE solver.")
parser.add_argument("--atol", type=float, default=1e-5, help="Absolute tolerance for ODE solver.")
parser.add_argument("--adjoint", type=int, default=0, help="Use adjoint to evaluate gradient flag (0 - no, 1 - yes).")
parser.add_argument("--device", type=str, default="cuda", help="Device (cpu or cuda)")
parser.add_argument("--seed", type=int, default=13, help="Random seed.")
parser.add_argument("--group", default="None", help="Group for wandb.")
parser.add_argument("--tags", default=["no_tag"], nargs="+", help="Tags for wandb.")
parser.add_argument("--name", type=str, default="tmp", help="Name of the run.")
parser.add_argument("--visualize", type=int, default=1, help="Visualize predictions on validation set flag (0 - no, 1 - yes).")
parser.add_argument("--n_mc_samples", type=int, default=10, help="Number of samples for Monte Carlo integration.")
parser.add_argument("--delta_inf", type=float, default=0.45, help="Fraction of obsevations used for x0 inference at test time.")
parser.add_argument("--model_folder", type=str, default="./models/pendulum/", help="Folder for saving/loading models.")
return parser
def create_datasets(param: SimpleNamespace, device=None) -> tuple[TrajectoryDataset, ...]:
data = read_data(param.data_folder)
data = preprocess_data(data)
data = to_tensors(data, device)
train_dataset = TrajectoryDataset(data["train"]["t"], data["train"]["y"], param.max_len)
val_dataset = TrajectoryDataset(data["val"]["t"], data["val"]["y"], param.max_len)
test_dataset = TrajectoryDataset(data["test"]["t"], data["test"]["y"], param.max_len)
return train_dataset, val_dataset, test_dataset
def read_data(path: str) -> DataDict:
"""Reads data from folder `path` which contains subfolders train, val and test.
Each subfolder contains ndarrays with time grids and trajectories stored as
t.pkl and y.pkl files."""
data = {}
data["train"] = read_pickle(["t", "y"], path+"train/")
data["val"] = read_pickle(["t", "y"], path+"val/")
data["test"] = read_pickle(["t", "y"], path+"test/")
return data
def preprocess_data(data: DataDict) -> DataDict:
data["train"], train_stats = _preprocess_data(data["train"])
data["val"], _ = _preprocess_data(data["val"], train_stats)
data["test"], _ = _preprocess_data(data["test"], train_stats)
return data
def add_noise(data: DataDict, sig: float, seed: int) -> DataDict:
np.random.seed(seed)
for i in range(len(data["train"]["y"])):
data["train"]["y"][i] += np.random.randn(*data["train"]["y"][i].shape) * sig
for i in range(len(data["val"]["y"])):
data["val"]["y"][i] += np.random.randn(*data["val"]["y"][i].shape) * sig
for i in range(len(data["test"]["y"])):
data["test"]["y"][i] += np.random.randn(*data["test"]["y"][i].shape) * sig
return data
def to_tensors(data: DataDict, device=None) -> TensorDataDict:
tensor_data = {}
tensor_data["train"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["train"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["train"]["y"]],
}
tensor_data["val"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["val"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["val"]["y"]],
}
tensor_data["test"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["test"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["test"]["y"]],
}
return tensor_data
def create_dataloaders(
param: SimpleNamespace,
train_dataset: TrajectoryDataset,
val_dataset: TrajectoryDataset,
test_dataset: TrajectoryDataset
) -> tuple[DataLoader, ...]:
train_loader = DataLoader(
train_dataset,
batch_size=param.batch_size,
shuffle=True,
pin_memory=False,
)
val_loader = DataLoader(val_dataset, batch_size=param.batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=param.batch_size, shuffle=False)
return train_loader, val_loader, test_loader
def _preprocess_data(
data: dict[str, list],
stats: Union[None, dict] = None
) -> tuple[dict[str, list], Union[None, dict]]:
is_train = stats is None
if is_train:
stats = {
"T_max": np.max([np.max(ti) for ti in data["t"]]),
"y_max": np.max([np.max(yi) for yi in data["y"]]),
}
for i in range(len(data["t"])):
# Normalize time grid.
# data["t"][i] = data["t"][i].astype(np.float64) / stats["T_max"]
# Normalize images.
data["y"][i] = data["y"][i].astype(np.float32) / stats["y_max"]
# Swap last two dimensions for compatibility with (S, M, N, D) dimensions.
data["y"][i] = np.transpose(data["y"][i], (0, 2, 1))
if is_train:
return data, stats
else:
return data, None
def read_pickle(keys: list[str], path: str = "./") -> dict[str, ndarray]:
data_dict = {}
for key in keys:
with open(path+key+".pkl", "rb") as f:
data_dict[key] = pickle.load(f)
return data_dict
def get_model_components(
param: SimpleNamespace,
) -> tuple[NeuralDecoder, ODETransitionFunction, RecognitionNet]:
nonlins = {
"relu": nn.ReLU,
"tanh": nn.Tanh,
"gelu": nn.GELU,
"mish": nn.Mish,
"sine": Sine,
}
# Decoder.
g = NeuralDecoder(
decoder=nn.Sequential(
CNNDecoder(param.K, param.N, param.D, 2, param.g_cnn_channels),
ToNormalParameters(param.sigY),
),
)
# Transition function and recognition network.
solver_kwargs = {
"method": param.solver,
"rtol": param.rtol,
"atol": param.atol,
"adjoint": param.adjoint,
"options": {"step_size": 0.2},
}
if param.dyn_order == 1:
F = ODETransitionFunction(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNet(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K),
phi_tau=nn.Linear(param.m_h*param.K, param.K),
tau_min=param.tau_min,
)
elif param.dyn_order == 2:
assert param.K % 2 == 0, "Latent dimension `K` must be divisible by 2."
F = ODETransitionFunctionSecondOrder(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K//2)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNetSecondOrder(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_agg_dyn=create_agg_net(param, "dynamic"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K//2),
phi_gamma_dyn=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau_dyn=nn.Linear(param.m_h*param.K, param.K//2),
tau_min=param.tau_min,
)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
return g, F, h
def create_elbo(
g: NeuralDecoder,
F: ODETransitionFunction,
h: RecognitionNet,
param: SimpleNamespace
) -> AmortizedMultipleShootingELBO:
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([param.K]), False),
"sig0": Parameter(1.0 * torch.ones([param.K]), False),
"sigXi": Parameter(param.Xi / param.K**0.5 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(torch.cat([par.detach().reshape(-1) for par in g.parameters()])),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(torch.cat([par.detach().reshape(-1) for par in F.parameters()])),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
})
if param.dyn_order == 1:
p = ModelNormal(prior_param_dict, g, F)
elif param.dyn_order == 2:
p = ModelNormalSecondOrder(prior_param_dict, g, F)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
q = AmortizedMultipleShootingPosterior(posterior_param_dict, F, h)
elbo = AmortizedMultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def visualize_trajectories(
traj: list[ndarray],
vis_inds: list[int],
title: str,
path: str,
img_name: str,
) -> None:
if not os.path.isdir(path):
os.makedirs(path)
img_size = 32
panel_size = 5
n_row = len(traj)
n_col = len(vis_inds)
fig, ax = plt.subplots(n_row, n_col, figsize=(panel_size*n_col, panel_size*n_row), squeeze=False)
for i in range(n_row):
for j in range(n_col):
ax[i, j].imshow(traj[i][0, vis_inds[j], :, 0].reshape(img_size, img_size)) # type: ignore
ax[i, j].grid(False) # type: ignore
# fig.colorbar(im, ax=ax[i, j], orientation='vertical') # type: ignore
fig.suptitle(title, fontsize=45)
fig.tight_layout()
plt.savefig(path+img_name)
plt.close()
class ToNormalParameters(Module):
"""Converts output of CNNDecoder to parameters of p(y|x)."""
def __init__(self, sigY) -> None:
super().__init__()
self.sigY = sigY
def forward(self, x):
x[..., 0] = torch.sigmoid(x[..., 0]) # to keep mean \in (0, 1)
x[..., 1] = self.sigY # fix standard deviation
return x
def get_data_transform():
transform = torchvision.transforms.Compose(
[
torchvision.transforms.RandomHorizontalFlip(p=0.5),
]
)
def apply_transform(y: Tensor) -> Tensor:
_, m, n, d = y.shape
h, w = 32, 32
y = rearrange(y, "s m (h w) d -> s (m d) h w", h=h, w=w)
y = transform(y)
y = rearrange(y, "s (m d) h w -> s m (h w) d", m=m, d=d)
return y
return apply_transform
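# Hedged example: frames are packed as channels before the flip, so every frame of a
# trajectory receives the same flip decision.
# y = torch.rand(4, 10, 1024, 1)        # (S, M, N, D) with 32x32 frames
# y_aug = get_data_transform()(y)       # same shape, horizontally flipped with p=0.5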
def get_scheduler(optimizer, n_iters, lr):
sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=(1e-5/lr)**(1.0/n_iters))
return sched
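# Hedged end-to-end sketch of how the helpers in this module fit together (the training step
# itself depends on the AmortizedMultipleShootingELBO interface, which is defined elsewhere,
# and is therefore omitted; `elbo.parameters()` assumes the ELBO object is an nn.Module):
# param = create_argparser().parse_args([])                    # default hyperparameters
# train_ds, val_ds, test_ds = create_datasets(param, device=param.device)
# train_loader, val_loader, test_loader = create_dataloaders(param, train_ds, val_ds, test_ds)
# g, F, h = get_model_components(param)
# elbo = create_elbo(g, F, h, param)
# opt = torch.optim.Adam(elbo.parameters(), lr=param.lr)
# sched = get_scheduler(opt, param.n_iters, param.lr)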
| 15,776 | 39.557841 | 158 | py |
msvi | msvi-main/msvi/utils/bballs.py | import os
import pickle
import argparse
from typing import Union
from types import SimpleNamespace
import torch
import torch.nn as nn
import torchvision.transforms
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from msvi.model import ModelNormal, ModelNormalSecondOrder
from msvi.posterior import AmortizedMultipleShootingPosterior
from msvi.elbo import AmortizedMultipleShootingELBO
from msvi.decoder import NeuralDecoder
from msvi.trans_func import ODETransitionFunction, ODETransitionFunctionSecondOrder
from msvi.rec_net import RecognitionNet, RecognitionNetSecondOrder
from msvi.dataset import TrajectoryDataset
from msvi.utils.utils import create_agg_net, Sine, CNNEncoder, CNNDecoder
from einops import rearrange
plt.style.use("seaborn") # type: ignore
sns.set_style("whitegrid")
ndarray = np.ndarray
Tensor = torch.Tensor
Sequential = nn.Sequential
DataDict = dict[str, dict[str, list]]
TensorDataDict = dict[str, dict[str, list[Tensor]]]
Module = nn.Module
DATASET_NAME = "BOUNCING_BALLS"
def create_argparser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# Data.
parser.add_argument("--data_folder", type=str, default="./experiments/data/datasets/bballs/", help="Path to the dataset.")
parser.add_argument("--N", type=int, default=1024, help="Number of observation points.")
parser.add_argument("--D", type=int, default=1, help="Dimensionality of observation.")
parser.add_argument("--max_len", type=int, default=None, help="Truncation length for trajectories.")
parser.add_argument("--sigY", type=float, default=1e-3, help="Observation noise.")
# Model (common).
parser.add_argument("--K", type=int, default=32, help="Latent space dimension.")
parser.add_argument("--Xi", type=float, default=1e-3, help="Used to set variance for the continuity prior.")
parser.add_argument("--block_size", type=int, default=5, help="Number of time points in each block.")
# Model (g).
parser.add_argument("--g_cnn_channels", type=int, default=32, help="Channels in CNNDecoder.")
# Model (F).
parser.add_argument("--m_F", type=int, default=32, help="Dimensionality scaler for F.")
parser.add_argument("--F_nonlin", type=str, default="relu", help="Nonlinearity for F.")
parser.add_argument("--dyn_order", type=int, default=2, help="Order of the dynamcis function, must be 1 or 2.")
# Model (h).
parser.add_argument("--m_h", type=int, default=4, help="Dimensionality scaler for h.")
parser.add_argument("--h_enc_cnn_channels", type=int, default=32, help="Channels in CNNEncoder.")
parser.add_argument("--h_agg_attn", type=str, default="tdp", help="Attention type (dp, t, tdp, tdp_b).")
parser.add_argument("--h_agg_pos_enc", type=str, default="rpeNN", help="Position encoding type (csc, rpeNN, rpeInterp).")
parser.add_argument("--h_agg_stat_layers", type=int, default=4, help="Number of TFEncoder layers in static aggregation net.")
parser.add_argument("--h_agg_dyn_layers", type=int, default=8, help="Number of TFEncoder layers in dynamic aggregation net.")
parser.add_argument("--h_agg_max_tokens", type=int, default=51, help="Maximum expected number of tokens.")
parser.add_argument("--h_agg_max_time", type=float, default=20.0, help="Maximum expected observation time.")
parser.add_argument("--h_agg_delta_r", type=float, default=3.0, help="Attention time span at training time.")
parser.add_argument("--h_agg_p", type=float, default=-1, help="Exponent for temporal attention (use -1 for p=inf).")
parser.add_argument("--n", type=int, default=1, help="Number of nearest neighbors used for baseline aggregation net.")
parser.add_argument("--drop_prob", type=float, default=0.1, help="Attention dropout probability.") # 0.1
parser.add_argument("--tau_min", type=float, default=2e-2, help="Lower bound on the variance of q(s_i).") # 2e-2
parser.add_argument("--sigT", type=float, default=0.0, help="Scale of the noise added to the time grids for temporal neighborhood adjustment.") # TODO: X for regular grids
# Training/validation/testing.
parser.add_argument("--scaler", type=float, default=1, help="Scaler for ELBO L2 term.")
parser.add_argument("--n_iters", type=int, default=300000, help="Number of training iterations.")
parser.add_argument("--lr", type=float, default=3e-4, help="Learning rate.")
parser.add_argument("--batch_size", type=int, default=64, help="Batch size.") # 16 at least for block_size=1
parser.add_argument("--solver", type=str, default="dopri5", help="Name of the ODE solver (see torchdiffeq).")
parser.add_argument("--rtol", type=float, default=1e-5, help="Relative tolerance for ODE solver.")
parser.add_argument("--atol", type=float, default=1e-5, help="Absolute tolerance for ODE solver.")
parser.add_argument("--adjoint", type=int, default=0, help="Use adjoint to evaluate gradient flag (0 - no, 1 - yes).")
parser.add_argument("--device", type=str, default="cuda", help="Device (cpu or cuda)")
parser.add_argument("--seed", type=int, default=13, help="Random seed.")
parser.add_argument("--group", default="None", help="Group for wandb.")
parser.add_argument("--tags", default=["no_tag"], nargs="+", help="Tags for wandb.")
parser.add_argument("--name", type=str, default="tmp", help="Name of the run.")
parser.add_argument("--visualize", type=int, default=1, help="Visualize predictions on validation set flag (0 - no, 1 - yes).")
parser.add_argument("--n_mc_samples", type=int, default=10, help="Number of samples for Monte Carlo integration.")
parser.add_argument("--delta_inf", type=float, default=3.0, help="Attention time span at test time.")
parser.add_argument("--model_folder", type=str, default="./models/bballs/", help="Folder for saving/loading models.")
return parser
def create_datasets(param: SimpleNamespace, device=None) -> tuple[TrajectoryDataset, ...]:
data = read_data(param.data_folder)
data = preprocess_data(data)
data = to_tensors(data, device)
train_dataset = TrajectoryDataset(data["train"]["t"], data["train"]["y"], param.max_len)
val_dataset = TrajectoryDataset(data["val"]["t"], data["val"]["y"], param.max_len)
test_dataset = TrajectoryDataset(data["test"]["t"], data["test"]["y"], param.max_len)
return train_dataset, val_dataset, test_dataset
def read_data(path: str) -> DataDict:
"""Reads data from folder `path` which contains subfolders train, val and test.
Each subfolder contains ndarrays with time grids and trajectories stored as
t.pkl and y.pkl files."""
data = {}
data["train"] = read_pickle(["t", "y"], path+"train/")
data["val"] = read_pickle(["t", "y"], path+"val/")
data["test"] = read_pickle(["t", "y"], path+"test/")
return data
def preprocess_data(data: DataDict) -> DataDict:
data["train"], train_stats = _preprocess_data(data["train"])
data["val"], _ = _preprocess_data(data["val"], train_stats)
data["test"], _ = _preprocess_data(data["test"], train_stats)
return data
def add_noise(data: DataDict, sig: float, seed: int) -> DataDict:
np.random.seed(seed)
for i in range(len(data["train"]["y"])):
data["train"]["y"][i] += np.random.randn(*data["train"]["y"][i].shape) * sig
for i in range(len(data["val"]["y"])):
data["val"]["y"][i] += np.random.randn(*data["val"]["y"][i].shape) * sig
for i in range(len(data["test"]["y"])):
data["test"]["y"][i] += np.random.randn(*data["test"]["y"][i].shape) * sig
return data
def to_tensors(data: DataDict, device=None) -> TensorDataDict:
tensor_data = {}
tensor_data["train"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["train"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["train"]["y"]],
}
tensor_data["val"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["val"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["val"]["y"]],
}
tensor_data["test"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["test"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["test"]["y"]],
}
return tensor_data
def create_dataloaders(
param: SimpleNamespace,
train_dataset: TrajectoryDataset,
val_dataset: TrajectoryDataset,
test_dataset: TrajectoryDataset
) -> tuple[DataLoader, ...]:
train_loader = DataLoader(
train_dataset,
batch_size=param.batch_size,
shuffle=True,
pin_memory=False,
)
val_loader = DataLoader(val_dataset, batch_size=param.batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=param.batch_size, shuffle=False)
return train_loader, val_loader, test_loader
def _preprocess_data(
data: dict[str, list],
stats: Union[None, dict] = None
) -> tuple[dict[str, list], Union[None, dict]]:
is_train = stats is None
if is_train:
stats = {
"T_max": np.max([np.max(ti) for ti in data["t"]]),
"y_max": np.max([np.max(yi) for yi in data["y"]]),
}
for i in range(len(data["t"])):
# Normalize time grid.
# data["t"][i] = data["t"][i].astype(np.float64) / stats["T_max"]
# Normalize images.
data["y"][i] = data["y"][i].astype(np.float32) / stats["y_max"]
# Swap last two dimensions for compatibility with (S, M, N, D) dimensions.
data["y"][i] = np.transpose(data["y"][i], (0, 2, 1))
if is_train:
return data, stats
else:
return data, None
def read_pickle(keys: list[str], path: str = "./") -> dict[str, ndarray]:
data_dict = {}
for key in keys:
with open(path+key+".pkl", "rb") as f:
data_dict[key] = pickle.load(f)
return data_dict
def get_model_components(
param: SimpleNamespace,
) -> tuple[NeuralDecoder, ODETransitionFunction, RecognitionNet]:
nonlins = {
"relu": nn.ReLU,
"tanh": nn.Tanh,
"gelu": nn.GELU,
"mish": nn.Mish,
"sine": Sine,
}
# Decoder.
g = NeuralDecoder(
decoder=nn.Sequential(
CNNDecoder(param.K, param.N, param.D, 2, param.g_cnn_channels),
ToNormalParameters(param.sigY),
),
)
# Transition function and recognition network.
solver_kwargs = {
"method": param.solver,
"rtol": param.rtol,
"atol": param.atol,
"adjoint": param.adjoint,
"options": {"step_size": 0.2},
}
if param.dyn_order == 1:
F = ODETransitionFunction(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNet(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K),
phi_tau=nn.Linear(param.m_h*param.K, param.K),
tau_min=param.tau_min,
)
elif param.dyn_order == 2:
assert param.K % 2 == 0, "Latent dimension `K` must be divisible by 2."
F = ODETransitionFunctionSecondOrder(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K//2)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNetSecondOrder(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_agg_dyn=create_agg_net(param, "dynamic"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K//2),
phi_gamma_dyn=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau_dyn=nn.Linear(param.m_h*param.K, param.K//2),
tau_min=param.tau_min,
)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
return g, F, h
def create_elbo(
g: NeuralDecoder,
F: ODETransitionFunction,
h: RecognitionNet,
param: SimpleNamespace
) -> AmortizedMultipleShootingELBO:
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([param.K]), False),
"sig0": Parameter(1.0 * torch.ones([param.K]), False),
"sigXi": Parameter(param.Xi / param.K**0.5 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(torch.cat([par.detach().reshape(-1) for par in g.parameters()])),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(torch.cat([par.detach().reshape(-1) for par in F.parameters()])),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
})
if param.dyn_order == 1:
p = ModelNormal(prior_param_dict, g, F)
elif param.dyn_order == 2:
p = ModelNormalSecondOrder(prior_param_dict, g, F)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
q = AmortizedMultipleShootingPosterior(posterior_param_dict, F, h)
elbo = AmortizedMultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def visualize_trajectories(
traj: list[ndarray],
vis_inds: list[int],
title: str,
path: str,
img_name: str,
) -> None:
if not os.path.isdir(path):
os.makedirs(path)
img_size = 32
panel_size = 5
n_row = len(traj)
n_col = len(vis_inds)
fig, ax = plt.subplots(n_row, n_col, figsize=(panel_size*n_col, panel_size*n_row), squeeze=False)
for i in range(n_row):
for j in range(n_col):
ax[i, j].imshow(traj[i][0, vis_inds[j], :, 0].reshape(img_size, img_size)) # type: ignore
ax[i, j].grid(False) # type: ignore
# fig.colorbar(im, ax=ax[i, j], orientation='vertical') # type: ignore
fig.suptitle(title, fontsize=45)
fig.tight_layout()
plt.savefig(path+img_name)
plt.close()
class ToNormalParameters(Module):
"""Converts output of CNNDecoder to parameters of p(y|x)."""
def __init__(self, sigY) -> None:
super().__init__()
self.sigY = sigY
def forward(self, x):
x[..., 0] = torch.sigmoid(x[..., 0]) # to keep mean \in (0, 1)
x[..., 1] = self.sigY # fix standard deviation
return x
def get_data_transform():
transform = torchvision.transforms.Compose(
[
torchvision.transforms.RandomVerticalFlip(p=0.5),
torchvision.transforms.RandomHorizontalFlip(p=0.5),
]
)
def apply_transform(y: Tensor) -> Tensor:
_, m, n, d = y.shape
h, w = int(n**0.5), int(n**0.5)
y = rearrange(y, "s m (h w) d -> s (m d) h w", h=h, w=w)
y = transform(y)
y = rearrange(y, "s (m d) h w -> s m (h w) d", m=m, d=d)
return y
return apply_transform
def get_scheduler(optimizer, n_iters, lr):
sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=(1e-5/lr)**(1.0/n_iters))
return sched
| 16,068 | 39.992347 | 176 | py |
msvi | msvi-main/msvi/utils/rmnist.py | import os
import pickle
import argparse
from typing import Union
from types import SimpleNamespace
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from msvi.model import ModelNormal, ModelNormalSecondOrder
from msvi.posterior import AmortizedMultipleShootingPosterior
from msvi.elbo import AmortizedMultipleShootingELBO
from msvi.decoder import NeuralDecoder
from msvi.trans_func import ODETransitionFunction, ODETransitionFunctionSecondOrder
from msvi.rec_net import RecognitionNet, RecognitionNetSecondOrder
from msvi.dataset import TrajectoryDataset
from msvi.utils.utils import create_agg_net, Sine, CNNEncoder, CNNDecoder
plt.style.use("seaborn") # type: ignore
sns.set_style("whitegrid")
ndarray = np.ndarray
Tensor = torch.Tensor
Sequential = nn.Sequential
DataDict = dict[str, dict[str, list]]
TensorDataDict = dict[str, dict[str, list[Tensor]]]
Module = nn.Module
DATASET_NAME = "RMNIST"
def create_argparser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# Data.
parser.add_argument("--data_folder", type=str, default="./experiments/data/datasets/rmnist/", help="Path to the dataset.")
parser.add_argument("--N", type=int, default=1024, help="Number of observation points.")
parser.add_argument("--D", type=int, default=1, help="Dimensionality of observation.")
parser.add_argument("--max_len", type=int, default=None, help="Truncation length for trajectories.")
parser.add_argument("--sigY", type=float, default=1e-3, help="Observation noise.")
# Model (common).
parser.add_argument("--K", type=int, default=32, help="Latent space dimension.")
parser.add_argument("--Xi", type=float, default=1e-4, help="Used to set variance for the continuity prior.")
parser.add_argument("--block_size", type=int, default=1, help="Number of time points in each block.")
# Model (g).
parser.add_argument("--g_cnn_channels", type=int, default=16, help="Channels in CNNDecoder.")
# Model (F).
parser.add_argument("--m_F", type=int, default=16, help="Dimensionality scaler for F.")
parser.add_argument("--F_nonlin", type=str, default="relu", help="Nonlinearity for F.")
parser.add_argument("--dyn_order", type=int, default=2, help="Order of the dynamcis function, must be 1 or 2.")
# Model (h).
parser.add_argument("--m_h", type=int, default=4, help="Dimensionality scaler for h.")
parser.add_argument("--h_enc_cnn_channels", type=int, default=16, help="Channels in CNNEncoder.")
parser.add_argument("--h_agg_attn", type=str, default="tdp", help="Attention type (dp, t, tdp, tdp_b).")
parser.add_argument("--h_agg_pos_enc", type=str, default="rpeNN", help="Position encoding type (csc, rpeNN, rpeInterp).")
parser.add_argument("--h_agg_stat_layers", type=int, default=4, help="Number of TFEncoder layers in static aggregation net.")
parser.add_argument("--h_agg_dyn_layers", type=int, default=8, help="Number of TFEncoder layers in dynamic aggregation net.")
parser.add_argument("--h_agg_max_tokens", type=int, default=51, help="Maximum expected number of tokens.")
parser.add_argument("--h_agg_max_time", type=float, default=2.0, help="Maximum expected observation time.")
parser.add_argument("--h_agg_delta_r", type=float, default=0.3, help="Attention time span at training time.")
parser.add_argument("--h_agg_p", type=float, default=-1, help="Exponent for temporal attention (use -1 for p=inf).")
parser.add_argument("--n", type=int, default=1, help="Number of nearest neighbors used for baseline aggregation net.")
parser.add_argument("--drop_prob", type=float, default=0.1, help="Attention dropout probability.") # 0.1
parser.add_argument("--tau_min", type=float, default=2e-2, help="Lower bound on the variance of q(s_i).") # 2e-2
parser.add_argument("--sigT", type=float, default=0.0, help="Scale of the noise added to the time grids for temporal neighborhood adjustment.") # 0.0004
# Training/validation/testing.
parser.add_argument("--scaler", type=float, default=1, help="Scaler for ELBO L2 term.")
parser.add_argument("--n_iters", type=int, default=300000, help="Number of training iterations.")
parser.add_argument("--lr", type=float, default=3e-4, help="Learning rate.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size.")
parser.add_argument("--solver", type=str, default="dopri5", help="Name of the ODE solver (see torchdiffeq).")
parser.add_argument("--rtol", type=float, default=1e-5, help="Relative tolerance for ODE solver.")
parser.add_argument("--atol", type=float, default=1e-5, help="Absolute tolerance for ODE solver.")
parser.add_argument("--adjoint", type=int, default=0, help="Use adjoint to evaluate gradient flag (0 - no, 1 - yes).")
parser.add_argument("--device", type=str, default="cuda", help="Device (cpu or cuda)")
parser.add_argument("--seed", type=int, default=13, help="Random seed.")
parser.add_argument("--group", default="None", help="Group for wandb.")
parser.add_argument("--tags", default=["no_tag"], nargs="+", help="Tags for wandb.")
parser.add_argument("--name", type=str, default="tmp", help="Name of the run.")
parser.add_argument("--visualize", type=int, default=1, help="Visualize predictions on validation set flag (0 - no, 1 - yes).")
parser.add_argument("--n_mc_samples", type=int, default=10, help="Number of samples for Monte Carlo integration.")
parser.add_argument("--delta_inf", type=float, default=0.3, help="Attention time span at test time.")
parser.add_argument("--model_folder", type=str, default="./models/rmnist/", help="Folder for saving/loading models.")
return parser
def create_datasets(param: SimpleNamespace, device=None) -> tuple[TrajectoryDataset, ...]:
data = read_data(param.data_folder)
data = preprocess_data(data)
data = to_tensors(data, device)
train_dataset = TrajectoryDataset(data["train"]["t"], data["train"]["y"], param.max_len)
val_dataset = TrajectoryDataset(data["val"]["t"], data["val"]["y"], param.max_len)
test_dataset = TrajectoryDataset(data["test"]["t"], data["test"]["y"], param.max_len)
return train_dataset, val_dataset, test_dataset
def read_data(path: str) -> DataDict:
"""Reads data from folder `path` which contains subfolders train, val and test.
Each subfolder contains ndarrays with time grids and trajectories stored as
t.pkl and y.pkl files."""
data = {}
data["train"] = read_pickle(["t", "y"], path+"train/")
data["val"] = read_pickle(["t", "y"], path+"val/")
data["test"] = read_pickle(["t", "y"], path+"test/")
return data
def preprocess_data(data: DataDict) -> DataDict:
data["train"], train_stats = _preprocess_data(data["train"])
data["val"], _ = _preprocess_data(data["val"], train_stats)
data["test"], _ = _preprocess_data(data["test"], train_stats)
return data
def add_noise(data: DataDict, sig: float, seed: int) -> DataDict:
np.random.seed(seed)
for i in range(len(data["train"]["y"])):
data["train"]["y"][i] += np.random.randn(*data["train"]["y"][i].shape) * sig
for i in range(len(data["val"]["y"])):
data["val"]["y"][i] += np.random.randn(*data["val"]["y"][i].shape) * sig
for i in range(len(data["test"]["y"])):
data["test"]["y"][i] += np.random.randn(*data["test"]["y"][i].shape) * sig
return data
def to_tensors(data: DataDict, device=None) -> TensorDataDict:
tensor_data = {}
tensor_data["train"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["train"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["train"]["y"]],
}
tensor_data["val"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["val"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["val"]["y"]],
}
tensor_data["test"] = {
"t": [torch.tensor(ti, dtype=torch.float64).to(device) for ti in data["test"]["t"]],
"y": [torch.tensor(yi, dtype=torch.float32).to(device) for yi in data["test"]["y"]],
}
return tensor_data
def create_dataloaders(
param: SimpleNamespace,
train_dataset: TrajectoryDataset,
val_dataset: TrajectoryDataset,
test_dataset: TrajectoryDataset
) -> tuple[DataLoader, ...]:
train_loader = DataLoader(
train_dataset,
batch_size=param.batch_size,
shuffle=True,
pin_memory=False,
)
val_loader = DataLoader(val_dataset, batch_size=param.batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=param.batch_size, shuffle=False)
return train_loader, val_loader, test_loader
def _preprocess_data(
data: dict[str, list],
stats: Union[None, dict] = None
) -> tuple[dict[str, list], Union[None, dict]]:
is_train = stats is None
if is_train:
stats = {
"T_max": np.max([np.max(ti) for ti in data["t"]]),
"y_max": np.max([np.max(yi) for yi in data["y"]]),
}
for i in range(len(data["t"])):
# Normalize time grid.
# data["t"][i] = data["t"][i].astype(np.float64) / stats["T_max"]
# Normalize images.
data["y"][i] = data["y"][i].astype(np.float32) / stats["y_max"]
# Swap last two dimensions for compatibility with (S, M, N, D) dimensions.
data["y"][i] = np.transpose(data["y"][i], (0, 2, 1))
if is_train:
return data, stats
else:
return data, None
def read_pickle(keys: list[str], path: str = "./") -> dict[str, ndarray]:
data_dict = {}
for key in keys:
with open(path+key+".pkl", "rb") as f:
data_dict[key] = pickle.load(f)
return data_dict
def get_model_components(
param: SimpleNamespace,
) -> tuple[NeuralDecoder, ODETransitionFunction, RecognitionNet]:
nonlins = {
"relu": nn.ReLU,
"tanh": nn.Tanh,
"gelu": nn.GELU,
"mish": nn.Mish,
"sine": Sine,
}
# Decoder.
g = NeuralDecoder(
decoder=nn.Sequential(
CNNDecoder(param.K, param.N, param.D, 2, param.g_cnn_channels),
ToNormalParameters(param.sigY),
),
)
# Transition function and recognition network.
solver_kwargs = {
"method": param.solver,
"rtol": param.rtol,
"atol": param.atol,
"adjoint": param.adjoint,
"options": {"step_size": 0.2},
}
if param.dyn_order == 1:
F = ODETransitionFunction(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNet(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K),
phi_tau=nn.Linear(param.m_h*param.K, param.K),
tau_min=param.tau_min,
)
elif param.dyn_order == 2:
assert param.K % 2 == 0, "Latent dimension `K` must be divisible by 2."
F = ODETransitionFunctionSecondOrder(
f=nn.Sequential(
nn.Linear(param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.m_F*param.K), nonlins[param.F_nonlin](),
nn.Linear(param.m_F*param.K, param.K//2)
),
layers_to_count=[],
solver_kwargs=solver_kwargs
)
h = RecognitionNetSecondOrder(
phi_enc=CNNEncoder(param.m_h*param.K, param.N, param.D, param.h_enc_cnn_channels),
phi_agg=create_agg_net(param, "static"),
phi_agg_dyn=create_agg_net(param, "dynamic"),
phi_gamma=nn.Linear(param.m_h*param.K, param.K//2),
phi_gamma_dyn=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau=nn.Linear(param.m_h*param.K, param.K//2),
phi_tau_dyn=nn.Linear(param.m_h*param.K, param.K//2),
tau_min=param.tau_min,
)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
return g, F, h
def create_elbo(
g: NeuralDecoder,
F: ODETransitionFunction,
h: RecognitionNet,
param: SimpleNamespace
) -> AmortizedMultipleShootingELBO:
prior_param_dict = nn.ParameterDict({
"mu0": Parameter(0.0 * torch.ones([param.K]), False),
"sig0": Parameter(1.0 * torch.ones([param.K]), False),
"sigXi": Parameter(param.Xi / param.K**0.5 * torch.ones([1]), False),
"mu_theta": Parameter(0.0 * torch.ones([1]), False),
"sig_theta": Parameter(1.0 * torch.ones([1]), False),
})
posterior_param_dict = nn.ParameterDict({
"mu_theta_g": Parameter(torch.cat([par.detach().reshape(-1) for par in g.parameters()])),
"log_sig_theta_g": Parameter(-7.0 * torch.ones(g.param_count())),
"mu_theta_F": Parameter(torch.cat([par.detach().reshape(-1) for par in F.parameters()])),
"log_sig_theta_F": Parameter(-7.0 * torch.ones(F.param_count())),
})
if param.dyn_order == 1:
p = ModelNormal(prior_param_dict, g, F)
elif param.dyn_order == 2:
p = ModelNormalSecondOrder(prior_param_dict, g, F)
else:
raise RuntimeError("Wrong dynamics order. Must be 1 or 2.")
q = AmortizedMultipleShootingPosterior(posterior_param_dict, F, h)
elbo = AmortizedMultipleShootingELBO(p, q)
elbo.p.set_theta(elbo.q.sample_theta())
return elbo
def visualize_trajectories(
traj: list[ndarray],
vis_inds: list[int],
title: str,
path: str,
img_name: str,
) -> None:
if not os.path.isdir(path):
os.makedirs(path)
img_size = 32
panel_size = 5
n_row = len(traj)
n_col = len(vis_inds)
fig, ax = plt.subplots(n_row, n_col, figsize=(panel_size*n_col, panel_size*n_row), squeeze=False)
for i in range(n_row):
for j in range(n_col):
ax[i, j].imshow(traj[i][0, vis_inds[j], :, 0].reshape(img_size, img_size)) # type: ignore
ax[i, j].grid(False) # type: ignore
# fig.colorbar(im, ax=ax[i, j], orientation='vertical') # type: ignore
fig.suptitle(title, fontsize=45)
fig.tight_layout()
plt.savefig(path+img_name)
plt.close()
class ToNormalParameters(Module):
"""Converts output of CNNDecoder to parameters of p(y|x)."""
def __init__(self, sigY) -> None:
super().__init__()
self.sigY = sigY
def forward(self, x):
x[..., 0] = torch.sigmoid(x[..., 0]) # to keep mean \in (0, 1)
x[..., 1] = self.sigY # fix standard deviation
return x
def get_data_transform():
def apply_transform(y: Tensor) -> Tensor:
return y
return apply_transform
def get_scheduler(optimizer, n_iters, lr):
sched = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=(1e-5/lr)**(1.0/n_iters))
return sched
| 15,338 | 40.013369 | 157 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/backbone/Backbone.py | import os
import torch
import torch.nn as nn
from util.model.initialize_load_r21d import initialize_load_model
from util.loader.LUMC_A4C.loader_vid import create_dataloader
from util.checkpoint.checkpoint_train import checkpoint_train
from util.checkpoint.checkpoint_test import checkpoint_test
from torch.utils.tensorboard import SummaryWriter
from datetime import datetime
from tqdm import tqdm
import sys, traceback
from shutil import copyfile
class Backbone():
def __init__(self, save_folder, epo_iter, forward, task='clas',
create_dataloader=create_dataloader, initialize_load_model=initialize_load_model,
checkpoint_train=checkpoint_train, checkpoint_test = checkpoint_test,
optimizer=torch.optim.Adam, lr=1e-4, wd=1e-8, loss_accu_period=1,
log_val_only=True, eval_per_iter=False):
assert task in ['clas', 'seg', 'regres']
# Variables
self.save_folder = save_folder
self.epo_iter = epo_iter
self.task = task
self.optimizer = optimizer
self.lr = lr
self.wd = wd
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.loss_accu_period = loss_accu_period
self.log_val_only = log_val_only
self.eval_per_iter = eval_per_iter
# Functions
self.forward = forward
self.create_dataloader = create_dataloader
self.initialize_load_model = initialize_load_model
self.checkpoint_train = checkpoint_train
self.checkpoint_test = checkpoint_test
def batch_train(self, model, batch, i, opt, loss_running_accu, loss_accu_period, **kwargs):
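        # Gradient accumulation: loss.backward() is called for every mini-batch, but opt.step()
        # and opt.zero_grad() only run every `loss_accu_period` batches, so the effective batch
        # size is batch_size * loss_accu_period. `one_batch` carries the averaged loss at update
        # time and 0 otherwise.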
loss, _ = self.forward(batch, model, self.device, return_one_batch=False, **kwargs)
loss.backward()
loss_running_accu += loss.item()
one_batch = 0
if (i + 1) % loss_accu_period == 0:
loss_running_accu = loss_running_accu / loss_accu_period
opt.step()
opt.zero_grad()
one_batch = loss_running_accu
loss_running_accu = 0
return model, one_batch, loss_running_accu, opt
def whole_eval(self, model, dataloader, **kwargs):
model.eval()
one_epoch = []
for i, batch in enumerate(dataloader):
loss, one_batch = self.forward(batch, model, self.device, return_one_batch=True, **kwargs)
one_epoch.append(one_batch)
return one_epoch
def run_val(self, i, itr, epo, num_batch, num_epo, subfolder, one_epoch_train,
model, dataloader_val, dataloader_test, **kwargs):
one_epoch_val = self.whole_eval(model, dataloader_val, **kwargs)
one_epoch_test = [] if self.log_val_only else self.whole_eval(model, dataloader_test, **kwargs)
is_first_update = (self.eval_per_iter & (itr == 0)) | ((not self.eval_per_iter) & (epo == 0))
is_last_update = itr == ((num_batch // self.loss_accu_period) * num_epo) - 1
self.checkpoint_train(itr, one_epoch_train, one_epoch_val, one_epoch_test, model,
self.save_folder, subfolder, epo, is_first_update, is_last_update,
self.writer, self.log_val_only, self.task, **kwargs)
def run_test(self, dataloader_test, subfolder, **kwargs):
model, _ = self.initialize_load_model(mode='test', model_path=self.save_folder+'train/model_val_min.pth', device=self.device, **kwargs)
one_epoch = self.whole_eval(model, dataloader_test, **kwargs)
self.checkpoint_test(one_epoch, model, self.save_folder, subfolder, self.task, **kwargs)
def run(self, workflow='complete', subfolder='default', verbose=False, **kwargs):
try:
n = datetime.now()
assert workflow in ['complete', 'train', 'test']
dataloader_train = self.create_dataloader(mode='train', **kwargs) #Have to be initialized here since kwargs are needed
dataloader_val = self.create_dataloader(mode='val', **kwargs)
dataloader_test = self.create_dataloader(mode='test', **kwargs)
## [Training]
if (workflow == 'complete') | (workflow == 'train'):
num_batch, num_epo = len(dataloader_train), self.epo_iter.stop
assert num_batch % self.loss_accu_period == 0
model, param = self.initialize_load_model(mode='train', device=self.device, **kwargs)
opt = self.optimizer(param, lr=self.lr, weight_decay=self.wd)
opt.zero_grad() # Do zero_grad() here because of the gradient accumulation feature
self.writer = SummaryWriter(self.save_folder)
if verbose: print('Training initialization time: ', datetime.now() - n,'='*100)
n = datetime.now()
for epo in tqdm(self.epo_iter, ncols=0):
one_epoch_train, loss_running_accu = [], 0
for i, batch in enumerate(dataloader_train):
wt = (datetime.now() - n).total_seconds()
if verbose&(wt>2): print('\n Batch loading waiting time ', wt)
# itr counts the number of updates. When loss accumulation is used, itr would be different to i.
itr = i//self.loss_accu_period + epo * (num_batch//self.loss_accu_period)
model, one_batch_train, loss_running_accu, opt = self.batch_train(model, batch, i, opt,
loss_running_accu, self.loss_accu_period, **kwargs)
# Log training loss of one (full) batch for calculation of averaged training loss later on.
if (i+1)%self.loss_accu_period == 0:
one_epoch_train.append(one_batch_train)
## [Validation]:
                        # Run validation if eval_per_iter & end of a batch; or NOT eval_per_iter & end of an epoch
if (
(self.eval_per_iter & ((i+1)%self.loss_accu_period == 0)) or
((not self.eval_per_iter) & ((i+1)/self.loss_accu_period == num_batch//self.loss_accu_period))
):
self.run_val(i, itr, epo, num_batch, num_epo, subfolder, one_epoch_train,
model, dataloader_val, dataloader_test, **kwargs)
one_epoch_train = []
n = datetime.now()
self.writer.flush()
self.writer.close()
## [Testing]
if (workflow == 'complete') | (workflow == 'test'):
self.run_test(dataloader_test, subfolder, **kwargs)
except KeyboardInterrupt:
## [Test on current best if interrupted]
            print('Interrupted at epoch', epo)
# copyfile(self.save_folder + 'train/log_tmp.csv', self.save_folder + 'train/log_' + str(epo) + '.csv')
# epo_iter = range(epo+1)
self.run_test(dataloader_test, subfolder, **kwargs)
sys.exit(0) | 7,196 | 48.979167 | 143 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/checkpoint/checkpoint_test.py | import os
import numpy as np
import torch
from util.checkpoint.create_header import create_header_clas, create_header_seg, create_header_regres
from util.eval.eval import one_epoch_avg_clas, one_epoch_avg_seg, one_epoch_avg_regres
def checkpoint_test(one_epoch, model, save_folder, subfolder, task,
header_train=None, header_eval=None, one_epoch_avg=None, **kwargs):
mode = 'test'
create_header = globals()['create_header_'+task]
if one_epoch_avg is None:
one_epoch_avg = globals()['one_epoch_avg_'+task]
if subfolder == 'default': subfolder = mode
save_subfolder = save_folder + subfolder
os.makedirs(save_subfolder, exist_ok=True)
    epo = find_epo_test(save_folder, subfolder, **kwargs) # Here epo may actually be itr, since the log may be written once per update
one_epoch_avg = one_epoch_avg(one_epoch)
multi_epo = create_header(mode, None, header_train, header_eval)
multi_epo = np.concatenate([multi_epo, one_epoch_avg], axis=0)
np.savetxt(save_subfolder + '/prediction_' + str(epo) + '.csv', np.asarray(one_epoch), fmt='%s', delimiter=',')
np.savetxt(save_subfolder + '/performance_' + str(epo) + '.csv', np.asarray(multi_epo), fmt='%s', delimiter=',')
print('Epoch: ', epo, '| ', mode, ' | performance: ', one_epoch_avg, '\n')
def find_epo_test(save_folder, subfolder, **kwargs):
# The columns of multi_epo are [itr, train_loss, val_loss_or_early_stop_metric, other_val_metrics_if_any]
multi_epo = np.genfromtxt(save_folder + '/train/log.csv', dtype='str', delimiter=',')
multi_epo = multi_epo[1:,2].astype('float')
epo_test = np.argmin(multi_epo)
min_loss = multi_epo[epo_test]
os.makedirs(save_folder + '/' + subfolder, exist_ok=True)
np.savetxt(save_folder + '/' + subfolder + '/minLoss_'+str(min_loss)+'.txt',[]) # Just to indicate val-loss. Empty file
return epo_test | 1,894 | 50.216216 | 131 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/checkpoint/checkpoint_train.py | import os
import numpy as np
import torch
from util.checkpoint.create_header import create_header_clas, create_header_seg, create_header_regres
from util.eval.eval import one_epoch_avg_clas, one_epoch_avg_seg, one_epoch_avg_regres
def checkpoint_train(itr, one_epoch_train, one_epoch_val, one_epoch_test, model, save_folder, subfolder, epo, is_first_update, is_last_update, writer, log_val_only,
task, header_train=None, header_eval=None, one_epoch_avg=None, **kwargs):
mode = 'train'
create_header = globals()['create_header_'+task]
if one_epoch_avg is None:
one_epoch_avg = globals()['one_epoch_avg_'+task]
    save_subfolder = save_folder + mode # From now on the training log is always stored in the folder "train"
os.makedirs(save_subfolder, exist_ok=True)
train_avg = np.mean(one_epoch_train)
header = create_header(mode, log_val_only, header_train, header_eval)
NUM_METRICS_TO_LOG = len(header[0]) - 2
if log_val_only:
# For clas, one_epoch contains appended [ID[0], loss.item(), y_true, y_pred]
# one_epoch_avg returns numpy array of shape (1,-1) containing [loss, acc, prec, rec, f1]
        # For seg, one_epoch contains appended [ID[0], loss, 'dice', 'iou', 'precision', 'recall']
# one_epoch_avg returns its average, with shape (1, -1)
val_avg = one_epoch_avg(one_epoch_val)
one_epoch_log = [itr, train_avg] + list(val_avg.reshape(-1,))[:NUM_METRICS_TO_LOG]
else:
val_avg = one_epoch_avg(one_epoch_val)
test_avg = one_epoch_avg(one_epoch_test)
        one_epoch_log = [itr, train_avg] + list(val_avg.reshape(-1,))[:NUM_METRICS_TO_LOG//2] + list(test_avg.reshape(-1,))[:NUM_METRICS_TO_LOG//2]
logging(one_epoch_log, header, writer, save_subfolder, is_first_update, log_val_only, model, **kwargs)
# if is_last_update:
# os.rename(save_subfolder + '/log_tmp.csv', save_subfolder + '/log_' + str(epo) + '.csv')
# # os.rename(save_subfolder + '/individual_pred_tmp.csv', save_subfolder + '/individual_pred_' + str(epo) + '.csv')
print('Epoch: ', epo, '| training | performance: ', one_epoch_log, '\n')
def logging(one_epoch_log, header, writer, save_subfolder, is_first_update, log_val_only, model, **kwargs):
"""
1) Log performance to csv & tensorboard.
2) Determine if has validation loss minimum.
"""
def compare(one_epoch_log, multi_epo):
current = one_epoch_log[2]
history_min = min(multi_epo[1:,2].astype('float'))
if current < history_min:
has_min_val = True
else:
has_min_val = False
return has_min_val
# Write to tensorboard
itr = one_epoch_log[0]
assert len(header[0]) == len(one_epoch_log)
for i in range(1,len(header[0])):
writer.add_scalar(header[0,i], one_epoch_log[i], itr)
# Write to csv file & Save model if has val-loss minimum
csv_name = save_subfolder+'/log.csv'
if is_first_update:
multi_epo = header
has_min_val = True
else:
multi_epo = np.genfromtxt(csv_name, dtype='str', delimiter=',')
has_min_val = compare(one_epoch_log, multi_epo)
one_epoch_log = np.asarray(one_epoch_log).reshape(1, -1)
multi_epo = np.concatenate([multi_epo, one_epoch_log], axis=0)
np.savetxt(csv_name, np.asarray(multi_epo), fmt='%s', delimiter=',')
if has_min_val: torch.save(model.state_dict(), save_subfolder + '/model_val_min.pth')
| 3,481 | 43.641026 | 164 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/loader/LUMC_A4C/loader_vid.py | import numpy as np
import torch
from torch.utils.data.dataset import Dataset
import random
from skimage.transform import rotate
class loader(Dataset):
def __init__(self, X_list, aug=False, rgb_channel=3, **kwargs):
self.X_list = X_list
self.aug = aug
self.rgb_channel = rgb_channel
def __getitem__(self, index):
filepath = self.X_list[index]
X = np.load(filepath)
# Replace with own loader. Output X should have size [channel=3, num_frame=30, x_dimension=112, y_dimension=112]
        X = torch.from_numpy(X).float()
        # Placeholder label and sample ID (assumptions for this stub; replace with dataset-specific
        # values): the training code expects an integer class label Y and a string identifier ID.
        Y = torch.tensor(0)
        ID = filepath
        return X, Y, ID
def __len__(self):
return len(self.X_list)
def create_dataloader(mode, batch_size=16, num_workers=[4, 4], data_folder='../data/LUMC_A4C/ver3/',
split_folder='split_000all_400_401/', **kwargs):
X_list = np.load(data_folder + split_folder + '/' + mode + '_list_RGB.npy').tolist()
if mode == 'train':
data = loader(X_list, aug=True, **kwargs)
dataloader = torch.utils.data.DataLoader(dataset=data, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=num_workers[0], pin_memory=True)
elif (mode == 'val') | (mode == 'test'):
data = loader(X_list, aug=False, **kwargs)
dataloader = torch.utils.data.DataLoader(dataset=data, batch_size=1, shuffle=False, drop_last=False, num_workers=num_workers[1], pin_memory=True)
return dataloader
# if __name__ == '__main__':
# dataloader = create_dataloader('train')
# print(len(dataloader))
# for i, batch in enumerate(dataloader):
# print(batch[0].shape, batch[1].shape, batch[2][0]) | 1,640 | 36.295455 | 160 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/util/util/model/initialize_load_r21d.py | import torch
import torch.nn as nn
import torchvision
def initialize_load_model(mode, model_path='scratch', in_channel=3, out_channel=3, device="cuda", **kwargs):
def r21d(in_channel, out_channel, pretrain=False, echo_pretrain=False):
model = torchvision.models.video.__dict__["r2plus1d_18"](pretrained=pretrain)
if in_channel == 1: model.stem[0] = nn.Conv3d(1, 45, kernel_size=(1, 7, 7), stride=(1, 2, 2), padding=(0, 3, 3), bias=False)
model.fc = nn.Linear(model.fc.in_features, out_channel)
return model
if model_path == 'pretrain':
model = r21d(in_channel, out_channel, pretrain=True)
elif model_path == 'scratch':
model = r21d(in_channel, out_channel)
else:
model = r21d(in_channel, out_channel)
model.load_state_dict(torch.load(model_path))
model.to(device)
param = model.parameters()
if mode == 'train':
model.train()
else:
model.eval()
return model, param | 984 | 34.178571 | 132 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/projectDDDIF/main.py | import os
import torch
import torch.nn as nn
# import numpy as np
from util.backbone.Backbone import Backbone
from util.loader.LUMC_A4C.loader_vid import create_dataloader
from util.model.initialize_load_r21d import initialize_load_model
from analyze import analyze
def forward(batch, model, device, return_one_batch, criterion=nn.CrossEntropyLoss(), **kwargs):
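    # Run one batch through the model and compute the cross-entropy loss; when `return_one_batch` is set
    # (single-sample evaluation), also return [ID, loss, true label, predicted label, softmax prob. of class 1].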
X, Y, ID = batch
X = X.to(device)
Y = Y.to(device).view(-1).long()
out_logit = model(X)
loss = criterion(out_logit, Y)
if return_one_batch:
sf = nn.Softmax(1)
output = sf(out_logit)
y_pred = output.argmax(1).item()
y_true = Y.item()
one_batch = [ID[0], loss.item(), y_true, y_pred, output[:, 1].item()]
# analyze(X, [y_true], model, 1, 'model/DeepLIFT/', ID[0])
return loss, one_batch
else:
return loss, []
if __name__ == '__main__':
root = './model/regurg/'
kwargs = {'in_channel': 3, 'out_channel': 3}
b = Backbone(root, range(200), forward,
create_dataloader=create_dataloader, initialize_load_model=initialize_load_model)
b.run(**kwargs)
| 1,139 | 26.804878 | 98 | py |
Disease-Detection-and-Diagnostic-Image-Feature | Disease-Detection-and-Diagnostic-Image-Feature-main/projectDDDIF/analyze.py | import os
import torch
import torch.nn as nn
import torchvision
import matplotlib.pyplot as plt
from skimage.transform import resize
import numpy as np
import cv2
# import cmapy
from pydicom import dcmread
from pydicom.uid import ExplicitVRLittleEndian
from captum.attr import GradientShap, DeepLift, DeepLiftShap, IntegratedGradients, GuidedGradCam, NoiseTunnel, Saliency, GuidedBackprop
def to_0_255(x):
return (x-x.min())/(x.max()-x.min())*255
def write_dcm(raw, x, path):
# Requires x of shape (t,row,col,3)
x = to_0_255(x)
x = x.astype('uint8')
raw.NumberOfFrames = x.shape[0]
raw.Rows = x.shape[1]
raw.Columns = x.shape[2]
raw.PixelData = x.tobytes()
raw.save_as(path)
def show_save_mov(video, save_path, file_type='mp4', norm=False, boundary=None, gray2color=None, fps=5, show=False, insert_text=None):
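    # Save a (t, H, W[, 3]) video array as .mp4/.avi with OpenCV, optionally normalizing/clipping intensities,
    # colour-mapping grayscale frames, overlaying text, and previewing each frame while writing.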
if norm:
if boundary is not None:
video[video > boundary[0]] = boundary[0]
video[video < boundary[1]] = boundary[1]
video = ((video - np.min(video)) / (np.max(video) - np.min(video))) * 255
video = np.asarray(video, dtype='uint8')
frame_delay = int(1000 / fps)
if file_type == 'mp4':
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
elif file_type == 'avi':
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
save_path = save_path + '.' + file_type
out = cv2.VideoWriter(save_path, fourcc, fps, (video.shape[2],video.shape[1]))
for frame in video:
if gray2color is not None:
frame = cv2.applyColorMap(frame, gray2color)
if insert_text is not None:
cv2.putText(frame, insert_text, (2, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
cv2.putText(frame, 'ILV'+' '*21+'AVR', (2, 18), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)
out.write(frame)
if show:
cv2.imshow('frame', frame)
key = cv2.waitKey(frame_delay)
out.release()
cv2.destroyAllWindows()
def get_analyzer(methodID, model, if_smoothGrad):
assert methodID in range(5)
if methodID == 0:
analyzer = Saliency(model)
methodname = '_Saliency'
if methodID == 1:
analyzer = DeepLift(model)
methodname = '_DL'
if methodID == 2:
analyzer = DeepLiftShap(model)
methodname = '_DLshap'
if methodID == 3:
analyzer = GuidedBackprop(model)
methodname = '_GB'
if methodID == 4:
analyzer = GuidedGradCam(model, model.layer4)
methodname = '_GradCAM'
if if_smoothGrad:
analyzer = NoiseTunnel(analyzer)
methodname = methodname+'smo'
return analyzer, methodname
def run_analyze(analyzer, inputs, target):
return analyzer.attribute(inputs=inputs, target=target, baselines=inputs*0)
def post_process(attributions, threshold):
"""Post-process the generated attributions"""
assert threshold in ['abs', 'pos']
if threshold == 'abs':
attributions = abs(attributions)
elif threshold == 'pos':
attributions[attributions<0] = 0
attributions = attributions.cpu().detach().numpy()[0, 0, ...] # remove batch & channel dimension -> [t,x,y]
attributions = np.uint8(to_0_255(attributions))
attributions_color = []
for i, att in enumerate(attributions):
# att = cv2.applyColorMap(att, cv2.COLORMAP_JET) #After this step the shape changes from (112,112) to (112,112,3)
att = cv2.applyColorMap(att, cv2.COLORMAP_HOT)
attributions_color.append(att)
attributions_color = np.stack(attributions_color, axis=0)
assert attributions_color.shape == (30, 112, 112, 3)
return attributions_color
def analyze(X, target_classes, model, methodID, save_dir=None, file_name=None, tail='',
save_vid_type='mp4', save_att_vid=True, save_input_vid=False, save_render_vid=False, save_render_npy=False,
save_dcm=False, save_figs=False, threshold='pos', if_smoothGrad=False):
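    # Compute Captum attributions of `X` for each target class with the selected method, then save
    # (depending on the save_* flags) the input video, heatmap video, overlay render, per-frame PNGs,
    # or the overlay as a .npy array.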
os.makedirs(save_dir, exist_ok=True)
# First, process and save the input X if needed
if save_input_vid | save_render_vid | save_render_npy | save_figs: # Then we would need X
Xrgb = to_0_255(X.cpu().detach().numpy()[0, 0, ...]) # (b,c,t,x,y) -> (t,x,y)
Xrgb = np.stack([Xrgb] * 3, axis=3)
if save_input_vid:
show_save_mov(video=Xrgb, save_path=save_dir + file_name, file_type=save_vid_type)
# Second, run analyze and save if needed
for c in target_classes:
classname = '_class'+str(c)
analyzer, methodname = get_analyzer(methodID, model, if_smoothGrad)
attributions = run_analyze(analyzer, X, c)
attributions_color = post_process(attributions, threshold)
if save_render_vid | save_render_npy | save_figs: # Then we would need "render"
render = attributions_color * 0.7 + Xrgb * 0.3
if save_att_vid:
show_save_mov(video=attributions_color, save_path=save_dir+file_name+tail+methodname+classname, file_type=save_vid_type)
if save_render_vid:
            show_save_mov(video=render, save_path=save_dir+file_name+tail+methodname+classname+'_overlay', file_type=save_vid_type)
if save_render_npy:
np.save(save_dir+file_name+tail+methodname+classname+'_overlay.npy', render)
if save_figs:
for i, (img, att, rnd) in enumerate(zip(Xrgb, attributions_color, render)):
cv2.imwrite(save_dir + file_name + '_' + str(i) + '.png', img)
cv2.imwrite(save_dir + file_name+tail+methodname+classname+'_heatmap_'+str(i)+'.png', att)
cv2.imwrite(save_dir + file_name+tail+methodname+classname+'_render_'+str(i)+'.png', rnd)
| 5,747 | 39.478873 | 135 | py |
MetaHIN | MetaHIN-master/code/main.py | # coding: utf-8
# author: lu yf
# create date: 2019-11-21 17:27
import gc
import glob
import random
import time
import numpy as np
import torch
from HeteML_new import HML
from DataHelper import DataHelper
from tqdm import tqdm
from Config import states
# random.seed(13)
np.random.seed(13)
torch.manual_seed(13)
def training(model, model_save=True, model_file=None, device='cpu'):
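    # Meta-training loop: shuffle the tasks each epoch, split them into batches, run one global (meta)
    # update per batch, log loss/MAE/RMSE/NDCG@5, evaluate every 10 epochs, and optionally save the model.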
print('training model...')
if config['use_cuda']:
model.cuda()
model.train()
batch_size = config['batch_size']
num_epoch = config['num_epoch']
for _ in range(num_epoch): # 20
loss, mae, rmse = [], [], []
ndcg_at_5 = []
start = time.time()
random.shuffle(train_data)
num_batch = int(len(train_data) / batch_size) # ~80
supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s = zip(*train_data) # supp_um_s:(list,list,...,2553)
for i in range(num_batch): # each batch contains some tasks (each task contains a support set and a query set)
support_xs = list(supp_xs_s[batch_size * i:batch_size * (i + 1)])
support_ys = list(supp_ys_s[batch_size * i:batch_size * (i + 1)])
support_mps = list(supp_mps_s[batch_size * i:batch_size * (i + 1)])
query_xs = list(query_xs_s[batch_size * i:batch_size * (i + 1)])
query_ys = list(query_ys_s[batch_size * i:batch_size * (i + 1)])
query_mps = list(query_mps_s[batch_size * i:batch_size * (i + 1)])
_loss, _mae, _rmse, _ndcg_5 = model.global_update(support_xs,support_ys,support_mps,
query_xs,query_ys,query_mps,device)
loss.append(_loss)
mae.append(_mae)
rmse.append(_rmse)
ndcg_at_5.append(_ndcg_5)
print('epoch: {}, loss: {:.6f}, cost time: {:.1f}s, mae: {:.5f}, rmse: {:.5f}, ndcg@5: {:.5f}'.
format(_, np.mean(loss), time.time() - start,
np.mean(mae), np.mean(rmse), np.mean(ndcg_at_5)))
if _ % 10 == 0 and _ != 0:
testing(model, device)
model.train()
if model_save:
print('saving model...')
torch.save(model.state_dict(), model_file)
def testing(model, device='cpu'):
# testing
print('evaluating model...')
if config['use_cuda']:
model.cuda()
model.eval()
for state in states:
if state == 'meta_training':
continue
print(state + '...')
evaluate(model, state, device)
def evaluate(model, state, device='cpu'):
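    # Evaluate one meta-testing state: for every task, adapt on the support set and score the query set;
    # report the mean MAE, RMSE and NDCG@5.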
test_data = data_helper.load_data(data_set=data_set, state=state,
load_from_file=True)
supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s = zip(*test_data) # supp_um_s:(list,list,...,2553)
loss, mae, rmse = [], [], []
ndcg_at_5 = []
for i in range(len(test_data)): # each task
_mae, _rmse, _ndcg_5 = model.evaluation(supp_xs_s[i], supp_ys_s[i], supp_mps_s[i],
query_xs_s[i], query_ys_s[i], query_mps_s[i],device)
mae.append(_mae)
rmse.append(_rmse)
ndcg_at_5.append(_ndcg_5)
print('mae: {:.5f}, rmse: {:.5f}, ndcg@5: {:.5f}'.
format(np.mean(mae), np.mean(rmse),np.mean(ndcg_at_5)))
# print('fine tuning...')
# model.train()
# for i in range(len(test_data)):
# model.fine_tune(supp_xs_s[i], supp_ys_s[i], supp_mps_s[i])
# model.eval()
# for i in range(len(test_data)): # each task
# _mae, _rmse, _ndcg_5 = model.evaluation(supp_xs_s[i], supp_ys_s[i], supp_mps_s[i],
# query_xs_s[i], query_ys_s[i], query_mps_s[i],device)
# mae.append(_mae)
# rmse.append(_rmse)
# ndcg_at_5.append(_ndcg_5)
# print('mae: {:.5f}, rmse: {:.5f}, ndcg@5: {:.5f}'.
# format(np.mean(mae), np.mean(rmse), np.mean(ndcg_at_5)))
if __name__ == "__main__":
# data_set = 'dbook'
data_set = 'movielens'
# data_set = 'yelp'
input_dir = '../data/'
output_dir = '../data/'
res_dir = '../res/'+data_set
load_model = False
if data_set == 'movielens':
from Config import config_ml as config
elif data_set == 'yelp':
from Config import config_yelp as config
elif data_set == 'dbook':
from Config import config_db as config
cuda_or_cpu = torch.device("cuda" if config['use_cuda'] else "cpu")
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
print(config)
model_filename = "{}/hml.pkl".format(res_dir)
data_helper = DataHelper(input_dir, output_dir, config)
# training model.
model_name = 'mp_update'
# model_name = 'mp_MAML'
# model_name = 'mp_update_multi_MAML'
# model_name = 'mp_update_no_f'
# model_name = 'no_MAML'
# model_name = 'no_MAML_with_finetuning'
hml = HML(config, model_name)
print('--------------- {} ---------------'.format(model_name))
if not load_model:
# Load training dataset
print('loading train data...')
train_data = data_helper.load_data(data_set=data_set,state='meta_training',load_from_file=True)
# print('loading warm data...')
# warm_data = data_helper.load_data(data_set=data_set, state='warm_up',load_from_file=True)
training(hml, model_save=True, model_file=model_filename,device=cuda_or_cpu)
else:
trained_state_dict = torch.load(model_filename)
hml.load_state_dict(trained_state_dict)
# testing
testing(hml, device=cuda_or_cpu)
print('--------------- {} ---------------'.format(model_name))
| 5,711 | 35.615385 | 130 | py |
MetaHIN | MetaHIN-master/code/MetaLearner_new.py | # coding: utf-8
# author: lu yf
# create date: 2019-12-10 14:25
import torch
from torch.nn import functional as F
class MetaLearner(torch.nn.Module):
def __init__(self,config):
super(MetaLearner, self).__init__()
self.embedding_dim = config['embedding_dim']
self.fc1_in_dim = 32 + config['item_embedding_dim']
self.fc2_in_dim = config['first_fc_hidden_dim']
self.fc2_out_dim = config['second_fc_hidden_dim']
self.use_cuda = config['use_cuda']
self.config = config
# prediction parameters
self.vars = torch.nn.ParameterDict()
self.vars_bn = torch.nn.ParameterList()
w1 = torch.nn.Parameter(torch.ones([self.fc2_in_dim,self.fc1_in_dim])) # 64, 96
torch.nn.init.xavier_normal_(w1)
self.vars['ml_fc_w1'] = w1
self.vars['ml_fc_b1'] = torch.nn.Parameter(torch.zeros(self.fc2_in_dim))
w2 = torch.nn.Parameter(torch.ones([self.fc2_out_dim,self.fc2_in_dim]))
torch.nn.init.xavier_normal_(w2)
self.vars['ml_fc_w2'] = w2
        self.vars['ml_fc_b2'] = torch.nn.Parameter(torch.zeros(self.fc2_out_dim))
w3 = torch.nn.Parameter(torch.ones([1, self.fc2_out_dim]))
torch.nn.init.xavier_normal_(w3)
self.vars['ml_fc_w3'] = w3
self.vars['ml_fc_b3'] = torch.nn.Parameter(torch.zeros(1))
def forward(self, user_emb, item_emb, user_neigh_emb, vars_dict=None):
"""
"""
if vars_dict is None:
vars_dict = self.vars
x_i = item_emb
x_u = user_neigh_emb # movielens: loss:12.14... up! ; dbook 20epoch: user_cold: mae 0.6051;
x = torch.cat((x_i, x_u), 1) # ?, item_emb_dim+user_emb_dim+user_emb_dim
x = F.relu(F.linear(x, vars_dict['ml_fc_w1'], vars_dict['ml_fc_b1']))
x = F.relu(F.linear(x, vars_dict['ml_fc_w2'], vars_dict['ml_fc_b2']))
x = F.linear(x, vars_dict['ml_fc_w3'], vars_dict['ml_fc_b3'])
return x.squeeze()
def zero_grad(self, vars_dict=None):
with torch.no_grad():
if vars_dict is None:
for p in self.vars.values():
if p.grad is not None:
p.grad.zero_()
else:
for p in vars_dict.values():
if p.grad is not None:
p.grad.zero_()
def update_parameters(self):
return self.vars
class MetapathLearner(torch.nn.Module):
def __init__(self,config):
super(MetapathLearner, self).__init__()
self.config = config
# meta-path parameters
self.vars = torch.nn.ParameterDict()
neigh_w = torch.nn.Parameter(torch.ones([32,config['item_embedding_dim']])) # dim=32, movielens 0.81006
torch.nn.init.xavier_normal_(neigh_w)
self.vars['neigh_w'] = neigh_w
self.vars['neigh_b'] = torch.nn.Parameter(torch.zeros(32))
def forward(self, user_emb, item_emb, neighs_emb, mp, index_list, vars_dict=None):
"""
"""
if vars_dict is None:
vars_dict = self.vars
agg_neighbor_emb = F.linear(neighs_emb, vars_dict['neigh_w'], vars_dict['neigh_b']) # (#neighbors, item_emb_dim)
output_emb = F.leaky_relu(torch.mean(agg_neighbor_emb, 0)).repeat(user_emb.shape[0], 1) # (#sample, user_emb_dim)
#
# # each mean, then att agg
# _emb = []
# start = 0
# for idx in index_list:
# end = start+idx
# _emb.append(F.leaky_relu(torch.mean(agg_neighbor_emb[start:end],0)))
# start = end
# output_emb = torch.stack(_emb, 0) # (#sample, dim)
return output_emb
def zero_grad(self, vars_dict=None):
with torch.no_grad():
if vars_dict is None:
for p in self.vars.values():
if p.grad is not None:
p.grad.zero_()
else:
for p in vars_dict.values():
if p.grad is not None:
p.grad.zero_()
def update_parameters(self):
return self.vars
| 4,108 | 35.6875 | 122 | py |
MetaHIN | MetaHIN-master/code/DataHelper.py | # coding: utf-8
# author: lu yf
# create date: 2019-11-24 13:16
import gc
import glob
import os
import pickle
# from DataProcessor import Movielens
from tqdm import tqdm
from multiprocessing import Process, Pool
from multiprocessing.pool import ThreadPool
import numpy as np
import torch
class DataHelper:
def __init__(self, input_dir, output_dir, config):
self.input_dir = input_dir # ../data/movielens_1m/original/
self.output_dir = output_dir # ../data/movielens_1m
self.config = config
self.mp_list = self.config['mp']
def load_data(self, data_set, state, load_from_file=True):
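        # Load every support/query task of the given state (e.g. meta_training or a cold-start split)
        # from disk, including each task's meta-path neighbour lists, and zip them into one task list.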
data_dir = os.path.join(self.output_dir,data_set)
supp_xs_s = []
supp_ys_s = []
supp_mps_s = []
query_xs_s = []
query_ys_s = []
query_mps_s = []
if data_set == 'yelp':
training_set_size = int(
len(glob.glob("{}/{}/*.npy".format(data_dir, state))) / self.config['file_num']) # support, query
# load all data
for idx in tqdm(range(training_set_size)):
supp_xs_s.append(torch.from_numpy(np.load("{}/{}/support_x_{}.npy".format(data_dir, state, idx))))
supp_ys_s.append(torch.from_numpy(np.load("{}/{}/support_y_{}.npy".format(data_dir, state, idx))))
query_xs_s.append(torch.from_numpy(np.load("{}/{}/query_x_{}.npy".format(data_dir, state, idx))))
query_ys_s.append(torch.from_numpy(np.load("{}/{}/query_y_{}.npy".format(data_dir, state, idx))))
supp_mp_data, query_mp_data = {}, {}
for mp in self.mp_list:
_cur_data = np.load("{}/{}/support_{}_{}.npy".format(data_dir, state, mp, idx), encoding='latin1')
supp_mp_data[mp] = [torch.from_numpy(x) for x in _cur_data]
_cur_data = np.load("{}/{}/query_{}_{}.npy".format(data_dir, state, mp, idx), encoding='latin1')
query_mp_data[mp] = [torch.from_numpy(x) for x in _cur_data]
supp_mps_s.append(supp_mp_data)
query_mps_s.append(query_mp_data)
else:
# if not load_from_file:
# ml = Movielens(os.path.join(self.input_dir,data_set), os.path.join(self.output_dir,data_set))
# ml.support_query_data()
training_set_size = int(len(glob.glob("{}/{}/*.pkl".format(data_dir,state))) / self.config['file_num']) # support, query
# load all data
for idx in tqdm(range(training_set_size)):
support_x = pickle.load(open("{}/{}/support_x_{}.pkl".format(data_dir, state, idx), "rb"))
if support_x.shape[0] > 5:
continue
del support_x
supp_xs_s.append(pickle.load(open("{}/{}/support_x_{}.pkl".format(data_dir, state, idx), "rb")))
supp_ys_s.append(pickle.load(open("{}/{}/support_y_{}.pkl".format(data_dir, state, idx), "rb")))
query_xs_s.append(pickle.load(open("{}/{}/query_x_{}.pkl".format(data_dir, state, idx), "rb")))
query_ys_s.append(pickle.load(open("{}/{}/query_y_{}.pkl".format(data_dir, state, idx), "rb")))
supp_mp_data, query_mp_data = {}, {}
for mp in self.mp_list:
supp_mp_data[mp] = pickle.load(open("{}/{}/support_{}_{}.pkl".format(data_dir, state, mp, idx), "rb"))
query_mp_data[mp] = pickle.load(open("{}/{}/query_{}_{}.pkl".format(data_dir, state, mp, idx), "rb"))
supp_mps_s.append(supp_mp_data)
query_mps_s.append(query_mp_data)
print('#support set: {}, #query set: {}'.format(len(supp_xs_s), len(query_xs_s)))
total_data = list(zip(supp_xs_s, supp_ys_s, supp_mps_s,
query_xs_s, query_ys_s, query_mps_s)) # all training tasks
del (supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s)
gc.collect()
return total_data
def load_batch_data(self, data_set, state, batch_indices, load_from_file=True):
data_dir = os.path.join(self.output_dir,data_set)
supp_xs_s = []
supp_ys_s = []
supp_mps_s = []
query_xs_s = []
query_ys_s = []
query_mps_s = []
for idx in batch_indices:
supp_xs_s.append(pickle.load(open("{}/{}/support_x_{}.pkl".format(data_dir, state, idx), "rb")))
supp_ys_s.append(pickle.load(open("{}/{}/support_y_{}.pkl".format(data_dir, state, idx), "rb")))
query_xs_s.append(pickle.load(open("{}/{}/query_x_{}.pkl".format(data_dir, state, idx), "rb")))
query_ys_s.append(pickle.load(open("{}/{}/query_y_{}.pkl".format(data_dir, state, idx), "rb")))
supp_mp_data, query_mp_data = {}, {}
for mp in self.mp_list:
supp_mp_data[mp] = pickle.load(open("{}/{}/support_{}_{}.pkl".format(data_dir, state, mp, idx), "rb"))
query_mp_data[mp] = pickle.load(open("{}/{}/query_{}_{}.pkl".format(data_dir, state, mp, idx), "rb"))
supp_mps_s.append(supp_mp_data)
query_mps_s.append(query_mp_data)
return supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s
def load_data_multiprocess(self, data_set, state, batch_indices, load_from_file=True):
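        # Load one batch of tasks in parallel with a thread pool; each worker reads the pickled
        # support/query tensors plus meta-path data for a single task index.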
data_dir = os.path.join(self.output_dir, data_set)
global cur_state
cur_state = state
supp_xs_s = []
supp_ys_s = []
supp_mps_s = []
query_xs_s = []
query_ys_s = []
query_mps_s = []
pool = ThreadPool(processes=20)
res = pool.map(self.load_single_data, batch_indices)
for r in res:
supp_xs_s.append(r[0])
supp_ys_s.append(r[1])
supp_mps_s.append(r[2])
query_xs_s.append(r[3])
query_ys_s.append(r[4])
query_mps_s.append(r[5])
return supp_xs_s, supp_ys_s, supp_mps_s, query_xs_s, query_ys_s, query_mps_s
def load_single_data(self, idx):
data_dir = os.path.join(self.output_dir, self.config['dataset'])
supp_xs = pickle.load(open("{}/{}/support_x_{}.pkl".format(data_dir, cur_state, idx), "rb"))
supp_ys = pickle.load(open("{}/{}/support_y_{}.pkl".format(data_dir, cur_state, idx), "rb"))
query_xs = pickle.load(open("{}/{}/query_x_{}.pkl".format(data_dir, cur_state, idx), "rb"))
query_ys = pickle.load(open("{}/{}/query_y_{}.pkl".format(data_dir, cur_state, idx), "rb"))
supp_mp_data = {}
query_mp_data = {}
for mp in self.config['mp']:
supp_mp_data[mp] = pickle.load(open("{}/{}/support_{}_{}.pkl".format(data_dir, cur_state, mp, idx), "rb"))
query_mp_data[mp] = pickle.load(open("{}/{}/query_{}_{}.pkl".format(data_dir, cur_state, mp, idx), "rb"))
return supp_xs, supp_ys, supp_mp_data, query_xs, query_ys, query_mp_data
# if __name__ == "__main__":
# from Config import config_ml
# data_set = 'movielens_1m'
# input_dir = '../data/'
# output_dir = '../data/'
#
# data_helper = DataHelper(input_dir, output_dir, config_ml)
#
# training_set_size = int(len(glob.glob("../data/{}/{}/*.pkl".format(data_set, 'meta_training'))) / config_ml['file_num'])
# indices = list(range(training_set_size))
# random.shuffle(indices)
# num_batch = int(training_set_size / 32)
# start_time = time.time()
# for idx, i in tqdm(enumerate(range(num_batch))):
# cur_indices = indices[32*i:32*(i+1)]
# support_xs, support_ys, support_mps, query_xs, query_ys, query_mps = \
# data_helper.load_data_multiprocess(data_set, 'meta_training', cur_indices)
#
# print(time.time()-start_time)
| 7,817 | 45.814371 | 133 | py |
MetaHIN | MetaHIN-master/code/EmbeddingInitializer.py | # coding: utf-8
# author: lu yf
# create date: 2019-12-10 14:22
import torch
from torch.autograd import Variable
# Movielens dataset
class UserEmbeddingML(torch.nn.Module):
def __init__(self, config):
super(UserEmbeddingML, self).__init__()
self.num_gender = config['num_gender']
self.num_age = config['num_age']
self.num_occupation = config['num_occupation']
self.num_zipcode = config['num_zipcode']
self.embedding_dim = config['embedding_dim']
self.embedding_gender = torch.nn.Embedding(
num_embeddings=self.num_gender,
embedding_dim=self.embedding_dim
)
self.embedding_age = torch.nn.Embedding(
num_embeddings=self.num_age,
embedding_dim=self.embedding_dim
)
self.embedding_occupation = torch.nn.Embedding(
num_embeddings=self.num_occupation,
embedding_dim=self.embedding_dim
)
self.embedding_area = torch.nn.Embedding(
num_embeddings=self.num_zipcode,
embedding_dim=self.embedding_dim
)
def forward(self, user_fea):
"""
:param user_fea:
:return:
"""
gender_idx = Variable(user_fea[:, 0], requires_grad=False)
age_idx = Variable(user_fea[:, 1], requires_grad=False)
occupation_idx = Variable(user_fea[:, 2], requires_grad=False)
area_idx = Variable(user_fea[:, 3], requires_grad=False)
gender_emb = self.embedding_gender(gender_idx)
age_emb = self.embedding_age(age_idx)
occupation_emb = self.embedding_occupation(occupation_idx)
area_emb = self.embedding_area(area_idx)
return torch.cat((gender_emb, age_emb, occupation_emb, area_emb), 1) # (1, 4*32)
class ItemEmbeddingML(torch.nn.Module):
def __init__(self, config):
super(ItemEmbeddingML, self).__init__()
self.num_rate = config['num_rate']
self.num_genre = config['num_genre']
self.embedding_dim = config['embedding_dim']
self.embedding_rate = torch.nn.Embedding(
num_embeddings=self.num_rate,
embedding_dim=self.embedding_dim
)
self.embedding_genre = torch.nn.Linear(
in_features=self.num_genre,
out_features=self.embedding_dim,
bias=False
)
def forward(self, item_fea):
"""
:param item_fea:
:return:
"""
rate_idx = Variable(item_fea[:, 0], requires_grad=False)
genre_idx = Variable(item_fea[:, 1:26], requires_grad=False)
rate_emb = self.embedding_rate(rate_idx) # (1,32)
genre_emb = self.embedding_genre(genre_idx.float()) / torch.sum(genre_idx.float(), 1).view(-1, 1) # (1,32)
return torch.cat((rate_emb, genre_emb), 1) # (1, 2*32)
# Yelp dataset
class UserEmbeddingYelp(torch.nn.Module):
def __init__(self, config):
super(UserEmbeddingYelp, self).__init__()
self.num_fans = config['num_fans']
self.num_avgrating = config['num_avgrating']
self.embedding_dim = config['embedding_dim']
self.embedding_fans = torch.nn.Embedding(
num_embeddings=self.num_fans,
embedding_dim=self.embedding_dim
)
self.embedding_avgrating = torch.nn.Embedding(
num_embeddings=self.num_avgrating,
embedding_dim=self.embedding_dim
)
def forward(self, user_fea):
fans_idx = Variable(user_fea[:, 0], requires_grad=False) # [#sample]
avgrating_idx = Variable(user_fea[:, 1], requires_grad=False) # [#sample]
fans_emb = self.embedding_fans(fans_idx)
avgrating_emb = self.embedding_avgrating(avgrating_idx)
        return torch.cat((fans_emb, avgrating_emb), 1) # (1, 2*32)
class ItemEmbeddingYelp(torch.nn.Module):
def __init__(self, config):
super(ItemEmbeddingYelp, self).__init__()
self.num_stars = config['num_stars']
self.num_postalcode = config['num_postalcode']
self.embedding_dim = config['embedding_dim']
self.embedding_stars = torch.nn.Embedding(
num_embeddings=self.num_stars,
embedding_dim=self.embedding_dim,
)
self.embedding_postalcode = torch.nn.Embedding(
num_embeddings=self.num_postalcode,
embedding_dim=self.embedding_dim,
)
def forward(self, item_fea):
stars_idx = Variable(item_fea[:, 0], requires_grad=False)
postalcode_idx = Variable(item_fea[:, 1], requires_grad=False)
stars_emb = self.embedding_stars(stars_idx) # (1,32)
postalcode_emb = self.embedding_postalcode(postalcode_idx) # (1,32)
return torch.cat((stars_emb, postalcode_emb), 1)
# DBook dataset
class UserEmbeddingDB(torch.nn.Module):
def __init__(self, config):
super(UserEmbeddingDB, self).__init__()
self.num_location = config['num_location']
self.embedding_dim = config['embedding_dim']
self.embedding_location = torch.nn.Embedding(
num_embeddings=self.num_location,
embedding_dim=self.embedding_dim
)
def forward(self, user_fea):
"""
:param user_fea: tensor, shape = [#sample, #user_fea]
:return:
"""
location_idx = Variable(user_fea[:, 0], requires_grad=False) # [#sample]
location_emb = self.embedding_location(location_idx)
return location_emb # (1, 1*32)
class ItemEmbeddingDB(torch.nn.Module):
def __init__(self, config):
super(ItemEmbeddingDB, self).__init__()
self.num_publisher = config['num_publisher']
self.embedding_dim = config['embedding_dim']
self.embedding_publisher = torch.nn.Embedding(
num_embeddings=self.num_publisher,
embedding_dim=self.embedding_dim
)
def forward(self, item_fea):
"""
:param item_fea:
:return:
"""
publisher_idx = Variable(item_fea[:, 0], requires_grad=False)
publisher_emb = self.embedding_publisher(publisher_idx) # (1,32)
return publisher_emb # (1, 1*32)
| 6,165 | 32.51087 | 115 | py |
MetaHIN | MetaHIN-master/code/HeteML_new.py | # coding: utf-8
# author: lu yf
# create date: 2019-12-02 11:25
import numpy as np
import torch
from torch.nn import functional as F
from Evaluation import Evaluation
from MetaLearner_new import MetapathLearner, MetaLearner
class HML(torch.nn.Module):
def __init__(self, config, model_name):
super(HML, self).__init__()
self.config = config
self.use_cuda = self.config['use_cuda']
self.device = torch.device("cuda" if config['use_cuda'] else "cpu")
self.model_name = model_name
if self.config['dataset'] == 'movielens':
from EmbeddingInitializer import UserEmbeddingML, ItemEmbeddingML
self.item_emb = ItemEmbeddingML(config)
self.user_emb = UserEmbeddingML(config)
elif self.config['dataset'] == 'yelp':
from EmbeddingInitializer import UserEmbeddingYelp, ItemEmbeddingYelp
self.item_emb = ItemEmbeddingYelp(config)
self.user_emb = UserEmbeddingYelp(config)
elif self.config['dataset'] == 'dbook':
from EmbeddingInitializer import UserEmbeddingDB, ItemEmbeddingDB
self.item_emb = ItemEmbeddingDB(config)
self.user_emb = UserEmbeddingDB(config)
self.mp_learner = MetapathLearner(config)
self.meta_learner = MetaLearner(config)
self.mp_lr = config['mp_lr']
self.local_lr = config['local_lr']
self.emb_dim = self.config['embedding_dim']
self.cal_metrics = Evaluation()
self.ml_weight_len = len(self.meta_learner.update_parameters())
self.ml_weight_name = list(self.meta_learner.update_parameters().keys())
self.mp_weight_len = len(self.mp_learner.update_parameters())
self.mp_weight_name = list(self.mp_learner.update_parameters().keys())
self.transformer_liners = self.transform_mp2task()
self.meta_optimizer = torch.optim.Adam(self.parameters(), lr=config['lr'])
def transform_mp2task(self):
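        # One Linear layer per MetaLearner weight tensor: it maps the (mean-pooled) meta-path-enhanced
        # user embedding to a sigmoid gate that rescales the corresponding initial weight element-wise.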
liners = {}
ml_parameters = self.meta_learner.update_parameters()
# output_dim_of_mp = self.config['user_embedding_dim']
output_dim_of_mp = 32 # movielens: lr=0.001, avg mp, 0.8081
for w in self.ml_weight_name:
liners[w.replace('.', '-')] = torch.nn.Linear(output_dim_of_mp,
np.prod(ml_parameters[w].shape))
return torch.nn.ModuleDict(liners)
def forward(self, support_user_emb, support_item_emb, support_set_y, support_mp_user_emb, vars_dict=None):
"""
"""
if vars_dict is None:
vars_dict = self.meta_learner.update_parameters()
support_set_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_mp_user_emb, vars_dict)
loss = F.mse_loss(support_set_y_pred, support_set_y)
grad = torch.autograd.grad(loss, vars_dict.values(), create_graph=True)
fast_weights = {}
for i, w in enumerate(vars_dict.keys()):
fast_weights[w] = vars_dict[w] - self.local_lr * grad[i]
for idx in range(1, self.config['local_update']): # for the current task, locally update
support_set_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_mp_user_emb, vars_dict=fast_weights)
loss = F.mse_loss(support_set_y_pred, support_set_y) # calculate loss on support set
grad = torch.autograd.grad(loss, fast_weights.values(),
create_graph=True) # calculate gradients w.r.t. model parameters
for i, w in enumerate(fast_weights.keys()):
fast_weights[w] = fast_weights[w] - self.local_lr * grad[i]
return fast_weights
def mp_update(self, support_set_x, support_set_y, support_set_mps, query_set_x, query_set_y, query_set_mps):
"""
        Meta-update the parameters of MetaPathLearner, AggLearner and MetaLearner.
"""
# each mp
support_mp_enhanced_user_emb_s, query_mp_enhanced_user_emb_s = [], []
mp_task_fast_weights_s = {}
mp_task_loss_s = {}
mp_initial_weights = self.mp_learner.update_parameters()
ml_initial_weights = self.meta_learner.update_parameters()
support_user_emb = self.user_emb(support_set_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_set_x[:, 0:self.config['item_fea_len']])
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
support_set_mp = list(support_set_mps[mp])
query_set_mp = list(query_set_mps[mp])
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = list(map(lambda _: _.shape[0], support_set_mp))
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = list(map(lambda _: _.shape[0], query_set_mp))
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp, support_index_list)
support_set_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_mp_enhanced_user_emb)
loss = F.mse_loss(support_set_y_pred, support_set_y)
grad = torch.autograd.grad(loss, mp_initial_weights.values(), create_graph=True)
fast_weights = {}
for i in range(self.mp_weight_len):
weight_name = self.mp_weight_name[i]
fast_weights[weight_name] = mp_initial_weights[weight_name] - self.mp_lr * grad[i]
for idx in range(1, self.config['mp_update']):
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp, support_index_list,
vars_dict=fast_weights)
support_set_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_mp_enhanced_user_emb)
loss = F.mse_loss(support_set_y_pred, support_set_y)
grad = torch.autograd.grad(loss, fast_weights.values(), create_graph=True)
for i in range(self.mp_weight_len):
weight_name = self.mp_weight_name[i]
fast_weights[weight_name] = fast_weights[weight_name] - self.mp_lr * grad[i]
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp, support_index_list, vars_dict=fast_weights)
support_mp_enhanced_user_emb_s.append(support_mp_enhanced_user_emb)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp, query_index_list, vars_dict=fast_weights)
query_mp_enhanced_user_emb_s.append(query_mp_enhanced_user_emb)
f_fast_weights = {}
for w, liner in self.transformer_liners.items():
w = w.replace('-', '.')
f_fast_weights[w] = ml_initial_weights[w] * \
torch.sigmoid(liner(support_mp_enhanced_user_emb.mean(0))). \
view(ml_initial_weights[w].shape)
# f_fast_weights = None
# # the current mp ---> task update
mp_task_fast_weights = self.forward(support_user_emb, support_item_emb, support_set_y,
support_mp_enhanced_user_emb,vars_dict=f_fast_weights)
mp_task_fast_weights_s[mp] = mp_task_fast_weights
query_set_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_mp_enhanced_user_emb,
vars_dict=mp_task_fast_weights)
q_loss = F.mse_loss(query_set_y_pred, query_set_y)
mp_task_loss_s[mp] = q_loss.data # movielens: 0.8126 dbook 0.6084
# mp_task_loss_s[mp] = loss.data # dbook 0.6144
# mp_att = torch.FloatTensor([l/sum(mp_task_loss_s.values()) for l in mp_task_loss_s.values()]).to(self.device) # movielens: 0.81
mp_att = F.softmax(-torch.stack(list(mp_task_loss_s.values())), dim=0) # movielens: 0.80781 lr0.001
# mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device)
agg_task_fast_weights = self.aggregator(mp_task_fast_weights_s, mp_att)
agg_mp_emb = torch.stack(query_mp_enhanced_user_emb_s, 1)
# agg_mp_emb = torch.stack(support_mp_enhanced_user_emb_s, 1)
query_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_agg_enhanced_user_emb, vars_dict=agg_task_fast_weights)
loss = F.mse_loss(query_y_pred, query_set_y)
query_y_real = query_set_y.data.cpu().numpy()
query_y_pred = query_y_pred.data.cpu().numpy()
mae, rmse = self.cal_metrics.prediction(query_y_real, query_y_pred)
ndcg_5 = self.cal_metrics.ranking(query_y_real, query_y_pred, k=5)
return loss, mae, rmse, ndcg_5
def mp_update_mp_MAML(self, support_set_x, support_set_y, support_set_mps, query_set_x, query_set_y, query_set_mps):
"""
MeLU + multiple meta-paths aggregation
"""
support_mp_enhanced_user_emb_s, query_mp_enhanced_user_emb_s = [], []
support_user_emb = self.user_emb(support_set_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_set_x[:, 0:self.config['item_fea_len']])
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
mp_task_loss_s = {}
for mp in self.config['mp']:
support_set_mp = list(support_set_mps[mp])
query_set_mp = list(query_set_mps[mp])
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = map(lambda _: _.shape[0], support_set_mp)
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = map(lambda _: _.shape[0], query_set_mp)
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp,
support_index_list)
support_mp_enhanced_user_emb_s.append(support_mp_enhanced_user_emb)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp,
query_index_list)
query_mp_enhanced_user_emb_s.append(query_mp_enhanced_user_emb)
# query_set_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_mp_enhanced_user_embs)
# q_loss = F.mse_loss(query_set_y_pred, query_set_y)
# mp_task_loss_s[mp] = q_loss.data
# mp_att = F.softmax(-torch.stack(list(mp_task_loss_s.values())), dim=0)
mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device) # mean
agg_mp_emb = torch.stack(support_mp_enhanced_user_emb_s, 1)
support_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
agg_mp_emb = torch.stack(query_mp_enhanced_user_emb_s, 1)
query_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
task_fast_weights = self.forward(support_user_emb, support_item_emb, support_set_y,
support_agg_enhanced_user_emb)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_agg_enhanced_user_emb, vars_dict=task_fast_weights)
loss = F.mse_loss(query_y_pred, query_set_y)
query_y_real = query_set_y.data.cpu().numpy()
query_y_pred = query_y_pred.data.cpu().numpy()
mae, rmse = self.cal_metrics.prediction(query_y_real, query_y_pred)
ndcg_5 = self.cal_metrics.ranking(query_y_real, query_y_pred, k=5)
return loss, mae, rmse, ndcg_5
def mp_update_multi_MAML(self, support_set_x, support_set_y, support_set_mps, query_set_x, query_set_y, query_set_mps):
"""
multiple MAML for multiple meta-paths
"""
loss_s = []
mae_s, rmse_s = [], []
ndcg_at_5 = []
support_user_emb = self.user_emb(support_set_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_set_x[:, 0:self.config['item_fea_len']])
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
support_set_mp = list(support_set_mps[mp])
query_set_mp = list(query_set_mps[mp])
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = map(lambda _: _.shape[0], support_set_mp)
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = map(lambda _: _.shape[0], query_set_mp)
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp,
support_index_list)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp,
query_index_list)
task_fast_weights = self.forward(support_user_emb, support_item_emb, support_set_y,
support_mp_enhanced_user_emb)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_mp_enhanced_user_emb,
vars_dict=task_fast_weights)
loss = F.mse_loss(query_y_pred, query_set_y)
mae, rmse = self.cal_metrics.prediction(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy())
ndcg_5 = self.cal_metrics.ranking(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy(), 5)
loss_s.append(loss)
mae_s.append(mae)
rmse_s.append(rmse)
ndcg_at_5.append(ndcg_5)
        return torch.stack(loss_s).mean(0), np.mean(mae_s), np.mean(rmse_s), np.mean(ndcg_at_5)
def no_MAML(self, support_set_x, support_set_y, support_set_mps, query_set_x, query_set_y, query_set_mps):
# each mp
support_mp_enhanced_user_emb_s, query_mp_enhanced_user_emb_s = [], []
support_user_emb = self.user_emb(support_set_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_set_x[:, 0:self.config['item_fea_len']])
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
support_set_mp = list(support_set_mps[mp])
query_set_mp = list(query_set_mps[mp])
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = map(lambda _: _.shape[0], support_set_mp)
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = map(lambda _: _.shape[0], query_set_mp)
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp,
support_index_list)
support_mp_enhanced_user_emb_s.append(support_mp_enhanced_user_emb)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp,
query_index_list)
query_mp_enhanced_user_emb_s.append(query_mp_enhanced_user_emb)
mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device) # mean
agg_mp_emb = torch.stack(support_mp_enhanced_user_emb_s, 1)
support_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
agg_mp_emb = torch.stack(query_mp_enhanced_user_emb_s, 1)
query_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
support_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_agg_enhanced_user_emb)
support_loss = F.mse_loss(support_y_pred, support_set_y)
support_mae, support_rmse = self.cal_metrics.prediction(support_set_y.data.cpu().numpy(),
support_y_pred.data.cpu().numpy())
support_ndcg_5 = self.cal_metrics.ranking(support_set_y.data.cpu().numpy(),
support_y_pred.data.cpu().numpy(), 5)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_agg_enhanced_user_emb)
query_loss = F.mse_loss(query_y_pred, query_set_y)
query_mae, query_rmse = self.cal_metrics.prediction(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy())
query_ndcg_5 = self.cal_metrics.ranking(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy(), 5)
return (support_loss + query_loss) / 2.0, (support_mae + query_mae) / 2.0, (support_rmse + query_rmse) / 2.0, \
(support_ndcg_5 + query_ndcg_5) / 2.0
def global_update(self, support_xs, support_ys, support_mps, query_xs, query_ys, query_mps, device='cpu'):
"""
"""
batch_sz = len(support_xs)
loss_s = []
mae_s = []
rmse_s = []
ndcg_at_5_s = []
for i in range(batch_sz): # each task in a batch
support_mp = dict(support_mps[i]) # must be dict!!!
query_mp = dict(query_mps[i])
for mp in self.config['mp']:
support_mp[mp] = map(lambda x: x.to(device), support_mp[mp])
query_mp[mp] = map(lambda x: x.to(device), query_mp[mp])
_loss, _mae, _rmse, _ndcg_5 = self.mp_update(support_xs[i].to(device), support_ys[i].to(device), support_mp,
query_xs[i].to(device), query_ys[i].to(device), query_mp)
# _loss, _mae, _rmse, _ndcg_5 = self.mp_update_mp_MAML(support_xs[i].to(device), support_ys[i].to(device), support_mp,
# query_xs[i].to(device), query_ys[i].to(device), query_mp)
# _loss, _mae, _rmse, _ndcg_5 = self.mp_update_multi_MAML(support_xs[i].to(device), support_ys[i].to(device), support_mp,
# query_xs[i].to(device), query_ys[i].to(device), query_mp)
# _loss, _mae, _rmse, _ndcg_5 = self.no_MAML(support_xs[i].to(device), support_ys[i].to(device), support_mp,
# query_xs[i].to(device), query_ys[i].to(device), query_mp)
loss_s.append(_loss)
mae_s.append(_mae)
rmse_s.append(_rmse)
ndcg_at_5_s.append(_ndcg_5)
loss = torch.stack(loss_s).mean(0)
mae = np.mean(mae_s)
rmse = np.mean(rmse_s)
ndcg_at_5 = np.mean(ndcg_at_5_s)
self.meta_optimizer.zero_grad()
loss.backward()
self.meta_optimizer.step()
return loss.cpu().data.numpy(), mae, rmse, ndcg_at_5
def evaluation(self, support_x, support_y, support_mp, query_x, query_y, query_mp, device='cpu'):
"""
"""
support_mp = dict(support_mp) # must be dict!!!
query_mp = dict(query_mp)
for mp in self.config['mp']:
support_mp[mp] = map(lambda x: x.to(device), support_mp[mp])
query_mp[mp] = map(lambda x: x.to(device), query_mp[mp])
_, mae, rmse, ndcg_5 = self.mp_update(support_x.to(device), support_y.to(device), support_mp,
query_x.to(device), query_y.to(device), query_mp)
# _, mae, rmse, ndcg_5 = self.mp_update_mp_MAML(support_x.to(device), support_y.to(device), support_mp,
# query_x.to(device), query_y.to(device), query_mp)
# _, mae, rmse, ndcg_5 = self.mp_update_multi_MAML(support_x.to(device), support_y.to(device), support_mp,
# query_x.to(device), query_y.to(device), query_mp)
# mae, rmse, ndcg_5 = self.eval_no_MAML(query_x.to(device), query_y.to(device), query_mp)
return mae, rmse, ndcg_5
def aggregator(self, task_weights_s, att):
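        # Weighted sum of the per-meta-path fast weights: each meta-path's task-specific parameters are
        # scaled by its attention score and accumulated into a single parameter dictionary.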
for idx, mp in enumerate(self.config['mp']):
if idx == 0:
att_task_weights = dict({k: v * att[idx] for k, v in task_weights_s[mp].items()})
continue
tmp_att_task_weights = dict({k: v * att[idx] for k, v in task_weights_s[mp].items()})
att_task_weights = dict(zip(att_task_weights.keys(),
list(map(lambda x: x[0] + x[1],
zip(att_task_weights.values(), tmp_att_task_weights.values())))))
return att_task_weights
def eval_no_MAML(self, query_set_x, query_set_y, query_set_mps):
# each mp
query_mp_enhanced_user_emb_s = []
query_user_emb = self.user_emb(query_set_x[:, self.config['item_fea_len']:])
query_item_emb = self.item_emb(query_set_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
query_set_mp = list(query_set_mps[mp])
query_neighs_emb = self.item_emb(torch.cat(query_set_mp))
query_index_list = map(lambda _: _.shape[0], query_set_mp)
query_mp_enhanced_user_emb = self.mp_learner(query_user_emb, query_item_emb, query_neighs_emb, mp,
query_index_list)
query_mp_enhanced_user_emb_s.append(query_mp_enhanced_user_emb)
mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device) # mean
agg_mp_emb = torch.stack(query_mp_enhanced_user_emb_s, 1)
query_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
query_y_pred = self.meta_learner(query_user_emb, query_item_emb, query_agg_enhanced_user_emb)
query_mae, query_rmse = self.cal_metrics.prediction(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy())
query_ndcg_5 = self.cal_metrics.ranking(query_set_y.data.cpu().numpy(),
query_y_pred.data.cpu().numpy(), 5)
return query_mae, query_rmse, query_ndcg_5
def fine_tune(self, support_x,support_y,support_mp):
        if self.use_cuda:
support_x = support_x.cuda()
support_y = support_y.cuda()
support_mp = dict(support_mp) # must be dict!!!
for mp, mp_data in support_mp.items():
support_mp[mp] = list(map(lambda x: x.cuda(), mp_data))
support_mp_enhanced_user_emb_s = []
support_user_emb = self.user_emb(support_x[:, self.config['item_fea_len']:])
support_item_emb = self.item_emb(support_x[:, 0:self.config['item_fea_len']])
for mp in self.config['mp']:
support_set_mp = support_mp[mp]
support_neighs_emb = self.item_emb(torch.cat(support_set_mp))
support_index_list = map(lambda _: _.shape[0], support_set_mp)
support_mp_enhanced_user_emb = self.mp_learner(support_user_emb, support_item_emb, support_neighs_emb, mp,
support_index_list)
support_mp_enhanced_user_emb_s.append(support_mp_enhanced_user_emb)
mp_att = torch.FloatTensor([1.0 / len(self.config['mp'])] * len(self.config['mp'])).to(self.device) # mean
agg_mp_emb = torch.stack(support_mp_enhanced_user_emb_s, 1)
support_agg_enhanced_user_emb = torch.sum(agg_mp_emb * mp_att.unsqueeze(1), 1)
support_y_pred = self.meta_learner(support_user_emb, support_item_emb, support_agg_enhanced_user_emb)
support_loss = F.mse_loss(support_y_pred, support_y)
# fine-tune
self.meta_optimizer.zero_grad()
support_loss.backward()
self.meta_optimizer.step()
| 24,851 | 55.869565 | 162 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/benchpress.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BenchPress: A directed compiler benchmark generator powered by active learning.
The core operations of BenchPress are:
1. Preprocess and encode a corpus of human-written programs.
2. Define and train a machine learning model on the corpus.
3. Sample the trained model to generate new programs.
This program automates the execution of all three stages of the pipeline.
The pipeline can be interrupted and resumed at any time. Results are cached
across runs. Please note that many of the steps in the pipeline are extremely
compute intensive and highly parallelized. If configured with CUDA support,
any NVIDIA GPUs will be used to improve performance where possible.
"""
import contextlib
import cProfile
import os
import pathlib
import time
import sys
import typing
import datetime
from absl import app, flags
from deeplearning.benchpress.samplers import sample_observers as sample_observers_lib
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import memory
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.dashboard import dashboard
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.reinforcement_learning import reinforcement_models
from deeplearning.benchpress.proto import benchpress_pb2
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.proto import sampler_pb2
from deeplearning.benchpress.github import miner
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import proxy_bash
from eupy.hermes import client
FLAGS = flags.FLAGS
flags.DEFINE_string(
"notify_me",
None,
"Set receiver mail address to notify for program failures or termination."
)
flags.DEFINE_integer(
"notify_me_level",
5,
"Define logging level of mail client"
)
flags.DEFINE_boolean(
"color", True, "Colorize or not, logging messages"
)
flags.DEFINE_boolean(
"step", False, "Enable step execution on debug logs (debug level must be selected)"
)
flags.DEFINE_string(
"config", "/benchpress/config.pbtxt", "Path to a benchpress.Instance proto file."
)
flags.DEFINE_string(
"workspace_dir",
"/tmp/benchpress",
"Root path of the working space directory. Corpus, dataset, model and all meta files"
"will be stored here. Default value is /tmp folder.",
)
flags.DEFINE_integer(
"min_samples",
0,
"The minimum number of samples to make. If <= 0, sampling continues "
"indefinitely and never terminates.",
)
flags.DEFINE_boolean(
"print_samples", True, "If set, print the generated samples."
)
flags.DEFINE_boolean(
"store_samples_db", True, "If set, store generated samples to database."
)
flags.DEFINE_boolean(
"cache_samples", False, "If set, cache the generated sample protobufs."
)
flags.DEFINE_string(
"sample_text_dir", None, "A directory to write plain text samples to."
)
flags.DEFINE_string(
"stop_after",
None,
'Stop BenchPress early. Valid options are: "corpus", or "train".',
)
flags.DEFINE_boolean(
"only_sample",
False,
"Select to deploy sampling without training."
)
flags.DEFINE_string(
"print_cache_path",
None,
'Print the directory of a cache and exit. Valid options are: "pre_train_corpus", "corpus", '
'"model", or "sampler".',
)
flags.DEFINE_boolean(
"debug",
False,
"Enable a debugging mode of BenchPress python runtime. When enabled, errors "
"which may otherwise be caught lead to program crashes and stack traces.",
)
flags.DEFINE_boolean(
"profiling",
False,
"Enable BenchPress self profiling. Profiling results be logged.",
)
flags.DEFINE_boolean(
"monitor_mem_usage",
False,
"Plot application RAM and GPU memory usage."
)
flags.DEFINE_boolean(
"dashboard_only", False, "If true, launch dashboard only."
)
flags.DEFINE_boolean(
"proxy_bash",
False,
"Set True to start a proxy bash thread."
"Commands are provided from BenchPress's"
"running terminal and standard's input format"
"must be: `>> CMD'."
)
class Instance(object):
"""A BenchPress instance encapsulates a github_miner, model, sampler, and working directory."""
def __init__(self, config: benchpress_pb2.Instance):
"""Instantiate an instance.
Args:
config: An Instance proto.
"""
self.working_dir = None
self.github = None
self.model = None
self.sampler = None
self.config = config
if config.HasField("github_miner"):
self.github = miner.GithubMiner.FromConfig(config.github_miner)
if config.HasField("working_dir"):
self.working_dir: pathlib.Path = pathlib.Path(
os.path.join(FLAGS.workspace_dir, config.working_dir)
).expanduser().resolve()
# Enter a session so that the cache paths are set relative to any requested
# working directory.
with self.Session():
# Initialize pytorch to make distributed barrier accessible.
pytorch.initPytorch()
if config.HasField("language_model"):
self.model: language_models.Model = language_models.Model(config.language_model)
elif config.HasField("rl_model"):
self.model: reinforcement_models.RLModel = reinforcement_models.RLModel(config.rl_model)
## Specialize 'locks' folder.
if environment.WORLD_SIZE > 1:
lock_cache = pathlib.Path(self.model.cache.path / "locks")
if environment.WORLD_RANK == 0:
lock_cache.mkdir(exist_ok = True)
else:
while not lock_cache.exists():
time.sleep(0.5)
distrib.init(lock_cache)
if config.HasField("sampler"):
self.sampler: samplers.Sampler = samplers.Sampler(
config.sampler,
model_hash = self.model.hash,
)
if environment.WORLD_RANK == 0:
self.dashboard = dashboard.Launch()
@contextlib.contextmanager
def Session(self) -> "Instance":
"""Scoped $BENCHPRESS_CACHE value."""
old_working_dir = os.environ.get("BENCHPRESS_CACHE", "")
if self.working_dir:
os.environ["BENCHPRESS_CACHE"] = str(self.working_dir)
yield self
if self.working_dir:
os.environ["BENCHPRESS_CACHE"] = old_working_dir
def Create(self) -> None:
with self.Session():
self.model.Create()
def PreTrain(self, *args, **kwargs) -> None:
if self.model.pre_train_corpus:
with self.Session():
test_sampler = None
if not self.sampler.is_active:
test_sampler_config = sampler_pb2.Sampler()
test_sampler_config.CopyFrom(self.sampler.config)
# Make all test samples the same sequence_length length.
del test_sampler_config.termination_criteria[:]
test_sampler_config.termination_criteria.extend(
[
sampler_pb2.SampleTerminationCriterion(
maxlen=sampler_pb2.MaxTokenLength(maximum_tokens_in_sample=self.sampler.sequence_length)
),
]
)
test_sampler = samplers.Sampler(test_sampler_config, sample_db_name = "pre_epoch_samples.db")
# We inject the `test_sampler` argument so that we can create samples
# during training.
self.model.PreTrain(*args, test_sampler = test_sampler, **kwargs)
def Train(self, *args, **kwargs) -> None:
with self.Session():
test_sampler = None
if not self.sampler.is_active:
test_sampler_config = sampler_pb2.Sampler()
test_sampler_config.CopyFrom(self.sampler.config)
# Make all test samples the same sequence_length length.
del test_sampler_config.termination_criteria[:]
test_sampler_config.termination_criteria.extend(
[
sampler_pb2.SampleTerminationCriterion(
maxlen=sampler_pb2.MaxTokenLength(maximum_tokens_in_sample=self.sampler.sequence_length)
),
]
)
test_sampler = samplers.Sampler(test_sampler_config, sample_db_name = "epoch_samples.db")
# We inject the `test_sampler` argument so that we can create samples
# during training.
self.model.Train(*args, test_sampler = test_sampler, **kwargs)
def Sample(self, *args, **kwargs) -> typing.List[model_pb2.Sample]:
self.PreTrain()
self.Train()
with self.Session():
self.model.Sample(self.sampler, *args, **kwargs)
def ToProto(self) -> benchpress_pb2.Instance:
"""Get the proto config for the instance."""
config = benchpress_pb2.Instance()
config.working_dir = str(self.working_dir)
    if self.config.HasField("language_model"):
      config.language_model.CopyFrom(self.model.config)
    elif self.config.HasField("rl_model"):
      config.rl_model.CopyFrom(self.model.config)
config.sampler.CopyFrom(self.sampler.config)
return config
@classmethod
def FromFile(cls, path: pathlib.Path) -> "Instance":
return cls(pbutil.FromFile(path, benchpress_pb2.Instance()))
def ConfigFromFlags() -> benchpress_pb2.Instance:
config_path = pathlib.Path(FLAGS.config)
if not config_path.is_file():
raise FileNotFoundError (f"BenchPress --config file not found: '{config_path}'")
config = pbutil.FromFile(config_path, benchpress_pb2.Instance())
os.environ["PWD"] = str(config_path.parent)
return config
def SampleObserversFromFlags(instance: Instance) -> typing.List[
sample_observers_lib.SampleObserver
]:
"""Instantiate sample observers from flag values."""
if instance.sampler is None:
return []
sample_observers = []
if FLAGS.min_samples <= 0:
l.logger().warning(
"Entering an infinite sample loop, this process will never end!"
)
else:
sample_observers.append(
sample_observers_lib.MaxSampleCountObserver(FLAGS.min_samples * instance.sampler.batch_size)
)
if FLAGS.print_samples:
sample_observers.append(sample_observers_lib.PrintSampleObserver())
if FLAGS.store_samples_db:
if environment.WORLD_RANK == 0:
(instance.model.cache.path / "samples" / instance.sampler.hash).mkdir(exist_ok = True)
sample_observers.append(sample_observers_lib.SamplesDatabaseObserver(
instance.model.cache.path / "samples" / instance.sampler.hash / instance.sampler.sample_db_name,
plot_sample_status = True
)
)
instance.sampler.symlinkModelDB(
instance.model.cache.path / "samples" / instance.sampler.hash,
instance.model.hash
)
if FLAGS.cache_samples:
sample_observers.append(sample_observers_lib.LegacySampleCacheObserver())
if FLAGS.sample_text_dir:
sample_observers.append(
sample_observers_lib.SaveSampleTextObserver(
pathlib.Path(FLAGS.sample_text_dir)
)
)
return sample_observers
def DoFlagsAction(
instance: Instance,
sample_observers: typing.List[sample_observers_lib.SampleObserver],
) -> None:
"""Do the action requested by the command line flags.
By default, this method trains and samples the instance using the given
sample observers. Flags which affect this behaviour are:
--print_cache_path={corpus,model,sampler}: Prints the path and returns.
--stop_after={corpus,train}: Stops after corpus creation or training,
respectively
--export_model=<path>: Train the model and export it to the requested path.
Args:
instance: The BenchPress instance to act on.
sample_observer: A list of sample observers. Unused if no sampling occurs.
"""
if instance.github:
instance.github.fetch()
if instance.model:
with instance.Session():
if FLAGS.print_cache_path == "pre_train_corpus":
print(instance.model.pre_train_corpus.cache.path)
return
elif FLAGS.print_cache_path == "corpus":
print(instance.model.corpus.cache.path)
return
elif FLAGS.print_cache_path == "model":
print(instance.model.cache.path)
return
elif FLAGS.print_cache_path == "sampler":
if instance.sampler:
print(instance.model.SamplerCache(instance.sampler))
else:
raise ValueError("Sampler config has not been specified.")
return
elif FLAGS.print_cache_path:
raise ValueError(f"Invalid --print_cache_path argument: '{FLAGS.print_cache_path}'")
# The default action is to sample the model.
if FLAGS.stop_after == "corpus":
instance.model.corpus.Create()
if instance.model.pre_train_corpus:
instance.model.pre_train_corpus.Create(tokenizer = instance.model.corpus.tokenizer)
elif FLAGS.stop_after == "pre_train":
instance.PreTrain()
l.logger().info("Model: {}".format(instance.model.cache.path))
elif FLAGS.stop_after == "train":
instance.Train()
l.logger().info("Model: {}".format(instance.model.cache.path))
elif FLAGS.stop_after:
raise ValueError(
f"Invalid --stop_after argument: '{FLAGS.stop_after}'"
)
else:
if instance.sampler:
instance.Sample(sample_observers)
instance.sampler.symlinkModelDB(
instance.model.cache.path / "samples" / instance.sampler.hash,
instance.model.hash
)
else:
l.logger().warn("Sampler has not been provided. Use --stop_after to create corpus or train.")
else:
if FLAGS.stop_after in {"corpus", "train"}:
l.logger().warn("FLAGS.stop_after {} will be ignored without model config.".format(FLAGS.stop_after))
if FLAGS.print_cache_path in {"pre_train_corpus", "corpus", "model", "sampler"}:
raise ValueError("{} config has not been specified.".format(FLAGS.print_cache_path))
elif FLAGS.print_cache_path:
raise ValueError(f"Invalid --print_cache_path argument: '{FLAGS.print_cache_path}'")
return
def main():
"""Main entry point."""
if FLAGS.dashboard_only:
if environment.WORLD_RANK == 0:
dash = dashboard.Launch(debug = {"debug": True})
else:
instance = Instance(ConfigFromFlags())
sample_observers = SampleObserversFromFlags(instance)
DoFlagsAction(instance, sample_observers)
return
def initMain(*args, **kwargs):
"""
Pre-initialization for the main function of the program
Args:
*args: Arguments to be passed to the function.
**kwargs: Arguments to be passed to the function.
"""
mail = None
if FLAGS.notify_me:
mail = client.initClient(FLAGS.notify_me)
l.initLogger(name = "benchpress", mail = mail, rank = environment.WORLD_RANK)
if FLAGS.local_filesystem:
pathlib.Path(FLAGS.local_filesystem).resolve().mkdir(exist_ok = True, parents = True)
if FLAGS.monitor_mem_usage:
mem_monitor_threads = memory.init_mem_monitors(
pathlib.Path(FLAGS.workspace_dir).resolve()
)
if FLAGS.proxy_bash:
proxy_bash.start()
if FLAGS.debug:
# Enable verbose stack traces. See: https://pymotw.com/2/cgitb/
import cgitb
cgitb.enable(format="text")
main()
return
try:
if FLAGS.profiling:
cProfile.runctx("main()", None, None, sort="tottime")
else:
main()
except KeyboardInterrupt:
return
except Exception as e:
l.logger().error(e)
if mail:
if FLAGS.config is not None:
job = pathlib.Path(FLAGS.config)
else:
job = ""
mail.send_message("benchpress:{}".format(str(job.stem)), e)
raise
if mail:
if FLAGS.config is not None:
job = pathlib.Path(FLAGS.config)
else:
job = ""
mail.send_message("benchpress: {}".format(str(job.stem)), "Program terminated successfully at {}.".format(datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S")))
return
if __name__ == "__main__":
app.run(initMain)
sys.exit(0)
| 16,345 | 33.340336 | 169 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/reinforcement_models.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RL Environment for the task of targeted benchmark generation.
"""
import pathlib
import os
import time
import typing
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.proto import reinforcement_learning_pb2
from deeplearning.benchpress.proto import internal_pb2
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import commit
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.reinforcement_learning import env
from deeplearning.benchpress.reinforcement_learning import agent
from deeplearning.benchpress.reinforcement_learning import memory
from deeplearning.benchpress.models.torch_bert import model as bert_model
from absl import flags
FLAGS = flags.FLAGS
from deeplearning.benchpress.util import cache
def AssertConfigIsValid(config: reinforcement_learning_pb2.RLModel) -> reinforcement_learning_pb2.RLModel:
"""
Check validity of RL Model config.
"""
## Just check if language_model exists, later the language_models class will check the pbtxt.
pbutil.AssertFieldIsSet(config, "language_model")
## Now check the specialized agent attributes.
pbutil.AssertFieldIsSet(config, "target_features")
pbutil.AssertFieldIsSet(config, "agent")
## Parse FeatureTokenizer fields.
pbutil.AssertFieldIsSet(config.agent, "feature_tokenizer")
pbutil.AssertFieldIsSet(config.agent, "batch_size")
pbutil.AssertFieldIsSet(config.agent, "action_temperature_micros")
pbutil.AssertFieldIsSet(config.agent, "token_temperature_micros")
pbutil.AssertFieldIsSet(config.agent, "num_epochs")
pbutil.AssertFieldIsSet(config.agent, "num_episodes")
pbutil.AssertFieldIsSet(config.agent, "steps_per_episode")
pbutil.AssertFieldIsSet(config.agent, "num_updates")
pbutil.AssertFieldIsSet(config.agent, "gamma")
pbutil.AssertFieldIsSet(config.agent, "lam")
pbutil.AssertFieldIsSet(config.agent, "epsilon")
pbutil.AssertFieldIsSet(config.agent, "learning_rate_micros")
pbutil.AssertFieldIsSet(config.agent, "value_loss_coefficient")
pbutil.AssertFieldIsSet(config.agent, "entropy_coefficient")
pbutil.AssertFieldIsSet(config.agent.feature_tokenizer, "feature_max_value_token")
pbutil.AssertFieldIsSet(config.agent.feature_tokenizer, "feature_singular_token_thr")
pbutil.AssertFieldIsSet(config.agent.feature_tokenizer, "feature_token_range")
pbutil.AssertFieldIsSet(config.agent.feature_tokenizer, "feature_sequence_length")
return config
class RLModel(object):
"""
Manager class of Reinforcement Learning pipeline for benchmark generation.
"""
@property
def tokenizer(self) -> tokenizers.TokenizerBase:
return self.language_model.tokenizer
@property
def corpus(self) -> corpuses.Corpus:
return self.language_model.corpus
@property
def pre_train_corpus(self) -> corpuses.Corpus:
return self.language_model.pre_train_corpus
@staticmethod
def _ComputeHash(language_model: language_models.Model, config: reinforcement_learning_pb2.RLModel) -> str:
"""
Compute unique hash of model specifications.
"""
lm_hash = language_model.hash
config_to_hash = reinforcement_learning_pb2.RLModel()
config_to_hash.CopyFrom(config)
config_to_hash.ClearField("language_model")
return crypto.sha1_list([lm_hash, config_to_hash.SerializeToString()])
def __init__(self, config: reinforcement_learning_pb2.RLModel):
"""
A Reinforcement Learning model, wrapping a Language Model backend.
"""
# Error early, so that a cache isn't created.
if not isinstance(config, reinforcement_learning_pb2.RLModel):
t = type(config).__name__
raise TypeError(f"Config must be an RLModel proto. Received: '{t}'")
self.config = reinforcement_learning_pb2.RLModel()
self.config.CopyFrom(AssertConfigIsValid(config))
# Initialize the LM-backend for token sampling.
self.language_model = language_models.Model(self.config.language_model)
self.hash = self._ComputeHash(self.language_model, self.config)
self._created = False
if environment.WORLD_RANK == 0:
self.cache = cache.mkcache("rl_model", self.hash)
self.cache.path.mkdir(exist_ok = True, parents = True)
else:
while not cache.cachepath("rl_model", self.hash).exists():
time.sleep(0.5)
self.cache = cache.mkcache("rl_model", self.hash)
if environment.WORLD_RANK == 0:
# Create the necessary cache directories.
(self.cache.path / "feature_sampler").mkdir(exist_ok = True)
(self.cache.path / "samples").mkdir(exist_ok = True)
# Create symlink to language model.
symlink = self.cache.path / "language_model"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
pathlib.Path(self.language_model.cache.path),
self.cache.path
),
symlink
)
# Setup META.pbtxt
if self.cache.get("META.pbtxt"):
cached_meta = pbutil.FromFile(
pathlib.Path(self.cache["META.pbtxt"]), internal_pb2.RLModelMeta()
)
# Exclude num_epochs and corpus location from metadata comparison.
config_to_compare = reinforcement_learning_pb2.RLModel()
config_to_compare.CopyFrom(self.config)
config_to_compare.language_model.corpus.ClearField("contentfiles")
if config_to_compare.language_model.HasField("pre_train_corpus"):
config_to_compare.language_model.pre_train_corpus.ClearField("contentfiles")
config_to_compare.language_model.training.ClearField("num_epochs")
config_to_compare.language_model.training.ClearField("num_train_steps")
if config_to_compare.language_model.HasField("pre_train_corpus"):
config_to_compare.language_model.training.ClearField("num_pretrain_steps")
config_to_compare.language_model.training.ClearField("batch_size")
if config_to_compare.language_model.training.HasField("data_generator"):
config_to_compare.language_model.training.data_generator.ClearField("steps_per_epoch")
config_to_compare.language_model.training.data_generator.ClearField("validation_set")
# These fields should have already been cleared, but we'll do it again
# so that metadata comparisons don't fail when the cached meta schema
# is updated.
cached_to_compare = reinforcement_learning_pb2.RLModel()
cached_to_compare.CopyFrom(cached_meta.config)
cached_to_compare.language_model.corpus.ClearField("contentfiles")
if cached_to_compare.language_model.HasField("pre_train_corpus"):
cached_to_compare.language_model.pre_train_corpus.ClearField("contentfiles")
cached_to_compare.language_model.training.ClearField("num_epochs")
cached_to_compare.language_model.training.ClearField("num_train_steps")
if cached_to_compare.language_model.HasField("pre_train_corpus"):
cached_to_compare.language_model.training.ClearField("num_pretrain_steps")
cached_to_compare.language_model.training.ClearField("batch_size")
if cached_to_compare.language_model.training.HasField("data_generator"):
cached_to_compare.language_model.training.data_generator.ClearField("steps_per_epoch")
cached_to_compare.language_model.training.data_generator.ClearField("validation_set")
if cached_to_compare.language_model.training.sequence_length != config_to_compare.language_model.training.sequence_length:
l.logger().warning("Mismatch between pre-trained and current config sequence_length!\
This can only be intended in BERT model!")
cached_to_compare.language_model.training.ClearField("sequence_length")
config_to_compare.language_model.training.ClearField("sequence_length")
if config_to_compare != cached_to_compare:
raise SystemError("Metadata mismatch: {} \n\n {}".format(config_to_compare, cached_to_compare))
self.meta = cached_meta
else:
self.meta = internal_pb2.RLModelMeta()
self.meta.config.CopyFrom(self.config)
self._WriteMetafile()
## Store current commit
commit.saveCommit(self.cache.path)
l.logger().info("Initialized RL Pipeline in {}".format(self.cache.path))
"""
How do you target features during training ?
1) Active learner - downstream task <- Sampler
2) Random feasible vectors (collected from OpenCL corpus ?) <- Sampler ?
3) Got from benchmark suites ? <- Sampler
"""
return
def Create(self, **kwargs) -> bool:
"""
Create the LM and RL environment.
"""
_ = self.language_model.Create()
if self.language_model.pre_train_corpus:
self.language_model.PreTrain(**kwargs)
self.language_model.Train(**kwargs)
self.feature_tokenizer = tokenizers.FeatureTokenizer.FromArgs(
self.config.agent.feature_tokenizer.feature_singular_token_thr,
self.config.agent.feature_tokenizer.feature_max_value_token,
self.config.agent.feature_tokenizer.feature_token_range
)
if self._created:
return False
FLAGS.sample_indices_limit = 1 # Force BERT-LM on one prediction per hole.
self._created = True
self.env = env.Environment(
self.config,
self.language_model.backend.config.architecture.max_position_embeddings,
self.language_model.corpus,
self.tokenizer,
self.feature_tokenizer,
self.cache.path,
)
self.agent = agent.Agent(
self.config, self.language_model, self.tokenizer, self.feature_tokenizer, self.cache.path
)
self.memory = memory.Memory(self.cache.path)
return True
def PreTrain(self, **kwargs) -> 'RLModel':
"""
    Pre-train wrapper for the language model.
    No separate pre-training is performed for the RL model itself.
"""
self.Create(**kwargs)
return self
def Train(self, **kwargs) -> None:
"""
Train the RL-Agent.
"""
self.Create(**kwargs)
## First, train the Language model backend.
num_epochs = self.config.agent.num_epochs
num_episodes = self.config.agent.num_episodes
steps_per_episode = self.config.agent.steps_per_episode
num_updates = self.config.agent.num_updates
gamma = self.config.agent.gamma
lam = self.config.agent.lam
epsilon = self.config.agent.epsilon
    lr                = self.config.agent.learning_rate_micros / 1e6  # convert micro-units to the actual learning rate.
value_loss_coeff = self.config.agent.value_loss_coefficient
entropy_coeff = self.config.agent.entropy_coefficient
self.agent.Train(
env = self.env,
num_epochs = num_epochs,
num_episodes = num_episodes,
steps_per_episode = steps_per_episode,
num_updates = num_updates,
gamma = gamma,
lr = lr,
lam = lam,
epsilon = epsilon,
value_loss_coeff = value_loss_coeff,
entropy_coeff = entropy_coeff,
)
return
def Sample(self, sampler: samplers.Sampler) -> None:
"""
    Called in place of the language model's Sample; acts as a wrapper over the (BERT) backend.
"""
raise NotImplementedError("Here you must sample your RL-Model.")
return
def SamplerCache(self, sampler: samplers.Sampler) -> pathlib.Path:
"""Get the path to a sampler cache.
Args:
sampler: A Sampler instance.
Returns:
A path to a directory. Note that this directory may not exist - it is
created only after a call to Sample().
"""
return self.cache.path / "samples" / sampler.hash
def _WriteMetafile(self) -> None:
pbutil.ToFile(self.meta, pathlib.Path(self.cache.keypath("META.pbtxt")))
def saveCheckpoint(self) -> None:
"""
Save current state of RL pipeline.
"""
self.feature_loader.saveCheckpoint()
self.env.saveCheckpoint()
self.agent.saveCheckpoint()
self.memory.saveCheckpoint()
return
def loadCheckpoint(self) -> None:
"""
Load RL pipeline checkpoint.
"""
self.feature_loader.loadCheckpoint()
self.env.loadCheckpoint()
self.agent.loadCheckpoint()
self.memory.loadCheckpoint()
return
| 13,200 | 40.382445 | 130 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/memory.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Memory replay buffer for reinforcement learning training.
"""
import pathlib
import typing
import pickle
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
class Memory(object):
"""
Replay buffer of previous states and actions.
"""
def __init__(self, cache_path: pathlib.Path):
self.cache_path = cache_path / "memory"
if environment.WORLD_RANK == 0:
self.cache_path.mkdir(exist_ok = True, parents = True)
self.action_buffer = []
self.state_buffer = []
self.reward_buffer = []
self.done_buffer = []
self.info_buffer = []
self.loadCheckpoint()
return
def add(self,
action : interactions.Action,
state : interactions.State,
reward : interactions.Reward,
done : bool,
info : str,
) -> None:
"""Add single step to memory buffers."""
self.action_buffer.append(action)
self.state_buffer.append(state)
self.reward_buffer.append(reward)
self.done_buffer.append(done)
self.info_buffer.append(info)
return
def sample(self) -> typing.Dict[str, torch.Tensor]:
"""
Sample memories to update the RL agent.
"""
return
def loadCheckpoint(self) -> None:
"""Fetch memory's latest state."""
if (self.cache_path / "memory.pkl").exists():
distrib.lock()
with open(self.cache_path / "memory.pkl", 'rb') as inf:
checkpoint = pickle.load(inf)
distrib.unlock()
      self.action_buffer = checkpoint['action_buffer']
      self.state_buffer  = checkpoint['state_buffer']
      self.reward_buffer = checkpoint['reward_buffer']
return
def saveCheckpoint(self) -> None:
"""Save Checkpoint state."""
if environment.WORLD_RANK == 0:
checkpoint = {
'action_buffer' : self.action_buffer,
'reward_buffer' : self.reward_buffer,
'state_buffer' : self.state_buffer,
}
with open(self.cache_path / "memory.pkl", 'wb') as outf:
pickle.dump(checkpoint, outf)
distrib.barrier()
return
| 2,825 | 29.387097 | 74 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Memory replay buffer for reinforcement learning training.
"""
import typing
import numpy as np
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.proto import reinforcement_learning_pb2
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
def from_config(config : reinforcement_learning_pb2.RLModel,
feature_tokenizer : tokenizers.FeatureTokenizer,
corpus : corpuses.Corpus,
) -> torch.utils.data.Dataset:
"""
Return the right torch dataloader based on configuration.
"""
if config.HasField("train_set"):
return CorpusFeatureLoader(config, corpus, feature_tokenizer)
elif config.HasField("random"):
return RandomFeatureLoader(config, feature_tokenizer)
return
def StateToActionTensor(state : interactions.State,
padToken : int,
feat_padToken : int,
batch_size : int,
) -> typing.Dict[str, torch.Tensor]:
"""
Pre-process state to tensor inputs for Action Deep QValues.
"""
seq_len = len(state.encoded_code)
feat_seq_len = len(state.encoded_features)
src_ids = torch.LongTensor(state.encoded_code).unsqueeze(0).repeat(batch_size, 1)
src_mask = src_ids != padToken
src_pos = torch.arange(seq_len, dtype = torch.int64).unsqueeze(0).repeat(batch_size, 1)
feat_ids = torch.LongTensor(state.encoded_features).unsqueeze(0).repeat(batch_size, 1)
feat_mask = feat_ids != feat_padToken
feat_pos = torch.arange(feat_seq_len, dtype = torch.int64).unsqueeze(0).repeat(batch_size, 1)
return {
'encoder_feature_ids' : feat_ids,
'encoder_feature_mask' : feat_mask,
'encoder_position_ids' : feat_pos,
'decoder_input_ids' : src_ids,
'decoder_input_mask' : src_mask,
'decoder_position_ids' : src_pos,
}
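# Shape sketch for StateToActionTensor above (illustrative sizes only): with
# S = len(state.encoded_code), F = len(state.encoded_features) and B = batch_size,
# the decoder_* tensors are (B, S) and the encoder_* tensors are (B, F), i.e. the
# single state is tiled B times so the action model can be queried as a batch.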
def StateToTokenTensor(state : interactions.State,
mask_idx : int,
maskToken : int,
padToken : int,
feat_padToken : int,
batch_size : int,
replace_token : bool = False,
) -> typing.Dict[str, torch.Tensor]:
"""
  Pre-process state to tensor inputs for the token-prediction (masked LM) model.
"""
seq_len = len(state.encoded_code)
feat_seq_len = len(state.encoded_features)
if replace_token:
masked_code = state.encoded_code
masked_code[mask_idx] = maskToken
else:
masked_code = np.concatenate((state.encoded_code[:mask_idx+1], [maskToken], state.encoded_code[mask_idx+1:]))
masked_code = torch.LongTensor(masked_code[:seq_len]).unsqueeze(0).repeat(batch_size, 1)
enc_features = torch.LongTensor(state.encoded_features).unsqueeze(0).repeat(batch_size, 1)
return {
'encoder_feature_ids' : enc_features,
'encoder_feature_mask' : enc_features != feat_padToken,
'encoder_position_ids' : torch.arange(feat_seq_len, dtype = torch.int64).unsqueeze(0).repeat(batch_size, 1),
'decoder_input_ids' : masked_code,
'decoder_input_mask' : masked_code != padToken,
'decoder_position_ids' : torch.arange(seq_len, dtype = torch.int64).unsqueeze(0).repeat(batch_size, 1),
}
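# Worked example of the masking in StateToTokenTensor (hypothetical token ids):
#   encoded_code = [5, 6, 7, 0], mask_idx = 1, maskToken = 99
#   replace_token=True  -> masked_code = [5, 99, 7, 0]
#   replace_token=False -> [5, 6, 99, 7, 0], truncated back to seq_len -> [5, 6, 99, 7]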
class CorpusFeatureLoader(torch.utils.data.Dataset):
"""
Dataloading from language model's training corpus.
"""
def __init__(self,
config: reinforcement_learning_pb2.RLModel,
corpus: corpuses.Corpus,
feature_tokenizer: tokenizers.FeatureTokenizer
):
self.config = config
self.data = corpus.GetTrainingFeatures()
self.feature_tokenizer = feature_tokenizer
self.setup_dataset()
return
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> typing.Dict[str, torch.Tensor]:
return
def setup_dataset(self) -> typing.List[typing.Dict[str, torch.Tensor]]:
"""Process raw feature vectors to processed dataset."""
self.dataset = []
for dp in self.data:
for k, v in dp.items():
if v:
fvec = self.feature_tokenizer.TokenizeFeatureVector(v, k, self.config.agent.action_qv.feature_sequence_length)
self.dataset.append(
{
'input_features': torch.LongTensor(fvec),
'input_features_key_padding_mask': torch.LongTensor(fvec != self.feature_tokenizer.padToken),
}
)
return
class RandomFeatureLoader(torch.utils.data.Dataset):
"""
Torch-based dataloading class for target feature vectors.
"""
def __init__(self,
config : reinforcement_learning_pb2.RLModel,
feature_tokenizer : tokenizers.FeatureTokenizer,
):
self.config = config
self.feature_tokenizer = feature_tokenizer
return
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> typing.Dict[str, torch.Tensor]:
return
| 5,794 | 36.62987 | 120 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/model.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modeling for reinforcement learning program synthesis.
"""
import pathlib
import typing
import math
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.reinforcement_learning import data_generator
from deeplearning.benchpress.reinforcement_learning import config
from deeplearning.benchpress.models.torch_bert import model
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import logging as l
torch = pytorch.torch
class PredictionHeadTransform(torch.nn.Module):
def __init__(self,
config : config.QValuesConfig,
dense_size : int
):
super().__init__()
self.dense = torch.nn.Linear(dense_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = model.ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class ActionHead(torch.nn.Module):
"""Classification head for action prediction."""
def __init__(self, config, output_dim: int = None):
super().__init__()
if output_dim is None:
output_dim = len(interactions.ACTION_TYPE_SPACE) * config.max_position_embeddings
self.transform = PredictionHeadTransform(config, dense_size = config.hidden_size)
self.decoder = torch.nn.Linear(config.hidden_size * config.max_position_embeddings, output_dim, bias = False)
self.bias = torch.nn.Parameter(torch.zeros(output_dim))
self.decoder.bias = self.bias
return
def forward(self, decoder_out: torch.FloatTensor) -> torch.FloatTensor:
transformed = self.transform(decoder_out)
flat = transformed.reshape((transformed.shape[0], -1))
action_logits = self.decoder(flat)
return action_logits
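# Note on the flattened action space produced by ActionHead (matching how the
# agent decodes a sampled index): with output_dim equal to
# len(ACTION_TYPE_SPACE) * max_position_embeddings, a sampled index `a` encodes
# action type `a % len(ACTION_TYPE_SPACE)` at position `a // len(ACTION_TYPE_SPACE)`.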
class TokenHead(torch.nn.Module):
"""Classification head for token prediction."""
def __init__(self, config, output_dim: int):
super().__init__()
self.transform = PredictionHeadTransform(config, dense_size = config.hidden_size)
self.decoder = torch.nn.Linear(config.hidden_size, output_dim, bias = False)
self.bias = torch.nn.Parameter(torch.zeros(output_dim))
self.decoder.bias = self.bias
return
def forward(self, decoder_out: torch.FloatTensor) -> torch.FloatTensor:
hidden_states = self.transform(decoder_out)
token_logits = self.decoder(hidden_states)
return token_logits
class ActionQV(torch.nn.Module):
"""Deep Q-Values for Action type prediction."""
def __init__(self,
language_model : language_models.Model,
config : config.QValuesConfig,
is_critic : bool = False
):
super().__init__()
## Pre-trained Encoder LM.
self.feature_encoder = language_model.backend.GetEncoderModule(
vocab_size = config.feature_vocab_size,
hidden_size = config.hidden_size,
num_hidden_layers = config.num_hidden_layers,
num_attention_heads = config.num_attention_heads,
intermediate_size = config.intermediate_size,
hidden_act = config.hidden_act,
hidden_dropout_prob = config.hidden_dropout_prob,
attention_probs_dropout_prob = config.attention_probs_dropout_prob,
max_position_embeddings = config.feature_sequence_length,
type_vocab_size = config.type_vocab_size,
initializer_range = config.initializer_range,
layer_norm_eps = config.layer_norm_eps,
pad_token_id = config.feature_pad_idx,
with_checkpoint = False,
)
## Decoder for token prediction, given features and source code encoded memory.
self.source_decoder = language_model.backend.GetDecoderModule(
with_checkpoint = True,
without_label_head = True,
)
output_dim = None
if is_critic:
output_dim = 1
self.action_head = ActionHead(config, output_dim = output_dim)
self.softmax = torch.nn.Softmax(dim = -1)
return
def forward(self,
encoder_feature_ids : torch.LongTensor,
encoder_feature_mask : torch.LongTensor,
encoder_position_ids : torch.LongTensor,
decoder_input_ids : torch.LongTensor,
decoder_input_mask : torch.LongTensor,
decoder_position_ids : torch.LongTensor,
# actor_action_logits : torch.LongTensor = None,
) -> typing.Dict[str, torch.Tensor]:
"""Action type forward function."""
## Run BERT-Encoder in target feature vector.
encoder_out = self.feature_encoder(
input_ids = encoder_feature_ids,
input_mask = encoder_feature_mask,
position_ids = encoder_position_ids,
input_features = None,
)
encoder_memory = encoder_out['hidden_states']
## Run source code over pre-trained BERT decoder.
decoder_out = self.source_decoder(
input_ids = decoder_input_ids,
input_mask = decoder_input_mask,
position_ids = decoder_position_ids,
encoder_hidden_states = encoder_memory,
input_features = None,
)
decoded_source = decoder_out['hidden_states']
## Predict action type logits.
action_logits = self.action_head(decoded_source)
action_probs = self.softmax(action_logits)
return {
'action_logits' : action_logits,
'action_probs' : action_probs,
}
class ActionLanguageModelQV(torch.nn.Module):
"""Deep Q-Values for Token type prediction."""
def __init__(self,
language_model : language_models.Model,
config : config.QValuesConfig,
is_critic : bool = False,
):
super(ActionLanguageModelQV, self).__init__()
## Feature-Encoder.
self.encoder = language_model.backend.GetEncoderModule(
vocab_size = config.feature_vocab_size,
hidden_size = config.hidden_size,
num_hidden_layers = config.num_hidden_layers,
num_attention_heads = config.num_attention_heads,
intermediate_size = config.intermediate_size,
hidden_act = config.hidden_act,
hidden_dropout_prob = config.hidden_dropout_prob,
attention_probs_dropout_prob = config.attention_probs_dropout_prob,
max_position_embeddings = config.feature_sequence_length,
type_vocab_size = config.type_vocab_size,
initializer_range = config.initializer_range,
layer_norm_eps = config.layer_norm_eps,
pad_token_id = config.feature_pad_idx,
with_checkpoint = False,
)
## Decoder for token prediction, given features memory and source code.
if is_critic:
output_dim = 1
self.language_model = language_model.backend.GetDecoderModule(
with_checkpoint = True,
without_label_head = True,
)
self.decoder = TokenHead(config, output_dim)
else:
output_dim = config.vocab_size
self.language_model = language_model.backend.GetDecoderModule(
with_checkpoint = True,
)
self.softmax = torch.nn.Softmax(dim = -1)
self.is_critic = is_critic
return
def forward(self,
encoder_feature_ids : torch.LongTensor,
encoder_feature_mask : torch.LongTensor,
encoder_position_ids : torch.LongTensor,
decoder_input_ids : torch.LongTensor,
decoder_input_mask : torch.LongTensor,
decoder_position_ids : torch.LongTensor,
encoder_input_features = None,
):
encoder_out = self.encoder(
input_ids = encoder_feature_ids,
input_mask = encoder_feature_mask,
position_ids = encoder_position_ids,
input_features = encoder_input_features,
)
encoder_memory = encoder_out['hidden_states']
decoder_out = self.language_model(
input_ids = decoder_input_ids,
input_mask = decoder_input_mask,
position_ids = decoder_position_ids,
encoder_hidden_states = encoder_memory,
)
if self.is_critic:
decoded_source = decoder_out['hidden_states']
token_logits = self.decoder(decoded_source)
else:
token_logits = decoder_out['prediction_logits']
token_probs = self.softmax(token_logits)
return {
'token_logits' : token_logits,
'token_probs' : token_probs,
}
class QValuesModel(object):
"""
Handler of Deep-QNMs for program synthesis.
"""
@property
def action_parameters(self) -> torch.Tensor:
"""
Return all gradient parameters for model involved in action decision.
"""
if self.model:
if isinstance(self.model.action, torch.nn.DataParallel):
module = self.model.action.module
else:
module = self.model.action
return (
[x for x in module.feature_encoder.parameters()] +
[x for x in module.source_decoder.parameters()] +
[x for x in module.action_head.parameters()]
)
else:
return None
@property
def index_parameters(self) -> torch.Tensor:
"""
    Return all gradient parameters for model involved in the index decision.
"""
if self.model:
if isinstance(self.model.action, torch.nn.DataParallel):
module = self.model.action.module
else:
module = self.model.action
return (
[x for x in module.feature_encoder.parameters()] +
[x for x in module.source_decoder.parameters()] +
[x for x in module.index_head.parameters()]
)
else:
return None
@property
def token_parameters(self) -> torch.Tensor:
"""
    Return all gradient parameters for model involved in the token decision.
"""
if self.model:
if isinstance(self.model.token, torch.nn.DataParallel):
module = self.model.token.module
else:
module = self.model.token
return (
[x for x in module.encoder.parameters()] +
[x for x in module.language_model.parameters()]
)
else:
return None
| 11,311 | 38.141869 | 115 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/agent.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Agents module for reinforcement learning.
"""
from cmath import inf
from code import interact
import pathlib
import typing
import tqdm
import numpy as np
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.reinforcement_learning import model
from deeplearning.benchpress.reinforcement_learning import env
from deeplearning.benchpress.reinforcement_learning import hooks
from deeplearning.benchpress.reinforcement_learning.config import QValuesConfig
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.proto import reinforcement_learning_pb2
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
torch = pytorch.torch
class Policy(object):
"""
The policy selected over Q-Values
"""
def __init__(self, action_temp: float, token_temp: float):
self.action_temperature = action_temp
self.token_temperature = token_temp
return
def SampleActions(self,
action_logits : torch.FloatTensor,
actual_lengths : typing.Tuple[torch.LongTensor, torch.LongTensor],
) -> typing.Tuple[int, int]:
"""
Get the Q-Values for action and apply policy on it.
"""
actions = torch.zeros((action_logits.shape[0]), dtype = torch.long)
batch_idxs, seq_idxs = actual_lengths
for bidx, sidx, seq_logits in zip(batch_idxs, seq_idxs, action_logits):
try:
ct = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
temperature = self.action_temperature if self.action_temperature is not None else 1.0,
logits = seq_logits[:(sidx * len(interactions.ACTION_TYPE_SPACE))],
validate_args = False if "1.9." in torch.__version__ else None,
).sample()
action = torch.argmax(ct, dim = -1)
actions[bidx] = action
except Exception as e:
l.logger().error(seq_logits[:(sidx * len(interactions.ACTION_TYPE_SPACE))])
raise e
return actions
def SampleTokens(self, token_logits: torch.FloatTensor) -> int:
"""
Get logit predictions for token and apply policy on it.
"""
ct = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
temperature = self.token_temperature if self.token_temperature is not None else 1.0,
logits = token_logits,
validate_args = False if "1.9." in torch.__version__ else None,
).sample()
tokens = torch.argmax(ct, dim = -1)
return tokens
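# Minimal usage sketch for Policy (illustrative tensors/names only):
#   policy  = Policy(action_temp = 1.0, token_temp = 1.0)
#   actions = policy.SampleActions(action_logits, torch.where(input_ids == tokenizer.endToken))
#   tokens  = policy.SampleTokens(token_logits)
# Both methods relax the categorical distribution with a Gumbel-softmax
# (RelaxedOneHotCategorical) at the configured temperature and take the argmax.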
class Agent(object):
"""
Benchmark generation RL-Agent.
"""
def __init__(self,
config : reinforcement_learning_pb2.RLModel,
language_model : language_models.Model,
tokenizer : tokenizers.TokenizerBase,
feature_tokenizer : tokenizers.FeatureTokenizer,
cache_path : pathlib.Path
):
self.cache_path = cache_path / "agent"
self.ckpt_path = self.cache_path / "checkpoint"
self.log_path = self.cache_path / "logs"
if environment.WORLD_RANK == 0:
self.cache_path.mkdir(exist_ok = True, parents = True)
self.ckpt_path.mkdir(exist_ok = True, parents = True)
self.log_path.mkdir(exist_ok = True, parents = True)
self.config = config
self.language_model = language_model
self.tokenizer = tokenizer
self.feature_tokenizer = feature_tokenizer
self.qv_config = QValuesConfig.from_config(
self.config,
self.language_model.backend.config.architecture.max_position_embeddings,
self.tokenizer,
self.feature_tokenizer,
self.language_model,
)
self.policy = Policy(
action_temp = self.qv_config.action_temperature,
token_temp = self.qv_config.token_temperature,
)
return
def _ConfigModelParams(self, learning_rate: float) -> None:
"""
Initialize torch models and send them to device.
"""
self.action_actor = model.ActionQV(self.language_model, self.qv_config).to(pytorch.device)
self.action_critic = model.ActionQV(self.language_model, self.qv_config, is_critic = True).to(pytorch.device)
self.token_actor = model.ActionLanguageModelQV(self.language_model, self.qv_config).to(pytorch.device)
self.token_critic = model.ActionLanguageModelQV(self.language_model, self.qv_config, is_critic = True).to(pytorch.device)
if pytorch.num_nodes > 1:
self.action_actor = torch.nn.DistributedDataParallel(
self.action_actor,
device_ids = [pytorch.offset_device],
output_device = pytorch.offset_device,
find_unused_parameters = True,
)
self.action_critic = torch.nn.DistributedDataParallel(
self.action_critic,
device_ids = [pytorch.offset_device],
output_device = pytorch.offset_device,
find_unused_parameters = True,
)
self.token_actor = torch.nn.DistributedDataParallel(
self.token_actor,
device_ids = [pytorch.offset_device],
output_device = pytorch.offset_device,
find_unused_parameters = True,
)
self.token_critic = torch.nn.DistributedDataParallel(
self.token_critic,
device_ids = [pytorch.offset_device],
output_device = pytorch.offset_device,
find_unused_parameters = True,
)
elif pytorch.num_gpus > 1:
self.action_actor = torch.nn.DataParallel(self.action_actor)
self.action_critic = torch.nn.DataParallel(self.action_critic)
self.token_actor = torch.nn.DataParallel(self.token_actor)
self.token_critic = torch.nn.DataParallel(self.token_critic)
self.action_optim = torch.optim.Adam(
list(self.action_actor.parameters()) + list(self.action_critic.parameters()),
lr = learning_rate
)
self.token_optim = torch.optim.Adam(
list(self.token_actor.parameters()) + list(self.token_critic.parameters()),
lr = learning_rate
)
return
def Train(self,
env : env.Environment,
num_epochs : int,
num_episodes : int, # Equivalent to batch size
steps_per_episode : int, # Depth length of single trajectory.
num_updates : int,
gamma : float,
lr : float,
lam : float,
epsilon : float,
value_loss_coeff : float,
entropy_coeff : float,
) -> None:
"""
Run PPO over policy and train the agent.
"""
self._ConfigModelParams(learning_rate = lr)
self.ckpt_step = max(0, self.loadCheckpoint())
########### DOES LM WORK ALONE ?
code = "[START][HOLE]kernel[END]"
encoded = list(self.tokenizer.TokenizeString(code))
encoded = encoded + [self.tokenizer.padToken] * (self.language_model.backend.config.architecture.max_position_embeddings - len(encoded))
inputs = {
'input_ids' : torch.LongTensor(encoded).unsqueeze(0).to(pytorch.device),
'input_mask' : (torch.LongTensor(encoded) != self.tokenizer.padToken).unsqueeze(0).to(pytorch.device),
'position_ids' : torch.arange(self.language_model.backend.config.architecture.max_position_embeddings).unsqueeze(0).to(pytorch.device),
'mask_labels' : None,
'input_features': None,
}
out = self.language_model.backend.model_step(
self.language_model.backend.GetEncoderModule(with_checkpoint = True, without_label_head = False).to(pytorch.device),
inputs,
)
preds = torch.argmax(out['prediction_logits'], dim = -1)
l.logger().info(self.tokenizer.tokensToString([int(x) for x in preds.squeeze(0)[:10].cpu()]))
########### DOES LM WORK ALONE ?
if self.is_world_process_zero():
rollout_hook = hooks.tensorMonitorHook(
self.log_path,
self.ckpt_step,
1, 1,
average = False,
)
# train_hook = hooks.tensorMonitorHook(
# self.logfile_path,
# self.current_step,
# min(self.steps_per_epoch, FLAGS.monitor_frequency)
# )
action_type_distrib = {
k: ([], []) for k in interactions.ACTION_TYPE_SPACE.keys()
}
index_type_distrib = {
k: ([], []) for k in range(self.qv_config.max_position_embeddings)
}
for ep in range(num_epochs):
# Run a batch of episodes.
input_ids, final_state, masked_input_ids, feature_ids,\
action_values, action_predictions, action_policy_probs,\
token_values, token_predictions, token_policy_probs,\
use_lm, rewards, discounted_rewards, done = self.rollout(
env, num_episodes, steps_per_episode, gamma,
)
action_advantages, token_advantages = self.gae(
rewards,
action_values,
token_values,
use_lm,
done,
gamma,
lam
)
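      # Hedged sketch of the standard GAE recursion that self.gae is expected
      # to compute (the implementation itself is not shown here):
      #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
      #   A_t     = delta_t + gamma * lam * (1 - done_t) * A_{t+1}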
      # Compute rewards-to-go.
action_reward_to_go = action_advantages + action_values.squeeze(-1)
token_reward_to_go = token_advantages + token_values.squeeze(-1)
      # Normalize advantages.
action_advantages = (action_advantages - action_advantages.mean()) / (action_advantages.std() + 1e-5)
token_advantages = (token_advantages - token_advantages.mean()) / (token_advantages.std() + 1e-5)
# Set the batch size.
batch_size = int(input_ids.shape[0])
num_batches = int(input_ids.shape[1])
# Reshape to 2 dimensions.
action_advantages = torch.reshape(action_advantages, (-1, ) + action_advantages.shape[2:])
token_advantages = torch.reshape(token_advantages, (-1, ) + token_advantages.shape[2:])
action_reward_to_go = torch.reshape(action_reward_to_go, (-1, ) + action_reward_to_go.shape[2:])
token_reward_to_go = torch.reshape(token_reward_to_go, (-1, ) + token_reward_to_go.shape[2:])
action_values = torch.reshape(action_values, (-1, ) + action_values.shape[2:])
token_values = torch.reshape(token_values, (-1, ) + token_values.shape[2:])
action_predictions = torch.reshape(action_predictions, (-1, ) + action_predictions.shape[2:])
token_predictions = torch.reshape(token_predictions, (-1, ) + token_predictions.shape[2:])
use_lm = torch.reshape(use_lm, (-1, ) + use_lm.shape[2:])
input_ids = torch.reshape(input_ids, (-1, ) + input_ids.shape[2:])
masked_input_ids = torch.reshape(masked_input_ids, (-1, ) + masked_input_ids.shape[2:])
feature_ids = torch.reshape(feature_ids, (-1, ) + feature_ids.shape[2:])
action_policy_probs = torch.reshape(action_policy_probs, (-1, ) + action_policy_probs.shape[2:])
token_policy_probs = torch.reshape(token_policy_probs, (-1, ) + token_policy_probs.shape[2:])
if environment.WORLD_SIZE > 1:
raise NotImplementedError("Gather all the tensors here ?")
for k in action_type_distrib.keys():
action_type_distrib[k][0].append(ep)
action_type_distrib[k][1].append(0)
for k in index_type_distrib.keys():
index_type_distrib[k][0].append(ep)
index_type_distrib[k][1].append(0)
for act in action_predictions:
act_type = int(act) % len(interactions.ACTION_TYPE_SPACE)
act_index = int(act) // len(interactions.ACTION_TYPE_SPACE)
try:
action_type_distrib[interactions.ACTION_TYPE_MAP[act_type]][1][ep] += 1
index_type_distrib[act_index][1][ep] += 1
except IndexError as e:
l.logger().error(act_type)
l.logger().error(act_index)
l.logger().info(act)
l.logger().warn(action_type_distrib)
l.logger().info(index_type_distrib)
raise e
from deeplearning.benchpress.util import plotter as plt
plt.GrouppedBars(
groups = action_type_distrib,
plot_name = "Acts_per_rollout_step",
path = self.log_path,
)
plt.GrouppedBars(
groups = index_type_distrib,
plot_name = "pos_index_per_rollout_step",
path = self.log_path,
)
## Print the full trajectory with the best reward.
best_full_traj = torch.argmax(discounted_rewards[:,-1], dim = -1)
l.logger().info("Best full-trajectory sample:")
print(self.tokenizer.tokensToString([int(x) for x in final_state[int(best_full_traj)]], ignore_token=self.tokenizer.padToken))
# Split the data into batches in the num_workers dimension
for epoch in tqdm.tqdm(range(num_updates), total = num_updates, desc = "Epoch"):
for batch in tqdm.tqdm(range(num_batches), total = num_batches, desc = "Batch", leave = False):
start = batch * batch_size
end = (batch + 1) * batch_size
# Step batch
mean_action_loss, mean_token_loss = self.ppo_train_step(
epsilon,
value_loss_coeff,
entropy_coeff,
action_advantages [start:end].to(pytorch.device),
token_advantages [start:end].to(pytorch.device),
action_reward_to_go [start:end].to(pytorch.device),
token_reward_to_go [start:end].to(pytorch.device),
action_values [start:end].to(pytorch.device),
token_values [start:end].to(pytorch.device),
action_predictions [start:end],
token_predictions [start:end],
use_lm [start:end],
input_ids [start:end],
masked_input_ids [start:end],
feature_ids [start:end],
action_policy_probs [start:end].to(pytorch.device),
token_policy_probs [start:end].to(pytorch.device),
)
# Probably here save the necessary checkpoints.
# Also log the following stuff:
# Rewards, advantages (?), size of code ?, rtg ? Distribution of actions selected ?
# self.saveCheckpoint()
if self.is_world_process_zero():
rollout_hook.step(
mean_action_loss = float(mean_action_loss),
mean_token_loss = float(mean_token_loss),
mean_final_reward = float(torch.mean(discounted_rewards[:,-1])),
)
self.ckpt_step += 1
    ## Distributions of selected actions and positions per rollout step are plotted above.
return
def ppo_train_step(self,
epsilon : float,
value_loss_coeff : float,
entropy_coeff : float,
action_advantages : torch.FloatTensor,
token_advantages : torch.FloatTensor,
action_reward_to_go : torch.FloatTensor,
token_reward_to_go : torch.FloatTensor,
action_values : torch.FloatTensor,
token_values : torch.FloatTensor,
action_predictions : torch.LongTensor,
token_predictions : torch.LongTensor,
use_lm : torch.BoolTensor,
input_ids : torch.LongTensor,
masked_input_ids : torch.LongTensor,
feature_ids : torch.LongTensor,
action_policy_probs : torch.FloatTensor,
token_policy_probs : torch.FloatTensor,
) -> typing.Tuple[float, float]:
"""
Run a batch through PPO training.
Inputs:
action_optim:
Adam optimizer that handles action actor and critic.
token_optim:
Adam optimizer that handles token actor and critic.
action_advantages:
Calculated advantages for action model.
token_advantages:
Calculated advantages for token model.
action_reward_to_go:
Aggregated rewards for actions trajectory.
token_reward_to_go:
Aggregated rewards for tokens trajectory.
action_values:
Predicted values by action critic.
token_values:
Predicted values by token critic.
action_predictions:
Predicted action labels by action actor.
token_predictions:
Predicted token labels by token actor.
use_lm:
Indices of states that used the language model.
input_ids:
Input code for the action model.
masked_input_ids:
Masked input code for the token model. Contains masked code where use_lm==True, zeros otherwise.
feature_ids:
Tokenized vector of target state features.
action_policy_probs:
Predicted action label's probability.
token_policy_probs:
Predicted token label's probability.
"""
    # Enable training mode for the actor and critic models.
self.action_actor.train()
self.action_critic.train()
self.token_actor.train()
self.token_critic.train()
self.action_optim.zero_grad()
self.token_optim.zero_grad()
seq_len, feat_seq_len, batch_size = input_ids.shape[-1], feature_ids.shape[-1], input_ids.shape[0]
mean_action_loss, action_backwards = 0.0, 0
mean_token_loss, token_backwards = 0.0, 0
# Prepare model inputs.
feature_mask = feature_ids != self.feature_tokenizer.padToken
feature_pos = torch.arange(feat_seq_len, dtype = torch.long).repeat(batch_size, 1)
    input_mask   = input_ids != self.tokenizer.padToken
input_pos = torch.arange(seq_len, dtype = torch.long).repeat(batch_size, 1)
# Run the batch again in actor/critic.
# Actor model returns logits of action.
action_actor_out = self.action_actor(
encoder_feature_ids = feature_ids.to(pytorch.device),
encoder_feature_mask = feature_mask.to(pytorch.device),
encoder_position_ids = feature_pos.to(pytorch.device),
decoder_input_ids = input_ids.to(pytorch.device),
decoder_input_mask = input_mask.to(pytorch.device),
decoder_position_ids = input_pos.to(pytorch.device),
)
new_action_logits, new_action_probs = action_actor_out['action_logits'], action_actor_out['action_probs']
# Critic model returns value logit.
action_critic_out = self.action_critic(
encoder_feature_ids = feature_ids.to(pytorch.device),
encoder_feature_mask = feature_mask.to(pytorch.device),
encoder_position_ids = feature_pos.to(pytorch.device),
decoder_input_ids = input_ids.to(pytorch.device),
decoder_input_mask = input_mask.to(pytorch.device),
decoder_position_ids = input_pos.to(pytorch.device),
)
new_action_values, new_action_values_probs = action_critic_out['action_logits'], action_critic_out['action_probs']
# Sample the most likely action.
actual_lengths = torch.where(input_ids == self.tokenizer.endToken)
step_actions = self.policy.SampleActions(new_action_logits, actual_lengths)
# Collect the probability of said selected action, per episode.
new_action_probs = new_action_probs[(torch.arange(new_action_probs.shape[0]), step_actions)]
# Compute entropy of actions
new_action_entropy = torch.distributions.categorical.Categorical(logits = new_action_logits).entropy()
# Flatten the critic values.
new_action_values = new_action_values.flatten()
# Compute the PPO loss
action_prob_ratio = torch.exp(new_action_probs) / torch.exp(action_policy_probs)
a = action_prob_ratio * action_advantages
b = torch.clamp(action_prob_ratio, 1 - epsilon, 1 + epsilon) * action_advantages
action_ppo_loss = -1 * torch.mean(torch.min(a, b))
# Compute the value function loss
# Clipped loss - same idea as PPO loss, don't allow value to move too
# far from where it was previously
value_pred_clipped = action_values + (new_action_values - action_values).clamp(-epsilon, epsilon)
value_losses = (new_action_values - action_reward_to_go) ** 2
value_losses_clipped = (value_pred_clipped - action_reward_to_go) ** 2
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped)
action_value_loss = value_loss.mean()
action_entropy_loss = torch.mean(new_action_entropy)
# Compute the final loss and backward.
action_loss = action_ppo_loss + value_loss_coeff * action_value_loss - entropy_coeff * action_entropy_loss
action_loss.backward()
mean_action_loss += action_loss.item()
action_backwards += 1
torch.nn.utils.clip_grad_norm_(self.action_actor.parameters(), .5)
torch.nn.utils.clip_grad_norm_(self.action_critic.parameters(), .5)
self.action_optim.step()
if torch.any(use_lm):
# Get the indices where use_lm is True.
lm_indices = torch.where(use_lm == True)[0]
# Prepare token model inputs.
lm_feature_ids = torch.index_select(feature_ids, 0, lm_indices)
lm_feature_mask = lm_feature_ids != self.feature_tokenizer.padToken
lm_feat_pos_id = torch.arange(feat_seq_len, dtype = torch.long).repeat(lm_feature_ids.shape[0], 1)
lm_input_ids = torch.index_select(masked_input_ids, 0, lm_indices)
lm_input_mask = lm_input_ids != self.tokenizer.padToken
lm_pos_id = torch.arange(seq_len, dtype = torch.long).repeat(lm_input_ids.shape[0], 1)
# Keep track of where [HOLE] reside.
ep_idx, seq_idx = torch.where(lm_input_ids == self.tokenizer.holeToken)
# Run the batch in actor/critic.
# The input indices are based on those the rollout action actor decided to use the LM.
# We directly use masked_input_ids for this reason.
# Actor model returns logits of the token predictions.
token_actor_out = self.token_actor(
encoder_feature_ids = lm_feature_ids.to(pytorch.device),
encoder_feature_mask = lm_feature_mask.to(pytorch.device),
encoder_position_ids = lm_feat_pos_id.to(pytorch.device),
decoder_input_ids = lm_input_ids.to(pytorch.device),
decoder_input_mask = lm_input_mask.to(pytorch.device),
decoder_position_ids = lm_pos_id.to(pytorch.device),
)
t, new_token_probs = token_actor_out['token_logits'], token_actor_out['token_probs']
# Collect the logits but only for the hole indices.
new_token_logits = t[(ep_idx, seq_idx)]
new_token_probs = new_token_probs[(ep_idx, seq_idx)]
# Critic model returns value logit.
token_critic_out = self.token_critic(
encoder_feature_ids = lm_feature_ids.to(pytorch.device),
encoder_feature_mask = lm_feature_mask.to(pytorch.device),
encoder_position_ids = lm_feat_pos_id.to(pytorch.device),
decoder_input_ids = lm_input_ids.to(pytorch.device),
decoder_input_mask = lm_input_mask.to(pytorch.device),
decoder_position_ids = lm_pos_id.to(pytorch.device),
)
new_token_values, new_token_values_probs = token_critic_out['token_logits'], token_critic_out['token_probs']
# Collect the critic's value for this hole index.
new_token_values = new_token_values[(ep_idx, seq_idx)]
new_token_values_probs = new_token_values_probs[(ep_idx, seq_idx)]
# According to policy, select the best token.
new_tokens = self.policy.SampleTokens(new_token_logits)
# Get probability of said token, per sequence.
new_token_probs = new_token_probs[(torch.arange(new_token_probs.shape[0]), new_tokens)]
# Calculate the entropy of new token logits.
new_token_entropy = torch.distributions.categorical.Categorical(logits = new_token_logits).entropy()
# Flatten critic values.
new_token_values = new_token_values.flatten()
# Keep only the advantages and policy probs for the indices where the LM was used.
lm_indices = lm_indices.to(pytorch.device)
token_advantages = torch.index_select(token_advantages, 0, lm_indices)
token_reward_to_go = torch.index_select(token_reward_to_go, 0, lm_indices)
token_policy_probs = torch.index_select(token_policy_probs, 0, lm_indices)
token_values = torch.index_select(token_values, 0, lm_indices)
# Compute the PPO loss
token_prob_ratio = torch.exp(new_token_probs) / torch.exp(token_policy_probs.to(pytorch.device))
a = token_prob_ratio * token_advantages.to(pytorch.device)
b = torch.clamp(token_prob_ratio, 1 - epsilon, 1 + epsilon) * token_advantages
token_ppo_loss = -1 * torch.mean(torch.min(a, b))
# Compute the value function loss
# Clipped loss - same idea as PPO loss, don't allow value to move too
# far from where it was previously
value_pred_clipped = token_values + (new_token_values - token_values).clamp(-epsilon, epsilon)
value_losses = (new_token_values - token_reward_to_go) ** 2
value_losses_clipped = (value_pred_clipped - token_reward_to_go) ** 2
value_loss = 0.5 * torch.max(value_losses, value_losses_clipped)
token_value_loss = value_loss.mean()
token_entropy_loss = torch.mean(new_token_entropy)
# Compute the final loss and backward.
token_loss = token_ppo_loss + value_loss_coeff * token_value_loss - entropy_coeff * token_entropy_loss
token_loss.backward()
mean_token_loss += token_loss.item()
token_backwards += 1
torch.nn.utils.clip_grad_norm_(self.token_actor.parameters(), .5)
torch.nn.utils.clip_grad_norm_(self.token_critic.parameters(), .5)
self.token_optim.step()
try:
mean_action_loss = mean_action_loss / action_backwards
except ZeroDivisionError:
mean_action_loss = 0.0
try:
mean_token_loss = mean_token_loss / token_backwards
except ZeroDivisionError:
mean_token_loss = 0.0
return mean_action_loss, mean_token_loss
def rollout(self,
env : env.Environment,
num_episodes : int,
steps_per_episode : int,
gamma : float,
) -> typing.Tuple[torch.Tensor]:
"""
1. Initialize all tensors [(num_episodes x batch_size?) x steps_per_episode x state_tensor_size]
2. for step in steps_per_episode:
a) slice state tensor
b) slice action tensor
c) Pass through model
d) env.step and assign new state to state tensor.
e) Compute rewards and rtgs.
"""
## Reset the environment.
state = env.reset()
self.action_actor.eval()
self.action_critic.eval()
self.token_actor.eval()
self.token_critic.eval()
seq_len, feat_seq_len = len(state.encoded_code), len(state.encoded_features)
## Create state and action tensors.
# State workload inputs.
batch_feature_ids = torch.LongTensor(state.encoded_features).unsqueeze(0).unsqueeze(0).repeat(num_episodes, steps_per_episode, 1) # Input features for workload
batch_input_ids = torch.zeros((num_episodes, steps_per_episode, seq_len), dtype = torch.long) # Input code for workload
batch_input_ids[:, 0] = torch.LongTensor(state.encoded_code) # Initialization of empty code for all episode's starting point of trajectory.
batch_masked_input_ids = torch.zeros((num_episodes, steps_per_episode, seq_len), dtype = torch.long) # Initialization of masked input ids tensor for token model.
final_state = torch.zeros((num_episodes, seq_len), dtype = torch.long) # The final state of all trajectories.
# Action, token predictions and probs, critic values.
action_predictions = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.long) # All action predictions per episode, per state.
action_policy_probs = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.float32) # Probs of all actions predicted.
action_values = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.float32) # Values from critic for actions.
token_predictions = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.long) # All token predictions per episode, per state.
token_policy_probs = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.float32) # Probs of all tokens predicted.
token_values = torch.zeros((num_episodes, steps_per_episode, 1), dtype = torch.float32) # Values from critic for tokens.
use_lm = torch.zeros((num_episodes, steps_per_episode), dtype = torch.bool) # Indices where LM was indeed used (action was 'add' or 'replace')
## Reward placeholders.
rewards = torch.zeros((num_episodes, steps_per_episode), dtype = torch.float32) # Rewards per episode, per action.
discounted_rewards = torch.zeros((num_episodes, steps_per_episode), dtype = torch.float32) # The aggregated-discounted rewards as the trajectories proceed.
traj_disc_rewards = torch.zeros((num_episodes), dtype = torch.float32) # The latest aggregated discounted reward computed.
feature_dists = torch.full((num_episodes,), -1, dtype = torch.float32) # A tensor with the last updated euclidean distance from feature target.
done = torch.zeros((num_episodes, steps_per_episode), dtype = torch.bool) # Done boolean tensor.
## Run execution loop.
for step in tqdm.tqdm(range(steps_per_episode), total = steps_per_episode, desc = "Rollout {} episodes".format(num_episodes)):
## This loop unfolds all batch_size trajectories.
# Input tensors
feature_ids = batch_feature_ids[:, step]
feature_mask = feature_ids != self.feature_tokenizer.padToken
feature_pos = torch.arange(feat_seq_len, dtype = torch.long).repeat(feature_ids.shape[0], 1)
input_ids = batch_input_ids[:, step]
input_mask = input_ids != self.tokenizer.padToken
input_pos = torch.arange(seq_len, dtype = torch.long).repeat(input_ids.shape[0], 1)
# Actor model returns logits of action.
step_action_actor_out = self.action_actor(
encoder_feature_ids = feature_ids.to(pytorch.device),
encoder_feature_mask = feature_mask.to(pytorch.device),
encoder_position_ids = feature_pos.to(pytorch.device),
decoder_input_ids = input_ids.to(pytorch.device),
decoder_input_mask = input_mask.to(pytorch.device),
decoder_position_ids = input_pos.to(pytorch.device),
)
step_action_logits, step_action_probs = step_action_actor_out['action_logits'], step_action_actor_out['action_probs']
# Critic model returns value logit.
step_action_critic_out = self.action_critic(
encoder_feature_ids = feature_ids.to(pytorch.device),
encoder_feature_mask = feature_mask.to(pytorch.device),
encoder_position_ids = feature_pos.to(pytorch.device),
decoder_input_ids = input_ids.to(pytorch.device),
decoder_input_mask = input_mask.to(pytorch.device),
decoder_position_ids = input_pos.to(pytorch.device),
)
step_action_values, step_action_values_probs = step_action_critic_out['action_logits'], step_action_critic_out['action_probs']
# Sample the most likely action.
actual_lengths = torch.where(input_ids == self.tokenizer.endToken)
step_actions = self.policy.SampleActions(step_action_logits, actual_lengths)
# Collect the probability of said selected action, per episode.
step_action_probs = step_action_probs[(torch.arange(step_action_probs.shape[0]), step_actions)]
# Declare here the augmented token vectors.
augmented_step_token_values = torch.zeros((num_episodes, 1), dtype = torch.float32)
augmented_step_tokens = torch.zeros((num_episodes, 1), dtype = torch.long)
augmented_step_token_probs = torch.zeros((num_episodes, 1), dtype = torch.float32)
## Find which sequences need to sample a token.
step_use_lm, masked_input_ids = env.intermediate_step(input_ids, step_actions)
if torch.any(step_use_lm):
## If the language model needs to be invoked ('add' or 'replace')
## Fix the necessary batch of elements here.
# Indices of starting tensors that need the LM.
lm_indices = torch.where(step_use_lm == True)[0]
# Input tensors.
lm_feature_ids = torch.index_select(feature_ids, 0, lm_indices)
lm_feature_mask = lm_feature_ids != self.feature_tokenizer.padToken
lm_feat_pos_ids = torch.arange(feat_seq_len, dtype = torch.long).repeat(lm_feature_ids.shape[0], 1)
lm_input_ids = torch.index_select(masked_input_ids, 0, lm_indices)
lm_input_mask = lm_input_ids != self.tokenizer.padToken
lm_input_pos_ids = torch.arange(seq_len, dtype = torch.long).repeat(lm_input_ids.shape[0], 1)
# Keep the hole indices to dereference the prediction logits.
ep_idx, seq_idx = torch.where(lm_input_ids == self.tokenizer.holeToken)
# Run the token actor, get token logits.
step_token_actor_out = self.token_actor(
encoder_feature_ids = lm_feature_ids.to(pytorch.device),
encoder_feature_mask = lm_feature_mask.to(pytorch.device),
encoder_position_ids = lm_feat_pos_ids.to(pytorch.device),
decoder_input_ids = lm_input_ids.to(pytorch.device),
decoder_input_mask = lm_input_mask.to(pytorch.device),
decoder_position_ids = lm_input_pos_ids.to(pytorch.device),
)
step_token_logits, step_token_probs = step_token_actor_out['token_logits'], step_token_actor_out['token_probs']
# Keep the prediction scores only for the masked token.
step_token_logits = step_token_logits[(ep_idx, seq_idx)]
step_token_probs = step_token_probs[(ep_idx, seq_idx)]
# Collect value logit from critic.
step_token_critic_out = self.token_critic(
encoder_feature_ids = lm_feature_ids.to(pytorch.device),
encoder_feature_mask = lm_feature_mask.to(pytorch.device),
encoder_position_ids = lm_feat_pos_ids.to(pytorch.device),
decoder_input_ids = lm_input_ids.to(pytorch.device),
decoder_input_mask = lm_input_mask.to(pytorch.device),
decoder_position_ids = lm_input_pos_ids.to(pytorch.device),
)
step_token_values, step_token_values_probs = step_token_critic_out['token_logits'], step_token_critic_out['token_probs']
# Get the critic's value only for masked index.
step_token_values = step_token_values[(ep_idx, seq_idx)]
step_token_values_probs = step_token_values_probs[(ep_idx, seq_idx)]
# According to policy, select the best token.
step_tokens = self.policy.SampleTokens(step_token_logits)
        # NOTE: debug-only inspection of the LM inputs and sampled tokens; left
        # disabled because the unconditional input() call blocks the rollout loop.
        # for inp in lm_input_ids:
        #   l.logger().info(self.tokenizer.tokensToString([int(x) for x in inp], ignore_token = self.tokenizer.padToken))
        # for preds in step_tokens:
        #   l.logger().info(self.tokenizer.tokensToString([int(preds)], ignore_token = self.tokenizer.padToken))
        # input()
# Get probability of said token, per episode.
step_token_probs = step_token_probs[(torch.arange(step_token_probs.shape[0]), step_tokens)]
# First extend to original dimensions.
# Store the modified - with token LM - codes to the original tensors.
for nidx, lm_idx in zip(range(step_tokens.shape[0]), lm_indices):
augmented_step_token_values[lm_idx] = step_token_values[nidx]
augmented_step_tokens[lm_idx] = step_tokens[nidx]
augmented_step_token_probs[lm_idx] = step_token_probs[nidx]
# Here is the appropriate storing back.
batch_masked_input_ids[:, step] = masked_input_ids
token_values [:, step] = augmented_step_token_values.detach().cpu()
token_predictions [:, step] = augmented_step_tokens.detach().cpu()
token_policy_probs [:, step] = augmented_step_token_probs.detach().cpu()
## Step environment and compute rewards.
input_ids, reward, discounted_reward, d, step_use_lm = env.new_step(
input_ids,
step_actions,
augmented_step_tokens,
traj_disc_rewards,
feature_dists,
step_use_lm,
gamma
)
## Save data to rollout buffers.
if step < steps_per_episode - 1:
batch_input_ids [:, step+1] = input_ids
else:
final_state = input_ids
action_values [:, step] = step_action_values.detach().cpu()
action_predictions [:, step] = step_actions.unsqueeze(0).reshape((-1, 1)).detach().cpu()
action_policy_probs[:, step] = step_action_probs.unsqueeze(0).reshape((-1, 1)).detach().cpu()
use_lm [:, step] = step_use_lm
rewards [:, step] = reward
traj_disc_rewards = discounted_reward
discounted_rewards [:, step] = traj_disc_rewards
done [:, step] = d
return (
batch_input_ids, # source code states.
final_state, # The state of the trajectory after the last applied action.
batch_masked_input_ids, # Masked source code for the language model.
batch_feature_ids, # Target feature vector state.
action_values, # Critic action logits.
action_predictions, # Actor sampled label actions.
action_policy_probs, # Actor probabilities of sampled actions.
token_values, # Critic token values.
token_predictions, # Actor sampled label tokens.
token_policy_probs, # Actor probabilities of sampled tokens.
use_lm, # Indices of actions that required language model.
rewards, # Rewards of each step.
discounted_rewards, # Discounted rewards of each step.
done, # Whether this step concludes the episode.
)
def gae(self, rewards, action_values, token_values, use_lm, episode_ends, gamma, lam):
"""
Compute generalized advantage estimate.
rewards: a list of rewards at each step.
values: the value estimate of the state at each step.
episode_ends: an array of the same shape as rewards, with a 1 if the
episode ended at that step and a 0 otherwise.
gamma: the discount factor.
lam: the GAE lambda parameter.
"""
# Invert episode_ends to have 0 if the episode ended and 1 otherwise
episode_ends = (episode_ends * -1) + 1
action_values = action_values.squeeze(-1)
token_values = token_values.squeeze(-1)
N = rewards.shape[0]
T = rewards.shape[1]
action_gae_step = torch.zeros((N, ))
token_gae_step = torch.zeros((N, ))
action_advantages = torch.zeros((N, T))
token_advantages = torch.zeros((N, T))
for t in reversed(range(T - 1)):
# First compute delta, which is the one-step TD error
action_delta = rewards[:, t] + gamma * action_values[:, t + 1] * episode_ends[:, t] - action_values[:, t]
token_delta = rewards[:, t] + gamma * token_values[:, t + 1] * episode_ends[:, t] - token_values[:, t]
# Then compute the current step's GAE by discounting the previous step
# of GAE, resetting it to zero if the episode ended, and adding this
# step's delta
# And store it
action_gae_step = action_delta + gamma * lam * episode_ends[:, t] * action_gae_step
token_gae_step = token_delta + gamma * lam * episode_ends[:, t] * token_gae_step
      action_advantages[:, t] = action_gae_step
      token_advantages[:, t]  = token_gae_step
return action_advantages, token_advantages
def saveCheckpoint(self) -> None:
"""
Save agent state.
"""
if self.is_world_process_zero():
ckpt_comp = lambda prefix, x: self.ckpt_path / "{}{}_model-{}.pt".format(prefix, x, self.ckpt_step)
if pytorch.torch_tpu_available:
if pytorch.torch_xla_model.rendezvous("saving_checkpoint"):
pytorch.torch_xla_model.save(self.action_actor, ckpt_comp("actor", "action"))
pytorch.torch_xla_model.save(self.action_critic, ckpt_comp("critic", "action"))
          # Also persist the token-level models, mirroring the non-TPU branch below.
          pytorch.torch_xla_model.save(self.token_actor, ckpt_comp("actor", "token"))
          pytorch.torch_xla_model.save(self.token_critic, ckpt_comp("critic", "token"))
          pytorch.torch_xla_model.save(self.action_optim, ckpt_comp("action", "optimizer"))
          pytorch.torch_xla_model.save(self.token_optim, ckpt_comp("token", "optimizer"))
          pytorch.torch_xla_model.rendezvous("saving_optimizer_states")
else:
if isinstance(self.action_actor, torch.nn.DataParallel):
torch.save(self.action_actor.module.state_dict(), ckpt_comp("actor", "action"))
else:
          torch.save(self.action_actor.state_dict(), ckpt_comp("actor", "action"))
if isinstance(self.action_critic, torch.nn.DataParallel):
torch.save(self.action_critic.module.state_dict(), ckpt_comp("critic", "action"))
else:
torch.save(self.action_critic.state_dict(), ckpt_comp("critic", "action"))
if isinstance(self.token_actor, torch.nn.DataParallel):
torch.save(self.token_actor.module.state_dict(), ckpt_comp("actor", "token"))
else:
          torch.save(self.token_actor.state_dict(), ckpt_comp("actor", "token"))
if isinstance(self.token_critic, torch.nn.DataParallel):
torch.save(self.token_critic.module.state_dict(), ckpt_comp("critic", "token"))
else:
torch.save(self.token_critic.state_dict(), ckpt_comp("critic", "token"))
torch.save(self.action_optim.state_dict(), ckpt_comp("action", "optimizer"))
torch.save(self.token_optim.state_dict(), ckpt_comp("token", "optimizer"))
with open(self.ckpt_path / "checkpoint.meta", 'a') as mf:
mf.write("train_step: {}\n".format(self.ckpt_step))
self.ckpt_step += 1
distrib.barrier()
return
def loadCheckpoint(self) -> None:
"""
Load agent state.
"""
if not (self.ckpt_path / "checkpoint.meta").exists():
return -1
    with open(self.ckpt_path / "checkpoint.meta", 'r') as mf:
get_step = lambda x: int(x.replace("\n", "").replace("train_step: ", ""))
lines = mf.readlines()
entries = set({get_step(x) for x in lines})
ckpt_step = max(entries)
ckpt_comp = lambda prefix, x: self.ckpt_path / "{}{}_model-{}.pt".format(prefix, x, ckpt_step)
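    # Checkpoints may or may not carry torch.nn.DataParallel's "module." key prefix,
    # depending on how they were saved. Each load below first tries the state dict
    # as-is and, on a RuntimeError, toggles the prefix on every key before retrying.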
if isinstance(self.action_actor, torch.nn.DataParallel):
try:
self.action_actor.module.load_state_dict(torch.load(ckpt_comp("actor", "action")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("actor", "action")).items():
if k[:7] == "module.":
name = k[7:]
else:
name = "module." + k
          new_state_dict[name] = v
self.action_actor.module.load_state_dict(new_state_dict)
else:
try:
        self.action_actor.load_state_dict(torch.load(ckpt_comp("actor", "action")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in torch.load(ckpt_comp("actor", "action")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
self.action_actor.load_state_dict(new_state_dict)
if isinstance(self.action_critic, torch.nn.DataParallel):
try:
        self.action_critic.module.load_state_dict(torch.load(ckpt_comp("critic", "action")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
        for k, v in torch.load(ckpt_comp("critic", "action")).items():
if k[:7] == "module.":
name = k[7:]
else:
name = "module." + k
          new_state_dict[name] = v
self.action_critic.module.load_state_dict(new_state_dict)
else:
try:
        self.action_critic.load_state_dict(torch.load(ckpt_comp("critic", "action")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
        for k, v in torch.load(ckpt_comp("critic", "action")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
self.action_critic.load_state_dict(new_state_dict)
if isinstance(self.token_actor, torch.nn.DataParallel):
try:
        self.token_actor.module.load_state_dict(torch.load(ckpt_comp("actor", "token")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
        for k, v in torch.load(ckpt_comp("actor", "token")).items():
if k[:7] == "module.":
name = k[7:]
else:
name = "module." + k
          new_state_dict[name] = v
self.token_actor.module.load_state_dict(new_state_dict)
else:
try:
        self.token_actor.load_state_dict(torch.load(ckpt_comp("actor", "token")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
        for k, v in torch.load(ckpt_comp("actor", "token")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
self.token_actor.load_state_dict(new_state_dict)
if isinstance(self.token_critic, torch.nn.DataParallel):
try:
        self.token_critic.module.load_state_dict(torch.load(ckpt_comp("critic", "token")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
        for k, v in torch.load(ckpt_comp("critic", "token")).items():
if k[:7] == "module.":
name = k[7:]
else:
name = "module." + k
          new_state_dict[name] = v
self.token_critic.module.load_state_dict(new_state_dict)
else:
try:
        self.token_critic.load_state_dict(torch.load(ckpt_comp("critic", "token")))
except RuntimeError:
from collections import OrderedDict
new_state_dict = OrderedDict()
        for k, v in torch.load(ckpt_comp("critic", "token")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
self.token_critic.load_state_dict(new_state_dict)
if self.action_optim is not None and self.token_optim is not None and ckpt_step > 0:
self.action_optim.load_state_dict(
torch.load(ckpt_comp("action", "optimizer"), map_location = pytorch.device)
)
self.token_optim.load_state_dict(
torch.load(ckpt_comp("token", "optimizer"), map_location = pytorch.device)
)
self.action_actor.eval()
self.action_critic.eval()
self.token_actor.eval()
self.token_critic.eval()
return ckpt_step
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on
several machines, this is only going to be :obj:`True` for one process).
"""
if pytorch.torch_tpu_available:
return pytorch.torch_xla_model.is_master_ordinal(local=False)
elif pytorch.num_nodes > 1:
return torch.distributed.get_rank() == 0
else:
return True
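
# --- Illustrative sketch (editor's addition, not part of the original agent) ----
# A minimal, standalone demonstration of the clipped PPO objective used in
# update_policy above, on dummy tensors. It assumes only the module-level `torch`
# alias already used throughout this file; all values are made up for illustration.
def _ppo_clip_loss_example(epsilon: float = 0.2) -> float:
  """Clipped surrogate loss for a toy batch of three sampled actions."""
  old_log_probs = torch.log(torch.FloatTensor([0.25, 0.50, 0.10]))
  new_log_probs = torch.log(torch.FloatTensor([0.30, 0.45, 0.20]))
  advantages    = torch.FloatTensor([1.0, -0.5, 2.0])
  # Probability ratio between the updated and the rollout-time policy.
  ratio     = torch.exp(new_log_probs - old_log_probs)
  unclipped = ratio * advantages
  clipped   = torch.clamp(ratio, 1.0 - epsilon, 1.0 + epsilon) * advantages
  # Pessimistic (min) surrogate, negated to obtain a loss to minimize.
  return float(-torch.mean(torch.min(unclipped, clipped)))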
| 48,376 | 46.945491 | 184 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/reinforcement_learning/env.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RL Environment for the task of targeted benchmark generation.
"""
# import gym
import typing
import pathlib
import pickle
import numpy as np
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import feature_sampler
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.reinforcement_learning import interactions
from deeplearning.benchpress.reinforcement_learning import memory
from deeplearning.benchpress.reinforcement_learning import data_generator
from deeplearning.benchpress.proto import reinforcement_learning_pb2
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import logging as l
torch = pytorch.torch
from absl import flags
class Environment(object):
"""
Environment representation for RL Agents.
"""
metadata = {
'render_modes' : ['human'],
'render_fps' : 4,
}
@property
def init_code_state(self) -> np.array:
return np.array(
[self.tokenizer.startToken, self.tokenizer.endToken]
+ ([self.tokenizer.padToken] * (self.max_position_embeddings - 2))
)
def __init__(self,
config : reinforcement_learning_pb2.RLModel,
max_position_embeddings : int,
corpus : corpuses.Corpus,
tokenizer : tokenizers.TokenizerBase,
feature_tokenizer : tokenizers.FeatureTokenizer,
cache_path : pathlib.Path,
) -> None:
self.config = config
self.tokenizer = tokenizer
self.feature_tokenizer = feature_tokenizer
self.max_position_embeddings = max_position_embeddings
self.feature_sequence_length = self.config.agent.feature_tokenizer.feature_sequence_length
self.cache_path = cache_path / "environment"
self.ckpt_path = cache_path / "checkpoint"
if environment.WORLD_RANK == 0:
self.cache_path.mkdir(exist_ok = True, parents = True)
self.ckpt_path.mkdir(exist_ok = True, parents = True)
self.current_state = None
self.feature_dataset = None
self.loadCheckpoint()
if self.feature_dataset is None:
self.feature_dataset = []
if self.config.HasField("train_set"):
data = corpus.GetTrainingFeatures()
for dp in data:
for k, v in dp.items():
if v:
self.feature_dataset.append((k, v))
elif self.config.HasField("random"):
self.feature_dataset = []
return
def intermediate_step(self,
state_code : torch.LongTensor,
step_actions : torch.LongTensor,
                        ) -> typing.Tuple[torch.Tensor, ...]:
"""
The environment reads the predicted index, and makes
necessary transformations to the input ids so they can be
fed into the language model, if need be.
"""
num_episodes = step_actions.shape[0]
lm_input_ids = torch.zeros(state_code.shape, dtype = torch.long)
use_lm = torch.zeros((num_episodes), dtype = torch.bool)
for idx, (code, action) in enumerate(zip(state_code, step_actions)):
act_type = int(action) % len(interactions.ACTION_TYPE_SPACE)
act_index = int(action) // len(interactions.ACTION_TYPE_SPACE)
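      # Worked example of the flat action encoding (assuming the four action kinds
      # handled in new_step, i.e. ADD/REM/REPLACE/COMP): action id 9 decodes to
      # act_type = 9 % 4 = 1 and act_index = 9 // 4 = 2.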
if act_type == interactions.ACTION_TYPE_SPACE['ADD']:
if torch.any(code == self.tokenizer.padToken):
# ADD is only valid if there is room for new tokens, i.e. at least one [PAD] exists.
new_code = torch.cat((code[:act_index + 1], torch.LongTensor([self.tokenizer.holeToken]), code[act_index + 1:]))
new_code = new_code[:code.shape[0]]
lm_input_ids[idx] = new_code
use_lm[idx] = True
elif act_type == interactions.ACTION_TYPE_SPACE['REPLACE']:
if int(code[act_index]) not in self.tokenizer.metaTokenValues:
          # REPLACE is only valid if the token it is trying to replace is not a meta token.
new_code = torch.clone(code)
new_code[act_index] = self.tokenizer.holeToken
lm_input_ids[idx] = new_code
use_lm[idx] = True
return use_lm, lm_input_ids
def new_step(self,
state_code : torch.LongTensor,
step_actions : torch.LongTensor,
step_tokens : torch.LongTensor,
traj_disc_rewards : torch.FloatTensor,
feature_dists : torch.FloatTensor,
use_lm : torch.BoolTensor,
gamma : float,
               ) -> typing.Tuple[torch.Tensor, ...]:
"""
Step the environment, compute the reward.
"""
num_episodes = step_actions.shape[0]
reward = torch.zeros((num_episodes), dtype = torch.float32)
discounted_reward = torch.zeros((num_episodes), dtype = torch.float32)
done = torch.zeros((num_episodes), dtype = torch.bool)
for idx, (code, act, tok, dr, lm) in enumerate(zip(state_code, step_actions, step_tokens, discounted_reward, use_lm)):
act_type = int(act) % len(interactions.ACTION_TYPE_SPACE)
act_index = int(act) // len(interactions.ACTION_TYPE_SPACE)
token_id = int(tok)
lm = bool(lm)
try:
real_len = torch.where(code == self.tokenizer.endToken)[0][0]
except Exception as e:
        # This exception is raised when the endToken has been removed from the sequence.
l.logger().warn(code)
l.logger().error(torch.where(code == self.tokenizer.endToken))
l.logger().critical("No ENDTOKEN has been found.")
raise e
if act_index >= real_len and act_type != interactions.ACTION_TYPE_SPACE['COMP']:
l.logger().critical(self.tokenizer.tokensToString([int(x) for x in code]))
l.logger().critical(act_type)
l.logger().critical(act_index)
l.logger().critical(real_len)
raise ValueError("Why did this run out of bounds ?")
## ADD
if act_type == interactions.ACTION_TYPE_SPACE['ADD']:
if int(token_id) not in self.tokenizer.metaTokenValues and torch.any(code == self.tokenizer.padToken):
# ADD is only valid if predicted token is not a meta token.
# Also out-of-bounds restriction, also applied by intermediate step.
new_code = torch.cat((code[:act_index + 1], torch.LongTensor([token_id]), code[act_index + 1:]))
new_code = new_code[:code.shape[0]]
state_code[idx] = new_code
else:
# Unflag current sequence as LM-ready.
use_lm[idx] = False
reward[idx] = -0.1
## REMOVE
elif act_type == interactions.ACTION_TYPE_SPACE['REM']:
if int(code[act_index]) not in self.tokenizer.metaTokenValues:
new_code = torch.cat((code[:act_index], code[act_index + 1:], torch.LongTensor([self.tokenizer.padToken])))
state_code[idx] = new_code
## REPLACE
elif act_type == interactions.ACTION_TYPE_SPACE['REPLACE']:
if int(token_id) not in self.tokenizer.metaTokenValues and int(code[act_index]) not in self.tokenizer.metaTokenValues:
# REPLACE is valid if predicted token is not a meta token.
# Also if to-be-replaced token is not a meta token.
state_code[idx][act_index] = token_id
else:
# Unflag current sequence as LM-ready.
use_lm[idx] = False
reward[idx] = -0.1
## COMPILE
elif act_type == interactions.ACTION_TYPE_SPACE['COMP']:
src = self.tokenizer.ArrayToCode([int(x) for x in code])
try:
_ = opencl.Compile(src)
features = extractor.ExtractFeatures(src, ext = [self.current_state.feature_space])
compiles = True
except ValueError:
compiles = False
features = None
if compiles and len(src) > 0:
cur_dist = feature_sampler.calculate_distance(
features[self.current_state.feature_space],
self.current_state.target_features,
)
if feature_dists[idx] == -1 or cur_dist < feature_dists[idx]:
reward[idx] = +0.5
if cur_dist == 0:
done[idx] = True
else:
reward[idx] = -0.5
else:
raise ValueError("Invalid action type: {}".format(act_type))
discounted_reward = traj_disc_rewards * gamma + reward
traj_disc_rewards[torch.where(done == True)] = 0.0
return state_code, reward, discounted_reward, done, use_lm
def reset(self, recycle: bool = True) -> interactions.State:
"""
Reset the state of the environment.
"""
if recycle and self.current_state:
l.logger().warn("Remember to remove this line when you take training seriously.")
return self.current_state
    # Recycle the previous target (if any) back to the end of the pool.
    if self.current_state is not None:
      self.feature_dataset.append(
        (self.current_state.feature_space, self.current_state.target_features)
      )
next = self.feature_dataset.pop(0)
self.current_state = interactions.State(
target_features = next[1],
feature_space = next[0],
encoded_features = self.feature_tokenizer.TokenizeFeatureVector(next[1], next[0], self.feature_sequence_length),
code = "",
encoded_code = self.init_code_state,
comment = "State: \nCode:\n\nFeatures:\n{}".format(next[1]),
)
return self.current_state
def get_state(self) -> interactions.State:
"""
Get the current state of the environment.
"""
return self.current_state
def loadCheckpoint(self) -> None:
"""
Load environment checkpoint.
"""
if (self.ckpt_path / "environment.pkl").exists():
distrib.lock()
with open(self.ckpt_path / "environment.pkl", 'rb') as inf:
self.current_state = pickle.load(inf)
distrib.unlock()
distrib.barrier()
if (self.ckpt_path / "feature_loader.pkl").exists():
distrib.lock()
with open(self.ckpt_path / "feature_loader.pkl", 'rb') as inf:
        self.feature_dataset = pickle.load(inf)
distrib.unlock()
distrib.barrier()
return
def saveCheckpoint(self) -> None:
"""
Save environment state.
"""
if environment.WORLD_RANK == 0:
with open(self.ckpt_path / "environment.pkl", 'wb') as outf:
pickle.dump(self.current_state, outf)
with open(self.ckpt_path / "feature_loader.pkl", 'wb') as outf:
        pickle.dump(self.feature_dataset, outf)
distrib.barrier()
return
| 11,257 | 40.389706 | 126 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/features/hidden_state.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extraction module that uses the hidden states of a trained language model as the feature space.
"""
import math
import typing
from absl import flags
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.models import backends
FLAGS = flags.FLAGS
KEYS = None
LANGUAGE_MODEL = None
def setup_lm(lm: backends.BackendBase) -> None:
"""
Initialize the language model that will be used as a feature extractor.
Also, the keys of the feature space (they are parametric to the hidden size).
"""
global LANGUAGE_MODEL
global KEYS
KEYS = ["f{}".format(x) for x in range(lm.hidden_state_size)]
LANGUAGE_MODEL = lm
return
class HiddenStateFeatures(object):
"""
  Source-code features derived from the hidden states of a trained language
  model (see setup_lm); one feature dimension is emitted per hidden-state unit.
"""
def __init__(self):
return
@classmethod
def ExtractFeatures(cls,
src: str,
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Dict[str, float]:
"""
    Runs the language model on the given source code and returns the
    hidden-state feature mapping in dictionary format.
"""
raw_features = cls.ExtractRawFeatures(src)
return cls.RawToDictFeats(raw_features)
@classmethod
def ExtractFeaturesIter(cls,
srcs: typing.List[str],
header_file : str = None,
use_aux_headers : bool = True,
extra_args : typing.List[str] = [],
**kwargs,
) -> typing.Iterator[typing.Dict[str, float]]:
"""
    Runs the language model on a batch of source codes and yields the
    hidden-state feature mapping of each one in dictionary format.
"""
batch_size = kwargs.get('batch_size', 256)
for bidx in range(0, len(srcs), batch_size):
batch = srcs[bidx: bidx + batch_size]
batch_feats = cls.ExtractRawFeatures(batch)
for feat_vec in batch_feats:
yield cls.RawToDictFeats(feat_vec)
@classmethod
def ExtractIRFeatures(cls, bytecode: str, **kwargs) -> typing.Dict[str, float]:
"""
Bytecode input in text-level feature space makes no sense. Therefore this function is just a decoy.
"""
raise NotImplementedError("I must not be called.")
return {}
@classmethod
def ExtractRawFeatures(cls, src: typing.Union[str, typing.List[str]], **kwargs) -> typing.Union[typing.List[float], typing.List[typing.List[float]]]:
"""
Invokes BenchPress to collect hidden softmax activations.
Params:
src: (str) Kernel in string format.
    Returns:
      A hidden-state feature vector (one float per hidden dimension), or a list
      of such vectors when a list of sources is given.
"""
global LANGUAGE_MODEL
if not isinstance(src, list):
encoded = LANGUAGE_MODEL.EncodeInputs([src])
hidden_state = LANGUAGE_MODEL.ExtractHidden(encoded).squeeze(0)
else:
encoded = LANGUAGE_MODEL.EncodeInputs(src)
hidden_state = LANGUAGE_MODEL.ExtractHidden(encoded)
return list(hidden_state.detach().cpu().numpy())
@classmethod
def ExtractIRRawFeatures(cls, bytecode: str, **kwargs) -> str:
"""
Bytecode input in text-level feature space makes no sense. Therefore this function is just a decoy.
"""
raise NotImplementedError("I must not be called.")
return ""
@classmethod
def RawToDictFeats(cls, hidden_states: typing.List[float], **kwargs) -> typing.Dict[str, float]:
"""
    Converts a raw hidden-state vector into a mapped dictionary of
    feature -> value, keyed by the names created in setup_lm.
"""
return {
"{}".format(k): (v) for k, v in zip(KEYS, hidden_states)
} | 4,677 | 33.397059 | 151 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/sequence_masking.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core algorithm of sequence masking"""
import sys
import typing
import copy
import humanize
import pickle
import numpy as np
import progressbar
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util.tf import tf
from deeplearning.benchpress.util import logging as l
class tfSequence(typing.NamedTuple):
"""
Tuple representation of a single MaskLM Instance.
This is not batch! generateTfDataset applies native batching,
so this class represents a single instance!
"""
seen_in_training : np.int32
original_input : np.array
input_ids : np.array
input_mask : np.array
masked_lm_positions : np.array
masked_lm_ids : np.array
masked_lm_weights : np.array
masked_lm_lengths : np.array
next_sentence_label : np.int32
@staticmethod
def tfTypes():
return (tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32, tf.int32, tf.int32)
@staticmethod
def npTypes():
return (np.int32, np.int32, np.int32, np.int32, np.int32, np.int32, np.float32, np.int32, np.int32)
@staticmethod
def tfShapes(batch_size, sequence_length, max_position_embeddings = None):
return (tf.TensorShape([batch_size, 1]),
tf.TensorShape([batch_size, sequence_length]),
tf.TensorShape([batch_size, sequence_length]),
tf.TensorShape([batch_size, sequence_length]),
tf.TensorShape([batch_size, max_position_embeddings]),
tf.TensorShape([batch_size, max_position_embeddings]),
tf.TensorShape([batch_size, max_position_embeddings]),
tf.TensorShape([batch_size, max_position_embeddings]),
tf.TensorShape([batch_size, 1]),
)
## Tuple representation of mask id/position/hole_length for easy sorting
class MaskedLmInstance():
def __init__(self,
pos_index: int,
token_id: int,
hole_length: int,
extend_left: bool,
):
self.pos_index = pos_index
self.token_id = token_id
self.hole_length = hole_length
self.extend_left = extend_left
def MPHoleSequence(seq: np.array,
train_set: bool,
max_predictions: int,
pickled_distribution: distributions.Distribution,
pickled_tokenizer,
training_opts,
is_torch: bool,
repair_locations: typing.List[int] = None,
) -> typing.Tuple[
typing.Union[typing.Dict[str, np.array], tfSequence],
typing.List[MaskedLmInstance],
]:
"""
Inserts hole tokens to a given sequence.
If repair_locations is set, then algorithm places holes over syntactic errors
for the model to repair them. Default is None, where hole-d indices are randomly
selected.
This function is compatible for multiprocessing. There is an optimized single-core
version below.
"""
assert seq.ndim == 1, "Input for masking must be single-dimension array."
# Unpack tokenizer and sampler
distribution = pickle.loads(pickled_distribution)
tokenizer = pickle.loads(pickled_tokenizer)
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
if use_start_end:
actual_length = np.where(seq == tokenizer.endToken)[0][0]
last_elem = actual_length
elif tokenizer.padToken in seq:
actual_length = np.where(seq == tokenizer.padToken)[0][0]
last_elem = actual_length - 1
else:
actual_length = len(seq)
last_elem = actual_length - 1
# total tokens to add in holes.
# No more than max_predictions_per_seq (or otherwise specified), no less than actual seq length x the probability of hiding a token
holes_to_predict = min(max_predictions,
max(1, int(round(actual_length * training_opts.masked_lm_prob))))
extend_left = True if np.random.RandomState().randint(0, 2) == 1 else False
input_ids = list(np.copy(seq))
# List of (seq_idx, token_id, hole_length) tuples
masked_lms = []
# Offset array. Indices represent elements in the initial array (seq)
# Values of indices represent current offset position in processed array (input_ids).
offset_idxs = np.zeros(len(seq), dtype = np.int64)
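  # Worked example: replacing a hole of length 3 that starts at original index i
  # with a single [HOLE] token shifts every later token by 1 - 3 = -2 positions,
  # which is exactly the update applied to offset_idxs[i + 1:] further down.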
# Set with all candidate_indexes that have been holed.
visited_indices = set()
# Total masks placed so far.
total_predictions = 0
while total_predictions < holes_to_predict:
if repair_locations:
pos_index = repair_locations[np.random.RandomState().randint(0, len(repair_locations))]
else:
pos_index = np.random.RandomState().randint(0, actual_length) # Fixed seed doesn't work!
assert pos_index < len(seq), "Candidate index is out of bounds: {} >= {}".format(pos_index, len(seq))
# Element in processed array can be found in its original index +/- offset
input_id_idx = pos_index + offset_idxs[pos_index]
if total_predictions >= holes_to_predict:
break
elif pos_index in visited_indices:
# Do not target an index, already holed
continue
elif input_id_idx > len(seq):
# Do not mask a part of input_ids that is going to be cropped.
continue
elif input_ids[input_id_idx] in {tokenizer.startToken, tokenizer.endToken}:
# Do not target [START] or [END] token
continue
    assert input_ids[input_id_idx] == seq[pos_index], (
      "Original and offset-ted sequence have misaligned tokens: {}, {}"
      .format(seq[pos_index], input_ids[input_id_idx]))
# Sampled number from distribution to represent the actual hole length
hole_length = distribution.sample(actual_length)
# Increase hole length a little bit, if too many empty holes have pushed rightmost elements
# over the edge.
while last_elem + offset_idxs[last_elem] + 1 - hole_length >= len(seq):
hole_length += 1
# Inside range, make sure hole length does not run over input_id_idx bounds
# This may be redundant given the next for loop
if extend_left:
hole_length = min(hole_length, input_id_idx)
else:
hole_length = min(hole_length, (last_elem + offset_idxs[last_elem]) - input_id_idx)
# Confirm there is no conflict with another hole, further down the sequence.
for i in range(hole_length):
if extend_left:
if (input_ids[input_id_idx - i] == tokenizer.holeToken
or input_ids[input_id_idx - i] == tokenizer.startToken
or input_ids[input_id_idx - i] == tokenizer.endToken
# or input_id_idx - i == 0
):
hole_length = i
break
else:
if (input_ids[input_id_idx + i] == tokenizer.holeToken
or input_ids[input_id_idx + i] == tokenizer.startToken
or input_ids[input_id_idx + i] == tokenizer.endToken
# or input_id_idx + i == len(input_ids)
):
hole_length = i
break
if offset_idxs[last_elem] + 1 - hole_length >= len(seq):
# This hole can't help but explode the sequence. Go find a new position.
continue
assert hole_length >= 0, "hole length is negative: {}".format(hole_length)
pos_index -= hole_length - 1 if hole_length != 0 and extend_left else 0
input_id_idx = pos_index + offset_idxs[pos_index]
# Target token for classifier is either the first token of the hole, or endholeToken if hole is empty
target = input_ids[input_id_idx] if hole_length > 0 else tokenizer.endholeToken
input_ids = input_ids[:input_id_idx] + [tokenizer.holeToken] + input_ids[input_id_idx + hole_length:]
# Store position index, and after making all masks, update with updated offset array
masked_lms.append(MaskedLmInstance(
pos_index = pos_index, token_id = target, hole_length = hole_length, extend_left = extend_left
)
)
# Adjust the offset of all affected tokens, from pos_index and after.
offset_idxs[pos_index + 1:] += 1 - hole_length
total_predictions += max(1, hole_length)
visited_indices.update(range(pos_index, pos_index + hole_length))
hole_analytics = copy.deepcopy(masked_lms)
# Now update the entries with offset index.
for lm in masked_lms:
prev_index = lm.pos_index
lm.pos_index = lm.pos_index + offset_idxs[lm.pos_index]
assert input_ids[lm.pos_index] == tokenizer.holeToken, "{}".format(lm.hole_length)
while len(input_ids) < len(seq):
input_ids.append(tokenizer.padToken)
masked_lms = sorted(masked_lms, key=lambda x: x.pos_index)
input_mask = np.ones(len(seq), dtype = np.int64)
if tokenizer.padToken in input_ids:
first_pad_index = input_ids.index(tokenizer.padToken)
input_mask[first_pad_index:] = 0
# Check that the pad index is likely correct.
assert input_ids[first_pad_index] == tokenizer.padToken, "{}".format(input_ids)
assert input_ids[first_pad_index - 1] != tokenizer.padToken
"""
Related to next_sentence_labels: Fix it to 0 for now, as no next_sentence prediction
is intended on kernels. In any other case, check bert's create_instances_from_document
to see how next_sentence_labels are calculated.
Setting this to 0 means that next sentence is NOT random.
Note that if next_sentence prediction is to be embedded, [SEP] token has to be added.
"""
if len(masked_lms) == 0:
l.logger().warn("No HOLE added to datapoint. Increase probability of hole occuring.")
if is_torch:
seen_in_training = np.int64([1] if train_set else [0])
next_sentence_labels = np.int64([0])
masked_lm_lengths = np.full(holes_to_predict, -1, dtype = np.int64)
mask_labels = np.full(len(seq), -100, dtype = np.int64)
ind = 0
for p in masked_lms:
if p.pos_index < len(seq):
mask_labels[p.pos_index] = p.token_id
masked_lm_lengths[ind] = p.hole_length
ind += 1
return {
'seen_in_training' : seen_in_training,
'original_input' : seq,
'input_ids' : np.asarray(input_ids[:len(seq)], dtype = np.int64),
'input_mask' : input_mask,
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels': next_sentence_labels,
}, hole_analytics
else: # TF 1.X, 2.[0-2]
seen_in_training = np.int32(1 if train_set else 0)
next_sentence_label = np.int32(0)
masked_lm_positions, masked_lm_ids, masked_lm_weights, masked_lm_lengths = [], [], [], []
for p in masked_lms:
if p.pos_index < len(seq):
"""
Adding holes can increase or decrease the length of the original sequence.
It is important in the end, to end up with an input sequence compatible
with the model's sequence length, i.e. len(seq). If any mask is found
beyond that point, will have to be rejected.
"""
masked_lm_positions.append(p.pos_index)
masked_lm_ids.append(p.token_id)
masked_lm_weights.append(1.0)
masked_lm_lengths.append(p.hole_length)
num_holes = len(masked_lm_positions)
while len(masked_lm_positions) < training_opts.max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(tokenizer.padToken)
masked_lm_weights.append(0.0)
masked_lm_lengths.append(-1)
    assert input_ids[:len(seq)].count(tokenizer.holeToken) == num_holes, (
      "Number of targets {} does not correspond to hole number in final input sequence: {}"
      .format(num_holes, input_ids[:len(seq)].count(tokenizer.holeToken))
    )
return tfSequence(seen_in_training, seq,
np.asarray(input_ids[:len(seq)]), input_mask,
np.asarray(masked_lm_positions), np.asarray(masked_lm_ids),
np.asarray(masked_lm_weights), np.asarray(masked_lm_lengths),
next_sentence_label
), hole_analytics
def MPMaskSequence(seq: np.array,
train_set: bool,
max_predictions: int,
pickled_tokenizer,
training_opts,
config,
is_torch: bool,
) -> typing.Dict:
"""
Inserts masks to a given sequence.
This function is compatible for multiprocessing. There is an optimized single-core
version below.
"""
assert seq.ndim == 1, "Input for masking must be single-dimension array."
## Tuple representation of mask id/position for easy sorting
class MaskedLmInstance(typing.NamedTuple):
pos_index: int
token_id: int
# Unpack tokenizer
tokenizer = pickle.loads(pickled_tokenizer)
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
if use_start_end:
actual_length = np.where(seq == tokenizer.endToken)[0][0]
elif tokenizer.padToken in seq:
actual_length = np.where(seq == tokenizer.padToken)[0][0]
else:
actual_length = len(seq)
candidate_indexes = np.arange(actual_length)
np.random.RandomState().shuffle(candidate_indexes)
masks_to_predict = min(max_predictions,
max(1, int(round(actual_length * training_opts.masked_lm_prob))))
input_ids = list(np.copy(seq))
masked_lms = []
for pos_index in candidate_indexes:
if len(masked_lms) >= masks_to_predict:
break
if config.mask.random_placed_mask:
# 80% of the time, replace with [MASK]
if np.random.RandomState().random() < 0.8:
input_ids[pos_index] = tokenizer.maskToken
else:
# 10% of the time, keep original
if np.random.RandomState().random() < 0.5:
pass
# 10% of the time, replace with random word
else:
random_token = np.random.RandomState().randint(0, tokenizer.vocab_size)
while any(tokenizer.vocab[t] == random_token for (idx, t) in tokenizer.metaTokens.items()):
random_token = np.random.RandomState().randint(0, tokenizer.vocab_size)
          input_ids[pos_index] = random_token
else:
if np.random.RandomState().random() < 0.8:
input_ids[pos_index] = tokenizer.maskToken
masked_lms.append(MaskedLmInstance(pos_index=pos_index, token_id=seq[pos_index]))
assert len(masked_lms) <= masks_to_predict
masked_lms = sorted(masked_lms, key=lambda x: x.pos_index)
input_mask = np.ones(len(seq), dtype = np.int64)
if tokenizer.padToken in input_ids:
input_mask[input_ids.index(tokenizer.padToken):] = 0
## Related to next_sentence_labels: Fix it to 0 for now, as no next_sentence prediction
## is intended on kernels. In any other case, check bert's create_instances_from_document
## to see how next_sentence_labels are calculated.
## Setting this to 0 means that next sentence is NOT random.
## Note that if next_sentence prediction is to be embedded, [SEP] token has to be added.
if is_torch:
seen_in_training = np.int64([1] if train_set else [0])
next_sentence_labels = np.int64([0])
masked_lm_lengths = np.full(masks_to_predict, -1, dtype = np.int64)
mask_labels = np.full(len(seq), -100, dtype = np.int64)
ind = 0
for p in masked_lms:
if p.pos_index < len(seq):
mask_labels[p.pos_index] = p.token_id
masked_lm_lengths[ind] = 1
ind += 1
return ({
'seen_in_training' : seen_in_training,
'original_input' : seq,
'input_ids' : np.asarray(input_ids[:len(seq)], dtype = np.int64),
'input_mask' : input_mask,
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels': next_sentence_labels,
}, [])
else: # TF 1.X, 2.[0-2]
masked_lm_positions, masked_lm_ids, masked_lm_weights, masked_lm_lengths = [], [], [], []
seen_in_training = np.int32(1 if train_set else 0)
next_sentence_label = np.int32(0)
for p in masked_lms:
masked_lm_positions.append(p.pos_index)
masked_lm_ids.append(p.token_id)
masked_lm_weights.append(1.0)
masked_lm_lengths.append(1)
while len(masked_lm_positions) < training_opts.max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(tokenizer.padToken)
masked_lm_weights.append(0.0)
masked_lm_lengths.append(-1)
return tfSequence(seen_in_training, seq,
np.asarray(input_ids), input_mask,
np.asarray(masked_lm_positions), np.asarray(masked_lm_ids),
np.asarray(masked_lm_weights), np.asarray(masked_lm_lengths),
next_sentence_label
), [], []
def HoleSequence(seq: np.array,
train_set: bool,
max_predictions: int,
masked_lm_prob: int,
distribution: distributions.Distribution,
tokenizer,
) -> typing.Dict[str, np.array]:
"""
Inserts hole tokens to a given sequence.
Used for online training.
"""
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
if use_start_end:
actual_length = np.where(seq == tokenizer.endToken)[0][0]
last_elem = actual_length
elif tokenizer.padToken in seq:
actual_length = np.where(seq == tokenizer.padToken)[0][0]
last_elem = actual_length - 1
else:
actual_length = len(seq)
last_elem = actual_length - 1
# total tokens to add in holes.
# No more than max_predictions_per_seq (or otherwise specified), no less than actual seq length x the probability of hiding a token
holes_to_predict = min(max_predictions,
max(1, int(round(actual_length * masked_lm_prob))))
extend_left = True if np.random.RandomState().randint(0, 2) == 1 else False
input_ids = list(np.copy(seq))
# List of (seq_idx, token_id, hole_length) tuples
masked_lms = []
# Offset array. Indices represent elements in the initial array (seq)
# Values of indices represent current offset position in processed array (input_ids).
offset_idxs = np.zeros(len(seq), dtype = np.int64)
# Set with all candidate_indexes that have been holed.
visited_indices = set()
# Total masks placed so far.
total_predictions = 0
while total_predictions < holes_to_predict:
try:
pos_index = np.random.RandomState().randint(0, actual_length) # Fixed seed doesn't work!
except ValueError as e:
l.logger().error(actual_length)
l.logger().error(tokenizer.tokensToString(seq))
raise e
# Element in processed array can be found in its original index +/- offset
input_id_idx = pos_index + offset_idxs[pos_index]
if total_predictions >= holes_to_predict:
break
elif pos_index in visited_indices:
# Do not target an index, already holed
continue
elif input_id_idx > len(seq):
# Do not mask a part of input_ids that is going to be cropped.
continue
elif input_ids[input_id_idx] in {tokenizer.startToken, tokenizer.endToken}:
# Do not target [START] or [END] token
continue
# Sampled number from distribution to represent the actual hole length
hole_length = distribution.sample(actual_length)
# Increase hole length a little bit, if too many empty holes have pushed rightmost elements
# over the edge.
while last_elem + offset_idxs[last_elem] + 1 - hole_length >= len(seq):
hole_length += 1
# Inside range, make sure hole length does not run over input_id_idx bounds
# This may be redundant given the next for loop
if extend_left:
hole_length = min(hole_length, input_id_idx)
else:
hole_length = min(hole_length, (last_elem + offset_idxs[last_elem]) - input_id_idx)
# Confirm there is no conflict with another hole, further down the sequence.
for i in range(hole_length):
if extend_left:
if (input_ids[input_id_idx - i] == tokenizer.holeToken
or input_ids[input_id_idx - i] == tokenizer.startToken
or input_ids[input_id_idx - i] == tokenizer.endToken
# or input_id_idx - i == 0
):
hole_length = i
break
else:
if (input_ids[input_id_idx + i] == tokenizer.holeToken
or input_ids[input_id_idx + i] == tokenizer.startToken
or input_ids[input_id_idx + i] == tokenizer.endToken
# or input_id_idx + i == len(input_ids)
):
hole_length = i
break
if offset_idxs[last_elem] + 1 - hole_length >= len(seq):
# This hole can't help but explode the sequence. Go find a new position.
continue
assert hole_length >= 0, "hole length is negative: {}".format(hole_length)
pos_index -= hole_length - 1 if hole_length != 0 and extend_left else 0
input_id_idx = pos_index + offset_idxs[pos_index]
# Target token for classifier is either the first token of the hole, or endholeToken if hole is empty
target = input_ids[input_id_idx] if hole_length > 0 else tokenizer.endholeToken
input_ids = input_ids[:input_id_idx] + [tokenizer.holeToken] + input_ids[input_id_idx + hole_length:]
# Store position index, and after making all masks, update with updated offset array
masked_lms.append(MaskedLmInstance(
pos_index = pos_index, token_id = target, hole_length = hole_length, extend_left = extend_left
)
)
# Adjust the offset of all affected tokens, from pos_index and after.
offset_idxs[pos_index + 1:] += 1 - hole_length
total_predictions += max(1, hole_length)
visited_indices.update(range(pos_index, pos_index + hole_length))
# Now update the entries with offset index.
for lm in masked_lms:
prev_index = lm.pos_index
lm.pos_index = lm.pos_index + offset_idxs[lm.pos_index]
while len(input_ids) < len(seq):
input_ids.append(tokenizer.padToken)
masked_lms = sorted(masked_lms, key=lambda x: x.pos_index)
input_mask = np.ones(len(seq), dtype = np.int64)
if tokenizer.padToken in input_ids:
first_pad_index = input_ids.index(tokenizer.padToken)
input_mask[first_pad_index:] = 0
seen_in_training = np.int64([1] if train_set else [0])
next_sentence_labels = np.int64([0])
masked_lm_lengths = np.full(holes_to_predict, -1, dtype = np.int64)
mask_labels = np.full(len(seq), -100, dtype = np.int64)
ind = 0
for p in masked_lms:
if p.pos_index < len(seq):
mask_labels[p.pos_index] = p.token_id
masked_lm_lengths[ind] = p.hole_length
ind += 1
return {
'seen_in_training' : seen_in_training,
'original_input' : seq,
'input_ids' : np.asarray(input_ids[:len(seq)], dtype = np.int64),
'input_mask' : input_mask,
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels': next_sentence_labels,
}
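
# --- Illustrative sketch (editor's addition, not part of the original module) ----
# A toy, tokenizer-free illustration of the core edit performed by the hole
# functions above: a span of `hole_length` tokens is collapsed into one hole
# token, so every later token shifts by (1 - hole_length) positions. HOLE below
# is a hypothetical stand-in for tokenizer.holeToken.
def _toy_hole_edit_example() -> typing.List[int]:
  HOLE = -1
  seq = [10, 11, 12, 13, 14, 15]
  pos_index, hole_length = 2, 3 # hide tokens 12, 13 and 14
  edited = seq[:pos_index] + [HOLE] + seq[pos_index + hole_length:]
  return edited # -> [10, 11, -1, 15]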
def HoleSequenceSeqMasks(seq: np.array,
train_set: bool,
max_predictions: int,
masked_lm_prob: int,
distribution: distributions.Distribution,
tokenizer,
) -> typing.Dict[str, np.array]:
"""
  Instead of a single hole, place the left context on the leftmost part and the
  right context on the rightmost part, and fill everything in between with mask
  tokens; that hidden middle span is what the model is asked to predict.
This is PLDI Reviewer B's idea.
"""
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
if use_start_end:
actual_length = np.where(seq == tokenizer.endToken)[0][0]
last_elem = actual_length
elif tokenizer.padToken in seq:
actual_length = np.where(seq == tokenizer.padToken)[0][0]
last_elem = actual_length - 1
else:
actual_length = len(seq)
last_elem = actual_length - 1
# total tokens to add in holes.
# No more than max_predictions_per_seq (or otherwise specified), no less than actual seq length x the probability of hiding a token
holes_to_predict = min(max_predictions,
max(1, int(round(actual_length * masked_lm_prob))))
assert holes_to_predict == 1, "This mode only supports a single hole."
extend_left = True if np.random.RandomState().randint(0, 2) == 1 else False
input_ids = list(np.copy(seq))
# List of (seq_idx, token_id, hole_length) tuples
masked_lms = []
# Set with all candidate_indexes that have been holed.
visited_indices = set()
# Total masks placed so far.
total_predictions = 0
while total_predictions < holes_to_predict:
pos_index = np.random.RandomState().randint(0, actual_length) # Fixed seed doesn't work!
# Element in processed array can be found in its original index +/- offset
if total_predictions >= holes_to_predict:
break
elif pos_index in visited_indices:
# Do not target an index, already holed
continue
elif input_ids[pos_index] in {tokenizer.startToken, tokenizer.endToken}:
# Do not target [START] or [END] token
continue
# Sampled number from distribution to represent the actual hole length
hole_length = distribution.sample(actual_length)
# Increase hole length a little bit, if too many empty holes have pushed rightmost elements
# over the edge.
while last_elem + 1 - hole_length >= len(seq):
hole_length += 1
# Inside range, make sure hole length does not run over pos_index bounds
# This may be redundant given the next for loop
if extend_left:
hole_length = min(hole_length, pos_index)
else:
hole_length = min(hole_length, last_elem - pos_index)
# Confirm there is no conflict with another hole, further down the sequence.
for i in range(hole_length):
if extend_left:
if (input_ids[pos_index - i] == tokenizer.holeToken
or input_ids[pos_index - i] == tokenizer.startToken
or input_ids[pos_index - i] == tokenizer.endToken
# or pos_index - i == 0
):
hole_length = i
break
else:
if (input_ids[pos_index + i] == tokenizer.holeToken
or input_ids[pos_index + i] == tokenizer.startToken
or input_ids[pos_index + i] == tokenizer.endToken
# or pos_index + i == len(input_ids)
):
hole_length = i
break
if 1 - hole_length >= len(seq):
# This hole can't help but explode the sequence. Go find a new position.
continue
assert hole_length >= 0, "hole length is negative: {}".format(hole_length)
pos_index -= hole_length - 1 if hole_length != 0 and extend_left else 0
# Target token for classifier is either the first token of the hole, or endholeToken if hole is empty
targets = input_ids[pos_index: pos_index + hole_length]
lc = input_ids[:pos_index]
rc = input_ids[pos_index + hole_length:actual_length+1]
pad_len = len(seq) - len(lc) - len(rc) - len(targets)
if pad_len == 0:
if len(rc) > 1:
# input_ids = input_ids[:-2] + [input_ids[-1]]
input_ids = lc + [tokenizer.maskToken]*(len(targets) + pad_len + 1) + rc[:-2] + [rc[-1]]
targets += [tokenizer.endholeToken]
else:
targets[-1] = tokenizer.endholeToken
input_ids = lc + [tokenizer.maskToken]*(len(targets) + pad_len) + rc
else:
input_ids = lc + [tokenizer.maskToken]*(len(targets) + pad_len) + rc
targets += [tokenizer.endholeToken] * pad_len
# Store position index, and after making all masks, update with updated offset array
masked_lms.append(MaskedLmInstance(
pos_index = pos_index, token_id = targets, hole_length = hole_length, extend_left = extend_left
)
)
# Adjust the offset of all affected tokens, from pos_index and after.
total_predictions += max(1, hole_length)
visited_indices.update(range(pos_index, pos_index + hole_length))
assert len(input_ids) == len(seq), "Input sequence and sequence length mismatch: {} / {}, {}".format(len(input_ids), len(seq), tokenizer.tokensToString(input_ids))
assert input_ids[0] == tokenizer.startToken, "{}".format(tokenizer.tokensToString(input_ids[0]))
assert input_ids[-1] == tokenizer.endToken, "{}".format(tokenizer.tokensToString(input_ids[-1]))
# Now update the entries with offset index.
masked_lms = sorted(masked_lms, key=lambda x: x.pos_index)
mask_labels = np.full(len(seq), -100, dtype = np.int64)
for p in masked_lms:
if p.pos_index < len(seq):
for idx, tid in enumerate(p.token_id):
mask_labels[p.pos_index + idx] = tid
return {
'seen_in_training' : np.int64([1] if train_set else [0]),
'original_input' : seq,
'input_ids' : np.asarray(input_ids[:len(seq)], dtype = np.int64),
'input_mask' : np.ones(len(seq), dtype = np.int64),
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : np.int64([1]),
'next_sentence_labels': np.int64([0]),
}
def MaskedSeqToBlob(enc_text: np.array,
tokenizer,
sequence_length: int,
max_position_embeddings: int,
):
"""
Constructs training/sampling instance from plain input text.
"""
input_sample = enc_text
target_idx = np.where(np.in1d(input_sample, [tokenizer.maskToken, tokenizer.holeToken]))[0]
num_targets = (np.count_nonzero(input_sample == tokenizer.maskToken) +
np.count_nonzero(input_sample == tokenizer.holeToken))
assert np.ndim(input_sample) == 1, "Input samples have to be one-dimensional. {} given.".format(input_sample.shape)
# if tokenizer.requires_mask:
# assert len(target_idx) != 0, "No target prediction in sample text"
seen_in_training = np.zeros([1], dtype = np.int32)
original_input = np.full((1), tokenizer.padToken, dtype = np.int64)
input_ids = np.concatenate([
input_sample, np.array([tokenizer.padToken] * (max_position_embeddings - len(input_sample)), dtype = np.int64)
])[:sequence_length]
input_mask = np.concatenate([
np.ones(len(input_sample), dtype = np.int64),
np.zeros(len(input_ids) - len(input_sample), dtype = np.int64)
])
position_ids = np.arange(sequence_length, dtype = np.int64)
mask_labels = np.full((sequence_length), -100, dtype = np.int64)
masked_lm_lengths = np.full((1), -1, dtype = np.int64)
next_sentence_labels = np.zeros([1], dtype = np.int32)
return {
'seen_in_training' : seen_in_training,
'original_input' : original_input,
'input_ids' : input_ids,
'input_mask' : input_mask,
'position_ids' : position_ids,
'mask_labels' : mask_labels,
'masked_lm_lengths' : masked_lm_lengths,
'next_sentence_labels': next_sentence_labels,
}
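# Minimal usage sketch for MaskedSeqToBlob. It is not called by the pipeline; the
# source string and the 768-token lengths are illustrative assumptions, and the
# tokenizer may be any BenchPress tokenizer exposing TokenizeString() and the
# special tokens referenced above.
def _masked_seq_to_blob_example(tokenizer) -> typing.Dict[str, np.array]:
  """Build a single sampling blob out of a plain OpenCL snippet."""
  src = "kernel void A(global int* a) { a[get_global_id(0)] = 0; }"
  enc = np.asarray(tokenizer.TokenizeString(src), dtype = np.int64)
  # No labels are produced on this path: mask_labels is all -100 and
  # masked_lm_lengths is -1, since the blob feeds sampling, not training.
  return MaskedSeqToBlob(enc, tokenizer, sequence_length = 768, max_position_embeddings = 768)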
def ExhaustiveHoleSequence(all_seq: np.array,
train_set: bool,
# max_predictions: int,
# pickled_distribution: distributions.Distribution,
pickled_tokenizer,
# training_opts,
# is_torch: bool,
# repair_locations: typing.List[int] = None,
) -> typing.Generator:
"""
Placing random holes seems to introduce an overfitting bias on the model.
It doesn't learn a good distribution of what should go in a specific hole
for a given index, a left and a right context. This function may be solving
this, hopefully in a sustainable way.
No holes are placed randomly. Each index produces many holed seq instances;
starting from empty hole up to hiding everything until the end.
Given one sequence, returns a list of instances, one for each hole instances.
!!!WARNING: Currently only supported for PyTorch.
"""
with progressbar.ProgressBar(max_value = len(all_seq)) as bar:
for seq in bar(all_seq):
assert seq.ndim == 1, "Input for masking must be single-dimension array."
# Unpack tokenizer
tokenizer = pickle.loads(pickled_tokenizer)
use_start_end = True if seq[0] == tokenizer.startToken else False
# Actual length represents the sequence length before pad begins
start_idx = 0
if use_start_end:
start_idx = 1
end = np.where(seq == tokenizer.endToken)[0][0]
elif tokenizer.padToken in seq:
end = np.where(seq == tokenizer.padToken)[0][0]
else:
end = len(seq)
st_input_ids = list(seq)
for idx in range(start_idx, end):
for hole_len in range(0, end - idx):
if end + 1 - hole_len >= len(seq):
continue
input_ids = st_input_ids[:idx] + [tokenizer.holeToken] + st_input_ids[idx + hole_len:]
input_ids += [tokenizer.padToken] * (len(seq) - len(input_ids))
input_ids = input_ids[:len(seq)]
mask_labels = np.full(len(seq), -100, dtype = np.int64)
target = seq[idx] if hole_len else tokenizer.endholeToken
mask_labels[ idx if hole_len else idx - 1] = target
mlm_inst = MaskedLmInstance(
pos_index = idx, token_id = target,
hole_length = hole_len, extend_left = False
)
yield ({
'seen_in_training' : np.int64([1] if train_set else [0]),
'original_input' : seq,
'input_ids' : np.asarray(input_ids, dtype = np.int64),
'input_mask' : (seq != tokenizer.padToken),
'position_ids' : np.arange(len(seq), dtype = np.int64),
'mask_labels' : mask_labels,
'masked_lm_lengths' : np.array([hole_len]),
'next_sentence_labels': np.int64([0]),
}, [mlm_inst])
return | 35,295 | 41.019048 | 165 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/telemetry.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines telemetry data gathers."""
import pathlib
import re
import json
import typing
import datetime
import glob
from absl import flags
from deeplearning.benchpress.proto import telemetry_pb2
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import plotter as plt  # assumed home of linesSingleAxis, used below
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class TrainingLogger(object):
"""A TrainingLogger produces telemetry data of a BenchPress model as it is trained.
Telemetry data is gathered after every epoch of training. It includes a
timestamp, the model's loss, and the time spent training the epoch.
See the Keras callback docs: https://keras.io/callbacks/#lambdacallback
"""
def __init__(self, logdir: pathlib.Path):
logdir.mkdir(exist_ok = True, parents = True)
self.logdir = logdir
self.last_epoch_begin_timestamp = None
self.telemetry = None
def EpochBeginCallback(self) -> None:
self.last_epoch_begin_timestamp = datetime.datetime.utcnow()
def EpochEndCallback(self, epoch: int, loss: float):
now = datetime.datetime.utcnow()
epoch_time_ms = now - self.last_epoch_begin_timestamp
telemetry = telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = now.strftime("%m/%d/%Y, %H:%M:%S"),
epoch_num = epoch,
epoch_wall_time_ms = int(round(epoch_time_ms.total_seconds()*1000)),
loss = loss,
)
pbutil.ToFile(telemetry, self.logdir / f"epoch_{epoch:03d}_telemetry.pbtxt")
def KerasEpochBeginCallback(self, epoch: int, logs: typing.Union[typing.List[typing.Any], typing.Dict[str, typing.Any]]) -> None:
"""A Keras "on_epoch_end" callback."""
del epoch
del logs
self.EpochBeginCallback()
def KerasEpochEndCallback(self, epoch: int, logs: typing.Union[typing.List[typing.Any], typing.Dict[str, typing.Any]]) -> None:
"""A Keras "on_epoch_end" callback."""
# Keras epoch numbers are zero indexed.
self.EpochEndCallback(epoch + 1, logs["loss"])
def KerasCallback(self, keras):
"""Returns the keras callback to passed to a model's fit() function."""
return keras.callbacks.LambdaCallback(
on_epoch_begin=self.KerasEpochBeginCallback,
on_epoch_end=self.KerasEpochEndCallback,
)
def TfRecordEpochs(self) -> None:
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
event_acc = EventAccumulator(str(self.logdir))
event_acc.Reload()
self.tfAccumulateLoss(event_acc)
for key in event_acc.Tags()['scalars']:
_, step, value = zip(*event_acc.Scalars(key))
key_str = str(pathlib.Path(key).stem)
plt.linesSingleAxis(
{key_str: {'y': value, 'x': step}},
y_name = key_str,
x_name = "Train step",
plot_title = key_str,
path = self.logdir,
)
return
def tfAccumulateLoss(self, event_acc):
"""Open accumulator and read total_loss scalar"""
try:
self.telemetry = []
wall_time, step_num, loss = zip(*event_acc.Scalars('training/total_loss'))
for (indx, (wt, st, ls)) in enumerate(zip(wall_time, step_num, loss)):
round_wt = int(round(wt, 0))
if indx == 0:
current_time = round_wt
continue
else:
self.telemetry.append(telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = str(round_wt),
epoch_num = st,
epoch_wall_time_ms = round_wt - current_time,
loss = ls,
)
)
current_time = round_wt
except KeyError as e:
l.logger().warn("Model loss log not found! Available Tags: {}".format(event_acc.Tags()))
self.telemetry = [
telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = str(0),
epoch_num = 0,
epoch_wall_time_ms = 0,
loss = -1,
)
]
return
def EpochTelemetry(self) -> typing.List[telemetry_pb2.ModelEpochTelemetry]:
"""Return the epoch telemetry files."""
if self.telemetry is None:
if len(glob.glob(str(self.logdir / "epoch_*_telemetry.pbtxt"))) > 0:
return [
pbutil.FromFile(self.logdir / p, telemetry_pb2.ModelEpochTelemetry())
for p in sorted(self.logdir.iterdir())
if re.match(r"epoch_\d\d+_telemetry\.pbtxt", str(p.name))
]
elif len(glob.glob(str(self.logdir / "events.out.tfevents*"))) > 0:
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
event_acc = EventAccumulator(str(self.logdir))
event_acc.Reload()
self.tfAccumulateLoss(event_acc)
elif len(glob.glob(str(self.logdir / "training.json"))) == 1:
with open(self.logdir / "training.json", 'r') as jsf:
data = json.load(jsf)
self.telemetry = [
telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = '0',
epoch_num = x['step'],
epoch_wall_time_ms = int(round(x['batch_execution_time_ms'])) if "batch_execution_time_ms" in x else -1,
loss = x['total_loss'] if "total_loss" in x else -1.0,
) for x in data
]
else:
l.logger().warn("Training logs have not been found. Invalid reported loss.")
self.telemetry = [
telemetry_pb2.ModelEpochTelemetry(
timestamp_unix_epoch_ms = str(0),
epoch_num = 0,
epoch_wall_time_ms = 0,
loss = -1,
)
]
return self.telemetry
| 6,194 | 37.006135 | 131 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/bert_flags.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Shared absl flags between Pytorch and Tensorflow
BERT models.
"""
from absl import flags
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"monitor_frequency",
5000,
"Choose frequency (in steps) in which tensors will be logged during training. "
"Default: 5000"
)
flags.DEFINE_integer(
"select_checkpoint_step",
-1,
"Select step checkpoint for sample. Re-training with this flag is not supported. "
"To restart from specific checkpoint, you have to manually remove the checkpoints after the desired one."
"Default: -1, flag ignored and latest checkpoint is loaded."
)
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_boolean("force_eval", False, "Run Validation no matter what.")
flags.DEFINE_integer("sample_per_epoch", 3, "Set above zero to sample model after every epoch.")
flags.DEFINE_boolean("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_boolean("categorical_sampling", True, "Use categorical distribution on logits when sampling.")
| 1,617 | 33.425532 | 107 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/builders.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file builds Keras models from BenchPress Model config protos."""
from deeplearning.benchpress.proto import model_pb2
from absl import flags
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.models import lm_data_generator
FLAGS = flags.FLAGS
def AssertIsBuildable(config: model_pb2.Model) -> model_pb2.Model:
"""Assert that a model configuration is buildable.
Args:
config: A model proto.
Returns:
The input model proto, unmodified.
Raises:
UserError: If the model is not buildable.
InternalError: If the value of the training.optimizer field is not
understood.
"""
# Any change to the Model proto schema will require a change to this function.
try:
pbutil.AssertFieldIsSet(config, "corpus")
pbutil.AssertFieldIsSet(config, "architecture")
pbutil.AssertFieldIsSet(config, "training")
pbutil.AssertFieldIsSet(config.architecture, "backend")
if config.architecture.backend == model_pb2.NetworkArchitecture.KERAS_SEQ:
pbutil.AssertFieldIsSet(config.architecture, "neuron_type")
pbutil.AssertFieldConstraint(
config.architecture,
"embedding_size",
lambda x: 0 < x,
"NetworkArchitecture.embedding_size must be > 0",
)
elif config.architecture.backend == model_pb2.NetworkArchitecture.TENSORFLOW_SEQ:
pbutil.AssertFieldIsSet(config.architecture, "neuron_type")
pbutil.AssertFieldConstraint(
config.architecture,
"neurons_per_layer",
lambda x: 0 < x,
"NetworkArchitecture.neurons_per_layer must be > 0",
)
pbutil.AssertFieldConstraint(
config.architecture,
"num_layers",
lambda x: 0 < x,
"NetworkArchitecture.num_layers must be > 0",
)
pbutil.AssertFieldConstraint(
config.architecture,
"post_layer_dropout_micros",
lambda x: 0 <= x <= 1000000,
"NetworkArchitecture.post_layer_dropout_micros "
"must be >= 0 and <= 1000000",
)
pbutil.AssertFieldConstraint(
config.training,
"num_epochs",
lambda x: 0 < x,
"TrainingOptions.num_epochs must be > 0",
)
elif config.architecture.backend == model_pb2.NetworkArchitecture.TENSORFLOW_BERT\
or config.architecture.backend == model_pb2.NetworkArchitecture.TORCH_BERT\
or config.architecture.backend == model_pb2.NetworkArchitecture.INCODER_1B\
or config.architecture.backend == model_pb2.NetworkArchitecture.INCODER_6B:
# Data generator is needed when using bert.
pbutil.AssertFieldIsSet(config.training, "data_generator")
# Parse data_generator params.
_ = lm_data_generator.AssertConfigIsValid(config.training.data_generator)
if config.architecture.backend != model_pb2.NetworkArchitecture.INCODER_1B and config.architecture.backend != model_pb2.NetworkArchitecture.INCODER_6B:
## .architecture params
pbutil.AssertFieldIsSet(
config.architecture,
"hidden_size",
)
pbutil.AssertFieldIsSet(
config.architecture,
"num_hidden_layers",
)
pbutil.AssertFieldIsSet(
config.architecture,
"num_attention_heads",
)
pbutil.AssertFieldIsSet(
config.architecture,
"intermediate_size",
)
pbutil.AssertFieldConstraint(
config.architecture,
"hidden_size",
lambda x: x % config.architecture.num_attention_heads == 0,
"The hidden size is not a multiple of the number of attention "
"heads."
)
pbutil.AssertFieldIsSet(
config.architecture,
"hidden_act",
)
pbutil.AssertFieldIsSet(
config.architecture,
"hidden_dropout_prob",
)
pbutil.AssertFieldIsSet(
config.architecture,
"attention_probs_dropout_prob",
)
pbutil.AssertFieldIsSet(
config.architecture,
"type_vocab_size",
)
pbutil.AssertFieldIsSet(
config.architecture,
"initializer_range",
)
pbutil.AssertFieldIsSet(
config.architecture,
"layer_norm_eps",
)
## Optional feature encoder attributes
if config.architecture.HasField("feature_encoder") and config.architecture.feature_encoder == True:
pbutil.AssertFieldIsSet(
config.architecture,
"feature_sequence_length"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_embedding_size"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_dropout_prob"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_singular_token_thr"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_max_value_token"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_token_range"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_num_attention_heads"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_transformer_feedforward"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_layer_norm_eps"
)
pbutil.AssertFieldIsSet(
config.architecture,
"feature_num_hidden_layers"
)
## .training params
pbutil.AssertFieldIsSet(
config.training,
"max_predictions_per_seq",
)
pbutil.AssertFieldIsSet(
config.training,
"num_train_steps",
)
pbutil.AssertFieldIsSet(
config.training,
"num_warmup_steps",
)
if config.HasField("pre_train_corpus"):
pbutil.AssertFieldIsSet(
config.training,
"num_pretrain_steps",
)
pbutil.AssertFieldIsSet(
config.training,
"num_prewarmup_steps",
)
pbutil.AssertFieldIsSet(
config.training,
"dupe_factor",
)
pbutil.AssertFieldIsSet(
config.training,
"masked_lm_prob",
)
pbutil.AssertFieldConstraint(
config.training,
"random_seed",
lambda x: 0 <= x,
"TrainingOptions.random_seed must be >= 0",
)
pbutil.AssertFieldConstraint(
config.training,
"sequence_length",
lambda x: 1 <= x,
"TrainingOptions.sequence_length must be >= 1",
)
pbutil.AssertFieldIsSet(
config.training, "shuffle_corpus_contentfiles_between_epochs"
)
pbutil.AssertFieldConstraint(
config.training,
"batch_size",
lambda x: 0 < x,
"TrainingOptions.batch_size must be > 0",
)
pbutil.AssertFieldIsSet(config.training, "optimizer")
if config.training.HasField("adam_optimizer"):
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"initial_learning_rate_micros",
lambda x: 0 <= x,
"AdamOptimizer.initial_learning_rate_micros must be >= 0",
)
if config.architecture.backend == model_pb2.NetworkArchitecture.KERAS_SEQ or \
config.architecture.backend == model_pb2.NetworkArchitecture.TENSORFLOW_SEQ:
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"learning_rate_decay_per_epoch_micros",
lambda x: 0 <= x,
"AdamOptimizer.learning_rate_decay_per_epoch_micros must be >= 0",
)
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"beta_1_micros",
lambda x: 0 <= x <= 1000000,
"AdamOptimizer.beta_1_micros must be >= 0 and <= 1000000",
)
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"beta_2_micros",
lambda x: 0 <= x <= 1000000,
"AdamOptimizer.beta_2_micros must be >= 0 and <= 1000000",
)
pbutil.AssertFieldConstraint(
config.training.adam_optimizer,
"normalized_gradient_clip_micros",
lambda x: 0 <= x,
"AdamOptimizer.normalized_gradient_clip_micros must be >= 0",
)
elif config.training.HasField("rmsprop_optimizer"):
pbutil.AssertFieldConstraint(
config.training.rmsprop_optimizer,
"initial_learning_rate_micros",
lambda x: 0 <= x,
"RmsPropOptimizer.initial_learning_rate_micros must be >= 0",
)
pbutil.AssertFieldConstraint(
config.training.rmsprop_optimizer,
"learning_rate_decay_per_epoch_micros",
lambda x: 0 <= x,
"RmsPropOptimizer.learning_rate_decay_per_epoch_micros must be >= 0",
)
else:
raise SystemError(
"Unrecognized value: 'TrainingOptions.optimizer'"
)
except Exception as e:
raise e
return config
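# Illustrative use of the validator above; the file name is hypothetical and
# pbutil.FromFile mirrors how protos are read elsewhere in the project:
#
#   config = pbutil.FromFile(pathlib.Path("model.pbtxt"), model_pb2.Model())
#   config = AssertIsBuildable(config)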
def BuildOptimizer(config: model_pb2.Model) -> "keras.optimizers.Optimizer":
"""Construct the training optimizer from config.
Args:
config: A Model config proto.
Raises:
InternalError: If the value of the optimizer field is not understood.
"""
# Deferred importing of Keras so that we don't have to activate the
# TensorFlow backend every time we import this module.
import keras
# We do not use *any* default values for arguments, in case for whatever
# reason the Keras API changes a default arg.
if config.training.HasField("adam_optimizer"):
opts = {}
opt = config.training.adam_optimizer
if opt.normalized_gradient_clip_micros:
opts["clipnorm"] = opt.normalized_gradient_clip_micros / 1e6
return keras.optimizers.Adam(
lr=opt.initial_learning_rate_micros / 1e6,
beta_1=opt.beta_1_micros / 1e6,
beta_2=opt.beta_2_micros / 1e6,
epsilon=None,
decay=opt.learning_rate_decay_per_epoch_micros / 1e6,
amsgrad=False,
**opts,
)
elif config.training.HasField("rmsprop_optimizer"):
opt = config.training.rmsprop_optimizer
return keras.optimizers.RMSprop(
lr=opt.initial_learning_rate_micros / 1e6,
      decay=opt.learning_rate_decay_per_epoch_micros / 1e6,
rho=0.9,
epsilon=None,
)
else:
raise SystemError(
"Unrecognized value: 'TrainingOptions.optimizer'"
)
def BuildKerasModel(
config: model_pb2.Model, vocabulary_size: int
) -> "keras.models.Sequential":
"""Build a Keras model from a Model proto.
Args:
config: A Model proto instance.
vocabulary_size: The number of tokens in the vocabulary.
Returns:
A Sequential model instance.
"""
# Deferred importing of Keras so that we don't have to activate the
# TensorFlow backend every time we import this module.
import keras
dropout = (config.architecture.post_layer_dropout_micros or 0) / 1e6
model = keras.models.Sequential()
layer = {
model_pb2.NetworkArchitecture.LSTM: keras.layers.LSTM,
model_pb2.NetworkArchitecture.RNN: keras.layers.RNN,
model_pb2.NetworkArchitecture.GRU: keras.layers.GRU,
}[config.architecture.neuron_type]
# The input layer.
model.add(
keras.layers.Embedding(
vocabulary_size,
config.architecture.embedding_size,
batch_input_shape=(
config.training.batch_size,
config.training.sequence_length,
),
)
)
model.add(keras.layers.Dropout(dropout))
# The recurrent network layers.
for _ in range(config.architecture.num_layers):
model.add(
layer(
config.architecture.neurons_per_layer,
return_sequences=True,
stateful=True,
)
)
model.add(keras.layers.Dropout(dropout))
# The output layer.
model.add(
keras.layers.TimeDistributed(
keras.layers.Dense(vocabulary_size, activation="softmax")
)
)
return model
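# A compile-step sketch tying BuildKerasModel and BuildOptimizer together. It is
# illustrative only: the categorical cross-entropy loss is an assumption here,
# not a value read from the config.
def _example_compile_sequential(config: model_pb2.Model, vocabulary_size: int) -> "keras.models.Sequential":
  """Build and compile the sequential model described by `config`."""
  model = BuildKerasModel(config, vocabulary_size)
  model.compile(
    loss = "categorical_crossentropy",
    optimizer = BuildOptimizer(config),
  )
  return model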
| 12,534 | 31.72846 | 157 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/lm_data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generator specifically used for Mask LM models (namely BERT)."""
import sys
import json
import time
import tqdm
import random
import progressbar
import copy
import glob
import humanize
import typing
import multiprocessing
import functools
import pathlib
import pickle
import numpy as np
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.models import sequence_masking
from deeplearning.benchpress.models import lm_database
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"write_text_dataset",
False,
"Set True for MaskLM data generator to write dataset in text format, along with the dataset record."
)
flags.DEFINE_integer(
"memory_limit",
4,
"Set maximum amount of available memory used for masking sequences in Gb. [Default]: 4",
)
flags.DEFINE_boolean(
"force_remake_dataset",
False,
"Force data generator to re-mask encoded dataset and store dataset record."
)
flags.DEFINE_boolean(
"store_datasets_to_DB",
False,
"Set True to store masked datasets to SQL Database for observation."
)
def AssertConfigIsValid(config: model_pb2.DataGenerator) -> model_pb2.DataGenerator:
"""
Parse data generator protobuf message.
Raise Exception if format is wrong.
"""
pbutil.AssertFieldConstraint(
config,
"datapoint_type",
lambda x: x == "kernel" or x == "statement",
"Valid options for datapoint_type are 'kernel' and 'statement'",
)
pbutil.AssertFieldConstraint(
config,
"datapoint_time",
lambda x: x == "online" or x == "pre",
"Valid options for datapoint_time are 'online' and 'pre'",
)
pbutil.AssertFieldIsSet(
config,
"use_start_end",
)
pbutil.AssertFieldIsSet(
config,
"steps_per_epoch",
)
pbutil.AssertFieldConstraint(
config,
"validation_split",
lambda x : 0 <= x <= 100,
"Validation split is expressed in [0-100]%."
)
if config.datapoint_type == "kernel":
pbutil.AssertFieldIsSet(
config,
"truncate_large_kernels",
)
if len(config.validation_set) > 0:
for val_opt in config.validation_set:
if val_opt.HasField("mask"):
pbutil.AssertFieldIsSet(
val_opt.mask,
"random_placed_mask",
)
elif val_opt.HasField("hole"):
if val_opt.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
val_opt.hole,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a hole's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
val_opt.hole,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if val_opt.hole.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
val_opt.hole.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
val_opt.hole.normal_distribution,
"variance",
)
elif not val_opt.hole.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
elif val_opt.HasField("mask_seq"):
if val_opt.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
val_opt.mask_seq,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a mask_seq's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
val_opt.mask_seq,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if val_opt.mask_seq.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
val_opt.mask_seq.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
val_opt.mask_seq.normal_distribution,
"variance",
)
elif not val_opt.mask_seq.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
# Parse masking technique for bert's data generator
pbutil.AssertFieldIsSet(config, "mask_technique")
if config.HasField("mask"):
pbutil.AssertFieldIsSet(
config.mask,
"random_placed_mask",
)
elif config.HasField("hole"):
if config.hole.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
config.hole,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a hole's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
config.hole,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if config.hole.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
config.hole.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
config.hole.normal_distribution,
"variance",
)
elif not config.hole.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
pbutil.AssertFieldIsSet(
config.hole,
"stage_training",
)
elif config.HasField("mask_seq"):
if config.mask_seq.HasField("absolute_length"):
pbutil.AssertFieldConstraint(
config.mask_seq,
"absolute_length",
lambda x : x > 0,
"absolute length is the upper bound range of a mask_seq's length. Therefore should be > 0."
)
else:
pbutil.AssertFieldConstraint(
config.mask_seq,
"relative_length",
lambda x : 0.0 < x <= 1.0,
"relative length must be between 0 and 100% of a kernel's actual length."
)
if config.mask_seq.HasField("normal_distribution"):
pbutil.AssertFieldIsSet(
config.mask_seq.normal_distribution,
"mean",
)
pbutil.AssertFieldIsSet(
config.mask_seq.normal_distribution,
"variance",
)
elif not config.mask_seq.HasField("uniform_distribution"):
raise ValueError("Hole length distribution has not been set.")
pbutil.AssertFieldIsSet(
config.mask_seq,
"stage_training",
)
return config
def _addStartEndPadToken(inp: typing.Union[list, tuple], tokenizer, trunc: int = None, seq_len: int = None) -> typing.Tuple[int, np.array]:
"""
  Inserts [START] and [END] tokens at the beginning and end of a sequence.
Arguments:
inp: input_sequence
Returns:
[START] + input_sequence + [END]
"""
tokenizer = pickle.loads(tokenizer)
features = None
if isinstance(inp, tuple):
inp, features = inp
try:
if trunc is not None:
inp = inp[:trunc]
assert len(inp) != 0, "Empty list provided."
assert tokenizer.padToken not in inp, "Use this function before padding a sequence!"
start = [tokenizer.startToken] if inp[0] != tokenizer.startToken else []
end = [tokenizer.endToken ] if inp[-1] != tokenizer.endToken else []
if isinstance(inp, np.ndarray):
inp = list(inp)
ret = start + inp + end
rlen = len(ret)
if seq_len is not None:
ret += [tokenizer.padToken] * (seq_len - len(ret))
if features:
return rlen, np.array(ret), features
else:
return rlen, np.array(ret)
except AssertionError:
return None
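# Illustrative call of the helper above (not used by the pipeline). Note that the
# tokenizer is passed *pickled*, mirroring the multiprocessing call sites; the
# 510/512 lengths are assumptions matching a 512-token model with [START]/[END].
def _start_end_pad_example(tokenizer: tokenizers.TokenizerBase,
                           encoded : typing.List[int],
                           ) -> typing.Tuple[int, np.array]:
  """Wrap an encoded kernel with [START]/[END], truncate to 510, pad to 512."""
  return _addStartEndPadToken(encoded, pickle.dumps(tokenizer), trunc = 510, seq_len = 512)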
class MaskLMDataGenerator(object):
"""Abstract class, shared among TORCH and TF BERT data generators."""
@property
def is_torch(self):
if self.file_extension == "pt_record":
return True
return False
def __init__(self, file_extension: str):
self.file_extension = file_extension
self.mask_func = sequence_masking.MPMaskSequence
self.hole_func = sequence_masking.MPHoleSequence
self.mask_seq_func = sequence_masking.HoleSequenceSeqMasks
self.dataset = None
self.corpus = None
self.tokenizer = None
self.config = None
self.cache = None
self.training_opts = None
self.pre_train = None
self.num_train_steps = None
self.steps_per_epoch = None
self.sample_batch_size = None
self.max_position_embeddings = None
self.num_epochs = None
self.feature_encoder = None
self.feature_tokenizer = None
self.feature_sequence_length = None
self.sampler = None
self.rngen = None
return
def TrainMaskLMBatchGenerator(self,
corpus : "corpuses.Corpus",
training_opts : model_pb2.TrainingOptions,
cache_path : pathlib.Path,
num_train_steps : int = None,
pre_train : bool = False,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> "data_generator.MaskLMDataGenerator":
"""Initializes data generator for training."""
self.cache = cache.mkcache(cache_path, "dataset")
self.cache.path.mkdir(exist_ok = True, parents = True)
self.dataset = {}
self.corpus = corpus
self.tokenizer = corpus.tokenizer
self.config = training_opts.data_generator
self.training_opts = training_opts
self.rngen = np.random # random.Random(training_opts.random_seed)
self.pre_train = pre_train
self.feature_encoder = feature_encoder
self.feature_tokenizer = feature_tokenizer
self.feature_sequence_length = feature_sequence_length
if num_train_steps:
self.num_train_steps = num_train_steps
else:
self.num_train_steps = self.training_opts.num_train_steps
shaped_corpus = self.createCorpus(self.cache.path)
distrib.barrier()
if self.config.datapoint_time == "pre":
if self.feature_encoder:
raise NotImplementedError("Pre masking corpus does not work with feature encoding model.")
# 'pre' pre-processes/masks training/validation/sampling corpus for the model to use.
# 'online' stores the raw data and masks them on the fly.
self.configDataset(shaped_corpus)
return self
def SampleMaskLMBatchGenerator(self,
model_opts : model_pb2.TrainingOptions,
sampler : "samplers.Sampler",
tokenizer : tokenizers.TokenizerBase,
seed : int,
sample_batch_size : int,
max_position_embeddings : int,
cache_path : pathlib.Path,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> "data_generator.MaskLMBatchGenerator":
"""Initializes data generator for inference."""
self.cache = cache.mkcache(cache_path, "dataset")
self.cache.path.mkdir(exist_ok = True, parents = True)
self.dataset = {}
self.sampler = sampler
self.corpus = sampler.sample_corpus
self.tokenizer = tokenizer
self.config = model_opts.data_generator
self.rngen = np.random
self.seed = seed
self.sample_batch_size = sample_batch_size
if sampler.is_active and FLAGS.sample_workload_size < (self.sample_batch_size * environment.WORLD_SIZE):
throttled_batch_size = int(FLAGS.sample_workload_size // environment.WORLD_SIZE)
l.logger().warn("Too many GPU devices for workload size. Throttling batch size from {} to {}".format(
self.sample_batch_size,
throttled_batch_size,
)
)
self.sample_batch_size = throttled_batch_size
self.max_position_embeddings = max_position_embeddings
self.feature_encoder = feature_encoder
self.feature_tokenizer = feature_tokenizer
self.feature_sequence_length = feature_sequence_length
self.training_opts = model_opts
self.training_opts.sequence_length = sampler.sequence_length
self.training_opts.batch_size = sampler.batch_size
return self
def configDataset(self,
shaped_corpus: np.array,
) -> None:
"""
Configs all necessary training and validation sets described in the model protobuf.
First constructs training set and optionally splits it into validation set, if selected in config.
Then configValidationSets is called which constructs any additional validation_set elements
provided in the model's config.
"""
if FLAGS.force_remake_dataset:
l.logger().warn("Force remaking datasets can cause lots of problems on an already trained model. Are you sure you want to proceed ? [y/n]")
a = input()
if a.lower() != "yes" and a.lower() != "y":
l.logger().warn("Overwriting dataset process was aborted. Good call.")
exit()
if len(glob.glob(str(self.cache.path / "train_dataset_*.{}".format(self.file_extension)))) == 0 or FLAGS.force_remake_dataset:
if self.config.validation_split == 0:
self._maskCorpus(
shaped_corpus, set_name = "train_dataset", path = self.cache.path, train_set = True
)
else:
split_index = (len(shaped_corpus) // 100) * self.config.validation_split
self._maskCorpus(
shaped_corpus[split_index:], set_name = "train_dataset", path = self.cache.path, train_set = True
)
self._maskCorpus(
shaped_corpus[:split_index], set_name = "validation_dataset", path = self.cache.path, train_set = False
)
else:
self.dataset["train_dataset"] = {
"file": sorted(glob.glob(str(self.cache.path / "train_dataset_*.{}".format(self.file_extension)))),
"txt" : sorted(glob.glob(str(self.cache.path / "train_dataset_*.txt"))),
}
if len(glob.glob(str(self.cache.path / "validation_dataset_*.{}".format(self.file_extension)))) != 0:
self.dataset["validation_dataset"] = {
"file": sorted(glob.glob(str(self.cache.path / "validation_dataset_*.{}".format(self.file_extension)))),
"txt" : sorted(glob.glob(str(self.cache.path / "validation_dataset_*.txt"))),
}
self.configValidationSets(self.config.validation_set, shaped_corpus, self.cache.path)
return
def configValidationSets(self,
valset_list: typing.List,
shaped_corpus: np.array,
path: pathlib.Path,
) -> None:
"""
Mask and store any extra validation datasets defined into
model protobuf.
Example:
validation_set {
max_predictions_per_seq: 10
hole {
hole_length: 15
uniform_distribution: true
}
}
Arguments:
valset_list: list of validation_set items
Returns:
None
"""
for valset in valset_list:
set_name = "pred_{}_{}".format(
valset.max_predictions_per_seq,
"mask" if valset.HasField("mask") else "hole_{}".format(valset.hole.absolute_length
if valset.hole.HasField("absolute_length")
else valset.hole.relative_length
)
)
if set_name in self.dataset or len(glob.glob(str(path / "{}_*.{}".format(set_name, self.file_extension)))) > 0:
continue
self._maskCorpus(
shaped_corpus, train_set = False, set_name = set_name, path = path, config = valset
)
return
def configSampleSets(self) -> typing.List[pathlib.Path]:
"""
Parses the types of datasets asked from sampler.
These can be training, validation or a custom sample set
(defined by type of target and hole/mask specs).
If the set does not exist, it is constructed.
Returns:
A list of paths for the requested datasets.
Raises:
FileNotFoundError:
In case sampler asks for validation set,
but this had not been constructed during training.
"""
if self.sampler.config.HasField("train_set"):
path = self.cache.path
sampledDataset = "train_dataset"
elif self.sampler.config.HasField("validation_set"):
path = self.cache.path
sampledDataset = "validation_dataset"
elif self.sampler.config.HasField("sample_set"):
path = self.cache.path
sampledDataset = "pred_{}_{}".format(
self.sampler.config.sample_set.max_predictions_per_seq,
"mask" if self.sampler.config.sample_set.HasField("mask")
else "hole_{}".format(self.sampler.config.sample_set.hole.absolute_length
if self.sampler.config.sample_set.hole.HasField("absolute_length")
else self.sampler.config.sample_set.hole.relative_length)
)
elif self.sampler.config.HasField("sample_corpus"):
path = self.sampler.corpus_directory
sampledDataset = "pred_{}_{}".format(
self.sampler.config.sample_corpus.corpus_config.max_predictions_per_seq,
"mask" if self.sampler.config.sample_corpus.corpus_config.HasField("mask")
else "hole_{}".format(self.sampler.config.sample_corpus.corpus_config.hole.absolute_length
                      if self.sampler.config.sample_corpus.corpus_config.hole.HasField("absolute_length")
                      else self.sampler.config.sample_corpus.corpus_config.hole.relative_length)
)
else:
raise ValueError("Unknown dataset type")
return self.getDatasetPath(sampledDataset, path)
def getDatasetPath(self,
set_name: str,
path: pathlib.Path,
) -> typing.List[pathlib.Path]:
"""
Based on a set name, search cache path for its existence.
If not existing, get original pickled corpus, do the masking
and save dataset in pt/tf_record format.
Returns list of created datasets.
"""
path_search = lambda: sorted(
glob.glob(
str(path / "{}_*.{}".format(set_name, self.file_extension))
)
)
path_list = path_search()
if len(path_list) == 0:
if set_name == "validation_dataset":
raise FileNotFoundError("Corpus had not been split in train-val, therefore validation dataset is not found.")
elif set_name == "train_dataset":
raise FileNotFoundError("Trying to sample training dataset, but it doesn't exist!")
shaped_corpus = self.createCorpus(path)
distrib.barrier()
if self.sampler.config.HasField("sample_set"):
config_list = [self.sampler.config.sample_set]
elif self.sampler.config.HasField("sample_corpus"):
config_list = [self.sampler.config.sample_corpus.corpus_config]
else:
raise ValueError("sampler sets can either be sample_set or sample_corpus")
self.configValidationSets(config_list, shaped_corpus, path)
return path_search()
def createCorpus(self, path: pathlib.Path) -> np.array:
"""
    Constructs the training corpus and stores it in shaped_corpus.
    Each corpus datapoint is either a single kernel or a random
    sequence of size sequence_length (legacy).
    If the corpus has been previously pickled and stored, it is loaded instead.
"""
start_time = time.time()
# Set corpus dimension parameters
sequence_length = self.training_opts.sequence_length
effect_seq_length = sequence_length - (2 if self.config.use_start_end else 0)
batch_size = self.training_opts.batch_size
dupe_factor = self.training_opts.dupe_factor
shuffle = self.training_opts.shuffle_corpus_contentfiles_between_epochs
pad = [self.tokenizer.padToken ]
start = [self.tokenizer.startToken ]
end = [self.tokenizer.endToken ]
shaped_corpus = None
corpus_file = "{}corpus.pkl".format("pre_" if self.pre_train else "")
# Monitor counts actual length distribution of kernel instances.
kernel_length_monitor = monitors.FrequencyMonitor(path, "{}kernel_length".format("pre_" if self.pre_train else ""))
# Token frequency distribution monitor.
if not self.pre_train:
feature_monitors = {
ftype: monitors.CategoricalDistribMonitor(
path,
"{}{}_distribution".format("pre_" if self.pre_train else "", ftype)
)
for ftype in extractor.extractors.keys()
}
if (path / corpus_file).exists():
with open(path / corpus_file, 'rb') as infile:
shaped_corpus = pickle.load(infile)
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.config.steps_per_epoch
self.steps_per_epoch = self.config.steps_per_epoch
l.logger().info(
"Loaded from file corpus of {} examples in {} ms.".format(
humanize.intcomma(len(shaped_corpus)),
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
return shaped_corpus
# generate a kernel corpus
if (path / "text_corpus.pkl").exists():
# Only sampler writes a text_corpus.pkl, to do online or active sampling.
# The reason is, corpus is saved in text format, to be picked up with the
# right tokenizer. And that is the model's tokenizer.
with open(path / "text_corpus.pkl", 'rb') as infile:
encoded_corpus = [self.tokenizer.TokenizeString(x) for x in pickle.load(infile)]
else:
if self.pre_train:
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.config.steps_per_epoch
self.steps_per_epoch = self.config.steps_per_epoch
if environment.WORLD_RANK == 0:
if len(glob.glob(str(path / "pre_corpus_*.pkl"))) > 0:
return []
encoded_corpus = []
cache_lengths = {}
chunk_size = 250000
i, ch_idx = 0, 0
bar = tqdm.tqdm(total = self.corpus.encoded.size, desc = "Chunk pre-train corpus")
pool = multiprocessing.Pool()
l.logger().info("Processing pre-train corpus chunks...")
l.logger().warn("The routine below does not provide the features.")
for dp in pool.imap_unordered(
functools.partial(
_addStartEndPadToken,
tokenizer = pickle.dumps(self.tokenizer),
trunc = effect_seq_length,
seq_len = sequence_length),
self.corpus.GetTrainingDataGenerator()):
if dp:
input_features = None
if self.feature_encoder:
rlen, enc_kernel, input_features = dp
else:
rlen, enc_kernel = dp
kernel_length_monitor.register(rlen)
if self.feature_encoder:
            for fspace in extractor.extractors.keys():
if fspace in input_features:
encoded_features = self.feature_tokenizer.TokenizeFeatureVector(input_features[fspace], fspace = fspace, seq_len = self.feature_sequence_length)
encoded_corpus.append((enc_kernel, encoded_features))
else:
encoded_corpus.append(enc_kernel)
i += 1
if i % chunk_size == 0:
encoded_corpus = np.array(encoded_corpus)
corpus_file = "pre_corpus_{}.pkl".format(ch_idx)
cache_lengths[corpus_file] = len(encoded_corpus)
l.logger().info("Storing chunk {}, len: {}".format(ch_idx, encoded_corpus.shape))
with open(path / corpus_file, 'wb') as outf:
pickle.dump(encoded_corpus, outf, protocol = 4)
with open(path / "pre_lengths_cache.json", 'w') as outf:
json.dump(cache_lengths, outf)
ch_idx += 1
encoded_corpus = []
bar.update(1)
if encoded_corpus:
if self.feature_encoder:
encoded_corpus = np.array(encoded_corpus, dtype = object)
else:
encoded_corpus = np.array(encoded_corpus)
l.logger().info("Storing chunk {}, len: {}".format(ch_idx, encoded_corpus.shape))
corpus_file = "pre_corpus_{}.pkl".format(ch_idx)
cache_lengths[corpus_file] = len(encoded_corpus)
with open(path / corpus_file, 'wb') as outf:
pickle.dump(encoded_corpus, outf, protocol = 4)
with open(path / "pre_lengths_cache.json", 'w') as outf:
json.dump(cache_lengths, outf)
kernel_length_monitor.plot()
pool.close()
else:
return []
return encoded_corpus
else:
if environment.WORLD_RANK == 0:
if not self.feature_encoder:
encoded_corpus = self.corpus.GetTrainingData(sequence_length = effect_seq_length if not self.config.truncate_large_kernels else None)
else:
encoded_corpus = self.corpus.GetTrainingDataWFeatures(sequence_length = effect_seq_length if not self.config.truncate_large_kernels else None)
if self.config.datapoint_type == "kernel":
if environment.WORLD_RANK == 0:
# Reject larger than sequence length
initial_length = copy.deepcopy(len(encoded_corpus))
if not self.pre_train:
# Get features of fitting dataset within sequence length
for feature in self.corpus.GetTrainingFeatures(effect_seq_length):
for ftype, fvector in feature.items():
feature_monitors[ftype].register(fvector)
if self.feature_encoder:
training_features = [x for _, x in encoded_corpus]
encoded_corpus = [x for x, _ in encoded_corpus]
idx, t = set(), []
if self.config.truncate_large_kernels:
for i, x in enumerate(encoded_corpus):
if len(x[:effect_seq_length]) <= effect_seq_length:
t.append(list(x[:effect_seq_length]))
else:
idx.add(i)
else:
for i, x in enumerate(encoded_corpus):
if len(x) <= effect_seq_length:
t.append(list(x))
else:
idx.add(i)
encoded_corpus = t
if self.feature_encoder:
training_features = [x for i, x in enumerate(training_features) if i not in idx]
reduced_length = copy.deepcopy(len(encoded_corpus))
# Add start/end tokens
if self.config.use_start_end:
encoded_corpus = [self._addStartEndToken(kf) for kf in encoded_corpus]
# Register the actual lengths before padding.
kernel_length_monitor.register([len(x) for x in encoded_corpus])
# pad sequences to sequence length
encoded_corpus = np.array([x + pad * (sequence_length - len(x)) for x in encoded_corpus])
if self.feature_encoder:
expanded_corpus = []
encoded_features = []
for dp, fvec in zip(encoded_corpus, training_features):
for fspace in extractor.extractors.keys():
if fspace in fvec and fvec[fspace]:
expanded_corpus.append(dp)
encoded_features.append(self.feature_tokenizer.TokenizeFeatureVector(fvec[fspace], fspace, self.feature_sequence_length))
shaped_corpus = [[src, feats] for src, feats in zip(expanded_corpus, encoded_features)]
else:
shaped_corpus = encoded_corpus
# Shuffle
if shuffle:
self.rngen.shuffle(shaped_corpus)
        assert len(shaped_corpus) != 0, "Not enough data. All kernels have been rejected."
# Set corpus epoch parameters
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.config.steps_per_epoch
self.steps_per_epoch = self.config.steps_per_epoch
l.logger().info("{} kernels were rejected (larger than sequence_length)".format(initial_length - reduced_length))
l.logger().info(
"Loaded corpus of shape {}x{} multiplied by dupe factor: {} in {} ms.".format(
len(shaped_corpus),
sequence_length,
dupe_factor,
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
else:
# Set corpus epoch parameters
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.config.steps_per_epoch
self.steps_per_epoch = self.config.steps_per_epoch
elif self.config.datapoint_type == "statement":
## This branch is legacy data processing and does not support DDP.
if shuffle:
self.rngen.shuffle(encoded_corpus)
encoded_corpus = np.concatenate(encoded_corpus)
# encoded_corpus = np.tile(encoded_corpus, dupe_factor)
# Set corpus dimension parameters
self.steps_per_epoch = len(encoded_corpus) // (batch_size * sequence_length * dupe_factor)
      assert self.steps_per_epoch != 0, "Not enough data. Use smaller sequence_length and/or batch_size"
if self.num_train_steps:
self.num_epochs = self.num_train_steps // self.steps_per_epoch
# clipped_corpus_length = dupe_factor * self.steps_per_epoch * batch_size * sequence_length
clipped_corpus_length = self.steps_per_epoch * batch_size * sequence_length
clipped_corpus = encoded_corpus[:clipped_corpus_length]
# shaped_corpus = np.split(clipped_corpus, batch_size * self.steps_per_epoch * dupe_factor, 0)
shaped_corpus = np.split(clipped_corpus, batch_size * self.steps_per_epoch, 0)
# Register the actual lengths before padding.
kernel_length_monitor.register([len(x) for x in shaped_corpus])
np_corpus = np.asarray(shaped_corpus)
assert np_corpus.ndim == 2, "Wrong dimensions for shaped_corpus: {}".format(np_corpus.shape)
assert np_corpus.shape[1] == sequence_length, "Second dimension is not equal to sequence length: {}".format(np_corpus.shape[1])
l.logger().info(
"Loaded corpus of {} tokens (clipped last {} tokens) in {} ms.".format(
humanize.intcomma(clipped_corpus_length),
humanize.intcomma(len(encoded_corpus) - clipped_corpus_length),
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
else:
raise ValueError("Unrecognized datapoint_type: {}".format(self.config.datapoint_type))
if environment.WORLD_RANK == 0:
kernel_length_monitor.plot()
if not self.pre_train:
for fm in feature_monitors.values():
fm.plot()
with open(path / corpus_file, 'wb') as outf:
pickle.dump(shaped_corpus, outf)
return shaped_corpus
def _maskCorpus(self,
corpus: np.array,
train_set: bool,
set_name: str,
path: pathlib.Path,
config = None,
)-> None:
"""
    Entrypoint function that inserts masks or holes into the corpus.
    Arguments:
      corpus: [num_datapoints, sequence_length],
              where num_datapoints = num_batches * dupe_factor * batch_size
    Returns:
      None. The masked corpus is written out to dataset record files.
"""
# Set-up self.dataset entry
self.dataset[set_name] = {
'file': [],
'txt' : [],
}
# Set up max predictions
if config is None:
config = self.config
max_predictions = self.training_opts.max_predictions_per_seq
else:
max_predictions = config.max_predictions_per_seq
# Apply dupe factor in stages to avoid stressing RAM.
# Limit has been set to 4GB.
single_item_bytes = self.estimatedSize(
1, self.training_opts.sequence_length, self.training_opts.max_predictions_per_seq
)
corpus_bytes = single_item_bytes * len(corpus) + sys.getsizeof(corpus)
# max_dupe is how many times (dupes) the corpus can fit into a dataset record file.
max_dupe = min((FLAGS.memory_limit * (1024**3)) // corpus_bytes, self.training_opts.dupe_factor)
assert max_dupe != 0, "Increase RAM limit to fit corpus."
iterations = self.training_opts.dupe_factor // max_dupe
remaining = self.training_opts.dupe_factor % max_dupe
def apply_dupe_factor(arr: np.array, iters: int) -> np.array:
if iters == 0:
return np.asarray([], dtype = arr.dtype)
start_len = len(arr)
arr = np.expand_dims(arr, 0) # 2D->3D
arr = np.repeat(arr, iters, 0) # -> Repeat 2D blocks over 3D space
arr = arr.reshape(iters * start_len, -1) # Flatten repetitive 2D blocks, into 2D array
return arr
extended_corpus = apply_dupe_factor(corpus, iterations)
remaining_corpus = apply_dupe_factor(corpus, remaining)
l.logger().info("Estimated element size: {}. Dupe factor {} split into {} iterations of {} (plus {} remaining)".format(
humanize.naturalsize(single_item_bytes), self.training_opts.dupe_factor, iterations, max_dupe, remaining
)
)
pool = multiprocessing.Pool()
distribution = None
# Specify the desired masking routine
if config.HasField("hole"):
distribution = distributions.Distribution.FromHoleConfig(
config.hole, path, "hole_length_{}".format(set_name)
)
maskedSeq = lambda c: pool.imap_unordered(
functools.partial(self.hole_func,
train_set = train_set,
max_predictions = max_predictions,
pickled_distribution = pickle.dumps(distribution),
pickled_tokenizer = pickle.dumps(self.tokenizer),
training_opts = self.training_opts,
is_torch = self.is_torch,
),
c
)
elif config.HasField("mask_seq"):
distribution = distributions.Distribution.FromHoleConfig(
config.mask_seq, path, "mask_seq_length_{}".format(set_name)
)
maskedSeq = lambda c: pool.imap_unordered(
functools.partial(self.mask_seq_func,
train_set = train_set,
max_predictions = max_predictions,
pickled_distribution = pickle.dumps(distribution),
pickled_tokenizer = pickle.dumps(self.tokenizer),
training_opts = self.training_opts,
is_torch = self.is_torch,
),
c
)
elif config.HasField("mask"):
maskedSeq = lambda c: pool.imap_unordered(
functools.partial(self.mask_func,
train_set = train_set,
max_predictions = max_predictions,
config = config,
pickled_tokenizer = pickle.dumps(self.tokenizer),
training_opts = self.training_opts,
is_torch = self.is_torch,
),
c
)
else:
raise AttributeError("target predictions can only be mask or hole {}".format(self.config))
# Token frequency distribution monitor.
token_monitor = monitors.NormalizedFrequencyMonitor(path, "{}_token_distribution".format(set_name))
# Monitor counts target idxs of a hole as absolute index value.
abs_start_idx_monitor = monitors.FrequencyMonitor(path, "{}_abs_target_mask_idx".format(set_name))
# Monitors count of target indices (in percentile) that were hidden by a hole.
start_idx_monitor = monitors.FrequencyMonitor(path, "{}_target_mask_idx".format(set_name))
# Monitor counts all absolute indices hidden by a hole.
    abs_idx_monitor = monitors.FrequencyMonitor(path, "{}_abs_mask_idx".format(set_name))
# Monitors count of indices (in percentile) that were hidden by a hole.
idx_monitor = monitors.FrequencyMonitor(path, "{}_mask_idx".format(set_name))
# Monitors if left or right direction was picked for a hole expansion.
direction_monitor = monitors.FrequencyMonitor(path, "{}_masking_direction".format(set_name))
if FLAGS.store_datasets_to_DB:
lm_db = lm_database.LMDatabase("sqlite:///{}".format(self.cache.path / "{}.db".format(set_name)))
## Core loop of masking.
masked_corpus = []
bar = tqdm.tqdm(total = len(corpus) * self.training_opts.dupe_factor, desc = "Masking datapoints")
kernel_idx = 0
try:
for iteration in range(iterations + 1):
masked_corpus = []
# Select between normal iterations or dupe factor residual and shuffle
if iteration != iterations:
multiproc_corpus = maskedSeq(extended_corpus)
if self.training_opts.shuffle_corpus_contentfiles_between_epochs:
self.rngen.shuffle(extended_corpus)
elif remaining != 0:
multiproc_corpus = maskedSeq(remaining_corpus)
if self.training_opts.shuffle_corpus_contentfiles_between_epochs:
self.rngen.shuffle(remaining_corpus)
else:
continue
# Do parallel masking over corpus
for kernel, masked_idxs in multiproc_corpus:
if distribution:
distribution.register([mid.hole_length for mid in masked_idxs])
try:
if self.is_torch:
actual_length = np.where(kernel['original_input'] == self.tokenizer.padToken)[0][0]
else:
actual_length = np.where(kernel.original_input == self.tokenizer.padToken)[0][0]
except IndexError:
actual_length = len(kernel['original_input'])
token_monitor.register([
self.tokenizer.decoder[int(x)]
for x in kernel['input_ids'] if x != self.tokenizer.padToken]
)
for hole in masked_idxs:
hole_idx = hole.pos_index
selected_idx = hole.pos_index
if hole.extend_left:
selected_idx += hole.hole_length - 1 if hole.hole_length != 0 else 0
abs_start_idx_monitor.register(selected_idx)
start_idx_monitor.register(int(2 * round(100.0 * (selected_idx / actual_length) / 2.0)))
abs_idx_monitor.register([hole_idx + i for i in range(hole.hole_length)])
idx_monitor.register([int(2 * round(100.0 * ((hole_idx + i) / actual_length) / 2.0)) for i in range(hole.hole_length)])
direction_monitor.register(1 if hole.extend_left else 0)
masked_corpus.append(kernel)
bar.update(1)
kernel_idx += 1
if kernel_idx == 1:
self.LogBatchTelemetry(
self.training_opts.batch_size, self.training_opts.sequence_length,
max_predictions, self.steps_per_epoch, self.num_epochs
)
if FLAGS.store_datasets_to_DB:
with lm_db.Session(commit = True) as s:
count = lm_db.count
for idx, kernel in enumerate(masked_corpus):
s.add(
lm_database.LMInstance(**lm_database.LMInstance.FromArgs(
id = count + idx,
original_input = self.tokenizer.tokensToString(kernel['original_input'], ignore_token = self.tokenizer.padToken),
input_ids = self.tokenizer.tokensToString(kernel['input_ids'], ignore_token = self.tokenizer.padToken),
masked_lm_lengths = kernel['masked_lm_lengths'],
masked_lm_predictions = [self.tokenizer.tokensToString([x]) for x in kernel['mask_labels'] if x != -100],
))
)
# write masked_corpus before flushing the list
self.dataset[set_name]['file'].append(
path / "{}_{}.{}".format(set_name, iteration, self.file_extension)
)
self.dataset[set_name]['txt'].append(
path / "{}_{}.txt".format(set_name, iteration)
)
self._saveCorpusRecord({
'corpus': masked_corpus,
'file' : path / "{}_{}.{}".format(set_name, iteration, self.file_extension),
'txt' : path / "{}_{}.txt".format(set_name, iteration)
})
pool.close()
except KeyboardInterrupt as e:
pool.terminate()
raise e
except Exception as e:
pool.terminate()
raise e
if distribution:
distribution.plot()
token_monitor.plot()
start_idx_monitor.plot()
idx_monitor.plot()
direction_monitor.plot()
return
def estimatedSize(self, batch_size, sequence_length, max_predictions_per_seq):
"""
Calculate estimated size of single training example as a dictionary.
"""
return (
2 * np.zeros([batch_size, 1], dtype = np.int64).nbytes +
5 * np.zeros([batch_size, sequence_length], dtype = np.int64).nbytes +
2 * np.zeros([batch_size, max_predictions_per_seq], dtype = np.int64).nbytes
)
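  # Worked example (int64 entries are 8 bytes): with batch_size = 32,
  # sequence_length = 768 and max_predictions_per_seq = 10 the estimate is
  # 2*(32*1*8) + 5*(32*768*8) + 2*(32*10*8) = 988,672 bytes, i.e. ~0.94 MiB per batch.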
def LogBatchTelemetry(self,
batch_size: int,
sequence_length: int,
max_predictions_per_seq: int,
steps_per_epoch: int,
num_epochs: int,
) -> None:
"""Log analytics about the batch."""
if steps_per_epoch is not None and num_epochs is not None:
l.logger().info(
"Memory: {} per batch, {} per epoch, {} total.".format(
humanize.naturalsize(self.estimatedSize(1, sequence_length, max_predictions_per_seq) * batch_size, binary = True),
humanize.naturalsize(self.estimatedSize(1, sequence_length, max_predictions_per_seq) * batch_size * steps_per_epoch, binary = True),
humanize.naturalsize(self.estimatedSize(1, sequence_length, max_predictions_per_seq) * batch_size * steps_per_epoch * num_epochs, binary = True),
)
)
else:
l.logger().info(
"Memory: {} per batch.".format(
humanize.naturalsize(self.estimatedSize(1, sequence_length, max_predictions_per_seq) * batch_size, binary = True),
)
)
def _padToMaxPosition(self, input_sample):
"""
Pads a given sequence to the maximum allowed sequence length, which is max_position_embeddings
Arguments:
input_sample: np.array or list that represents a sequence
Returns:
padded sequence in np.array format
"""
return np.concatenate([input_sample,
np.array([self.tokenizer.padToken] *
(self.max_position_embeddings - len(input_sample)), dtype = np.int64)
])
def _addStartEndToken(self, inp: list) -> list:
"""
    Inserts [START] and [END] tokens at the beginning and end of a sequence.
Arguments:
inp: input_sequence
Returns:
[START] + input_sequence + [END]
"""
assert len(inp) != 0, "Empty list provided."
assert self.tokenizer.padToken not in inp, "Use this function before padding a sequence!"
start = [self.tokenizer.startToken] if inp[0] != self.tokenizer.startToken else []
end = [self.tokenizer.endToken ] if inp[-1] != self.tokenizer.endToken else []
if isinstance(inp, np.ndarray):
inp = list(inp)
return start + inp + end
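  # Illustrative example (hypothetical token ids, not part of the original module):
  # with self.tokenizer.startToken == 1 and self.tokenizer.endToken == 2,
  #   self._addStartEndToken([5, 6, 7])  ->  [1, 5, 6, 7, 2]
  #   self._addStartEndToken([1, 5, 6])  ->  [1, 5, 6, 2]   (existing [START] is kept)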
def GetShortSummary(self) -> str:
return (
"Data Generator: "
"\n"
f" dupe_factor: {self.training_opts.dupe_factor}"
"\n"
f" sequence_length: {self.training_opts.sequence_length}"
"\n"
f" batch_size: {self.training_opts.batch_size}"
"\n"
"LM config:"
"\n"
f" {self.config.hole if True else self.config.mask}"
"\n"
)
| 46,191 | 40.689531 | 164 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/from_pretrained.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pre-trained BenchPress model instances.
In this mode, a checkpoint is fetched online and the model is only used
for interactive sampling.
"""
import os
import typing
import gdown
import shutil
import threading
import pathlib
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.samplers import sample_observers
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.samplers import samples_database
from deeplearning.benchpress.models import language_models
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.proto import sampler_pb2
from deeplearning.benchpress.proto import benchpress_pb2
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util import logging as l
from absl import app, flags
FLAGS = flags.FLAGS
PRETRAINED_MODELS = {
"base_opencl": {
'config' : "1Cr9I4b5mSZJgX9LqtC_38WRfEDkyJ9WO",
'tokenizer' : "14ZPYFgL-XT_Fknwmgp6nOatvLFS67QM1",
'checkpoint' : "1ncwxzR23_a6IQqt4F4gIgTeduggD_N9w",
}
}
class PreTrainedModel(object):
"""
Pre-trained instance wrapper for online checkpoint fetching
and sampling.
"""
@classmethod
def FromID(cls, name: str = "base_opencl") -> "PreTrainedModel":
if name not in PRETRAINED_MODELS:
raise ValueError("Pre-trained model {} does not exist. Available models: {}".format(name, ', '.join([x for x in PRETRAINED_MODELS.keys()])))
tdir = "/tmp/"
if FLAGS.local_filesystem:
tdir = FLAGS.local_filesystem
    config_path     = pathlib.Path(tdir) / "from_pretrained" / name / "config.pbtxt"
    tokenizer_path  = pathlib.Path(tdir) / "from_pretrained" / name / "tokenizer.pkl"
    checkpoint_path = pathlib.Path(tdir) / "from_pretrained" / name / "model-0.pt"
if environment.WORLD_RANK == 0:
config_path.parent.mkdir(exist_ok = True, parents = True)
if not config_path.exists():
gdown.download("https://drive.google.com/uc?id={}".format(PRETRAINED_MODELS[name]['config']), str(config_path))
if not tokenizer_path.exists():
gdown.download("https://drive.google.com/uc?id={}".format(PRETRAINED_MODELS[name]['tokenizer']), str(tokenizer_path))
if not checkpoint_path.exists():
gdown.download("https://drive.google.com/uc?id={}".format(PRETRAINED_MODELS[name]['checkpoint']), str(checkpoint_path))
model_config = pbutil.FromFile(config_path, benchpress_pb2.Instance()).language_model
os.environ["PWD"] = str(config_path.parent)
FLAGS.override_preprocessing = True
FLAGS.override_encoding = True
return PreTrainedModel(model_config, tokenizer_path, checkpoint_path)
@property
def tokenizer(self) -> tokenizers.TokenizerBase:
return self.language_model.tokenizer
def __init__(self,
config : model_pb2.Model,
tokenizer_path : tokenizers.TokenizerBase,
checkpoint : pathlib.Path,
):
"""
Instantiate a model.
Args:
config: A Model message.
Raises:
TypeError: If the config argument is not a Model proto.
      UserError: In case of an invalid config.
"""
self.language_model = language_models.Model(config)
if environment.WORLD_RANK == 0:
if not self.language_model.corpus.tokenizer_path.exists():
shutil.copyfile(tokenizer_path, self.language_model.corpus.tokenizer_path)
if not (self.language_model.cache.path / "checkpoints" / "backup_tokenizer.pkl").exists():
shutil.copyfile(tokenizer_path, self.language_model.cache.path / "checkpoints" / "backup_tokenizer.pkl")
if not (self.language_model.cache.path / "checkpoints" / "model-0.pt").exists():
shutil.copyfile(checkpoint, self.language_model.cache.path / "checkpoints" / "model-0.pt")
if not (self.language_model.cache.path / "checkpoints" / "checkpoint.meta").exists():
with open(self.language_model.cache.path / "checkpoints" / "checkpoint.meta", 'w') as outf:
outf.write("train_step: 0")
if environment.WORLD_SIZE > 1:
distrib.barrier()
if pytorch.num_gpus == 0:
l.logger().warn("No GPUs detected. This process is going to be *very* slow on the CPU.")
return
def Sample(self,
prompt: str,
batch_size: int = 1,
temperature: float = 0.6,
sample_workload_size: int = 1,
sample_indices_limit: int = None,
print_samples: bool = True,
seed: int = None,
) -> typing.Tuple[str, samples_database.Sample]:
"""
Get a string input, tokenize and sample the backend online for a full code.
Args:
prompt:
String input to the language model.
batch_size:
Batch size for model inference.
temperature:
Sampling temperature
sample_workload_size:
How many batches to generate.
sample_indices_limit:
Add a limit to how many tokens BenchPress will generate for a hole.
By default BenchPress generates tokens until it thinks a sequence is complete
        ([ENDHOLE] is generated). By setting this value, the generation loop is
        stopped once this threshold is exceeded.
"""
FLAGS.sample_workload_size = sample_workload_size
if sample_indices_limit is not None:
FLAGS.sample_indices_limit = sample_indices_limit
self.language_model.Create()
if "[START]" in prompt or "[END]" in prompt:
l.logger().error("Do not add [START] and [END] manually. They will be added automatically by the tokenizer.")
return ""
prompt = "[START]" + prompt + "[END]"
test_sampler = self.getTestSampler(prompt, batch_size, temperature, self.language_model.config.architecture.max_position_embeddings)
obs = [sample_observers.InMemorySampleSaver()]
if print_samples:
obs.append(sample_observers.PrintSampleObserver())
self.language_model.Sample(test_sampler, obs, num_batches = 1, seed = seed)
return [opencl.ClangFormat(x.text) for x in obs[0].samples], obs[0].samples
def getTestSampler(self,
prompt : str,
batch_size : int,
temperature : float,
sequence_length : int
) -> samplers.Sampler:
sampler_str = [
"start_text: \"{}\"".format(prompt),
"batch_size: {}".format(batch_size),
"sequence_length: {}".format(sequence_length),
"temperature_micros: {}".format(int(temperature * 10e6)),
]
mock_config = pbutil.FromString('\n'.join(sampler_str), sampler_pb2.Sampler())
sampler = samplers.Sampler(mock_config, sample_db_name = None)
if sampler.isFixedStr:
sampler.Specialize(self.tokenizer)
return sampler
def main(*args, **kwargs) -> None:
return
def boot() -> None:
app.run(main)
return
th = threading.Thread(target = boot)
th.daemon = True
th.start()
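# Example usage (an illustrative, unverified sketch; "base_opencl" is the only
# checkpoint id listed in PRETRAINED_MODELS above, and the OpenCL prompt is made up):
#
#   from deeplearning.benchpress.models import from_pretrained
#
#   model = from_pretrained.PreTrainedModel.FromID("base_opencl")
#   texts, samples = model.Sample(
#     "kernel void A(global int* a, global int* b, const int N){",
#     batch_size  = 2,
#     temperature = 0.8,
#   )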
| 7,766 | 38.426396 | 146 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/language_models.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The BenchPress language model."""
import os
import time
import shutil
import pathlib
import typing
import datetime
import humanize
import numpy as np
from deeplearning.benchpress.samplers import sample_observers as sample_observers_lib
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import commit
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import hidden_state
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.models import builders
from deeplearning.benchpress.models.keras_sequential import keras_sequential
from deeplearning.benchpress.models.tf_sequential import tf_sequential
from deeplearning.benchpress.models.tf_bert import tf_bert
from deeplearning.benchpress.models.torch_bert import torch_bert
from deeplearning.benchpress.models.incoder import incoder
from deeplearning.benchpress.proto import internal_pb2
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.preprocessors import opencl
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"num_train_steps",
None,
"Bypass num_train_steps provided by protobuf file."
)
flags.DEFINE_integer(
"num_pretrain_steps",
None,
"Bypass num_pretrain_steps provided by protobuf file."
)
flags.DEFINE_integer(
"num_epochs",
None,
"Bypass num_epochs provided by protobuf file."
)
flags.DEFINE_integer(
"sample_workload_size",
2048,
"Select size of workload samples for single sample step, per node."
)
class Model(object):
"""A BenchPress language model.
Please note model instances should be treated as immutable. Upon
instantiation, a model's properties are used to determine its hash. If you
modify a property after instantiation, the hash will be out of date, which
can lead to bad things happening.
"""
@property
def tokenizer(self) -> tokenizers.TokenizerBase:
return self.corpus.tokenizer
@property
def is_trained(self) -> bool:
return self.backend.is_trained
@property
def hidden_state_size(self) -> int:
return self.backend.hidden_state_size
def __init__(self, config: model_pb2.Model):
"""Instantiate a model.
Args:
config: A Model message.
Raises:
TypeError: If the config argument is not a Model proto.
      UserError: In case of an invalid config.
"""
# Error early, so that a cache isn't created.
if not isinstance(config, model_pb2.Model):
t = type(config).__name__
raise TypeError(f"Config must be a Model proto. Received: '{t}'")
self.config = model_pb2.Model()
# Validate config options.
self.config.CopyFrom(builders.AssertIsBuildable(config))
if FLAGS.num_train_steps:
self.config.training.num_train_steps = FLAGS.num_train_steps
if FLAGS.num_pretrain_steps:
self.config.training.num_pretrain_steps = FLAGS.num_pretrain_steps
if FLAGS.num_epochs:
self.config.training.num_epochs = FLAGS.num_epochs
# Initialize corpuses
self.corpus = corpuses.Corpus(config.corpus)
self.pre_train_corpus = None
if config.HasField("pre_train_corpus"):
self.pre_train_corpus = corpuses.Corpus(config.pre_train_corpus)
self.hash = self._ComputeHash(self.pre_train_corpus, self.corpus, self.config)
self._created = False
if environment.WORLD_RANK == 0:
self.cache = cache.mkcache("model", self.hash)
self.cache.path.mkdir(exist_ok = True, parents = True)
else:
while not cache.cachepath("model", self.hash).exists():
time.sleep(0.5)
self.cache = cache.mkcache("model", self.hash)
if environment.WORLD_RANK == 0:
# Create the necessary cache directories.
(self.cache.path / "checkpoints").mkdir(exist_ok=True)
(self.cache.path / "samples").mkdir(exist_ok=True)
# Create symlink to encoded corpus.
symlink = self.cache.path / "corpus"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
pathlib.Path(self.corpus.encoded.url[len("sqlite:///") :]).parent,
self.cache.path,
),
symlink,
)
if self.pre_train_corpus:
symlink = self.cache.path / "pre_train_corpus"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(
pathlib.Path(self.pre_train_corpus.encoded.url[len("sqlite:///") :]).parent,
self.cache.path,
),
symlink,
)
# Create symlink to the tokenizer and create a backup inside checkpoints.
symlink = self.cache.path / "tokenizer"
if not symlink.is_symlink():
os.symlink(
os.path.relpath(self.corpus.tokenizer_path, self.cache.path), symlink
)
if (self.cache.path / "checkpoints" / "backup_tokenizer.pkl").exists():
shutil.copyfile(self.cache.path / "checkpoints" / "backup_tokenizer.pkl", self.corpus.tokenizer_path)
# Validate metadata against cache.
if self.cache.get("META.pbtxt"):
cached_meta = pbutil.FromFile(
pathlib.Path(self.cache["META.pbtxt"]), internal_pb2.ModelMeta()
)
# Exclude num_epochs and corpus location from metadata comparison.
config_to_compare = model_pb2.Model()
config_to_compare.CopyFrom(self.config)
config_to_compare.corpus.ClearField("contentfiles")
if config_to_compare.HasField("pre_train_corpus"):
config_to_compare.pre_train_corpus.ClearField("contentfiles")
config_to_compare.training.ClearField("num_epochs")
config_to_compare.training.ClearField("num_train_steps")
if config_to_compare.HasField("pre_train_corpus"):
config_to_compare.training.ClearField("num_pretrain_steps")
config_to_compare.training.ClearField("batch_size")
if config_to_compare.training.HasField("data_generator"):
config_to_compare.training.data_generator.ClearField("steps_per_epoch")
config_to_compare.training.data_generator.ClearField("validation_set")
# These fields should have already been cleared, but we'll do it again
# so that metadata comparisons don't fail when the cached meta schema
# is updated.
cached_to_compare = model_pb2.Model()
cached_to_compare.CopyFrom(cached_meta.config)
cached_to_compare.corpus.ClearField("contentfiles")
if cached_to_compare.HasField("pre_train_corpus"):
cached_to_compare.pre_train_corpus.ClearField("contentfiles")
cached_to_compare.training.ClearField("num_epochs")
cached_to_compare.training.ClearField("num_train_steps")
if cached_to_compare.HasField("pre_train_corpus"):
cached_to_compare.training.ClearField("num_pretrain_steps")
cached_to_compare.training.ClearField("batch_size")
if cached_to_compare.training.HasField("data_generator"):
cached_to_compare.training.data_generator.ClearField("steps_per_epoch")
cached_to_compare.training.data_generator.ClearField("validation_set")
if cached_to_compare.training.sequence_length != config_to_compare.training.sequence_length:
l.logger().warning("Mismatch between pre-trained and current config sequence_length!\
This can only be intended in BERT model!")
cached_to_compare.training.ClearField("sequence_length")
config_to_compare.training.ClearField("sequence_length")
if config_to_compare != cached_to_compare:
raise SystemError("Metadata mismatch: {} \n\n {}".format(config_to_compare, cached_to_compare))
self.meta = cached_meta
else:
self.meta = internal_pb2.ModelMeta()
self.meta.config.CopyFrom(self.config)
self._WriteMetafile()
## Store current commit
commit.saveCommit(self.cache.path)
self.backend = {
model_pb2.NetworkArchitecture.TENSORFLOW_SEQ: tf_sequential.tfSequential,
model_pb2.NetworkArchitecture.KERAS_SEQ: keras_sequential.kerasSequential,
model_pb2.NetworkArchitecture.TENSORFLOW_BERT: tf_bert.tfBert,
model_pb2.NetworkArchitecture.TORCH_BERT: torch_bert.torchBert,
model_pb2.NetworkArchitecture.INCODER_1B: incoder.Incoder1B,
model_pb2.NetworkArchitecture.INCODER_6B: incoder.Incoder6B,
}[config.architecture.backend](self.config, self.cache, self.hash)
hidden_state.setup_lm(self.backend)
l.logger().info("Initialized {} in {}".format(self.backend, self.cache.path))
return
def GetShortSummary(self) -> str:
return self.backend.GetShortSummary()
@staticmethod
def _ComputeHash(pre_train_corpus_ : corpuses.Corpus,
corpus_ : corpuses.Corpus,
config : model_pb2.Model,
) -> str:
"""Compute model hash.
The hash is computed from the ID of the corpus and the serialized
representation of the config proto. The number of epochs that the model is
trained for does not affect the hash, since we can share checkpoints
between different models if the only variable is the epoch count. E.g.
    if we have a model trained for 10 epochs, we can use the checkpoint as the
    starting point for training a model for 20 epochs.
Args:
corpus: A corpus instance.
config: A Model config proto.
Returns:
The unique model ID.
"""
config_to_hash = model_pb2.Model()
config_to_hash.CopyFrom(config)
config_to_hash.ClearField("pre_train_corpus")
config_to_hash.ClearField("corpus")
config_to_hash.training.ClearField("num_epochs")
config_to_hash.training.ClearField("num_train_steps")
config_to_hash.training.ClearField("batch_size")
if config_to_hash.training.HasField("data_generator"):
config_to_hash.training.data_generator.ClearField("steps_per_epoch")
config_to_hash.training.data_generator.ClearField("validation_set")
if pre_train_corpus_:
hash_list = [pre_train_corpus_.hash, corpus_.hash, config_to_hash.SerializeToString()]
else:
hash_list = [corpus_.hash, config_to_hash.SerializeToString()]
if FLAGS.custom_incoder_ckpt is not None:
hash_list.append(FLAGS.custom_incoder_ckpt)
return crypto.sha1_list(hash_list)
def Create(self) -> bool:
if self._created:
return False
self._created = True
self.corpus.Create()
if self.pre_train_corpus:
self.pre_train_corpus.Create(self.corpus.tokenizer)
if not (self.cache.path / "checkpoints" / "backup_tokenizer.pkl").exists():
shutil.copyfile(self.corpus.tokenizer_path, self.cache.path / "checkpoints" / "backup_tokenizer.pkl")
self.backend.Create(tokenizer = self.corpus.tokenizer)
return
def PreTrain(self, **kwargs) -> "Model":
"""
Pre-Train the model. Only supported for PyTorch BERT.
Returns:
The model instance.
Raises:
UnableToAcquireLockError: If the model is locked (i.e. there is another
process currently modifying the model).
"""
self.Create()
self.backend.PreTrain(self.pre_train_corpus, **kwargs)
pre_telemetry_logs = self.backend.pre_telemetry.EpochTelemetry()
l.logger().info(
"Pre-trained model for {} {} in {} ms. " "Training loss: {}."
.format(
pre_telemetry_logs[-1].epoch_num,
"steps" if isinstance(self.backend, tf_bert.tfBert) or isinstance(self.backend, torch_bert.torchBert) else "epochs",
humanize.intcomma(sum(t.epoch_wall_time_ms for t in pre_telemetry_logs)),
pre_telemetry_logs[-1].loss,
)
)
return self
def Train(self, **kwargs) -> "Model":
"""Train the model.
Returns:
The model instance.
Raises:
UnableToAcquireLockError: If the model is locked (i.e. there is another
process currently modifying the model).
"""
self.Create()
self.backend.Train(self.corpus, **kwargs)
telemetry_logs = self.backend.telemetry.EpochTelemetry()
l.logger().info(
"Trained model for {} {} in {} ms. " "Training loss: {}."
.format(
telemetry_logs[-1].epoch_num if FLAGS.select_checkpoint_step == -1 else telemetry_logs[FLAGS.select_checkpoint_step-1].epoch_num,
"steps" if isinstance(self.backend, tf_bert.tfBert) or isinstance(self.backend, torch_bert.torchBert) else "epochs",
humanize.intcomma(sum(t.epoch_wall_time_ms for t in telemetry_logs)),
telemetry_logs[-1].loss if FLAGS.select_checkpoint_step == -1 else telemetry_logs[FLAGS.select_checkpoint_step-1].loss,
)
)
return self
def Sample(
self,
sampler: 'samplers.Sampler',
sample_observers: typing.List[sample_observers_lib.SampleObserver],
seed: int = None,
num_batches: int = None,
) -> None:
"""Sample a model.
This method uses the observer model, returning nothing. To access the
samples produced, implement a SampleObserver and pass it in as an argument.
Sampling continues indefinitely until one of the sample observers returns
False when notified of a new sample.
If the model is not already trained, calling Sample() first trains the
model. Thus a call to Sample() is equivalent to calling Train() then
Sample().
Args:
sampler: The sampler to sample using.
sample_observers: A list of SampleObserver objects that are notified of
new generated samples.
seed: A numeric value to seed the RNG with. If not present, the RNG is
seeded randomly.
Raises:
UserError: If called with no sample observers.
UnableToAcquireLockError: If the model is locked (i.e. there is another
process currently modifying the model).
InvalidStartText: If the sampler start text cannot be encoded.
InvalidSymtokTokens: If the sampler symmetrical depth tokens cannot be
encoded.
"""
if not sample_observers:
raise ValueError("Cannot sample without any observers")
self.Create()
sampler.Create()
epoch = self.backend.telemetry.EpochTelemetry()[-1].epoch_num
sample_start_time = datetime.datetime.utcnow()
if environment.WORLD_RANK == 0:
(self.cache.path / "samples" / sampler.hash).mkdir(exist_ok = True)
tokenizer = self.corpus.tokenizer
if sampler.isFixedStr and not sampler.is_active:
sampler.Specialize(tokenizer)
elif sampler.is_live:
start_text = [str(input("Live Feed: "))]
while True:
try:
start_text.append(str(input()))
except EOFError:
break
sampler.start_text = '\n'.join(start_text)
sampler.Specialize(tokenizer)
self.backend.InitSampling(sampler, seed, self.corpus)
[obs.Specialize(self, sampler) for obs in sample_observers]
if isinstance(self.backend, tf_bert.tfBert) or isinstance(self.backend, torch_bert.torchBert) or isinstance(self.backend, incoder.Incoder):
sample_batch = lambda : self._SampleLMBatch(sampler, tokenizer, sample_observers, epoch)
elif isinstance(self.backend, tf_sequential.tfSequential) or isinstance(self.backend, keras_sequential.kerasSequential):
sample_batch = lambda : self._SampleSeqBatch(sampler, tokenizer, sample_observers, epoch)
else:
raise ValueError("Unrecognized backend.")
try:
seq_count, cont, compiled = 0, True, 0
nb = 0
while cont:
if num_batches and nb >= num_batches:
break
nb+=1
cont, s, c = sample_batch()
seq_count += s
compiled += c
if sampler.is_live:
start_text = [str(input("Live Feed: "))]
while True:
try:
start_text.append(str(input()))
except EOFError:
break
sampler.start_text = '\n'.join(start_text)
sampler.Specialize(tokenizer)
except KeyboardInterrupt:
l.logger().info("Wrapping up sampling...")
except Exception as e:
raise e
if environment.WORLD_RANK == 0:
for obs in sample_observers:
obs.endSample()
if isinstance(self.backend, torch_bert.torchBert) and sampler.is_active:
self.backend.sample.data_generator.samples_cache_obs.endSample()
time_now = datetime.datetime.utcnow()
l.logger().info( "Produced {} samples at a rate of {} ms / sample. Session's compilation rate was {}%"
.format(
humanize.intcomma(seq_count),
humanize.intcomma(int(1000 * ((time_now - sample_start_time) / max(seq_count, 1)).total_seconds())),
round(100 * ((compiled / seq_count if seq_count > 0 else 0)), 3),
)
)
return
def _SampleLMBatch(self,
sampler: 'samplers.Sampler',
tokenizer: tokenizers.TokenizerBase,
sample_observers: typing.List[sample_observers_lib.SampleObserver],
epoch: int,
) -> bool:
"""
Run a sampling iteration over BERT models.
"""
start_time = datetime.datetime.utcnow()
seq_count = 0
compiled = 0
self.backend.InitSampleBatch(sampler, workload_size = FLAGS.sample_workload_size // environment.WORLD_SIZE)
try:
org_inputs, input_ids, samples, indices = self.backend.SampleNextIndices(sampler)
except StopIteration:
return False, seq_count, compiled
if not samples:
      # Returning empty means the model has not produced anything that can be stored.
# This 'if' accommodates active sampling, which is very selective.
return True, seq_count, compiled
continue_sampling = True
if environment.WORLD_RANK == 0:
assert len(org_inputs) == len(input_ids) == len(samples) == len(indices), "Length mismatch, {}-{}-{}-{}".format(len(org_inputs), len(input_ids), len(samples), len(indices))
for org, inp, sample, idxs in zip(org_inputs, input_ids, samples, indices):
src = self.tokenizer.ArrayToCode(sample, with_formatting = True)
try:
stdout = opencl.Compile(src)
compile_flag = True
compiled += 1
features = extractor.ExtractRawFeatures(src)
except ValueError:
compile_flag = False
features = ""
end_time = datetime.datetime.utcnow()
sample = model_pb2.Sample(
train_step = epoch,
text = src,
sample_indices = ','.join([self.tokenizer.decoder[idx].replace('\n', '\\n') for idx in idxs]).replace('\n', '\\n'),
encoded_sample_indices = ','.join([str(idx) for idx in idxs]),
original_input = self.tokenizer.tokensToString(org, with_formatting = False, ignore_token = self.tokenizer.padToken),
sample_feed = self.tokenizer.tokensToString(inp, with_formatting = False, ignore_token = self.tokenizer.padToken),
encoded_text = ",".join([str(x) for x in sample]),
sample_start_epoch_ms_utc = int(start_time.strftime("%s%f")),
sample_time_ms = int(round(1000 * ((end_time - start_time) / len(samples)).total_seconds())),
wall_time_ms = int(round(1000 * ((end_time - start_time) / len(samples)).total_seconds())),
feature_vector = features,
num_tokens = np.where(sample == self.tokenizer.padToken)[0][0] if self.tokenizer.padToken in sample else len(sample),
compile_status = compile_flag,
categorical_sampling = self.backend.samplesWithCategorical(),
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
# Notify sample observers.
continue_sampling &= all(
[obs.OnSample(sample) for obs in sample_observers]
)
seq_count += 1
if environment.WORLD_SIZE > 1:
_ = distrib.broadcast(str(continue_sampling))
else:
status = distrib.broadcast()
if status == "True":
continue_sampling = True
elif status == "False":
continue_sampling = False
else:
raise OSError("Broken distributed message: '{}'".format(status))
return continue_sampling, seq_count, compiled
def _SampleSeqBatch(
self,
sampler: 'samplers.Sampler',
tokenizer: tokenizers.TokenizerBase,
sample_observers: typing.List[sample_observers_lib.SampleObserver],
epoch: int,
) -> bool:
"""
Run a single iteration of the batched sample inner-loop for sequential models.
"""
start_time = datetime.datetime.utcnow()
self.backend.InitSampleBatch(sampler)
samples_in_progress = [
sampler.tokenized_start_text.copy() for _ in range(sampler.batch_size)
]
    done = np.zeros(sampler.batch_size, dtype=bool)
wall_time_start = start_time
seq_count = 0
compiled = 0
# The return value of this method. If any of the sample_observers return
# False, this value is set to False.
continue_sampling = True
# Sampling loop. Continues until all samples in the batch are done.
while not done.all():
indices, _ = self.backend.SampleNextIndices(sampler, done)
# Iterate over all samples in batch to determine whether they're
# done.
for i in range(len(indices)):
if done[i]:
continue
for index in indices[i]:
samples_in_progress[i].append(tokenizer.decoder[index])
if sampler.SampleIsComplete(samples_in_progress[i]):
end_time = datetime.datetime.utcnow()
sample_kernel = [x for x in samples_in_progress[i]]
features = extractor.ExtractRawFeatures(''.join(samples_in_progress[i]))
done[i] = 1
try:
stdout = opencl.Compile(''.join(samples_in_progress[i]))
compile_flag = True
compiled += 1
except ValueError:
compile_flag = False
sample = model_pb2.Sample(
train_step = epoch,
text = ''.join(samples_in_progress[i]),
sample_indices = "",
encoded_sample_indices = "",
sample_feed = sampler.start_text,
encoded_text = ",".join([str(tokenizer.vocab[x]) for x in sample_kernel]),
sample_start_epoch_ms_utc = int(start_time.strftime("%s%f")),
sample_time_ms = int(round(1000 * ((end_time - start_time) / sampler.batch_size).total_seconds())),
wall_time_ms = int(round(1000 * ((end_time - start_time) / sampler.batch_size).total_seconds())),
feature_vector = features,
num_tokens = len(samples_in_progress[i]),
compile_status = compile_flag,
categorical_sampling = self.backend.samplesWithCategorical(),
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
# Notify sample observers.
continue_sampling &= all(
[obs.OnSample(sample) for obs in sample_observers]
)
if sampler.is_live and self.backend.feature_encoder:
print(sample.feature_vector)
seq_count += 1
# Wall sample time is the difference between the end of the previous
# sample and the end of the current sample.
wall_time_start = datetime.datetime.utcnow()
break
return continue_sampling, seq_count, compiled
def EncodeInputs(self, src: typing.List[str]) -> np.array:
"""
According to each LM's rules, encode a list of source codes to encoded arrays
ready to be fed into the model.
Args:
src: List of source codes.
Returns:
A list of encoded numpy arrays.
"""
return self.backend.EncodeInputs(src)
def ExtractHidden(self, encoded: typing.List[np.array]) -> np.array:
"""
Extract hidden state from backend language model.
Args:
      encoded: A list of encoded input arrays that will be provided to the LM.
Returns:
The hidden state of the provided inputs.
"""
return self.backend.ExtractHidden(encoded)
def SamplerCache(self, sampler: 'samplers.Sampler') -> pathlib.Path:
"""Get the path to a sampler cache.
Args:
sampler: A Sampler instance.
Returns:
A path to a directory. Note that this directory may not exist - it is
created only after a call to Sample().
"""
return self.cache.path / "samples" / sampler.hash
def _WriteMetafile(self) -> None:
pbutil.ToFile(self.meta, pathlib.Path(self.cache.keypath("META.pbtxt")))
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
return sorted(
[self.cache.path / "tokenizer", self.cache.path / "META.pbtxt",]
+ self.backend.InferenceManifest()
)
def __repr__(self) -> str:
"""String representation."""
return f"model[{self.hash}]"
def __eq__(self, rhs) -> bool:
if not isinstance(rhs, Model):
return False
return rhs.hash == self.hash
def __ne__(self, rhs) -> bool:
return not self.__eq__(rhs)
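# Example usage (an illustrative, unverified sketch; model_config and sampler_config
# are assumed to be protos parsed elsewhere, e.g. from .pbtxt files):
#
#   from deeplearning.benchpress.models import language_models
#   from deeplearning.benchpress.samplers import sample_observers, samplers
#
#   lm = language_models.Model(model_config)            # model_pb2.Model proto
#   lm.Train()                                          # no-op if already trained
#   observers = [sample_observers.InMemorySampleSaver()]
#   lm.Sample(samplers.Sampler(sampler_config), observers, num_batches = 1)
#   generated = [s.text for s in observers[0].samples]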
| 26,387 | 38.621622 | 178 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/backends.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas and Chris Cummins.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural network backends for language models."""
import typing
import numpy as np
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import cache
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
class BackendBase(object):
"""The base class for a language model backend.
A language model backend encapsulates all of the neural network logic.
"""
def __init__(
self,
config: model_pb2.Model,
fs_cache: cache.FSCache,
hash: str,
tokenizer: tokenizers.TokenizerBase = None,
**kwargs,
):
self.config = config
self.cache = fs_cache
self.hash = hash
self.tokenizer = tokenizer
## Legacy function to support lazy creation of corpus
def Create(self, tokenizer: tokenizers.TokenizerBase) -> None:
self.tokenizer = tokenizer
def PreTrain(self, corpus: 'corpuses.Corpus', **extra_kwargs) -> None:
"""Pre-train the backend"""
raise NotImplementedError("pre-training is only supported in PyTorch BERT.")
def Train(self, corpus: 'corpuses.Corpus', **extra_kwargs) -> None:
"""Train the backend."""
raise NotImplementedError("Abstract Class.")
def TrainBatch(self, batch) -> None:
"""Incrementally train language model on a batch of data."""
raise NotImplementedError("Abstract Class.")
def InitSampling(
self, sampler: 'samplers.Sampler', seed: typing.Optional[int] = None
) -> None:
"""Initialize backend for sampling."""
raise NotImplementedError("Abstract Class.")
def InitSampleBatch(self, sampler: 'samplers.Sampler') -> None:
"""Begin a new sampling batch. Only called after InitSampling()."""
raise NotImplementedError("Abstract Class.")
def SampleNextIndices(
self, sampler: 'samplers.Sampler', done: np.ndarray, tokenizer = None
) -> np.ndarray:
"""Sample the next indices for the current sample batch.
Returns:
A numpy array of int32 values with shape (batch_size,).
"""
raise NotImplementedError("Abstract Class.")
def SampleBatch(self, batch) -> np.ndarray:
"""Specifically sample a requested batch of data."""
raise NotImplementedError("Abstract Class.")
def EncodeInputs(self, src: typing.List[str]) -> np.array:
"""Encode text inputs to numpy arrays."""
raise NotImplementedError("Abstract Class.")
def ExtractHidden(self, encoded: typing.List[np.array]) -> np.array:
"""Extract Hidden State from Language Model"""
raise NotImplementedError("Abstract Class")
def GetEncoderModule(self, **kwargs) -> torch.nn.Module:
"""Return the internal torch module of an architecture."""
raise NotImplementedError("Abstract class")
def GetDecoderModule(self, **kwargs) -> torch.nn.Module:
"""Return a decoder version of LM's decoder."""
raise NotImplementedError("Abstract class")
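# Minimal subclass sketch (illustrative only; real backends such as torch_bert
# implement the full interface):
#
#   class EchoBackend(BackendBase):
#     """A toy backend that 'samples' by repeating a fixed index."""
#
#     def Train(self, corpus, **extra_kwargs) -> None:
#       pass  # nothing to train
#
#     def SampleNextIndices(self, sampler, done, tokenizer = None) -> np.ndarray:
#       # One int32 index per sequence in the batch, as required above.
#       return np.zeros((sampler.batch_size,), dtype = np.int32)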
| 3,511 | 34.12 | 80 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_sequential/tf_sequential.py | # Copyright (c) 2016-2020 Chris Cummins.
#
# clgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# clgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with clgen. If not, see <https://www.gnu.org/licenses/>.
"""BenchPress models using a TensorFlow backend."""
import copy
import os
import pathlib
import time
import typing
import humanize
from deeplearning.benchpress.util import logging as l
import numpy as np
import progressbar
import tensorflow_addons as tfa
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.util import tf as local_tf
from deeplearning.benchpress.models.tf_sequential.data_generator import TensorflowBatchGenerator
from absl import flags
FLAGS = flags.FLAGS
tf = local_tf.tf
flags.DEFINE_boolean(
"clgen_tf_backend_reset_inference_state_between_batches",
False,
"If set, reset the network state between sample batches. Else, the model "
"state is unaffected.",
)
flags.DEFINE_integer(
"clgen_tf_backend_tensorboard_summary_step_count",
25,
"The number of steps between writing tensorboard summaries.",
)
flags.DEFINE_integer(
"clgen_per_epoch_test_samples",
16,
"The number of samples to make at the end of each training epoch.",
)
class tfSequential(backends.BackendBase):
  """A model with an embedding layer, using a TensorFlow backend."""
@property
def hidden_state_size(self) -> int:
return self.config.architecture.neurons_per_layer
def __init__(self, *args, **kwargs):
"""Instantiate a model.
Args:
args: Arguments to be passed to BackendBase.__init__().
kwargs: Arguments to be passed to BackendBase.__init__().
"""
super(tfSequential, self).__init__(*args, **kwargs)
local_tf.initTensorflow()
# Attributes that will be lazily set.
self.cell = None
self.input_data = None
self.targets = None
self.lengths = None
self.seed_length = None
self.temperature = None
self.initial_state = None
self.logits = None
self.generated = None
self.loss = None
self.final_state = None
self.learning_rate = None
self.epoch = None
self.train_op = None
self.data_generator = None
self.inference_tf = None
self.inference_sess = None
self.inference_indices = None
self.inference_state = None
# Create the summary writer, shared between Train() and
# _EndOfEpochTestSample().
tf.compat.v1.disable_eager_execution()
tensorboard_dir = f"{self.cache.path}/tensorboard"
l.logger().info(
"Using tensorboard to log training progress. View progress using:\n"
f" $ tensorboard --logdir='{tensorboard_dir}'",
)
self.summary_writer = tf.compat.v1.summary.FileWriter(tensorboard_dir)
def samplesWithCategorical(self):
return True
def InitTfGraph(
self, sampler: typing.Optional[samplers.Sampler] = None
) -> "tf":
"""Instantiate a TensorFlow graph for training or inference.
The tensorflow graph is different for training and inference, so must be
reset when switching between modes.
Args:
sampler: If set, initialize the model for inference using the given
sampler. If not set, initialize model for training.
Returns:
The imported TensorFlow module.
"""
start_time = time.time()
# Quiet tensorflow.
# See: https://github.com/tensorflow/tensorflow/issues/1258
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Deferred importing of TensorFlow.
tf.compat.v1.disable_eager_execution()
from deeplearning.benchpress.models.tf_sequential import helper
cell_type = {
model_pb2.NetworkArchitecture.LSTM: tf.compat.v1.nn.rnn_cell.LSTMCell,
model_pb2.NetworkArchitecture.GRU: tf.compat.v1.nn.rnn_cell.GRUCell,
model_pb2.NetworkArchitecture.RNN: tf.compat.v1.nn.rnn_cell.BasicRNNCell,
}.get(self.config.architecture.neuron_type, None)
if cell_type is None:
raise NotImplementedError
# Reset the graph when switching between training and inference.
tf.compat.v1.reset_default_graph()
if sampler:
sequence_length = sampler.sequence_length
batch_size = sampler.batch_size
else:
sequence_length = self.config.training.sequence_length
batch_size = self.config.training.batch_size
vocab_size = self.tokenizer.vocab_size
cells_lst = []
for _ in range(self.config.architecture.num_layers):
cells_lst.append(cell_type(self.config.architecture.neurons_per_layer))
self.cell = cell = tf.keras.layers.StackedRNNCells(cells_lst)
self.input_data = tf.compat.v1.placeholder(
tf.int32, [batch_size, sequence_length]
)
self.targets = tf.compat.v1.placeholder(
tf.int32, [batch_size, sequence_length]
)
self.initial_state = self.cell.get_initial_state(batch_size = batch_size, dtype = tf.float32)
self.temperature = tf.Variable(1.0, trainable=False)
self.seed_length = tf.compat.v1.placeholder(name = "seed_length", dtype = tf.int32, shape = ())
if sampler:
self.lengths = tf.compat.v1.placeholder(tf.int32, [batch_size])
else:
self.lengths = tf.fill([batch_size], sequence_length)
scope_name = "rnnlm"
with tf.compat.v1.variable_scope(scope_name):
with tf.device("/cpu:0"):
embedding = tf.compat.v1.get_variable(
"embedding", [vocab_size, self.config.architecture.neurons_per_layer]
)
inputs = tf.nn.embedding_lookup(embedding, self.input_data)
if sampler:
decode_helper = helper.CustomInferenceHelper(
self.seed_length, embedding, self.temperature
)
else:
decode_helper = tfa.seq2seq.sampler.TrainingSampler(time_major=False)
decoder = tfa.seq2seq.BasicDecoder(
cell,
decode_helper,
tf.compat.v1.layers.Dense(vocab_size),
dtype = tf.float32,
)
outputs, self.final_state, _ = tfa.seq2seq.dynamic_decode(
decoder,
decoder_init_input = inputs,
decoder_init_kwargs = {
'initial_state': self.initial_state,
'sequence_length': self.lengths,
},
output_time_major=False,
impute_finished=True,
swap_memory=True,
scope=scope_name,
)
self.generated = outputs.sample_id
self.logits = outputs.rnn_output
    sequence_weights = tf.ones([batch_size, sequence_length])
    self.loss = tfa.seq2seq.sequence_loss(
      self.logits, self.targets, sequence_weights
)
self.learning_rate = tf.Variable(0.0, trainable=False)
self.epoch = tf.Variable(0, trainable=False)
trainable_variables = tf.compat.v1.trainable_variables()
# TODO(cec): Support non-adam optimizers.
grads, _ = tf.clip_by_global_norm(
tf.gradients(self.loss, trainable_variables, aggregation_method=2),
self.config.training.adam_optimizer.normalized_gradient_clip_micros / 1e6,
)
optimizer = tf.compat.v1.train.AdamOptimizer(self.learning_rate)
self.train_op = optimizer.apply_gradients(zip(grads, trainable_variables))
if not sampler:
# Create tensorboard summary writers for training progress.
tf.compat.v1.summary.scalar("loss", self.loss)
tf.compat.v1.summary.scalar("learning_rate", self.learning_rate)
tf.compat.v1.summary.scalar("epoch_num", self.epoch)
num_trainable_params = int(
np.sum([np.prod(v.shape) for v in tf.compat.v1.trainable_variables()])
)
l.logger().info(
"Instantiated TensorFlow graph with {} trainable parameters " "in {} ms."
.format(
humanize.intcomma(num_trainable_params),
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
return tf
def GetShortSummary(self) -> str:
return (
f"{self.config.architecture.neurons_per_layer}×"
f"{self.config.architecture.num_layers} "
f"{model_pb2.NetworkArchitecture.NeuronType.Name(self.config.architecture.neuron_type)} "
"network"
)
@property
def epoch_checkpoints(self) -> typing.Set[int]:
"""Get the set of epoch numbers which we have trained models for.
Note that Tensorflow checkpoint paths don't translate to actual files, but
rather a pair of <.index,.meta> files.
Returns:
      The set of epoch numbers for which checkpoints have been saved.
"""
    if not (self.cache.path / "checkpoints" / "checkpoint").exists():
# No saver file means no checkpoints.
return {}
# Count the number of checkpoint files which TensorFlow has created.
checkpoint_files = [
f.stem
for f in (self.cache.path / "checkpoints").iterdir()
if f.name.startswith("checkpoint-") and f.name.endswith(".meta")
]
# The checkpoint paths are appended with the epoch number.
epoch_nums = [int(x.split("-")[-1]) for x in checkpoint_files]
return set(epoch_nums)
def GetParamsPath(
self, checkpoint_state
) -> typing.Tuple[typing.Optional[str], typing.List[str]]:
"""Return path to checkpoint closest to target num of epochs."""
# Checkpoints are saved with relative path, so we must prepend cache paths.
paths = [
str(self.cache.path / "checkpoints" / p)
for p in checkpoint_state.all_model_checkpoint_paths
]
# The checkpoint paths are appended with the epoch number.
epoch_nums = [int(x.split("-")[-1]) for x in paths]
diffs = [self.config.training.num_epochs - e for e in epoch_nums]
pairs = zip(paths, diffs)
positive_only = [p for p in pairs if p[1] >= 0]
return min(positive_only, key=lambda x: x[1])[0], paths
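  # Worked example (illustrative only): with checkpoints saved at epochs
  # [1, 2, 3, 5] and config.training.num_epochs == 4, the diffs are [3, 2, 1, -1];
  # the negative entry is dropped and the smallest remaining diff (1) wins, so the
  # epoch-3 checkpoint is returned as the restore point.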
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
# The TensorFlow save file.
paths = [
self.cache.path / "checkpoints" / "checkpoint",
]
# Export only the TensorFlow checkpoint files for the target number of
# epochs.
paths += [
path.absolute()
for path in (self.cache.path / "checkpoints").iterdir()
if path.name.startswith(f"checkpoint-{self.config.training.num_epochs}")
]
# Include the epoch telemetry. This is not strictly required, but the files
# are small and contain useful information for describing the model, such as
# the total training time and model loss.
paths += [
path.absolute()
for path in (self.cache.path / "logs").iterdir()
if (
path.name.startswith("epoch_")
and path.name.endswith("_telemetry.pbtxt")
)
]
return sorted(paths)
def Train(
self,
corpus,
test_sampler: typing.Optional[samplers.Sampler] = None,
**unused_kwargs,
) -> None:
"""Locked training.
If there are cached epoch checkpoints, the one closest to the target number
of epochs will be loaded, and the model will be trained for only the
remaining number of epochs, if any. This means that calling this function
twice will only actually train the model the first time, and all subsequent
calls will be no-ops.
This method must only be called when the model is locked.
"""
del unused_kwargs
self.num_epochs = self.config.training.num_epochs
self.telemetry = telemetry.TrainingLogger(self.cache.path / "logs")
if self.is_trained:
return
if self.data_generator is None:
self.data_generator = TensorflowBatchGenerator(
corpus, self.config.training
)
tf = self.InitTfGraph()
# Create and merge the tensorboard summary ops.
merged = tf.compat.v1.summary.merge_all()
# training options
# TODO(cec): Enable support for multiple optimizers:
initial_learning_rate = (
self.config.training.adam_optimizer.initial_learning_rate_micros / 1e6
)
decay_rate = (
self.config.training.adam_optimizer.learning_rate_decay_per_epoch_micros
/ 1e6
)
# # resume from prior checkpoint
ckpt_path, ckpt_paths = None, None
if (self.cache.path / "checkpoints" / "checkpoint").exists():
checkpoint_state = tf.train.get_checkpoint_state(
self.cache.path / "checkpoints",
)
assert checkpoint_state
assert checkpoint_state.model_checkpoint_path
ckpt_path, ckpt_paths = self.GetParamsPath(checkpoint_state)
with tf.compat.v1.Session() as sess:
tf.compat.v1.global_variables_initializer().run()
# Keep all checkpoints.
saver = tf.compat.v1.train.Saver(
tf.compat.v1.global_variables(), max_to_keep=100, save_relative_paths=True
)
# restore model from closest checkpoint.
if ckpt_path:
l.logger().info("Restoring checkpoint {}".format(ckpt_path))
saver.restore(sess, ckpt_path)
# make sure we don't lose track of other checkpoints
if ckpt_paths:
saver.recover_last_checkpoints(ckpt_paths)
# Offset epoch counts by 1 so that they are in the range [1..n]
current_epoch = sess.run(self.epoch) + 1
max_epoch = self.config.training.num_epochs + 1
# Per-epoch training loop.
for epoch_num in range(current_epoch, max_epoch):
self.telemetry.EpochBeginCallback()
# decay and set learning rate
new_learning_rate = initial_learning_rate * (
(float(100 - decay_rate) / 100.0) ** (epoch_num - 1)
)
sess.run(tf.compat.v1.assign(self.learning_rate, new_learning_rate))
sess.run(tf.compat.v1.assign(self.epoch, epoch_num))
# TODO(cec): refactor data generator to a Python generator.
self.data_generator.CreateBatches()
l.logger().info("Epoch {}/{}:".format(epoch_num, self.config.training.num_epochs))
state = sess.run(self.initial_state)
# Per-batch inner loop.
bar = progressbar.ProgressBar(max_value=self.data_generator.num_batches)
last_log_time = time.time()
for i in bar(range(self.data_generator.num_batches)):
x, y = self.data_generator.NextBatch()
feed = {self.input_data: x, self.targets: y}
for j, (c, h) in enumerate(self.initial_state):
feed[c], feed[h] = state[j].c, state[j].h
summary, loss, state, _ = sess.run(
[merged, self.loss, self.final_state, self.train_op], feed
)
# Periodically write progress to tensorboard.
if i % FLAGS.clgen_tf_backend_tensorboard_summary_step_count == 0:
step = (epoch_num - 1) * self.data_generator.num_batches + i
self.summary_writer.add_summary(summary, step)
# Log the loss and delta.
l.logger().info("Loss: {:.6f}.".format(loss))
# Save after every epoch.
start_time = time.time()
global_step = epoch_num
checkpoint_prefix = self.cache.path / "checkpoints" / "checkpoint"
checkpoint_path = saver.save(
sess, str(checkpoint_prefix), global_step=global_step
)
l.logger().info(
"Saved checkpoint {} in {} ms."
.format(
checkpoint_path,
humanize.intcomma(int((time.time() - start_time) * 1000)),
)
)
assert pathlib.Path(
f"{checkpoint_prefix}-{global_step}.index"
).is_file()
assert pathlib.Path(f"{checkpoint_prefix}-{global_step}.meta").is_file()
self.telemetry.EpochEndCallback(epoch_num, loss)
# If we have a sampler that we can use at the end of epochs, then
# break now to run the test sampler.
# This is confusing logic! Consider a refactor to simplify things.
if test_sampler:
break
else:
return
if test_sampler and FLAGS.clgen_per_epoch_test_samples > 0:
self._EndOfEpochTestSample(corpus, test_sampler, step, epoch_num)
self.Train(corpus, test_sampler=test_sampler)
def _EndOfEpochTestSample(
self, corpus, sampler: samplers.Sampler, step: int, epoch_num: int
):
"""Run sampler"""
tf.compat.v1.disable_eager_execution()
tokenizer = corpus.tokenizer
sampler.Specialize(tokenizer)
sampler.batch_size = 1
seed = 0
self.InitSampling(sampler, seed)
self.InitSampleBatch(sampler)
samples, stats = [], []
for i in range(FLAGS.clgen_per_epoch_test_samples):
      done = np.zeros(1, dtype=bool)
start_time = time.time()
sample_in_progress = sampler.tokenized_start_text.copy()
while not done[0]:
indices, _ = self.SampleNextIndices(sampler, done)
# Iterate over all samples in batch to determine whether they're
# done.
for index in indices[0]:
sample_in_progress.append(tokenizer.decoder[index])
if sampler.SampleIsComplete(sample_in_progress):
stats.append(
(len(sample_in_progress), int((time.time() - start_time) * 1000))
)
sample = "".join(sample_in_progress)
print(f"=== CLGEN SAMPLE ===\n\n{sample}\n")
samples.append(sample)
done[0] = True
break
samples_as_markdown = [
self.FormatCodeAsMarkdown(sample) for sample in samples
]
samples_tensor = tf.convert_to_tensor(samples_as_markdown, dtype=tf.string)
summary_op = tf.compat.v1.summary.text("samples", samples_tensor)
summary = self.inference_sess.run(summary_op)
self.summary_writer.add_summary(summary, step)
@staticmethod
def FormatCodeAsMarkdown(text: str) -> str:
return f"<pre>{text.strip()}</pre>"
def InitSampling(self,
sampler: samplers.Sampler,
seed: typing.Optional[int] = None,
*unused_args,
**unused_kwargs,
) -> None:
"""Initialize model for sampling."""
del unused_args
del unused_kwargs
tf.compat.v1.disable_eager_execution()
# Delete any previous sampling session.
if self.inference_tf:
del self.inference_tf
if self.inference_sess:
del self.inference_sess
self.inference_tf = self.InitTfGraph(sampler=sampler)
self.inference_sess = self.inference_tf.compat.v1.Session()
# Seed the RNG.
if seed is not None:
np.random.seed(seed)
self.inference_tf.compat.v1.set_random_seed(seed)
# If --clgen_tf_backend_reset_inference_state_between_batches, the state
# is reset at the beginning of every sample batch. Else, this is the only
# place it is initialized.
self.inference_state = self.inference_sess.run(
self.cell.get_initial_state(batch_size = sampler.batch_size, dtype = self.inference_tf.float32)
)
self.inference_tf.compat.v1.global_variables_initializer().run(
session=self.inference_sess
)
# Restore trained model weights.
saver = self.inference_tf.compat.v1.train.Saver(
self.inference_tf.compat.v1.global_variables()
)
checkpoint_state = self.inference_tf.train.get_checkpoint_state(
self.cache.path / "checkpoints",
)
# These assertions will fail if the model has no checkpoints. Since this
# should only ever be called after Train(), there is no good reason for
# these assertions to fail.
assert checkpoint_state
assert checkpoint_state.model_checkpoint_path
if FLAGS.select_checkpoint_step == -1:
saver.restore(self.inference_sess, checkpoint_state.model_checkpoint_path)
else:
saver.restore(self.inference_sess, str(self.cache.path / "checkpoints" / "checkpoint-{}".format(FLAGS.select_checkpoint_step)))
self.inference_sess.run(
tf.compat.v1.assign(self.temperature, sampler.temperature)
)
def InitSampleBatch(self, sampler: samplers.Sampler) -> None:
if FLAGS.clgen_tf_backend_reset_inference_state_between_batches:
self.inference_state = self.inference_sess.run(
self.cell.get_initial_state(batch_size = sampler.batch_size, dtype = self.inference_tf.float32)
)
self.inference_indices = np.tile(
sampler.encoded_start_text, [sampler.batch_size, 1]
)
def SampleNextIndices(self, sampler: samplers.Sampler, done: np.ndarray):
length = self.inference_indices.shape[1]
assert length < sampler.sequence_length
expanded_indices = np.zeros((sampler.batch_size, sampler.sequence_length))
expanded_indices[:, :length] = self.inference_indices
synthesized_lengths = np.full([sampler.batch_size], sampler.sequence_length)
synthesized_lengths[done] = 0
feed = {
self.initial_state: self.inference_state,
self.input_data: expanded_indices,
self.lengths: synthesized_lengths,
self.seed_length: length,
}
generated, self.inference_state = self.inference_sess.run(
[self.generated, self.final_state], feed
)
self.inference_indices = generated[:, -1].reshape((sampler.batch_size, 1))
if length > 1:
generated = generated[:, length - 1 :]
return generated, generated
def RandomizeSampleState(self) -> None:
tf.compat.v1.disable_eager_execution()
self.inference_state = [
tf.compat.v1.nn.rnn_cell.LSTMStateTuple(
st1 + np.random.normal(scale=0.2, size=np.shape(st1)),
st2 + np.random.normal(scale=0.2, size=np.shape(st2)),
)
for st1, st2 in self.inference_state
]
def ResetSampleState(self, sampler: samplers.Sampler, state, seed) -> None:
self.inference_state = copy.deepcopy(state)
self.inference_indices = np.tile(seed, [sampler.batch_size, 1])
def EvaluateSampleState(self, sampler: samplers.Sampler):
length = self.inference_indices.shape[1] - 1
if length == 0:
return
last_indices = self.inference_indices[:, -1:]
self.inference_indices = self.inference_indices[:, :-1]
expanded_indices = np.zeros((sampler.batch_size, sampler.sequence_length))
expanded_indices[:, :length] = self.inference_indices
synthesized_lengths = np.full([sampler.batch_size], length)
feed = {
self.initial_state: self.inference_state,
self.input_data: expanded_indices,
self.lengths: synthesized_lengths,
self.seed_length: length,
}
self.inference_state = self.inference_sess.run([self.final_state], feed)
self.inference_indices = last_indices
state_copy = copy.deepcopy(self.inference_state)
input_carry_copy = self.inference_indices[0]
return state_copy, input_carry_copy
@property
def is_trained(self) -> bool:
"""Determine if model has been trained."""
# Count the number of checkpoint files which TensorFlow has created.
checkpoint_files = [
f.stem
for f in (self.cache.path / "checkpoints").iterdir()
if f.name.startswith("checkpoint-") and f.name.endswith(".meta")
]
epoch_nums = [int(x.split("-")[-1]) for x in checkpoint_files]
return self.config.training.num_epochs in epoch_nums
| 23,354 | 34.493921 | 133 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/keras_sequential/keras_sequential.py | # Copyright (c) 2016-2020 Chris Cummins.
#
# clgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# clgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with clgen. If not, see <https://www.gnu.org/licenses/>.
"""BenchPress models using a Keras backend."""
import io
import pathlib
import typing
import numpy as np
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import builders
from deeplearning.benchpress.models.keras_sequential.data_generator import KerasBatchGenerator
from absl import flags
import humanize
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class kerasSequential(backends.BackendBase):
"""A model with an embedding layer, using a keras backend."""
def __init__(self, *args, **kwargs):
"""Instantiate a model.
Args:
args: Arguments to be passed to BackendBase.__init__().
kwargs: Arguments to be passed to BackendBase.__init__().
"""
super(kerasSequential, self).__init__(*args, **kwargs)
# Create the necessary cache directories.
(self.cache.path / "embeddings").mkdir(exist_ok=True)
# Attributes that will be lazily set.
self._training_model: typing.Optional["keras.models.Sequential"] = None
self._inference_model: typing.Optional["keras.models.Sequential"] = None
self._inference_batch_size: typing.Optional[int] = None
self.inference_indices = None
self.inference_model = None
def GetTrainingModel(self) -> "keras.models.Sequential":
"""Get the Keras model."""
if self._training_model:
return self._training_model
self._training_model = self.Train()
return self._training_model
def samplesWithCategorical(self):
return True
def Train(self, corpus, **unused_kwargs) -> "keras.models.Sequential":
"""Locked training.
If there are cached epoch checkpoints, the one closest to the target number
of epochs will be loaded, and the model will be trained for only the
remaining number of epochs, if any. This means that calling this function
twice will only actually train the model the first time, and all subsequent
calls will be no-ops.
This method must only be called when the model is locked.
Returns:
The trained Keras model.
"""
del unused_kwargs
model = builders.BuildKerasModel(self.config, self.tokenizer.vocab_size)
with open(self.cache.keypath("model.yaml"), "w") as f:
f.write(model.to_yaml())
model.compile(
loss="categorical_crossentropy",
optimizer=builders.BuildOptimizer(self.config),
)
# Print a model summary.
buf = io.StringIO()
model.summary(print_fn=lambda x: buf.write(x + "\n"))
l.logger().info("Model summary:\n{}".format(buf.getvalue()))
# TODO(cec): Add a tokenizer.CreateVocabularyFile() method, with frequency
# counts for a given corpus.
def Escape(token: str) -> str:
"""Make a token visible and printable."""
if token == "\t":
return "\\t"
elif token == "\n":
return "\\n"
elif not token.strip():
return f"'{token}'"
else:
return token
if not (self.cache.path / "embeddings" / "metadata.tsv").is_file():
with open(self.cache.path / "embeddings" / "metadata.tsv", "w") as f:
for _, token in sorted(
self.tokenizer.decoder.items(), key=lambda x: x[0]
):
f.write(Escape(token) + "\n")
self.num_epochs = self.config.training.num_epochs
starting_epoch = 0
epoch_checkpoints = self.epoch_checkpoints
if len(epoch_checkpoints) >= self.num_epochs:
# We have already trained a model to at least this number of epochs, so
# simply load the weights from that epoch and call it a day.
l.logger().info( "Loading weights from {}"
.format(
epoch_checkpoints[self.num_epochs - 1]
)
)
model.load_weights(epoch_checkpoints[self.num_epochs - 1])
return model
# Now entering the point at which training is inevitable.
# with logutil.TeeLogsToFile("train", self.cache.path / "logs"):
# Deferred importing of Keras so that we don't have to activate the
# TensorFlow backend every time we import this module.
import keras
if epoch_checkpoints:
# We have already trained a model at least part of the way to our target
# number of epochs, so load the most recent one.
starting_epoch = len(epoch_checkpoints)
l.logger().info("Resuming training from epoch {}.".format(starting_epoch))
model.load_weights(epoch_checkpoints[-1])
callbacks = [
keras.callbacks.ModelCheckpoint(
str(self.cache.path / "checkpoints" / "{epoch:03d}.hdf5"),
verbose=1,
mode="min",
save_best_only=False,
),
keras.callbacks.TensorBoard(
str(self.cache.path / "embeddings"),
write_graph=True,
embeddings_freq=1,
embeddings_metadata={
"embedding_1": str(self.cache.path / "embeddings" / "metadata.tsv"),
},
),
self.telemetry.TrainingLogger(self.cache.path / "logs").KerasCallback(keras),
]
generator = KerasBatchGenerator()
steps_per_epoch = (corpus.encoded.token_count - 1) // (
self.config.training.batch_size * self.config.training.sequence_length
)
l.logger().info(
"Step counts: {} per epoch, {} left to do, {} total"
.format(
humanize.intcomma(steps_per_epoch),
humanize.intcomma((self.num_epochs - starting_epoch) * steps_per_epoch),
humanize.intcomma(self.num_epochs * steps_per_epoch),
)
)
model.fit_generator(
generator.AutoGenerator(corpus, self.config.training),
steps_per_epoch=steps_per_epoch,
callbacks=callbacks,
initial_epoch=starting_epoch,
epochs=self.num_epochs,
)
return model
def GetInferenceModel(self, sampler: samplers.Sampler) -> "keras.models.Sequential":
"""Like training model, but with different batch size."""
if self._inference_model and self._inference_batch_size == sampler.batch_size:
return self._inference_model
# Deferred importing of Keras so that we don't have to activate the
# TensorFlow backend every time we import this module.
import keras
l.logger().info("Building inference model.")
model = self.GetTrainingModel()
config = model.get_config()
l.logger().info("Sampling with batch size {}".format(sampler.batch_size))
config[0]["config"]["batch_input_shape"] = (sampler.batch_size, 1)
inference_model = keras.models.Sequential.from_config(config)
inference_model.trainable = False
inference_model.set_weights(model.get_weights())
self._inference_model = inference_model
self._inference_batch_size = sampler.batch_size
return inference_model
def InitSampling(
self, sampler: samplers.Sampler, seed: typing.Optional[int] = None
) -> None:
self.inference_model = self.GetInferenceModel(sampler)
if seed is not None:
np.random.seed(seed)
def InitSampleBatch(self, sampler: samplers.Sampler) -> None:
self.inference_model.reset_states()
# Set internal states from seed text.
for index in sampler.encoded_start_text[:-1]:
x = np.array([[index]] * sampler.batch_size)
# input shape: (batch_size, 1)
self.inference_model.predict(x)
self.inference_indices = [
sampler.encoded_start_text[-1]
] * sampler.batch_size
def SampleNextIndices(self, sampler: samplers.Sampler, done: np.ndarray):
del done
result = np.zeros((sampler.batch_size, 1024))
for idx in range(1024):
# Predict the next index for the entire batch.
x = np.reshape(self.inference_indices, [sampler.batch_size, 1])
# Input shape: (batch_size, 1).
probabilities = self.inference_model.predict(x)
# Output shape: (batch_size, 1, vocab_size).
self.inference_indices = [
WeightedPick(p.squeeze(), sampler.temperature) for p in probabilities
]
result[:, idx] = self.inference_indices
return result
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
raise NotImplementedError
@property
def epoch_checkpoints(self) -> typing.List[pathlib.Path]:
"""Get the paths to all epoch checkpoint files in order.
Remember that the returned list is zero-indexed, so the epoch number is
the array index plus one. E.g. The checkpoint for epoch 5 is
epoch_checkpoints[4].
Returns:
A list of paths.
"""
checkpoint_dir = pathlib.Path(self.cache.path) / "checkpoints"
return [
checkpoint_dir / x
for x in sorted(pathlib.Path(self.cache["checkpoints"]).iterdir())
]
@property
def is_trained(self) -> bool:
"""Return whether the model has previously been trained."""
return len(self.epoch_checkpoints) >= self.config.training.num_epochs
def WeightedPick(predictions: np.ndarray, temperature: float) -> int:
"""Make a weighted choice from a predictions array."""
predictions = np.log(np.asarray(predictions).astype("float64")) / temperature
predictions_exp = np.exp(predictions)
# Normalize the probabilities.
predictions = predictions_exp / np.sum(predictions_exp)
predictions = np.random.multinomial(1, predictions, 1)
return np.argmax(predictions)
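# Usage sketch (illustration only, not called by the backend): WeightedPick
# turns a probability vector into a single sampled index, with temperature
# controlling how peaked the distribution is. The probabilities below are
# made-up example values.
def _example_weighted_pick() -> typing.Tuple[int, int]:
  example_probs = np.array([0.1, 0.2, 0.7])
  near_greedy = WeightedPick(example_probs, temperature=0.1)  # almost always index 2
  more_random = WeightedPick(example_probs, temperature=2.0)  # flatter, more random choice
  return near_greedy, more_random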
| 9,992 | 35.075812 | 94 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/modeling_utils.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import typing
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.models.torch_bert import generation_utils
from deeplearning.benchpress.util import logging as l
def find_pruneable_heads_and_indices(
heads: typing.List[int], n_heads: int, head_size: int, already_pruned_heads: typing.Set[int]
) -> typing.Tuple[typing.Set[int], torch.LongTensor]:
"""
Finds the heads and their indices taking :obj:`already_pruned_heads` into account.
Args:
heads (:obj:`typing.List[int]`): List of the indices of heads to prune.
n_heads (:obj:`int`): The number of heads in the model.
head_size (:obj:`int`): The size of each head.
already_pruned_heads (:obj:`typing.Set[int]`): A set of already pruned heads.
Returns:
:obj:`typing.Tuple[typing.Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
"""
mask = torch.ones(n_heads, head_size)
heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index: torch.LongTensor = torch.arange(len(mask))[mask].long()
return heads, index
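# Usage sketch (illustration only, hypothetical sizes): with 12 heads of 64
# dimensions each, head 1 already pruned, and heads [0, 2] requested, the
# helper returns the still-prunable heads and the flat indices of the
# surviving rows in the packed weight matrix.
def _example_find_pruneable_heads() -> typing.Tuple[typing.Set[int], torch.LongTensor]:
  heads, index = find_pruneable_heads_and_indices(
    heads=[0, 2], n_heads=12, head_size=64, already_pruned_heads={1}
  )
  # heads == {0, 2}; index has (12 - 2) * 64 = 640 entries.
  return heads, index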
class ModuleUtilsMixin:
"""
A few utilities for :obj:`torch.torch.nn.Modules`, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get the number of (optionally, trainable) parameters in the model.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
Returns:
:obj:`int`: The number of parameters.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
@staticmethod
def _hook_rss_memory_pre_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_pre_forward = mem.rss
return None
@staticmethod
def _hook_rss_memory_post_forward(module, *args, **kwargs):
try:
import psutil
except (ImportError):
raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")
process = psutil.Process(os.getpid())
mem = process.memory_info()
module.mem_rss_post_forward = mem.rss
mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
return None
def add_memory_hooks(self):
"""
Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.
Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
zero with :obj:`model.reset_memory_hooks_state()`.
"""
for module in self.modules():
module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
module.register_forward_hook(self._hook_rss_memory_post_forward)
self.reset_memory_hooks_state()
def reset_memory_hooks_state(self):
"""
Reset the :obj:`mem_rss_diff` attribute of each module (see
:func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
"""
for module in self.modules():
module.mem_rss_diff = 0
module.mem_rss_post_forward = 0
module.mem_rss_pre_forward = 0
@property
def device(self) -> pytorch.device:
"""
:obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
device).
"""
try:
return next(self.parameters()).device
except StopIteration:
# For torch.nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: torch.nn.Module) -> typing.List[typing.Tuple[str, torch.Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def dtype(self) -> torch.dtype:
"""
:obj:`torch.torch.dtype`: The torch.dtype of the module (assuming that all the module parameters have the same torch.dtype).
"""
try:
return next(self.parameters()).dtype
except StopIteration:
# For torch.nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: torch.nn.Module) -> typing.List[typing.Tuple[str, torch.Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].dtype
def invert_attention_mask(self, encoder_attention_mask: torch.Tensor) -> torch.Tensor:
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
if self.dtype == torch.float16:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
elif self.dtype == torch.float32:
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
else:
raise ValueError(
"{} not recognized. `torch.dtype` should be set to either `torch.float32` or `torch.float16`".format(
self.dtype
)
)
return encoder_extended_attention_mask
def get_extended_attention_mask(self, attention_mask: torch.Tensor, input_shape: typing.Tuple[int], device: torch.device) -> torch.Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`typing.Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype = self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def get_head_mask(
self, head_mask: typing.Optional[torch.Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
) -> torch.Tensor:
"""
Prepare the head mask if needed.
Args:
head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
num_hidden_layers (:obj:`int`):
The number of hidden layers in the model.
is_attention_chunked: (:obj:`bool`, `optional, defaults to :obj:`False`):
Whether or not the attentions scores are computed by chunks or not.
Returns:
:obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]`
or list with :obj:`[None]` for each layer.
"""
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype = self.dtype) # switch to float if needed + fp16 compatibility
return head_mask
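# Illustration only (not used by the classes in this file): the encoder-side
# branch of `get_extended_attention_mask` broadcasts a [batch, seq] padding
# mask to [batch, 1, 1, seq] and converts 1/0 keep/ignore flags into additive
# 0 / -10000.0 scores that are added onto the raw attention logits.
def _example_extended_attention_mask(attention_mask: torch.Tensor) -> torch.Tensor:
  extended = attention_mask[:, None, None, :].to(dtype=torch.float32)
  return (1.0 - extended) * -10000.0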
class PreTrainedModel(torch.nn.Module, ModuleUtilsMixin, generation_utils.GenerationMixin):
r"""
Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **load_tf_weights** (:obj:`typing.Callable`) -- A python `method` for loading a TensorFlow checkpoint in a
PyTorch model, taking as arguments:
- **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
TensorFlow checkpoint.
- **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated
to the model.
- **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
- **authorized_missing_keys** (:obj:`typing.Optional[typing.List[str]]`) -- A list of regex patterns of tensor names to ignore
when loading the model (and avoid unnecessary warnings).
"""
config_class = None
base_model_prefix = ""
authorized_missing_keys = None
@property
def dummy_inputs(self) -> typing.Dict[str, torch.Tensor]:
"""
:obj:`typing.Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__()
# Save config in model
self.config = config
@property
def base_model(self) -> torch.nn.Module:
"""
:obj:`torch.torch.nn.Module`: The main body of the model.
"""
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self) -> torch.nn.Module:
"""
Returns the model's input embeddings.
Returns:
:obj:`torch.nn.Module`: A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value: torch.nn.Module):
"""
Set the model's input embeddings.
Args:
value (:obj:`torch.nn.Module`): A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self) -> torch.nn.Module:
"""
Returns the model's output embeddings.
Returns:
:obj:`torch.nn.Module`: A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the :obj:`torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)
@staticmethod
def _tie_encoder_decoder_weights(encoder: torch.nn.Module, decoder: torch.nn.Module, base_model_prefix: str):
uninitialized_encoder_weights: typing.List[str] = []
assert decoder.__class__ == encoder.__class__, f"{decoder.__class__} and {encoder.__class__} have to be equal."
def tie_encoder_to_decoder_recursively(
decoder_pointer: torch.nn.Module,
encoder_pointer: torch.nn.Module,
module_name: str,
uninitialized_encoder_weights: typing.List[str],
depth=0,
):
assert isinstance(decoder_pointer, torch.nn.Module) and isinstance(
encoder_pointer, torch.nn.Module
), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.torch.nn.Module"
if hasattr(decoder_pointer, "weight"):
assert hasattr(encoder_pointer, "weight")
encoder_pointer.weight = decoder_pointer.weight
if hasattr(decoder_pointer, "bias"):
assert hasattr(encoder_pointer, "bias")
encoder_pointer.bias = decoder_pointer.bias
return
encoder_modules = encoder_pointer._modules
decoder_modules = decoder_pointer._modules
if len(decoder_modules) > 0:
assert (
len(encoder_modules) > 0
), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"
all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
encoder_layer_pos = 0
for name, module in decoder_modules.items():
if name.isdigit():
encoder_name = str(int(name) + encoder_layer_pos)
decoder_name = name
if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])):
# this can happen if the name corresponds to the position in a module list of layers;
# in this case the decoder has added a cross-attention layer that the encoder does not have,
# so skip this step and subtract one layer position from the encoder
encoder_layer_pos -= 1
continue
elif name not in encoder_modules:
continue
elif depth > 500:
raise ValueError(
"Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `torch.nn.Modules` of your model."
)
else:
decoder_name = encoder_name = name
tie_encoder_to_decoder_recursively(
decoder_modules[decoder_name],
encoder_modules[encoder_name],
module_name + "/" + name,
uninitialized_encoder_weights,
depth=depth + 1,
)
all_encoder_weights.remove(module_name + "/" + encoder_name)
uninitialized_encoder_weights += list(all_encoder_weights)
# tie weights recursively
tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
if len(uninitialized_encoder_weights) > 0:
l.logger().warning(
f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
)
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
""" Tie or clone module weights depending of whether we are using TorchScript or not
"""
if self.config.torchscript:
output_embeddings.weight = torch.nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if getattr(output_embeddings, "bias", None) is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens: typing.Optional[int] = None) -> torch.nn.Embedding:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(
self, old_embeddings: torch.nn.Embedding, new_num_tokens: typing.Optional[int] = None
) -> torch.nn.Embedding:
"""
Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end
Args:
old_embeddings (:obj:`torch.nn.Embedding`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
:obj:`torch.nn.Embedding` module of the model without doing anything.
Return:
:obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = torch.nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy token embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
"""
Initializes and prunes weights if needed.
"""
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune: typing.Dict[int, typing.List[int]]):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`typing.Dict[int, typing.List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list
of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will
prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
`:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
l.logger().error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
if getattr(self.config, "xla_device", False):
if pytorch.xla_model.is_master_ordinal():
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# pytorch.xla_model.save takes care of saving only from master
pytorch.xla_model.save(model_to_save.state_dict(), output_model_file)
else:
model_to_save.config.save_pretrained(save_directory)
torch.save(model_to_save.state_dict(), output_model_file)
l.logger().info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated).
To train the model, you should first set it back in training mode with ``model.train()``.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str`, `optional`):
Can be either:
- A string with the `shortcut name` of a pretrained model to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g.,
``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`typing.Union[PretrainedConfig, str]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `shortcut name` string of a
pretrained model).
- The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (:obj:`typing.Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`typing.Dict[str, str], `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each
request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error
messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
use_cdn(:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use Cloudfront (a Content Delivery Network, or CDN) when searching for the model on
our S3 (faster). Should be set to :obj:`False` for checkpoints larger than 20GB.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attention=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
Examples::
from transformers import BertConfig, BertModel
# Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('bert-base-uncased')
# Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
model = BertModel.from_pretrained('./test/saved_model/')
# Update configuration during loading.
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True)
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_cdn = kwargs.pop("use_cdn", True)
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a torch.TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
use_cdn=use_cdn,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
if resolved_archive_file is None:
raise EnvironmentError
except EnvironmentError:
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
l.logger().info("loading weights file {}".format(archive_file))
else:
l.logger().info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from transformers import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
l.logger().error(
"Loading a torch.TensorFlow model in PyTorch, requires both PyTorch and torch.TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: torch.nn.Module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
start_prefix = ""
model_to_load = model
has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
start_prefix = cls.base_model_prefix + "."
if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
model_to_load = getattr(model, cls.base_model_prefix)
load(model_to_load, prefix=start_prefix)
if model.__class__.__name__ != model_to_load.__class__.__name__:
base_model_state_dict = model_to_load.state_dict().keys()
head_model_state_dict_without_base_prefix = [
key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
]
missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)
# Some models may have keys that are not in the state by design, removing them before needlessly warning
# the user.
if cls.authorized_missing_keys is not None:
for pat in cls.authorized_missing_keys:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
l.logger().warning(
f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
l.logger().info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
l.logger().warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
l.logger().info(
f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)
)
)
# make sure token embedding weights are still tied if needed
model.tie_weights()
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
if hasattr(config, "xla_device") and config.xla_device and is_torch_tpu_available():
model = pytorch.xla_model.send_cpu_data_to_device(model, pytorch.xla_model.xla_device())
model.to(pytorch.xla_model.xla_device())
return model
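# Usage sketch (illustration only; `model` is any PreTrainedModel subclass
# instance supplied by the caller): growing the vocabulary keeps the existing
# embedding rows, appends freshly initialised ones, updates config.vocab_size
# and re-ties the output embeddings where applicable.
def _example_grow_vocabulary(model: "PreTrainedModel", extra_tokens: int) -> int:
  model.resize_token_embeddings(model.config.vocab_size + extra_tokens)
  return model.config.vocab_size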
def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear:
"""
Prune a linear layer to keep only entries in index.
Used to remove heads.
Args:
layer (:obj:`torch.nn.Linear`): The layer to prune.
index (:obj:`torch.LongTensor`): The indices to keep in the layer.
dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.
Returns:
:obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = torch.nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
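# Usage sketch (illustration only, hypothetical sizes): keep only the first
# 128 output rows of a linear projection, as one would after deciding which
# attention-head rows survive pruning.
def _example_prune_linear(layer: torch.nn.Linear) -> torch.nn.Linear:
  keep = torch.arange(128)  # assumes layer.out_features >= 128
  return prune_linear_layer(layer, keep, dim=0)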
def apply_chunking_to_forward(
forward_fn: typing.Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
"""
This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.
If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
directly applying :obj:`forward_fn` to :obj:`input_tensors`.
Args:
forward_fn (:obj:`typing.Callable[..., torch.Tensor]`):
The forward function of the model.
chunk_size (:obj:`int`):
The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
chunk_dim (:obj:`int`):
The dimension over which the :obj:`input_tensors` should be chunked.
input_tensors (:obj:`typing.Tuple[torch.Tensor]`):
The input tensors of ``forward_fn`` which will be chunked.
Returns:
:obj:`torch.Tensor`: A tensor with the same shape as :obj:`forward_fn` would have given if applied directly.
Examples::
# rename the usual forward() fn to forward_chunk()
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
# implement a chunked forward function
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
"""
assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
tensor_shape = input_tensors[0].shape
assert all(
input_tensor.shape == tensor_shape for input_tensor in input_tensors
), "All input tenors have to be of the same shape"
# inspect.signature exists since Python 3.5 and is a Python method -> no problem with backward compatibility
num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
assert num_args_in_forward_chunk_fn == len(
input_tensors
), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
num_args_in_forward_chunk_fn, len(input_tensors)
)
if chunk_size > 0:
assert (
input_tensors[0].shape[chunk_dim] % chunk_size == 0
), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
input_tensors[0].shape[chunk_dim], chunk_size
)
num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size
# chunk input tensor into tuples
input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
# apply forward fn to every tuple
output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
# concatenate output at same dimension
return torch.cat(output_chunks, dim=chunk_dim)
return forward_fn(*input_tensors)
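# Usage sketch (illustration only): apply a feed-forward layer to a long
# sequence in chunks of 64 positions along dim 1 to bound peak memory; the
# result is identical to one full-width call as long as 64 divides the
# sequence length of `hidden_states` ([batch, seq, hidden]).
def _example_chunked_forward(dense: torch.nn.Linear, hidden_states: torch.Tensor) -> torch.Tensor:
  def ffn_chunk(chunk: torch.Tensor) -> torch.Tensor:
    return dense(chunk)
  return apply_chunking_to_forward(ffn_chunk, 64, 1, hidden_states)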
| 47,214 | 45.063415 | 180 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import os
import typing
import copy
import datetime
import glob
import humanize
import sklearn
import pickle
import functools
import numpy as np
import pathlib
import multiprocessing
import math
import tqdm
import threading
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import feature_sampler
from deeplearning.benchpress.features import active_feed_database
from deeplearning.benchpress.features import evaluate_cand_database
from deeplearning.benchpress.models import lm_data_generator
from deeplearning.benchpress.models import sequence_masking
from deeplearning.benchpress.models.torch_bert import datasets
from deeplearning.benchpress.samplers import sample_observers
from deeplearning.benchpress.preprocessors import opencl
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"skip_first_queue",
False,
"Hacky way to speedup active sampling experiments."
)
flags.DEFINE_boolean(
"evaluate_candidates",
False,
"Select to do exhaustive evaluation of sampling search candidates."
)
flags.DEFINE_boolean(
"evolutionary_search",
True,
"Select to perform independent per-generation candidate search instead of son-better-than-parent paradigm."
)
flags.DEFINE_boolean(
"features_standard_scaler",
False,
"Select to use sklearn StandardScaler for generation standardization."
)
flags.DEFINE_boolean(
"start_from_cached",
False,
"Select to start from cached active feeds instead of restarting from axis origins."
)
class ActiveSampleFeed(typing.NamedTuple):
"""
Representation of an active learning input to the model.
"""
# An array of original input
input_feed : np.array
# The feature space of the original input
input_features : typing.Dict[str, float]
# Distance from target features of input feed. Valid after 1st generation.
input_score : float
# Depth increases when a valid inference sample is fed back as an input.
gen_id : int
def ActiveSampleFeed_to_JSON(f: ActiveSampleFeed) -> typing.Dict[str, typing.Any]:
"""
Convert NamedTuple to JSON serializable dictionary.
"""
return {
'input_feed' : list([int(x) for x in f.input_feed]),
'input_features' : {k: float(v) for k, v in f.input_features.items()},
'input_score' : float(f.input_score),
'gen_id' : int(f.gen_id),
}
def JSON_to_ActiveSampleFeed(d: typing.Dict[str, typing.Any]) -> ActiveSampleFeed:
"""
JSON serializable dictionary to ActiveSampleFeed.
"""
return ActiveSampleFeed(**d)
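# Round-trip sketch (illustration only): an ActiveSampleFeed survives the
# dict conversion used for JSON checkpoints, except that input_feed comes
# back as a plain list of ints rather than an np.array.
def _example_feed_round_trip(feed: ActiveSampleFeed) -> ActiveSampleFeed:
  return JSON_to_ActiveSampleFeed(ActiveSampleFeed_to_JSON(feed))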
class ActiveSample(typing.NamedTuple):
"""
Representation of an active learning sample.
"""
# ActiveSampleFeed instance of model input
sample_feed : typing.TypeVar("ActiveSamplingGenerator.ActiveSampleFeed")
# Input ids that led to this prediction
input_ids : np.array
# hole lengths and positions of input ids.
hole_lengths : typing.List[sequence_masking.MaskedLmInstance]
# Model prediction
sample : np.array
# Sample indices of given prediction.
sample_indices : np.array
# number of tokens the model filled holes with.
sample_indices_size : int
# Output features of sample
features : typing.Dict[str, float]
# Runtime features of ActiveSample (will be populated lazily.)
runtime_features : typing.Dict[str, float]
# Score of sample based on active learning search.
score : typing.Union[bool, float]
def ActiveSample_to_JSON(f: ActiveSample) -> typing.Dict[str, typing.Any]:
"""
Convert NamedTuple to JSON serializable dictionary.
"""
return {
'sample_feed' : ActiveSampleFeed_to_JSON(f.sample_feed),
'input_ids' : list([int(x) for x in f.input_ids]),
'hole_lengths' : list([int(x) for x in f.hole_lengths]),
'sample' : list([int(x) for x in f.sample]),
'sample_indices' : list([int(x) for x in f.sample_indices]),
'sample_indices_size' : int(f.sample_indices_size),
'features' : {k: float(v) for k, v in f.features.items()},
'runtime_features' : {k: int(v) if k != "label" else str(v) for k, v in f.runtime_features.items() },
'score' : float(f.score),
}
def JSON_to_ActiveSample(d: typing.Dict[str, typing.Any]) -> ActiveSample:
"""
JSON serializable dictionary to ActiveSampleFeed.
"""
return ActiveSample(
sample_feed = JSON_to_ActiveSampleFeed(d['sample_feed']),
input_ids = d['input_ids'],
hole_lengths = d['hole_lengths'],
sample = d['sample'],
sample_indices = d['sample_indices'],
sample_indices_size = d['sample_indices_size'],
features = d['features'],
runtime_features = d['runtime_features'],
score = d['score']
)
def IR_candidate_worker(sample : np.array,
feature_space : str,
target_benchmark : feature_sampler.Benchmark,
tokenizer : tokenizers.TokenizerBase,
) -> ActiveSample:
"""
ActiveSample worker for LLVM-IR feature spaces.
"""
sample, sample_indices, input_ids, mlm_lengths, feed = sample
assert sample[0] != tokenizer.padToken, sample
try:
code = tokenizer.ArrayToCode(sample, with_formatting = False)
features = extractor.ExtractFeatures(code, [feature_space])[feature_space]
if features:
return (True, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = features,
runtime_features = target_benchmark.runtime_features,
score = feature_sampler.calculate_distance(features, target_benchmark.features, feature_space),
))
except ValueError:
pass
except Exception as e:
raise e
return (False, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = {},
runtime_features = target_benchmark.runtime_features,
score = math.inf,
))
def text_candidate_worker(sample : np.array,
feature_space : str,
target_benchmark : feature_sampler.Benchmark,
tokenizer : tokenizers.TokenizerBase,
) -> ActiveSample:
"""
ActiveSample worker for text-based feature spaces.
"""
sample, sample_indices, input_ids, mlm_lengths, feed = sample
assert sample[0] != tokenizer.padToken, sample
try:
code = tokenizer.ArrayToCode(sample, with_formatting = False)
_ = opencl.Compile(code)
features = extractor.ExtractFeatures(code, [feature_space])[feature_space]
if features:
return (True, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = features,
runtime_features = target_benchmark.runtime_features,
score = feature_sampler.calculate_distance(features, target_benchmark.features, feature_space),
))
except ValueError:
pass
except Exception as e:
raise e
return (False, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = {},
runtime_features = target_benchmark.runtime_features,
score = math.inf,
))
def hidden_state_candidate_worker(sample : np.array,
feature_space : str,
target_benchmark : feature_sampler.Benchmark,
tokenizer : tokenizers.TokenizerBase,
) -> ActiveSample:
"""
  Given the hidden states provided by the language model, keep the samples that compile and create ActiveSamples.
"""
sample, sample_indices, input_ids, mlm_lengths, hidden_state, feed = sample
assert sample[0] != tokenizer.padToken, sample
try:
code = tokenizer.ArrayToCode(sample, with_formatting = False)
_ = opencl.Compile(code)
features = extractor.RawToDictFeats(hidden_state, [feature_space])[feature_space]
return (True, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = features,
runtime_features = target_benchmark.runtime_features,
score = feature_sampler.calculate_distance(features, target_benchmark.features, feature_space),
))
except ValueError:
pass
except Exception as e:
raise e
return (False, ActiveSample(
sample_feed = feed,
sample = sample,
sample_indices = [x for x in sample_indices if x != tokenizer.padToken],
input_ids = [x for x in input_ids if x != tokenizer.padToken],
hole_lengths = mlm_lengths,
sample_indices_size = len([x for x in sample_indices if x != tokenizer.padToken]),
features = {},
runtime_features = target_benchmark.runtime_features,
score = math.inf,
))
def dataload_worker(x : int,
feed : typing.List[np.array],
func : typing.Union[
sequence_masking.HoleSequence,
sequence_masking.HoleSequenceSeqMasks,
'sequence_masking.MaskSequence'
],
batch : int,
batch_per_feed : int,
) -> typing.Dict[str, np.array]:
"""
Masking input feed worker.
"""
try:
return [f for _ in range(batch // batch_per_feed) for f in [func(fd) for fd in feed * batch_per_feed]]
except Exception as e:
raise e
def write_samples_cache(db_sample_obs : sample_observers.SamplesDatabaseObserver,
tokenizer : tokenizers.TokenizerBase,
samples : typing.List[ActiveSample],
) -> None:
"""
Candidate logging/caching worker.
"""
for sample in samples:
try:
s = model_pb2.Sample(
train_step = -1,
text = tokenizer.ArrayToCode(sample.sample, with_formatting = True),
sample_indices = "",
encoded_sample_indices = "",
original_input = "",
sample_feed = tokenizer.ArrayToCode(sample.sample_feed.input_feed, with_formatting = True),
encoded_text = "",
sample_start_epoch_ms_utc = 0,
sample_time_ms = 0,
wall_time_ms = 0,
feature_vector = '\n'.join(["{}:{}".format(k, v) for k, v in sample.features.items()]) if sample.features else "None",
        num_tokens = np.where(sample.sample == tokenizer.padToken)[0][0] if tokenizer.padToken in sample.sample else len(sample.sample),
compile_status = True,
categorical_sampling = FLAGS.categorical_sampling,
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
db_sample_obs.OnSample(s)
except Exception:
pass
return
def write_eval_db(eval_db : evaluate_cand_database.SearchCandidateDatabase,
tokenizer : tokenizers.TokenizerBase,
compiling_samples : typing.List[ActiveSample],
rejected_samples : typing.List[ActiveSample],
target_benchmark : typing.Tuple[str, str],
target_features : typing.Dict[str, float],
gen_id : int,
) -> None:
"""
  Monitoring/caching of evaluated step candidates and rejected candidates.
"""
with eval_db.Session(commit = True) as session:
cached = {
d.sha256: d for d in
session.query(evaluate_cand_database.SearchCandidate).filter_by(target_benchmark = "// {}\n{}".format(target_benchmark[0], target_benchmark[1])).all()
}
objs = {}
for idx, samples in enumerate([compiling_samples, rejected_samples]):
for sample in samples:
if idx == 0:
compile_status = True
else:
compile_status = False
sobj = evaluate_cand_database.SearchCandidate.FromArgs(
tokenizer = tokenizer,
id = eval_db.count,
input_feed = sample.sample_feed.input_feed,
input_ids = sample.input_ids,
input_features = sample.sample_feed.input_features,
input_score = sample.sample_feed.input_score,
hole_lengths = sample.hole_lengths,
sample = sample.sample,
sample_indices = sample.sample_indices,
output_features = sample.features,
runtime_features = sample.runtime_features,
sample_score = sample.score,
target_benchmark = target_benchmark,
target_features = target_features,
compile_status = compile_status,
generation_id = gen_id,
)
if sobj.sha256 in objs:
objs[sobj.sha256][1] += 1
else:
objs[sobj.sha256] = [sobj, 1]
offset_idx = 0
try:
for sha, obj in objs.items():
if sha in cached:
entry = cached[sha]
entry.frequency += obj[1]
else:
obj[0].frequency = obj[1]
obj[0].id += offset_idx
offset_idx += 1
session.add(obj[0])
session.commit()
except Exception as e:
l.logger().error(entry)
if entry is not None:
l.logger().error(entry.id)
l.logger().error(entry.sha256)
l.logger().error(sha)
l.logger().error("count: {}".format(eval_db.count))
l.logger().error("offset_idx: {}".format(offset_idx))
print(e)
return
class torchLMDataGenerator(lm_data_generator.MaskLMDataGenerator):
"""Data generator subclass designed for PyTorch BERT model."""
@classmethod
def TrainMaskLMBatchGenerator(cls,
corpus : corpuses.Corpus,
training_opts : model_pb2.TrainingOptions,
cache_path : pathlib.Path,
num_train_steps : int = None,
pre_train : bool = False,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> lm_data_generator.MaskLMDataGenerator:
"""Initializes data generator for training."""
d = super(torchLMDataGenerator, cls()).TrainMaskLMBatchGenerator(
corpus, training_opts, cache_path, num_train_steps, pre_train,
feature_encoder, feature_tokenizer, feature_sequence_length,
)
d.dataloader = d.train_dataloader()
return d
@classmethod
def SampleMaskLMBatchGenerator(cls,
model_opts : model_pb2.TrainingOptions,
sampler : 'samplers.Sampler',
tokenizer : tokenizers.TokenizerBase,
seed : int,
sample_batch_size : int,
max_position_embeddings : int,
cache_path : pathlib.Path,
corpus : corpuses.Corpus = None,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> lm_data_generator.MaskLMDataGenerator:
"""Initializes data generator for inference."""
d = super(torchLMDataGenerator, cls()).SampleMaskLMBatchGenerator(
model_opts, sampler, tokenizer, seed,
sample_batch_size, max_position_embeddings, cache_path,
feature_encoder, feature_tokenizer, feature_sequence_length
)
if sampler.is_active:
corpus_config = d.sampler.config.sample_corpus.corpus_config
if corpus_config.HasField("hole"):
distribution = distributions.Distribution.FromHoleConfig(
corpus_config.hole, d.sampler.corpus_directory, "sample_corpus"
)
d.func = functools.partial(sequence_masking.HoleSequence,
train_set = False,
max_predictions = corpus_config.max_predictions_per_seq,
masked_lm_prob = corpus_config.masked_lm_prob,
distribution = distribution,
tokenizer = d.tokenizer,
)
elif corpus_config.HasField("mask_seq"):
distribution = distributions.Distribution.FromHoleConfig(
corpus_config.mask_seq, d.sampler.corpus_directory, "sample_corpus"
)
d.func = functools.partial(sequence_masking.HoleSequenceSeqMasks,
train_set = False,
max_predictions = corpus_config.max_predictions_per_seq,
masked_lm_prob = corpus_config.masked_lm_prob,
distribution = distribution,
tokenizer = d.tokenizer,
)
elif corpus_config.HasField("mask"):
        d.func = functools.partial(sequence_masking.MaskSequence,
train_set = False,
max_predictions = corpus_config.max_predictions_per_seq,
masked_lm_prob = corpus_config.masked_lm_prob,
config = corpus_config,
pickled_tokenizer = d.tokenizer,
is_torch = True,
)
d.loadCheckpoint()
# Active sampling attributes.
d.active_db = active_feed_database.ActiveFeedDatabase(
url = "sqlite:///{}".format(d.sampler.corpus_directory / "active_feeds.db"),
)
d.samples_cache_obs = sample_observers.SamplesDatabaseObserver(
path = d.sampler.corpus_directory / "samples_cache.db",
must_exist = False,
)
if FLAGS.evaluate_candidates:
if environment.WORLD_RANK == 0:
d.eval_db = evaluate_cand_database.SearchCandidateDatabase(
url = "sqlite:///{}".format(d.sampler.corpus_directory / "evaluated_candidates.db"),
must_exist = False,
)
if corpus_config.active.HasField("target"):
d.feat_sampler = feature_sampler.BenchmarkSampler(
workspace = d.sampler.corpus_directory,
feature_space = corpus_config.active.feature_space,
target = corpus_config.active.target,
git_corpus = corpus,
seed = d.seed,
)
else:
d.feat_sampler = feature_sampler.ActiveSampler(
workspace = d.sampler.corpus_directory,
feature_space = corpus_config.active.feature_space,
active_learner = d.sampler.active_learner,
tokenizer = d.tokenizer,
seed = d.seed,
)
d.candidate_monitor = monitors.CategoricalDistribMonitor.loadCheckpoint(
d.sampler.corpus_directory, "feature_distance"
)
d.tsne_monitor = monitors.TSNEMonitor.loadCheckpoint(
d.sampler.corpus_directory, "tsne_feature_map"
)
d.comp_rate_mon = monitors.CategoricalHistoryMonitor.loadCheckpoint(
d.sampler.corpus_directory, "comp_rate_per_gen"
)
d.exec_time_mon = monitors.CategoricalHistoryMonitor.loadCheckpoint(
d.sampler.corpus_directory, "exec_time_per_gen"
)
      # Check if benchmark set has been registered to the monitor.
if not d.feat_sampler.is_active:
if d.feat_sampler.target not in d.tsne_monitor.groups_set:
for b in d.feat_sampler.benchmarks:
d.tsne_monitor.register((b.features, d.feat_sampler.target, b.name))
d.tsne_monitor.plot()
# Store unique specs to database once.
d.addToDB(
active_feed_database.ActiveSamplingSpecs.FromArgs(
act_s_dep = corpus_config.active.active_search_depth,
act_s_wid = corpus_config.active.active_search_width,
feat_space = corpus_config.active.feature_space
)
)
d.raised_keyboard_int = False
d.raised_exception = None
d.skip_first_queue = FLAGS.skip_first_queue
d.dataloader = d.predict_dataloader()
d.loader = iter(d.dataloader)
return d
def __init__(self):
super(torchLMDataGenerator, self).__init__("pt_record")
self.dataloader = None
self.loader = None
self.comp_rate = {}
self.exec_time = {}
self.feed_queue = []
self.active_db = None
self.samples_cache_obs = None
self.eval_db = None
self.feat_sampler = None
self.candidate_monitor = None
self.tsne_monitor = None
self.comp_rate_mon = None
self.exec_time_mon = None
self.raised_keyboard_int = None
self.raised_exception = None
self.skip_first_queue = None
self.bench_idx = None
return
def train_dataloader(self, set_name = 'train_dataset', is_train = True) -> torch.utils.data.dataloader:
"""
Pytorch dataloader used for training.
    set_name defaults to 'train_dataset', so by default this dataloader is used for training.
    eval_dataloaders overrides set_name to reuse this function for all the other sets.
"""
if self.config.datapoint_time == "pre":
# Pre-computed dataset with system of files. [DEPRECATED].
dataset = datasets.LazyConcatDataset([x for x in self.dataset[set_name]['file']])
sampler = datasets.LazyRandomSampler(dataset, replacement = False)
elif self.config.datapoint_time == "online":
# Online masking of training instances.
if self.pre_train:
dataset = datasets.LazyOnlineDataset(self, is_train)
sampler = datasets.LazyRandomSampler(dataset, replacement = False)
else:
dataset = datasets.OnlineDataset(self, is_train)
if environment.WORLD_SIZE == 1:
sampler = torch.utils.data.RandomSampler(dataset, replacement = False)
else:
sampler = torch.utils.data.DistributedSampler(dataset)
else:
raise ValueError(self.config.datapoint_time)
dataloader = torch.utils.data.dataloader.DataLoader(
dataset = dataset,
batch_size = self.training_opts.batch_size,
sampler = (sampler
if pytorch.num_nodes <= 1 or not pytorch.torch_tpu_available or pytorch.torch_xla.xrt_world_size() <= 1
else torch.utils.data.distributed.DistributedSampler(
dataset = dataset,
num_replicas = pytorch.num_nodes if not pytorch.torch_tpu_available else pytorch.torch_xla.xrt_world_size(),
rank = pytorch.torch.distributed.get_rank() if not pytorch.torch_tpu_available else pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = True if environment.WORLD_SIZE > 1 else False,
)
return dataloader
def eval_dataloaders(self) -> torch.utils.data.dataloader:
"""Pytorch dataloader used for validation."""
if self.config.datapoint_time == "online":
yield "Online Corpus", self.train_dataloader(is_train = False)
else:
for set_name in self.dataset:
yield set_name, self.train_dataloader(set_name)
def predict_dataloader(self) -> torch.utils.data.dataloader:
"""
Pytorch dataloader used for inference.
isFixedStr == True means there is a fixed sample feed, e.g. 'kernel void [HOLE]'
    Otherwise, a dataset has been provided from which random sample feeds are drawn.
"""
batch_size = self.sample_batch_size
if not self.sampler.is_active and (self.sampler.isFixedStr or self.sampler.is_live):
sample_element = sequence_masking.MaskedSeqToBlob(
self.sampler.encoded_start_text, self.tokenizer, self.sampler.sequence_length, self.max_position_embeddings
)
dataset = [{k: torch.from_numpy(v) for (k, v) in sample_element.items()}] * self.sample_batch_size
sampler = torch.utils.data.SequentialSampler(dataset)
else:
if self.sampler.is_online:
"""
TODO maybe add configSampleSets here as well.
"""
if self.pre_train:
dataset = datasets.LazyOnlineDataset(self, False)
sampler = datasets.LazyRandomSampler(dataset, replacement = False)
else:
dataset = datasets.OnlineDataset(self, False)
if environment.WORLD_SIZE == 1:
sampler = torch.utils.data.RandomSampler(dataset, replacement = False)
else:
sampler = torch.utils.data.DistributedSampler(dataset)
elif self.sampler.is_active:
if self.sampler.isFixedStr:
dataset = [np.asarray(self.tokenizer.TokenizeString(self.sampler.start_text))]
else:
dataset = self.createCorpus(self.sampler.corpus_directory)
batch_size = 1
sampler = torch.utils.data.SequentialSampler(dataset)
else:
path_list = self.configSampleSets()
dataset = datasets.LazyConcatDataset(
[x for x in path_list]
)
sampler = datasets.LazyRandomSampler(dataset, replacement = False)
dataloader = torch.utils.data.dataloader.DataLoader(
dataset = dataset,
# Model's batch size is divided by sampler's batch size, in order to get
# multiple generation candidates from a given sample feed, but still
# efficiently feed big batches to make sampling faster.
# Example: model batch size 32 and sampler batch size 4.
# This dataloader will return 8 feeds. Each will be repeated 4 times.
# 32 sequences will be given to the model.
batch_size = batch_size,
sampler = (sampler
if pytorch.num_nodes <= 1 or not pytorch.torch_tpu_available or pytorch.torch_xla.xrt_world_size() <= 1
else torch.utils.data.distributed.DistributedSampler(
dataset = dataset,
num_replicas = pytorch.num_nodes if not pytorch.torch_tpu_available else pytorch.torch_xla.xrt_world_size(),
rank = pytorch.torch.distributed.get_rank() if not pytorch.torch_tpu_available else pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False,
)
return dataloader
def ActiveGeneration(self,
mwrapper : typing.TypeVar('torch_bert.torchBert'),
estimator : typing.TypeVar('torch_bert.SampleBertEstimator')
) -> typing.Tuple[np.array, np.array, np.array, np.array]:
"""
Active Learning generation core routine.
This function starts with a feed from a dataset
and returns all active samples that have reached the requested feature space.
Args:
mwrapper: BERT model wrapper.
estimator: BERT model pipeline.
Returns:
A tuple of 4 arrays:
a) Original inputs
b) Original input ids
c) Generated samples
d) Sample indices
The arrays are ordered by index.
"""
if self.feat_sampler.is_terminated():
raise StopIteration
if self.raised_keyboard_int:
self.raised_keyboard_int = False
raise KeyboardInterrupt
if self.raised_exception:
raise self.raised_exception
# Active sampling specs initialization
active_search_depth = self.sampler.config.sample_corpus.corpus_config.active.active_search_depth
active_search_width = self.sampler.config.sample_corpus.corpus_config.active.active_search_width
active_dropout_prob = self.sampler.config.sample_corpus.corpus_config.active.active_dropout_prob
sample_batch_per_feed = self.sampler.config.sample_corpus.corpus_config.active.batch_size_per_feed
if sample_batch_per_feed > self.sample_batch_size:
l.logger().warn("Throttling sample batch per feed to ({}), equal to batch size".format(self.sample_batch_size))
sample_batch_per_feed = min(sample_batch_per_feed, self.sample_batch_size)
# Initialize feed queue
org_inp = self.initOrGetQueue(self.feat_sampler.target_benchmark.features)
org_ids = copy.copy(org_inp)
total_cand, total_cand_hash = [], set()
# Sample cache thread, eval cand DB thread.
write_cache_proc = None
if FLAGS.evaluate_candidates:
write_eval_proc = None
# If the sampler is active, monitor on the go each target benchmark separately.
if self.feat_sampler.is_active:
self.tsne_monitor.register((self.feat_sampler.target_benchmark.features,
self.feat_sampler.target,
self.feat_sampler.target_benchmark.name
)
)
if len(self.feat_sampler.target_benchmark.features) > 100:
pretty_features = {k: round(v, 2) for k, v in list(self.feat_sampler.target_benchmark.features.items())[:50]}
pretty_features.update({k: round(v, 2) for k, v in list(self.feat_sampler.target_benchmark.features.items())[-50:]})
else:
pretty_features = {k: round(v, 2) for k, v in self.feat_sampler.target_benchmark.features.items()}
l.logger().info(
"{}Target features: {}{}".format(
"Target benchmark: {}\n".format(self.feat_sampler.target_benchmark.name) if self.feat_sampler.target_benchmark.name != "" else "",
pretty_features,
"\nRuntime features: {}".format(self.feat_sampler.target_benchmark.runtime_features) if self.feat_sampler.target_benchmark.runtime_features else ""
)
)
try:
## BFS style. While you have jobs, keep going.
while self.feed_queue:
## Pop the feed that will provide a sample workload.
if FLAGS.evolutionary_search:
try:
# Evolutionary search will create a workload out of all current generation
init_feed = self.feed_queue.pop(0)
feeds = [init_feed]
cur_gen = init_feed.gen_id
while self.feed_queue[0].gen_id == cur_gen:
feeds.append(self.feed_queue.pop(0))
except Exception:
pass
else:
# Non-evolutionary search will do a small workload per feed and will not give up if it doesn't further reduce distance.
# p.s.: It doesn't work.
feeds = [self.feed_queue.pop(0)]
if self.skip_first_queue:
self.skip_first_queue = False
try:
feeds = [self.feed_queue.pop(0)]
except Exception:
pass
l.logger().info("Benchmark {}, generation {}".format(self.bench_idx, feeds[0].gen_id))
# Compilation rate, execution time, per generation.
cmp_rate = [0, 0]
exec_time = 0.0
if feeds[0].gen_id not in self.comp_rate:
self.comp_rate[feeds[0].gen_id] = [0, 0]
if feeds[0].gen_id not in self.exec_time:
self.exec_time[feeds[0].gen_id] = 0.0
# Specialize sampler to current sampling input.
for feed in feeds[:1]:
self.sampler.setStartText(self.tokenizer.tokensToString(feed.input_feed, ignore_token = self.tokenizer.padToken))
self.sampler.Specialize(self.tokenizer)
# Iterate until you get a better sample or surpass the limit.
better_found, it, threshold = None, 0, 160000
while not better_found and cmp_rate[1] < threshold:
## Pre-process inputs
# workload size: how many batches of sequences you need.
wsize = (FLAGS.sample_workload_size) // (self.sample_batch_size * environment.WORLD_SIZE)
if FLAGS.evolutionary_search and feeds[0].gen_id == 0 and len(feeds) == 1:
wsize = wsize * active_search_width
# Give the input feed and some specs, get the tensor ready to feed.
inputs = self.collateInputData([feed.input_feed for feed in feeds], wsize, sample_batch_per_feed)
## Workload inference.
outputs, time = mwrapper.sample_model_step(
estimator.model,
inputs,
iteration = it,
extract_hidden_state = True if self.feat_sampler.feature_space == "HiddenState" else False,
)
## Post-process outputs.
# Keep step_candidates and evaluate them. Keep rejected candidates only for eval_cand database.
step_candidates, rejected_candidates = [], []
tcs, ts = 0, 0
(cs, s), better_found = self.registerOutputData(
outputs,
[feeds[idx] for fidx, _ in enumerate(feeds) for idx in [fidx]*wsize*self.sample_batch_size],
step_candidates,
rejected_candidates,
)
tcs += cs
ts = s
# l.logger().info("Length before: {}".format(len(step_candidates)), ddp_nodes = True)
step_candidates = distrib.get_consistent(step_candidates)
rejected_candidates = distrib.get_consistent(rejected_candidates)
          ## Register good offspring, along with step candidates, in the tsne monitor.
if not FLAGS.evolutionary_search and better_found and environment.WORLD_RANK == 0:
self.tsne_monitor.register((better_found.features, "gen_{}_accepted".format(str(feeds[0].gen_id)), str(better_found.score)))
for c in step_candidates:
self.tsne_monitor.register((c.features, "gen_{}".format(str(feeds[0].gen_id))))
## Recalculate compilation rate of generation.
cmp_rate[0] += tcs
cmp_rate[1] += ts
exec_time += time
if FLAGS.evaluate_candidates and environment.WORLD_RANK == 0:
## Write all candidates to eval_cand DB.
if write_eval_proc:
write_eval_proc.join()
write_eval_proc = multiprocessing.Process(
target = write_eval_db,
kwargs = {
'eval_db' : self.eval_db,
'tokenizer' : self.tokenizer,
'compiling_samples' : step_candidates,
'rejected_samples' : rejected_candidates,
'target_benchmark' : (self.feat_sampler.target_benchmark.name, self.feat_sampler.target_benchmark.contents),
'target_features' : self.feat_sampler.target_benchmark.features,
'gen_id' : feeds[0].gen_id,
}
)
write_eval_proc.start()
## Write to samples cache DB.
if write_cache_proc:
write_cache_proc.join()
self.samples_cache_obs.sample_id = self.samples_cache_obs.db.count
write_cache_proc = multiprocessing.Process(
target = write_samples_cache,
kwargs = {
'db_sample_obs' : self.samples_cache_obs,
'tokenizer' : self.tokenizer,
'samples' : step_candidates,
}
)
write_cache_proc.start()
if not FLAGS.evolutionary_search and better_found and feeds[0].gen_id > 0:
l.logger().info("Improved score {} -> {} in {} iterations".format(round(feed.input_score, 3), round(better_found.score, 3), it))
# Step counter.
it += 1
if FLAGS.evolutionary_search:
# No need to keep looking for better samples than parents.
# In this mode, you get a workload and keep the best independently.
break
######## End of while.
## Update all monitors.
if environment.WORLD_RANK == 0:
self.comp_rate[feeds[0].gen_id] = [sum(x) for x in zip(self.comp_rate[feeds[0].gen_id], cmp_rate)]
self.exec_time[feeds[0].gen_id] += exec_time
self.comp_rate_mon.register((feeds[0].gen_id, self.comp_rate[feeds[0].gen_id][0] / self.comp_rate[feeds[0].gen_id][1]))
self.exec_time_mon.register((feeds[0].gen_id, self.exec_time[feeds[0].gen_id] / self.comp_rate[feeds[0].gen_id][1]))
self.comp_rate_mon.plot()
self.exec_time_mon.plot()
# self.tsne_monitor.plot()
## Collect surviving candidates of generation.
# If we just started, get top-K.
if FLAGS.evolutionary_search:
best_cands = self.feat_sampler.sample_from_set(step_candidates, active_search_width, active_dropout_prob)
l.logger().info("Top-{} ({} unique) samples of generation {}: {}".format(active_search_width, len(best_cands), feeds[0].gen_id, ', '.join([str(round(c.score, 3)) for c in best_cands])))
for x in best_cands[:3]:
l.logger().info(self.tokenizer.ArrayToCode(x.sample, with_formatting = True))
elif feeds[0].gen_id == 0:
best_cands = self.feat_sampler.sample_from_set(step_candidates, active_search_width, active_dropout_prob)
l.logger().info("Starting scores: {}".format(', '.join([str(round(c.score, 3)) for c in best_cands])))
else:
# If nothing was found, there are no best cands, and we will keep searching.
if not better_found:
best_cands = []
l.logger().warn("No better candidate found...")
else:
# Otherwise, this single input feed, provides a new single better sample.
best_cands = [better_found]
# Monitor the new better candidate(s), if any.
if best_cands and environment.WORLD_RANK == 0:
self.candidate_monitor.register(
{str(best_cands[0].sample_feed.gen_id): [c.score for c in best_cands]}
)
self.candidate_monitor.plot()
# Add them back to queue and to active feed database.
found_match = False
if len(best_cands) == 0:
for feed in feeds:
self.feed_queue.append(
ActiveSampleFeed(
input_feed = feed.input_feed,
input_features = feed.input_features,
input_score = feed.input_score,
gen_id = 1 + feed.gen_id,
)
)
for nc in best_cands:
if FLAGS.evolutionary_search and environment.WORLD_RANK == 0:
self.tsne_monitor.register((nc.features, "gen_{}_accepted".format(str(feeds[0].gen_id))))
sample_hash = ''.join([str(x) for x in nc.sample])
if FLAGS.evolutionary_search or (sample_hash not in total_cand_hash):
if sample_hash not in total_cand_hash:
total_cand.append(nc)
total_cand_hash.add(sample_hash)
if nc.score == 0.0 and FLAGS.evolutionary_search:
found_match = True
if not found_match and 1+nc.sample_feed.gen_id <= active_search_depth and (FLAGS.evolutionary_search or 0 < nc.score < feed.input_score):
assert nc.sample[0] != self.tokenizer.padToken, nc.sample
self.feed_queue.append(
ActiveSampleFeed(
input_feed = nc.sample,
input_features = nc.features,
input_score = nc.score,
gen_id = 1 + nc.sample_feed.gen_id,
)
)
self.addToDB(
active_feed_database.ActiveFeed.FromArgs(
tokenizer = self.tokenizer,
id = self.active_db.active_count,
input_feed = nc.sample_feed.input_feed,
input_features = nc.sample_feed.input_features,
sample = nc.sample,
output_features = nc.features,
sample_quality = nc.score,
target_benchmark = (self.feat_sampler.target_benchmark.name, self.feat_sampler.target_benchmark.contents),
target_features = self.feat_sampler.target_benchmark.features,
compile_status = True,
generation_id = nc.sample_feed.gen_id,
)
)
if environment.WORLD_RANK == 0:
self.tsne_monitor.plot()
self.feat_sampler.step_generation(best_cands)
# save state for this generation and re-loop for the next.
self.saveCheckpoint()
# Catch threads on last iteration.
if write_cache_proc and environment.WORLD_RANK == 0:
write_cache_proc.join()
if FLAGS.evaluate_candidates and write_eval_proc and environment.WORLD_RANK == 0:
write_eval_proc.join()
## Finished, save state, switch benchmark, return samples.
self.bench_idx += 1
if environment.WORLD_RANK == 0:
self.saveCheckpoint()
distrib.barrier()
self.feat_sampler.iter_benchmark(target_samples = total_cand)
return (np.repeat([org_inp], len(total_cand), axis = 0),
np.repeat([org_ids], len(total_cand), axis = 0),
[x.sample for x in total_cand],
[[]] * len(total_cand))
except KeyboardInterrupt:
self.raised_keyboard_int = True
if write_cache_proc and environment.WORLD_RANK == 0:
write_cache_proc.terminate()
if FLAGS.evaluate_candidates and write_eval_proc and environment.WORLD_RANK == 0:
write_eval_proc.terminate()
return (np.repeat([org_inp], len(total_cand), axis = 0),
np.repeat([org_ids], len(total_cand), axis = 0),
[x.sample for x in total_cand],
[[]] * len(total_cand))
except Exception as e:
l.logger().error(e)
self.raised_exception = e
return (np.repeat([org_inp], len(total_cand), axis = 0),
np.repeat([org_ids], len(total_cand), axis = 0),
[x.sample for x in total_cand],
[[]] * len(total_cand))
def initOrGetQueue(self, target_features: typing.Dict[str, float] = None) -> np.array:
"""
    If the feed queue is not initialized, initialize it by getting a new datapoint.
    Otherwise, do nothing, as feed_queue has already been loaded from the checkpoint.
    Adds the datapoint to the InputFeed table of the database.
Returns:
Starting input feed of sampling.
"""
if not self.feed_queue:
# Initialize feed_queue if empty.
if FLAGS.start_from_cached and target_features is not None:
# Get cached samples to start with an advantage for new benchmark.
cached_samples = [[x.sample, {':'.join(f.split(':')[:-1]): float(f.split(':')[-1]) for f in x.output_features.split('\n')}, -1] for x in self.active_db.get_data]
if len(cached_samples) == 0:
# If no cache, re-try without caching from target.
return self.initOrGetQueue()
else:
for idx, cs in enumerate(cached_samples):
cached_samples[idx][-1] = self.feat_sampler.calculate_distance(cs[1])
sorted_cache_samples = sorted(cached_samples, key = lambda x: x[-1])
# The queue will be no longer than the beam search width specified.
for scs in sorted_cache_samples[:self.sampler.config.sample_corpus.corpus_config.active.active_search_width]:
# Tokenize, pad, add start/end tokens to be ready for inference.
tokenized = self.tokenizer.TokenizeString(scs[0])
w_start_end = self._addStartEndToken(tokenized)
padded = self._padToMaxPosition(w_start_end)[:self.sampler.sequence_length]
if padded[0] == self.tokenizer.padToken:
l.logger().error("Pad token was found again at the beginning of the sequence.")
l.logger().error(scs[0])
l.logger().error(tokenized)
l.logger().error(w_start_end)
l.logger().error(padded)
encoded = self._padToMaxPosition(self._addStartEndToken([int(x) for x in tokenized]))[:self.sampler.sequence_length]
assert encoded[0] != self.tokenizer.padToken, encoded
self.feed_queue.append(
ActiveSampleFeed(
input_feed = encoded,
input_features = scs[1],
input_score = scs[-1],
gen_id = 0,
)
)
self.addToDB(
active_feed_database.ActiveInput.FromArgs(
tokenizer = self.tokenizer, id = self.active_db.input_count,
input_feed = encoded, input_features = scs[1],
)
)
else:
# If no caching is wanted, bring whatever the dataloader
# specified in the sampler's pbtxt wants. Usually this is a start
# text, but could also be a sampled datapoint from a dataset, DB etc.
try:
cf = next(self.loader).squeeze(0)
except StopIteration:
self.loader = iter(self.dataloader)
cf = next(self.loader).squeeze(0)
cf = [int(x) for x in cf]
assert cf[0] != self.tokenizer.padToken, cf
self.feed_queue.append(
ActiveSampleFeed(
input_feed = cf,
input_features = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(cf), [self.feat_sampler.feature_space])[self.feat_sampler.feature_space],
input_score = math.inf,
gen_id = 0,
)
)
self.addToDB(
active_feed_database.ActiveInput.FromArgs(
tokenizer = self.tokenizer, id = self.active_db.input_count,
input_feed = cf, input_features = self.feed_queue[-1].input_features,
)
)
l.logger().info("Feed queue input scores: {}".format(', '.join([str(round(c.input_score, 3)) for c in self.feed_queue])))
return self.feed_queue[0].input_feed
def collateInputData(self,
feed : typing.List[np.array],
wload_size : int,
sample_batch_per_feed : int,
) -> typing.Dict[str, torch.Tensor]:
"""
Create a full generation workload out of a sample feed.
If feed is already masked, then just repeat it across the whole workload.
If it is not masked, then feed is masked wload_size times.
Args:
feed: numpy array of input feed (expressed as list of a single np element),
or a list of numpys in case multiple workloads are merged.
      wload_size: Number of inputs that will be fed to the model in a single workload.
      sample_batch_per_feed: Number of masked variants drawn per feed within each batch.
Returns:
The tensor inputs dictionary filled for BERT.
"""
if self.feature_encoder:
target_features = self.feature_tokenizer.TokenizeFeatureVector(self.feat_sampler.target_benchmark.features, self.feat_sampler.feature_space, self.feature_sequence_length)
if self.tokenizer.maskToken in feed[0] or self.tokenizer.holeToken in feed[0]:
inputs = sequence_masking.MaskedSeqToBlob(
feed[0], self.tokenizer,
self.sampler.sequence_length,
self.max_position_embeddings
)
if self.feature_encoder:
inputs["input_features"] = target_features
inputs = {
k: torch.from_numpy(v).unsqueeze(0).repeat_interleave(self.sample_batch_size, dim = 0).unsqueeze(0).repeat_interleave(wload_size, dim = 0)
for k, v in inputs.items()
}
else:
inputs = {
'input_ids': [], 'input_mask': [], 'position_ids': [],
'mask_labels': [], 'masked_lm_lengths': [], 'next_sentence_labels': []
}
if self.feature_encoder:
inputs["input_features"] = []
try:
pool = multiprocessing.Pool()
for batch in pool.imap_unordered(
functools.partial(
dataload_worker, feed = feed,
func = self.func, batch = self.sample_batch_size,
batch_per_feed = sample_batch_per_feed
),range(wload_size)
):
if batch:
# convert dict values from np -> torch.Tensor.
out = {
k: torch.from_numpy(v).unsqueeze(0)
for (k, v) in batch[0].items()
}
for f in batch[1:]:
for k, v in f.items():
nt = torch.from_numpy(v).unsqueeze(0)
out[k] = torch.cat((out[k], nt), 0)
if self.feature_encoder:
out["input_features"] = torch.from_numpy(target_features).unsqueeze(0).repeat_interleave(out['input_ids'].shape[0], dim = 0)
for k in inputs.keys():
inputs[k].append(out[k])
for k, v in inputs.items():
s = torch.stack(v)
inputs[k] = s.view(-1, self.sample_batch_size, s.shape[-1])
pool.close()
pool.terminate()
except KeyboardInterrupt as e:
pool.close()
pool.terminate()
raise e
return inputs
def registerOutputData(self,
outputs : typing.Dict[str, typing.List[np.array]],
                         feeds               : typing.List[ActiveSampleFeed],
candidates : typing.List[ActiveSample],
rejected_candidates : typing.List[ActiveSample],
                         ) -> typing.Tuple[typing.List[int], typing.Optional[ActiveSample]]:
"""
Gets workload output from model.
In parallel, every sample is checked for compilability and features are extracted.
If sample compiles, it is stored as an active learning candidate.
    Args:
      outputs: Dictionary output of workload.
      feeds: The ActiveSampleFeed that produced each output, repeated once per sample.
      candidates: Compiling candidates; passed by reference and filled within this function.
      rejected_candidates: Non-compiling candidates; only kept for the eval_cand database.
    Returns:
      cm_rate: List of two elements that express compilation rate of workload.
        0th el: Total compiling.
        1st el: Total samples.
      better_found: Best candidate that improves on the feed's input score, if any.
"""
cm_rate = [0, 0]
pool = multiprocessing.Pool()
cm_rate[1] += len(outputs['generated_samples'])
better_found = None
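    # The candidate worker is chosen according to the active feature space below:
    # text-based (GreweFeatures), hidden-state based, or LLVM-IR based feature extraction.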
try:
if self.feat_sampler.feature_space == "HiddenState":
it = zip(
outputs['generated_samples'], outputs['sample_indices'],
outputs['input_ids'], outputs['masked_lm_lengths'],
outputs['hidden_state'], feeds
)
else:
it = zip(
outputs['generated_samples'], outputs['sample_indices'],
outputs['input_ids'], outputs['masked_lm_lengths'],
feeds
)
if self.feat_sampler.feature_space == "GreweFeatures":
candidate_worker = functools.partial(
text_candidate_worker,
tokenizer = self.tokenizer,
feature_space = self.feat_sampler.feature_space,
target_benchmark = self.feat_sampler.target_benchmark,
)
elif self.feat_sampler.feature_space == "HiddenState":
candidate_worker = functools.partial(
hidden_state_candidate_worker,
tokenizer = self.tokenizer,
feature_space = self.feat_sampler.feature_space,
target_benchmark = self.feat_sampler.target_benchmark,
)
else:
candidate_worker = functools.partial(
IR_candidate_worker,
tokenizer = self.tokenizer,
feature_space = self.feat_sampler.feature_space,
target_benchmark = self.feat_sampler.target_benchmark,
)
t = 0
for idx, batch in tqdm.tqdm((enumerate(pool.map(candidate_worker, it))), total = len(outputs['generated_samples']), desc = "Register Output Data", leave = False):
t = idx
if batch[0]:
cm_rate[0] += 1
candidates.append(batch[1])
if 0 < batch[1].score < batch[1].sample_feed.input_score:
if better_found is None or batch[1].score < better_found.score:
better_found = batch[1]
else:
if FLAGS.evaluate_candidates:
rejected_candidates.append(batch[1])
if FLAGS.features_standard_scaler:
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit([[float(y) for y in x.features.values()] for x in candidates + [self.feat_sampler.target_benchmark]])
target_feats = {k: v for k, v in zip(self.feat_sampler.target_benchmark.features.keys(), scaler.transform([[float(x) for x in self.feat_sampler.target_benchmark.features.values()]])[0])}
for idx, cd in enumerate(candidates):
outfeats = {k: v for k, v in zip(cd.features.keys(), scaler.transform([[float(x) for x in cd.features.values()]])[0])}
          candidates[idx] = candidates[idx]._replace(score = feature_sampler.calculate_distance(outfeats, target_feats, self.feat_sampler.feature_space))
pool.close()
pool.terminate()
except KeyboardInterrupt as e:
pool.close()
pool.terminate()
raise e
return cm_rate, better_found
def saveCheckpoint(self):
"""
Save feed queue checkpoint for easy restart.
"""
with open(self.sampler.corpus_directory / "gen_state.pkl", 'wb') as outf:
pickle.dump({'feed_queue': self.feed_queue, 'bench_idx': self.bench_idx}, outf)
self.candidate_monitor.saveCheckpoint()
self.tsne_monitor.saveCheckpoint()
self.comp_rate_mon.saveCheckpoint()
self.exec_time_mon.saveCheckpoint()
return
def loadCheckpoint(self):
"""
    Load the checkpointed feed queue, if one exists.
"""
if (self.sampler.corpus_directory / "gen_state.pkl").exists():
distrib.lock()
with open(self.sampler.corpus_directory / "gen_state.pkl", 'rb') as infile:
checkpoint = pickle.load(infile)
self.feed_queue = checkpoint['feed_queue']
self.bench_idx = checkpoint['bench_idx']
distrib.unlock()
else:
self.feed_queue = []
self.bench_idx = 1
return
def addToDB(self,
db_input: typing.Union[
active_feed_database.ActiveSamplingSpecs,
active_feed_database.ActiveInput,
active_feed_database.ActiveFeed
]
) -> None:
"""
    If it does not already exist, add the current sample state to the database.
"""
with self.active_db.get_session(commit = True) as session:
exists = session.query(
type(db_input)
).filter(type(db_input).sha256 == db_input.sha256).scalar() is not None
if not exists:
session.add(db_input)
return
def _saveCorpusRecord(self, masked_corpus: typing.Dict[str, np.array]) -> None:
"""Converts corpus nparrays to torch tensors and stores corpus to pt_record"""
torch.save(
[{k: torch.from_numpy(v) for (k, v) in inst.items()} for inst in masked_corpus['corpus']],
masked_corpus['file']
)
if FLAGS.write_text_dataset:
with open(masked_corpus['txt'], 'w') as file_writer:
for instance in masked_corpus['corpus']:
file_writer.write("'seen_in_training': {}\n'original_input': {}\n'input_ids': {}\n'input_mask': {}\n'position_ids': {}\n'mask_labels': {}\n'masked_lm_lengths': {}\n'next_sentence_labels': {}\n\n"
.format((True if instance['seen_in_training'] == 1 else False),
self.tokenizer.tokensToString(instance['original_input'], ignore_token = self.tokenizer.padToken),
self.tokenizer.tokensToString(instance['input_ids'], ignore_token = self.tokenizer.padToken),
instance['input_mask'],
instance['position_ids'],
instance['mask_labels'],
instance['masked_lm_lengths'],
instance['next_sentence_labels']
)
)
l.logger().info("Wrote {} instances ({} batches of {} datapoints) to {}"
.format(len(masked_corpus['corpus']), self.steps_per_epoch, self.training_opts.batch_size, masked_corpus['file']))
return
| 59,181 | 43.800908 | 205 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/model.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
import math
import typing
import numpy as np
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.models.torch_bert import activations
from deeplearning.benchpress.models.torch_bert import config
from deeplearning.benchpress.models.torch_bert import modeling_utils
from deeplearning.benchpress.models.torch_bert import compiler
from deeplearning.benchpress.util import logging as l
# import tensorrt as trt
# import pycuda.autoinit
# import pycuda.driver as cuda
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
ACT2FN = {
"gelu" : activations.gelu,
"relu" : torch.nn.functional.relu,
"swish" : activations.swish,
"gelu_new" : activations.gelu_new,
"mish" : mish
}
class BertEmbeddings(torch.nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = torch.nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = torch.nn.Embedding(config.max_position_embeddings, config.hidden_size)
# self.token_type_embeddings = torch.nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
# token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings # + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(torch.nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = torch.nn.Linear(config.hidden_size, self.all_head_size)
self.key = torch.nn.Linear(config.hidden_size, self.all_head_size)
self.value = torch.nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = torch.nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
attention_mask = encoder_attention_mask
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = torch.nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class BertSelfOutput(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = modeling_utils.find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = modeling_utils.prune_linear_layer(self.self.query, index)
self.self.key = modeling_utils.prune_linear_layer(self.self.key, index)
self.self.value = modeling_utils.prune_linear_layer(self.self.value, index)
self.output.dense = modeling_utils.prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.dense = torch.nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.dense = torch.nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
):
self_attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
layer_output = modeling_utils.apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = torch.nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=False,
output_hidden_states=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if getattr(self.config, "gradient_checkpointing", False):
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
head_mask[i],
encoder_hidden_states,
encoder_attention_mask,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
class BertPooler(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.dense = torch.nn.Linear(config.hidden_size, config.hidden_size)
self.activation = torch.nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class FeaturePositionalEncoding(torch.nn.Module):
def __init__(self, config):
super().__init__()
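    # Standard sinusoidal positional encoding: even embedding dimensions use sine, odd ones use
    # cosine, with geometrically increasing wavelengths across the embedding.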
position = torch.arange(config.feature_sequence_length).unsqueeze(1)
div_term = torch.exp(torch.arange(0, config.feature_embedding_size, 2) * (-math.log(10000.0) / config.feature_embedding_size))
pe = torch.zeros(config.feature_sequence_length, 1, config.feature_embedding_size)
pe[:, 0, 0::2] = torch.sin(position * div_term)
pe[:, 0, 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe)
self.dropout = torch.nn.Dropout(config.feature_dropout_prob)
return
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = x + self.pe[:x.size(0)]
return self.dropout(x)
class FeatureTransformer(torch.nn.Module):
def __init__(self, config):
super().__init__()
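    # Encodes a tokenized feature vector with a Transformer encoder, maps each position to a
    # single value, and tiles the result across the target sequence length (see forward()).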
## Encoding space
self.encoder_embedding = torch.nn.Embedding(
num_embeddings = config.feature_vocab_size,
embedding_dim = config.feature_embedding_size,
padding_idx = config.feature_pad_idx
)
self.encoder_pos_encoder = FeaturePositionalEncoding(config)
encoder_layers = torch.nn.TransformerEncoderLayer(
d_model = config.feature_embedding_size,
nhead = config.feature_num_attention_heads,
dim_feedforward = config.feature_transformer_feedforward,
dropout = config.feature_dropout_prob,
batch_first = True
)
encoder_norm = torch.nn.LayerNorm(
config.feature_embedding_size,
eps = config.feature_layer_norm_eps
)
self.encoder_transformer = torch.nn.TransformerEncoder(
encoder_layer = encoder_layers,
num_layers = config.feature_num_hidden_layers,
norm = encoder_norm,
)
## Decoder space
# self.decoder_embedding = torch.nn.Embedding(
# num_embeddings = config.feature_vocab_size,
# embedding_dim = config.feature_embedding_size,
# padding_idx = feature_pad_idx
# )
# self.encoder_pos_encoder = FeaturePositionalEncoding(config)
# decoder_layers = torch.nn.TransformerDecoderLayer(
# d_model = config.feature_embedding_size,
# nhead = config.feature_num_attention_heads,
# dim_feedforward = config.feature_transformer_feedforward,
# dropout = config.feature_dropout_prob,
# batch_first = True
# )
# decoder_norm = torch.nn.LayerNorm(
# config.feature_embedding_size,
# eps = config.feature_layer_norm_eps
# )
# self.decoder_transformer = torch.nn.TransformerDecoder(
# decoder_layer = decoder_layers,
# num_layers = config.feature_num_hidden_layers,
# norm = decoder_norm,
# )
self.mapper = torch.nn.Linear(config.feature_embedding_size, config.feature_vocab_size)
self.reducer = torch.nn.Linear(config.feature_vocab_size, 1)
self.transpose = lambda t: torch.reshape(t, (-1, 1, config.feature_sequence_length))
self.repeater = lambda t, y: t.repeat(1, y, 1)
self.embedding_size = config.feature_embedding_size
self.init_weights()
return
def init_weights(self) -> None:
initrange = 0.1
self.encoder_embedding.weight.data.uniform_(-initrange, initrange)
self.mapper.bias.data.zero_()
self.mapper.weight.data.uniform_(-initrange, initrange)
self.reducer.bias.data.zero_()
self.reducer.weight.data.uniform_(-initrange, initrange)
return
def forward(self,
features : torch.Tensor,
sequence_length : torch.Size,
features_mask : torch.Tensor = None,
features_key_padding_mask : torch.Tensor = None
) -> torch.Tensor:
embed = self.encoder_embedding(features) * math.sqrt(self.embedding_size)
pos_embed = self.encoder_pos_encoder(embed)
encoded = self.encoder_transformer(
pos_embed,
mask = features_mask,
src_key_padding_mask = features_key_padding_mask
)
mapped = self.mapper(encoded)
reduced = self.reducer(mapped)
reshaped = self.transpose(reduced)
output = self.repeater(reshaped, sequence_length)
return output
class BertPredictionHeadTransform(torch.nn.Module):
def __init__(self, config):
super().__init__()
if config.feature_encoder:
input_hidden_size = config.hidden_size + config.feature_sequence_length
else:
input_hidden_size = config.hidden_size
self.dense = torch.nn.Linear(input_hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = torch.nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
return
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertLMFeaturePredictionHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
# Transformer for raw features encoding.
self.feature_encoder = FeatureTransformer(config)
# BERT predictions transformation.
self.transform = BertPredictionHeadTransform(config)
## Res transform acts as a reducer for encoded_feature residual/skip connection.
self.res_transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
## Decoder maps hidden size to vocabulary size.
self.decoder = torch.nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.dbias = torch.nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.dbias
return
def forward(self, hidden_states, features):
encoded_features = self.feature_encoder(features, hidden_states.size(1))
res1 = torch.cat((hidden_states, encoded_features), -1)
hidden_states = self.transform(res1)
# res2 = torch.cat((hidden_states, encoded_features), -1)
# hidden_states = self.res_transform(res2)
hidden_states = self.decoder(hidden_states)
return hidden_states, encoded_features
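# Illustrative sketch (hypothetical helper, not referenced elsewhere in this module):
# shape flow of the feature prediction head above. Hidden states and encoded features
# are concatenated on the last axis, which is why BertPredictionHeadTransform's dense
# layer takes hidden_size + feature_sequence_length inputs when feature_encoder is on.
# gelu stands in for whatever config.hidden_act selects; sizes are arbitrary.
def _example_feature_head_shapes():
  import torch
  batch, seq_len, hidden, feat_len, vocab = 2, 64, 768, 256, 30522
  hidden_states = torch.randn(batch, seq_len, hidden)
  encoded_features = torch.randn(batch, seq_len, feat_len)
  res1 = torch.cat((hidden_states, encoded_features), -1)  # [batch, seq_len, hidden + feat_len]
  dense = torch.nn.Linear(hidden + feat_len, hidden)
  norm = torch.nn.LayerNorm(hidden)
  decoder = torch.nn.Linear(hidden, vocab, bias = False)
  logits = decoder(norm(torch.nn.functional.gelu(dense(res1))))
  return logits.shape                                       # [batch, seq_len, vocab]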
class BertOnlyMLMHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output, features):
prediction_scores = self.predictions(sequence_output)
return prediction_scores, None
class BertMLMFeatureHead(torch.nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMFeaturePredictionHead(config)
def forward(self, sequence_output, features):
prediction_scores, encoded_features = self.predictions(sequence_output, features)
return prediction_scores, encoded_features
class BertPreTrainedModel(modeling_utils.PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = config.BertConfig
base_model_prefix = "bert"
authorized_missing_keys = [r"position_ids"]
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (torch.nn.Linear, torch.nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, torch.nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, torch.nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def get_output(self,
input_ids,
attention_mask,
position_ids,
token_type_ids = None,
head_mask = None,
inputs_embeds = None,
output_attentions = None,
output_hidden_states = None,
) -> typing.Tuple[torch.FloatTensor, torch.FloatTensor]:
raise NotImplementedError("Abstract class")
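# Illustrative sketch (hypothetical helper, not referenced elsewhere in this module):
# PreTrainedModel.init_weights typically dispatches _init_weights over every submodule
# via torch.nn.Module.apply, which is how the normal / zeros / ones initialisation
# defined above reaches each Linear, Embedding and LayerNorm in the model.
def _example_apply_init(initializer_range = 0.02):
  import torch
  def _init(module):
    if isinstance(module, (torch.nn.Linear, torch.nn.Embedding)):
      module.weight.data.normal_(mean = 0.0, std = initializer_range)
    if isinstance(module, torch.nn.Linear) and module.bias is not None:
      module.bias.data.zero_()
  model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 2))
  model.apply(_init)
  return model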
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well
as a decoder, in which case a layer of cross-attention is added between
the self-attention layers, following the architecture described in `Attention is all you need`_ by Ashish Vaswani,
Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the
:obj:`is_decoder` argument of the configuration set to :obj:`True`.
To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an
:obj:`encoder_hidden_states` is then expected as an input to the forward pass.
.. _`Attention is all you need`:
https://arxiv.org/abs/1706.03762
"""
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask
is used in the cross-attention if the model is configured as a decoder.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
return (sequence_output, pooled_output) + encoder_outputs[1:]
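# Illustrative sketch (hypothetical helper, not referenced elsewhere in this module):
# what get_extended_attention_mask, called in BertModel.forward above, usually does with
# a [batch, seq_len] padding mask: broadcast it to [batch, 1, 1, seq_len] and turn kept
# positions into 0.0 and masked positions into a large negative bias that is added to
# the raw attention scores before softmax.
def _example_extended_attention_mask(attention_mask):
  import torch
  # attention_mask: [batch, seq_len] with 1 for real tokens and 0 for padding.
  extended = attention_mask[:, None, None, :].to(dtype = torch.float32)
  return (1.0 - extended) * -10000.0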
class BertForPreTraining(BertPreTrainedModel):
def __init__(self,
config,
tokenizer = None,
use_categorical : bool = False,
temperature : int = None,
target_lm : str = "hole",
without_label_head : bool = False,
):
super().__init__(config)
self.bert = BertModel(config)
if without_label_head is False:
if self.config.feature_encoder:
self.cls = BertMLMFeatureHead(config)
else:
self.cls = BertOnlyMLMHead(config)
else:
self.cls = None
if self.config.reward_compilation >= 0 or self.config.is_sampling:
self.compile_sampler = compiler.CompilationSampler(
tokenizer, use_categorical, temperature, target_lm
)
else:
self.compile_sampler = None
self.init_weights()
def get_output_embeddings(self):
if self.cls is not None:
return self.cls.predictions.decoder
else:
return None
def get_output(self,
input_ids,
attention_mask,
position_ids,
input_features = None,
token_type_ids = None,
head_mask = None,
inputs_embeds = None,
output_attentions = None,
output_hidden_states = None,
extract_hidden_state: bool = False,
) -> typing.Tuple[torch.FloatTensor, torch.FloatTensor]:
outputs = self.bert(
input_ids = input_ids,
attention_mask = attention_mask,
position_ids = position_ids,
token_type_ids = token_type_ids,
head_mask = head_mask,
inputs_embeds = inputs_embeds,
output_attentions = output_attentions,
output_hidden_states = output_hidden_states,
)
sequence_output, pooled_output = outputs[:2]
if self.cls is None or extract_hidden_state:
prediction_scores, encoded_features = None, None
else:
prediction_scores, encoded_features = self.cls(sequence_output, input_features)
return prediction_scores, encoded_features, sequence_output, pooled_output
def forward(
self,
input_ids = None,
attention_mask = None,
input_features = None,
token_type_ids = None,
position_ids = None,
head_mask = None,
inputs_embeds = None,
masked_lm_labels = None,
next_sentence_labels = None,
workload = None,
output_attentions = None,
output_hidden_states = None,
is_validation = False,
step = -1,
**kwargs
):
r"""
labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
next_sentence_labels (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see :obj:`input_ids` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Returns:
Examples::
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
>>> model = BertForPreTraining.from_pretrained('bert-base-uncased')
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
"""
if workload is not None:
input_ids, attention_mask, position_ids, input_features = workload
extract_hidden_state = kwargs.get('extract_hidden_state', False)
if extract_hidden_state:
## If using only for hidden state extraction.
prediction_scores, encoded_features, hidden_state, _ = self.get_output(
input_ids,
attention_mask,
position_ids,
extract_hidden_state = False, ## Don't forget to set this to true if you don't need prediction scores.
)
return prediction_scores, hidden_state
device = input_ids.get_device()
device = device if device >= 0 else 'cpu'
## If there is a sampling workload, load it directly to the compiler.
if workload is not None:
if self.cls is None:
raise ValueError("This mode requires a classification head.")
prediction_scores, encoded_features, hidden_states, attentions = self.get_output(
input_ids[0], attention_mask[0], position_ids[0], input_features[0] if input_features is not None else None,
)
bar = kwargs.get('bar', None)
return self.compile_sampler.generateSampleWorkload(
self,
device,
input_ids,
attention_mask,
input_features,
prediction_scores,
position_ids[0],
bar = bar,
)
## Otherwise select one other mode.
prediction_scores, encoded_features, hidden_states, attentions = self.get_output(
input_ids, attention_mask, position_ids, input_features,
token_type_ids, head_mask, inputs_embeds,
output_attentions, output_hidden_states
)
## [DEPRECATED]: Training with a compile sampler has been shown not to work.
if not is_validation and self.compile_sampler and step >= self.config.reward_compilation and not self.config.is_sampling:
if self.cls is None:
raise ValueError("This mode requires a classification head.")
samples, compile_flag, masked_lm_labels = self.compile_sampler.generateTrainingBatch(
self,
device,
input_ids.cpu(),
input_features.cpu(),
prediction_scores.cpu(),
torch.clone(position_ids),
masked_lm_labels.cpu().numpy(),
)
loss_fct = torch.nn.CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
total_loss = masked_lm_loss
return {
'masked_lm_loss' : masked_lm_loss,
'total_loss' : total_loss,
'prediction_logits' : prediction_scores,
'hidden_states' : hidden_states,
'attentions' : attentions,
'compile_status' : torch.LongTensor(compile_flag).to(device),
'generated_samples' : torch.LongTensor(samples).to(device),
'batch_compilation_rate' : torch.full((1,), float(sum(compile_flag)) / len(compile_flag), dtype = torch.float).to(device),
# 'sample_indices' : [0],
}
## Sampling without a workload. Not really useful anymore.
elif not is_validation and self.compile_sampler and self.config.is_sampling:
if self.cls is None:
raise ValueError("This mode requires a classification head.")
samples, sample_indices, scores_history = self.compile_sampler.generateSampleBatch(
self,
device,
input_ids,
input_features,
prediction_scores,
position_ids,
)
return {
'generated_samples': samples,
'sample_indices' : sample_indices,
}
## Training mode or Validation mode.
else:
if masked_lm_labels is not None:
loss_fct = torch.nn.CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
total_loss = masked_lm_loss
else:
masked_lm_loss, total_loss = None, None
return {
'masked_lm_loss' : masked_lm_loss,
'total_loss' : total_loss,
'prediction_logits' : prediction_scores,
'hidden_states' : hidden_states,
}
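# Illustrative sketch (hypothetical helper, not referenced elsewhere in this module):
# the masked-LM loss in forward() relies on CrossEntropyLoss' default ignore_index of
# -100, so only positions whose label is a real vocabulary id (the [MASK]/[HOLE] targets)
# contribute to the loss; every other position in masked_lm_labels is skipped.
def _example_masked_lm_loss():
  import torch
  vocab_size = 10
  prediction_scores = torch.randn(2, 4, vocab_size)      # [batch, seq_len, vocab]
  masked_lm_labels = torch.tensor([[-100, 3, -100, -100],
                                   [-100, -100, 7, -100]])
  loss_fct = torch.nn.CrossEntropyLoss()
  return loss_fct(prediction_scores.view(-1, vocab_size), masked_lm_labels.view(-1))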
class BertForPreTrainingTRT(BertForPreTraining):
def __init__(self, config, tokenizer = None, use_categorical = False, temperature = None):
super().__init__(config, tokenizer=tokenizer, use_categorical=use_categorical, temperature=temperature)
self.forward = self._forward_pytorch
self.get_output = self._get_output_pytorch
self.TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
def init_engine(self, cache, device_id, batch_size, sequence_length, vocab_size, max_position_embeddings):
self.engine_path = cache.path / f'active_bert.{device_id}.engine'
self.model_onnx_path = cache.path / f'active_bert.{device_id}.onnx'
if not self.engine_path.exists():
self._create_engine(batch_size, sequence_length, vocab_size, max_position_embeddings)
self.runtime = trt.Runtime(self.TRT_LOGGER)
with open(self.engine_path, 'rb') as f:
self.engine = self.runtime.deserialize_cuda_engine(f.read())
self.stream = cuda.Stream()
self.inputs = []
self.outputs = []
self.bindings = []
for binding in self.engine:
shape = self.engine.get_binding_shape(binding)
size = trt.volume(shape)# * batch_size
dtype = trt.nptype(self.engine.get_binding_dtype(binding))
host_mem = cuda.pagelocked_empty(size, dtype).reshape(shape)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
self.bindings.append(int(device_mem))
# Append to the appropriate list.
if self.engine.binding_is_input(binding):
self.inputs.append((host_mem, device_mem))
else:
self.outputs.append((host_mem, device_mem))
# Override the pytorch module () operator
self.__call__ = self._forward_trt
self.forward = self._forward_trt
self.get_output = self._get_output_trt
def _create_engine(self, batch_size, sequence_length, vocab_size, max_position_embeddings):
with torch.no_grad():
dims = (batch_size, sequence_length)
input_ids = torch.autograd.Variable(torch.randint(vocab_size, dims)).cuda()
attention_mask = torch.autograd.Variable(torch.ones(dims)).cuda()
position_ids = torch.autograd.Variable(torch.randint(max_position_embeddings, dims)).cuda()
args = (input_ids, attention_mask, position_ids)
inputs = ['input_ids', 'attention_mask', 'position_ids']
outputs = ['prediction_scores']
dynamic_axes = {
'input_ids': {0: 'batch'},
'attention_mask': {0: 'batch'},
'position_ids': {0: 'batch'},
'prediction_scores':{0: 'batch'}
}
#out = torch.onnx.export(self.sample.model, args=args, f=model_onnx_path, input_names=inputs, output_names=outputs, dynamic_axes=dynamic_axes)
out = torch.onnx.export(self, args=args, f=self.model_onnx_path, input_names=inputs, output_names=outputs)
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(self.TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, self.TRT_LOGGER) as parser:
with open(self.model_onnx_path, 'rb') as model_onnx:
if not parser.parse(model_onnx.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
with trt.Builder(self.TRT_LOGGER) as builder, builder.create_builder_config() as config:
config.max_workspace_size = 1 << 29 # This determines the amount of memory available to the builder when building an optimized engine and should generally be set as high as possible.
with builder.build_engine(network, config) as engine:
with open(self.engine_path, 'wb') as f:
f.write(engine.serialize())
def _get_output_pytorch(self,
input_ids,
attention_mask,
position_ids,
token_type_ids = None,
head_mask = None,
inputs_embeds = None,
output_attentions = None,
output_hidden_states = None,
) -> typing.Tuple[torch.FloatTensor, torch.FloatTensor]:
outputs = self.bert(
input_ids = input_ids,
attention_mask = attention_mask,
position_ids = position_ids,
token_type_ids = token_type_ids,
head_mask = head_mask,
inputs_embeds = inputs_embeds,
output_attentions = output_attentions,
output_hidden_states = output_hidden_states,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
return prediction_scores, seq_relationship_score, outputs[0], outputs[1]
def _forward_pytorch(
self,
input_ids,
attention_mask,
position_ids
):
prediction_scores, _, _, _ = self._get_output_pytorch(input_ids, attention_mask, position_ids)
return prediction_scores
def _get_output_trt(self,
input_ids,
attention_mask,
position_ids
) -> typing.Tuple[torch.FloatTensor, torch.FloatTensor]:
np.copyto(self.inputs[0][0], input_ids.cpu())
np.copyto(self.inputs[1][0], attention_mask.cpu())
np.copyto(self.inputs[2][0], position_ids.cpu())
for inp in self.inputs:
cuda.memcpy_htod_async(inp[1], inp[0], self.stream)
self.context.execute_async_v2(bindings=self.bindings, stream_handle=self.stream.handle)
cuda.memcpy_dtoh_async(self.outputs[0][0], self.outputs[0][1], self.stream)
self.stream.synchronize()
return torch.tensor(self.outputs[0][0]).cpu(), None, None, None
def _forward_trt(
self,
input_ids = None,
attention_mask = None,
token_type_ids = None,
position_ids = None,
head_mask = None,
inputs_embeds = None,
masked_lm_labels = None,
next_sentence_labels = None,
output_attentions = None,
output_hidden_states = None,
is_validation = False,
is_live = False,
step = -1,
**kwargs
):
if is_validation or not self.compile_sampler or not self.config.is_sampling:
raise NotImplementedError
with self.engine.create_execution_context() as self.context:
prediction_scores, _, _, _ = self._get_output_trt(input_ids, attention_mask, position_ids)
device = input_ids.get_device()
samples, sample_indices, scores_history = self.compile_sampler.generateSampleBatch(
self,
input_ids.get_device(),
input_ids.cpu(),
prediction_scores.cpu(),
position_ids,
is_live,
)
return {
'prediction_scores' : scores_history, # This is mainly used for live sampling. Else, watch out!
'generated_samples' : samples,
'sample_indices' : sample_indices,
}
| 44,186 | 39.316606 | 188 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/config.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
class BertConfig(object):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.
It is used to instantiate a BERT model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 30522):
Vocabulary size of the BERT model. Defines the number of different tokens that
can be represented by the :obj:`input_ids` passed to the forward method of :class:`~transformers.BertModel`.
hidden_size (:obj:`int`, optional, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, optional, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, optional, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, optional, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
The non-linear activation function (function or string) in the encoder and pooler.
If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, optional, defaults to 512):
The maximum sequence length that this model might ever be used with.
Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, optional, defaults to 2):
The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
The epsilon used by the layer normalization layers.
gradient_checkpointing (:obj:`bool`, optional, defaults to False):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
Example::
>>> from transformers import BertModel, BertConfig
>>> # Initializing a BERT bert-base-uncased style configuration
>>> configuration = BertConfig()
>>> # Initializing a model from the bert-base-uncased style configuration
>>> model = BertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
Note:
A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights.
It only affects the model's configuration.
Class attributes (overridden by derived classes)
- **model_type** (:obj:`str`): An identifier for the model type, serialized into the JSON file, and used to
recreate the correct object in :class:`~transformers.AutoConfig`.
Args:
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all hidden-states.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all attentions.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
add_cross_attention (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models that can be used as decoder models within the `:class:~transformers.EncoderDecoderModel` class, which consists of all models in ``AUTO_MODELS_FOR_CAUSAL_LM``.
tie_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`)
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder and decoder model to have the exact same parameter names.
prune_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list
of heads to prune in said layer.
For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer
2.
xla_device (:obj:`bool`, `optional`):
A flag to indicate if TPU are available or not.
chunk_size_feed_forward (:obj:`int`, `optional`, defaults to :obj:`0`):
The chunk size of all feed forward layers in the residual attention blocks.
A chunk size of :obj:`0` means that the feed forward layer is not chunked.
A chunk size of n means that the feed forward layer processes :obj:`n` < sequence_length embeddings at a time.
For more information on feed forward chunking, see `How does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
Parameters for sequence generation
- **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by
default in the :obj:`generate` method of the model.
- **min_length** (:obj:`int`, `optional`, defaults to 10) -- Minimum length that will be used by
default in the :obj:`generate` method of the model.
- **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in
the :obj:`generate` method of the model. Whether or not to use sampling ; use greedy decoding otherwise.
- **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by
default in the :obj:`generate` method of the model. Whether to stop the beam search when at least
``num_beams`` sentences are finished per batch or not.
- **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be
used by default in the :obj:`generate` method of the model. 1 means no beam search.
- **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to module the next token
probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly
positive.
- **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to
keep for top-k-filtering that will be used by default in the :obj:`generate` method of the model.
- **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the
:obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens
with probabilities that add up to ``top_p`` or higher are kept for generation.
- **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty
that will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty.
- **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that
will be used by default in the :obj:`generate` method of the model.
- **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default
in the :obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of
that size can only occur once.
- **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be
generated that will be used by default in the :obj:`generate` method of the model. In order to get the
tokens of the words that should not appear in the generated text, use
:obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
- **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed
returned sequences for each element in the batch that will be used by default in the :obj:`generate`
method of the model.
Parameters for fine-tuning tasks
- **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the
model pretrained weights.
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be
used when converting from an original (TensorFlow or PyTorch) checkpoint.
- **id2label** (:obj:`List[str]`, `optional`) -- A map from index (for instance prediction index, or target
index) to label.
- **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model.
- **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model,
typically for a classification task.
- **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for
the current task.
Parameters linked to the tokenizer
- **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each
text before calling the model.
- **bos_token_id** (:obj:`int`, `optional`) -- The id of the `beginning-of-stream` token.
- **pad_token_id** (:obj:`int`, `optional`) -- The id of the `padding` token.
- **eos_token_id** (:obj:`int`, `optional`) -- The id of the `end-of-stream` token.
- **decoder_start_token_id** (:obj:`int`, `optional`)) -- If an encoder-decoder model starts decoding with
a different token than `bos`, the id of that token.
PyTorch specific parameters
- **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be
used with Torchscript.
TensorFlow specific parameters
- **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should
use BFloat16 scalars (only used by some TensorFlow models).
"""
model_type = "bert"
def __init__(
self,
vocab_size,
hidden_size,
num_hidden_layers,
num_attention_heads,
intermediate_size,
hidden_act,
hidden_dropout_prob,
attention_probs_dropout_prob,
max_position_embeddings,
pad_token_id,
type_vocab_size,
initializer_range,
layer_norm_eps,
**kwargs
):
## Bert-specific attributes
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.gradient_checkpointing = kwargs.pop("gradient_checkpointing", False)
# Attributes with defaults
self.reward_compilation = kwargs.pop("reward_compilation", -1)
self.is_sampling = kwargs.pop("is_sampling", False)
self.return_dict = kwargs.pop("return_dict", False)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.use_cache = kwargs.pop("use_cache", True) # Not used by all models
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
# Attributes for feature vector encoding
self.feature_encoder = kwargs.pop("feature_encoder", False)
self.feature_sequence_length = kwargs.pop("feature_sequence_length", 256)
self.feature_embedding_size = kwargs.pop("feature_embedding_size", 512)
self.feature_pad_idx = kwargs.pop("feature_pad_idx", -1)
self.feature_dropout_prob = kwargs.pop("feature_dropout_prob", 0.1)
self.feature_vocab_size = kwargs.pop("feature_vocab_size", 768)
self.feature_num_attention_heads = kwargs.pop("feature_num_attention_heads", 4)
self.feature_transformer_feedforward = kwargs.pop("feature_transformer_feedforward", 2048)
self.feature_layer_norm_eps = kwargs.pop("feature_layer_norm_eps", 1e-5)
self.feature_num_hidden_layers = kwargs.pop("feature_num_hidden_layers", 2)
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forwar", 0)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# TPU arguments
self.xla_device = kwargs.pop("xla_device", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
l.logger().error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@classmethod
def from_dict(cls, bert_dict, **extra_args):
config = BertConfig(
vocab_size = bert_dict['vocab_size'],
hidden_size = bert_dict['hidden_size'],
num_hidden_layers = bert_dict['num_hidden_layers'],
num_attention_heads = bert_dict['num_attention_heads'],
intermediate_size = bert_dict['intermediate_size'],
hidden_act = bert_dict['hidden_act'],
hidden_dropout_prob = bert_dict['hidden_dropout_prob'],
attention_probs_dropout_prob = bert_dict['attention_probs_dropout_prob'],
max_position_embeddings = bert_dict['max_position_embeddings'],
type_vocab_size = bert_dict['type_vocab_size'],
initializer_range = bert_dict['initializer_range'],
layer_norm_eps = bert_dict['layer_norm_eps'],
pad_token_id = bert_dict['pad_token_id'],
**extra_args,
)
return config
@property
def use_return_dict(self) -> bool:
"""
:obj:`bool`: Whether or not return :class:`~transformers.file_utils.ModelOutput` instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
:obj:`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
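# Illustrative usage sketch (hypothetical values, not referenced elsewhere in this module):
# building a configuration from a plain dictionary and enabling the feature encoder through
# the extra keyword arguments that from_dict forwards to the constructor.
def _example_config_from_dict():
  bert_dict = {
    'vocab_size'                   : 30522,
    'hidden_size'                  : 768,
    'num_hidden_layers'            : 12,
    'num_attention_heads'          : 12,
    'intermediate_size'            : 3072,
    'hidden_act'                   : 'gelu',
    'hidden_dropout_prob'          : 0.1,
    'attention_probs_dropout_prob' : 0.1,
    'max_position_embeddings'      : 512,
    'type_vocab_size'              : 2,
    'initializer_range'            : 0.02,
    'layer_norm_eps'               : 1e-12,
    'pad_token_id'                 : 0,
  }
  return BertConfig.from_dict(bert_dict, feature_encoder = True, feature_sequence_length = 256)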
| 19,055 | 56.225225 | 273 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/compiler.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tqdm
import typing
import pathlib
import concurrent.futures
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"sample_indices_limit",
None,
"Hard-stop model generating more indices per sample than this specified integer."
)
class CompilationSampler(object):
"""
Compilation driven generation handler.
Used during training to iteratively fill a sequence
and feed to Clang for compilation status.
Also used during sampling to fill sequence and get
compilation status.
"""
def __init__(self,
tokenizer : tokenizers.TokenizerBase,
use_categorical : bool,
temperature : float,
target_lm : str,
):
self.tokenizer = tokenizer
self.temperature = temperature
self.use_categorical = use_categorical
if target_lm == "hole":
self.step_batch = self.StepHoleSeq
elif target_lm == "mask":
self.step_batch = self.StepMaskSeq
else:
raise KeyError(target_lm)
return
def argmax(self, t):
"""Sample argmax from a tensor."""
if self.use_categorical:
try:
ct = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
temperature = self.temperature if self.temperature is not None else 1.0,
logits = t,
validate_args = False if "1.9." in torch.__version__ else None,
).sample()
except ValueError as e:
dump_cf = ""
dump_types = ""
p = pathlib.Path("./dump_argmax_error.log").absolute()
if not p.exists():
l.logger().error(t.shape)
l.logger().error(p)
for d0 in t:
for d1 in d0:
dump_cf += str(d1) + ", "
if isinstance(d1, torch.Tensor):
dump_types += str(d1.type()) + ", "
else:
dump_types += str(type(d1)) + ", "
with open(p, 'w') as outf:
outf.write(str(t.shape) + "\n\n\n" + dump_cf + "\n\n\n" + dump_types)
raise e
return torch.argmax(ct, dim = -1)
def checkIfBatchCompiles(self,
sample: np.array
) -> int:
"""Sends a filled sequence to the compiler"""
try:
stdout = opencl.Compile(self.tokenizer.ArrayToCode(sample))
return 1
except ValueError:
return 0
def generateTrainingBatch(self,
model : typing.TypeVar("model.BertPreTrainedModel"),
device : torch.device,
input_ids : torch.LongTensor,
input_features : torch.LongTensor,
prediction_scores : torch.FloatTensor,
position_ids : torch.LongTensor,
masked_lm_labels : torch.LongTensor,
) -> typing.Tuple[typing.List[np.array], typing.List[int]]:
batch_size, sequence_length = tuple(input_ids.shape)
with concurrent.futures.ThreadPoolExecutor() as executor:
jobs = [executor.submit(self.iterTrainingSeq,
model = model,
device = device,
input_ids = input_ids [i],
input_features = input_features [i] if input_features else None,
prediction_scores = prediction_scores[i],
position_ids = position_ids [i],
masked_lm_labels = masked_lm_labels [i],
) for i in range(batch_size)]
results = [j.result() for j in jobs]
samples = [x.numpy() for (x, _, _) in results]
compile_flag = [y for (_, y, _) in results]
masked_lm_labels = torch.LongTensor([z for (_, _, z) in results]).to(device)
return samples, compile_flag, masked_lm_labels
def iterTrainingSeq(self,
model : typing.TypeVar("model.BertPreTrainedModel"),
device : torch.device,
input_ids : torch.LongTensor,
input_features : torch.LongTensor,
prediction_scores : torch.FloatTensor,
position_ids : torch.LongTensor,
masked_lm_labels : torch.LongTensor,
) -> typing.Tuple[torch.LongTensor, int]:
"""
Main training sequence filling loop.
Function takes model's initial input, prediction and states.
Fills input sequence with step predictions and keeps asking
iteratively for predictions until target [MASK] or [HOLE] tokens
are closed.
Compiler is invoked for final sequence to get binary compilation status.
##!! This function is designed to work with multithreading and exercises
said functionalities on a single sequence. CANNOT be applied to the
whole batch at the same time.
"""
new_holes, next_input_ids, attention_mask = self.StepTrainingSeq(input_ids, prediction_scores)
with torch.no_grad():
while new_holes:
next_prediction_scores, _, _, _ = model.get_output(
next_input_ids.to(device), attention_mask.to(device), position_ids, input_features
)
new_holes, next_input_ids, attention_mask = self.StepTrainingSeq(
next_input_ids[0], next_prediction_scores[0],
)
compile_flag = self.checkIfBatchCompiles(next_input_ids[0].numpy())
if compile_flag:
masked_lm_labels = np.full(masked_lm_labels.shape, -100, dtype = np.int64)
return next_input_ids[0], compile_flag, masked_lm_labels
def StepTrainingSeq(self,
seq : torch.LongTensor,
prediction_scores : torch.FloatTensor,
) -> typing.Tuple[bool, torch.LongTensor, np.array]:
"""
Applies step predictions to input sequence.
Specifically optimized for training; does not compute sample indices for speed-up.
"""
seq_length = tuple(seq.shape)[0]
allowed_incr = (seq_length - int(torch.where(seq==self.tokenizer.padToken)[0][0])
if self.tokenizer.padToken in seq
else 0)
endTokens = self.tokenizer.metaTokenValues
closed_hole = np.zeros(seq_length, dtype=bool)
new_hole = np.zeros(seq_length, dtype=bool)
temp_seq = seq.numpy().copy()
for target_idx in torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]:
idx = int(target_idx)
prediction = int(self.argmax(prediction_scores[target_idx]))
is_hole = temp_seq[idx] == self.tokenizer.holeToken
if prediction in endTokens:
# Model predicted sth that will close the hole.
closed_hole[idx] = True
continue
# We replace the hole with a prediction
temp_seq[idx] = prediction
rem_adds = allowed_incr + np.sum(closed_hole) - np.sum(new_hole)
if is_hole and rem_adds:
# if this was a hole and we have more empty space, reinsert the hole
new_hole[idx] = True
new_seq = np.full(seq_length, self.tokenizer.padToken, dtype=np.int64)
new_idx = 0
for idx, t in enumerate(temp_seq):
if closed_hole[idx]:
continue
try:
new_seq[new_idx] = t
except IndexError:
l.logger().info("seq: {}".format(self.tokenizer.tokensToString([x for x in seq.cpu().numpy()])))
l.logger().info("temp_seq {}".format(self.tokenizer.tokensToString([x for x in temp_seq])))
l.logger().info("pred idx: {}".format(torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]))
l.logger().info("pred_toks {}".format(self.tokenizer.tokensToString([int(self.argmax(prediction_scores[idx])) for idx in torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]])))
l.logger().info("allowed_incr: {}".format(allowed_incr))
l.logger().info("new_hole: {}".format(new_hole))
l.logger().info("closed_hole: {}".format(closed_hole))
new_idx += 1
if new_hole[idx]:
try:
new_seq[new_idx] = self.tokenizer.holeToken
except IndexError:
l.logger().warn("seq: {}".format(self.tokenizer.tokensToString([x for x in seq.cpu().numpy()])))
l.logger().warn("temp_seq {}".format(self.tokenizer.tokensToString([x for x in temp_seq])))
l.logger().warn("pred idx: {}".format(torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]))
l.logger().warn("pred_toks {}".format(self.tokenizer.tokensToString([int(self.argmax(prediction_scores[idx])) for idx in torch.where((seq == self.tokenizer.holeToken) | (seq == self.tokenizer.maskToken))[0]])))
l.logger().warn("allowed_incr: {}".format(allowed_incr))
l.logger().warn("new_hole: {}".format(new_hole))
l.logger().warn("closed_hole: {}".format(closed_hole))
new_idx += 1
if new_idx >= seq_length:
break
new_seq = torch.LongTensor([new_seq])
attention_mask = (new_seq != self.tokenizer.padToken)
return np.any(new_hole), new_seq, attention_mask
def generateSampleBatch(self,
model : typing.TypeVar("model.BertPreTrainedModel"),
device : torch.device,
input_ids : torch.LongTensor,
input_features : torch.LongTensor,
prediction_scores : torch.FloatTensor,
position_ids : torch.LongTensor,
is_live : bool,
) -> typing.Tuple[typing.List[np.array], typing.List[typing.List[int]]]:
"""
Get a batch of input ids and iteratively fill the holes and return a batch of samples.
"""
batch_size, sequence_length = tuple(input_ids.shape)
input_idxs = torch.arange(batch_size).to(device)
sample_indices = torch.full((batch_size, sequence_length), self.tokenizer.padToken, dtype = torch.int64).to(device)
res_idx = 0
samples = torch.zeros_like(input_ids)
new_holes = self.step_batch(input_ids, input_idxs, sample_indices, None, prediction_scores, device)
open_holes = torch.where(new_holes == True)[0]
closed_holes = torch.where(new_holes == False)[0]
samples[res_idx: res_idx + len(closed_holes)] = input_ids[closed_holes]
res_idx += len(closed_holes)
input_ids = torch.index_select(input_ids, 0, open_holes.to(device))
attention_mask = (input_ids != self.tokenizer.padToken)
while torch.any(new_holes):
prediction_scores, _, _, _ = model.get_output(
input_ids, attention_mask, position_ids[:len(input_ids)], input_features,
)
new_holes = self.step_batch(input_ids, input_idxs, sample_indices, None, prediction_scores, device)
open_holes = torch.where(new_holes == True)[0]
closed_holes = torch.where(new_holes == False)[0]
samples[res_idx: res_idx + len(closed_holes)] = input_ids[closed_holes]
res_idx += len(closed_holes)
input_ids = torch.index_select(input_ids, 0, open_holes.to(device))
attention_mask = (input_ids != self.tokenizer.padToken)
return samples, sample_indices, None
def generateSampleWorkload(self,
model : typing.TypeVar("model.BertPreTrainedModel"),
device : torch.device,
workload_input_ids : torch.LongTensor,
workload_attention_mask : torch.LongTensor,
workload_input_features : torch.LongTensor,
prediction_scores : torch.FloatTensor,
position_ids : torch.LongTensor,
bar : tqdm.tqdm = None,
) -> typing.Tuple[typing.List[np.array], typing.List[typing.List[int]]]:
"""
This function receives a full workload of input ids to be sampled.
Heavy optimisations are performed to keep the GPU busy at all times.
The workload is streamed online and when a sequence is finished it is replaced
with a new one from the workload queue.
Returns a full workload of sampled instances.
"""
# [workload_size x batch_size x sequence_length]
wload_size, batch_size, sequence_length = tuple(workload_input_ids.shape)
# Also compute feature embeddings sequence length.
if workload_input_features is not None:
_, _, feature_sequence_length = tuple(workload_input_features.shape)
# Number of sequences
nseq = wload_size * batch_size
# Iteration idx of workload
w_idx = batch_size
# Get current input_ids - attention mask.
input_ids = workload_input_ids[0]
input_idxs = torch.arange(batch_size).to(device)
attention_mask = workload_attention_mask[0]
if workload_input_features is not None:
input_features = workload_input_features[0]
else:
input_features = None
# sample indices array that will be returned.
sample_indices = torch.full((nseq, sequence_length), self.tokenizer.padToken, dtype = torch.int64).to(device)
if FLAGS.sample_indices_limit is not None:
sidx_length = torch.full((batch_size, 1), 0, dtype = torch.int64).to(device)
# Workload of input_ids and attention_mask pairs.
# queue input_idxs ensure direct ordering from inputs -> outputs.
queue_input_ids = torch.reshape(workload_input_ids, (1, nseq, sequence_length)).squeeze(0)
queue_input_idxs = torch.arange(nseq).to(device)
queue_attention_mask = torch.reshape(workload_attention_mask, (1, nseq, sequence_length)).squeeze(0)
if workload_input_features is not None:
queue_input_features = torch.reshape(workload_input_features, (1, nseq, feature_sequence_length)).squeeze(0)
#! This is the return queue [nseq x sequence_length].
queue = torch.zeros(tuple(queue_input_ids.shape), dtype = torch.int64).to(device)
new_holes = self.step_batch(
input_ids,
input_idxs,
sample_indices,
sidx_length if FLAGS.sample_indices_limit else None,
prediction_scores,
device
)
open_holes = torch.where(new_holes == True)[0].to(device)
closed_holes = torch.where(new_holes == False)[0]
for i in closed_holes:
queue[input_idxs[i]] = input_ids[i]
if bar:
bar.update(1)
input_ids = torch.index_select(input_ids, 0, open_holes)
input_idxs = torch.index_select(input_idxs, 0, open_holes)
attention_mask = (input_ids != self.tokenizer.padToken)
if input_features is not None:
input_features = torch.index_select(input_features, 0, open_holes)
if FLAGS.sample_indices_limit:
sidx_length = torch.index_select(sidx_length, 0, open_holes)
res = batch_size - len(input_ids)
if res > 0:
input_ids = torch.cat((input_ids, queue_input_ids[w_idx: w_idx + res]), 0)
input_idxs = torch.cat((input_idxs, queue_input_idxs[w_idx: w_idx + res]), 0)
attention_mask = torch.cat((attention_mask, queue_attention_mask[w_idx: w_idx + res]), 0)
if input_features is not None:
input_features = torch.cat((input_features, queue_input_features[w_idx: w_idx + res]), 0)
if FLAGS.sample_indices_limit:
sidx_length = torch.cat((sidx_length, torch.full((res, 1), 0, dtype = torch.int64).to(device)), 0)
w_idx += res
while w_idx < nseq or torch.any(new_holes):
prediction_scores, _, _, _ = model.get_output(
input_ids, attention_mask, position_ids[:len(input_ids)], input_features
)
# Array of new hole existence per seq idx
new_holes = self.step_batch(
input_ids,
input_idxs,
sample_indices,
sidx_length if FLAGS.sample_indices_limit else None,
prediction_scores,
device
)
# Fill these holes.
open_holes = torch.where(new_holes == True)[0].to(device)
# Those are done.
closed_holes = torch.where(new_holes == False)[0]
# Add to return queue those that have finished.
for i in closed_holes:
queue[input_idxs[i]] = input_ids[i]
if bar:
bar.update(1)
input_ids = torch.index_select(input_ids, 0, open_holes)
input_idxs = torch.index_select(input_idxs, 0, open_holes)
attention_mask = (input_ids != self.tokenizer.padToken)
if input_features is not None:
input_features = torch.index_select(input_features, 0, open_holes)
if FLAGS.sample_indices_limit:
sidx_length = torch.index_select(sidx_length, 0, open_holes)
res = batch_size - len(input_ids)
if res > 0:
input_ids = torch.cat((input_ids, queue_input_ids[w_idx: w_idx + res]), 0)
input_idxs = torch.cat((input_idxs, queue_input_idxs[w_idx: w_idx + res]), 0)
attention_mask = torch.cat((attention_mask, queue_attention_mask[w_idx: w_idx + res]), 0)
if input_features is not None:
input_features = torch.cat((input_features, queue_input_features[w_idx: w_idx + res]), 0)
if FLAGS.sample_indices_limit:
sidx_length = torch.cat((sidx_length, torch.full((res, 1), 0, dtype = torch.int64).to(device)), 0)
w_idx += res
return queue, sample_indices
def StepHoleSeq(self,
batch : torch.LongTensor,
batch_idxs : torch.LongTensor,
sample_indices : torch.LongTensor,
indices_lengths : torch.LongTensor,
prediction_scores : torch.LongTensor,
device,
) -> typing.Tuple[
bool,
torch.LongTensor,
np.array,
]:
"""
Applies sample step with hole predictions to input batch.
!!!!!!WARNING!!!!!
This function works appropriately ONLY for 1 [HOLE] per sequence.
If more holes existed, then further operations would be needed to
re-calculate the subsequent hole indices, which would lead to unnecessary
operations. Removing this feature keeps things faster for 1 hole scenario.
"""
endTokens = self.tokenizer.metaTokenValues
# Array of boolean values, shows where holes are still left.
new_hole = torch.zeros(len(batch), dtype=torch.bool)
# [seq_idx, hole_idx] of batch.
idxs, targets = torch.where(batch == self.tokenizer.holeToken)
# Predictions for these indices.
predictions = self.argmax(prediction_scores[(idxs, targets)])
for seq_idx, el_idx in zip(idxs, targets):
# seq_idx -> indices within the batch
# el_idx -> element index within a sequence
if int(predictions[seq_idx]) in endTokens:
# Close hole, shift left one position, add pad to the end.
batch[seq_idx] = torch.cat((batch[seq_idx][:el_idx], batch[seq_idx][el_idx+1:], torch.LongTensor([self.tokenizer.padToken]).to(device)), 0)
elif int(batch[seq_idx][-1]) != self.tokenizer.padToken or (indices_lengths is not None and indices_lengths[seq_idx] >= FLAGS.sample_indices_limit-1):
# No pads remaining to the right, replace hole with prediction but don't insert new hole.
# batch[seq_idx] = torch.cat((batch[seq_idx][:el_idx], predictions[seq_idx].unsqueeze(0), batch[seq_idx][el_idx+1:]), 0)
batch[seq_idx][el_idx] = predictions[seq_idx]
else:
# Replace with prediction and keep hole.
batch[seq_idx] = torch.cat((batch[seq_idx][:el_idx], predictions[seq_idx].unsqueeze(0), batch[seq_idx][el_idx:][:-1]), 0)
new_hole[seq_idx] = True
q_idx = batch_idxs[seq_idx]
sample_indices[q_idx][el_idx] = predictions[seq_idx]
if indices_lengths is not None:
indices_lengths[seq_idx] += 1
return new_hole
def StepMaskSeq(self,
batch : torch.LongTensor,
batch_idxs : torch.LongTensor,
sample_indices : torch.LongTensor,
indices_lengths : torch.LongTensor,
prediction_scores : torch.LongTensor,
device,
) -> typing.Tuple[
bool,
torch.LongTensor,
np.array,
]:
"""
Applies sample step with mask predictions to input batch.
"""
# [seq_idx, hole_idx] of batch.
idxs, targets = torch.where(batch == self.tokenizer.maskToken)
# Predictions for these indices.
predictions = self.argmax(prediction_scores[(idxs, targets)])
for p_idx, (seq_idx, el_idx) in enumerate(zip(idxs.flip(dims = (0,)), targets.flip(dims = (0,)))):
# seq_idx -> indices within the batch
# el_idx -> element index within a sequence
# Casually replace the [MASK] with the single predicted token.
batch[seq_idx][el_idx] = predictions[idxs.size(0) - 1 - p_idx]
q_idx = batch_idxs[seq_idx]
sample_indices[q_idx][el_idx] = predictions[idxs.size(0) - 1 - p_idx]
if indices_lengths is not None:
indices_lengths[seq_idx] += 1
return torch.zeros(len(batch), dtype=torch.bool)
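# Illustrative sketch (hypothetical helper, not referenced elsewhere in this module):
# when use_categorical is set, argmax() above samples from a RelaxedOneHotCategorical
# (Gumbel-Softmax) distribution at the configured temperature instead of taking the
# deterministic argmax of the logits, which injects controlled randomness per position.
def _example_temperature_sampling(temperature = 0.8):
  import torch
  logits = torch.randn(4, 100)                            # [target positions, vocab]
  relaxed = torch.distributions.relaxed_categorical.RelaxedOneHotCategorical(
    temperature = temperature, logits = logits,
  ).sample()
  return torch.argmax(relaxed, dim = -1)                  # one sampled token id per position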
| 22,498 | 44.178715 | 220 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/generation_utils.py | # coding=utf-8
# Copyright 2022 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from deeplearning.benchpress.util.pytorch import torch
class GenerationMixin:
"""
A class containing all of the functions supporting generation, to be used as a mixin in
:class:`~transformers.PreTrainedModel`.
"""
def prepare_inputs_for_generation(self, input_ids, **kwargs):
"""
Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to prepare inputs in the
generate method.
"""
return {"input_ids": input_ids}
def adjust_logits_during_generation(self, logits, **kwargs):
"""
    Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to adjust the logits in
the generate method.
"""
return logits
def _use_cache(self, outputs, use_cache):
"""During generation, decide whether to pass the `past` variable to the next forward pass."""
if len(outputs) <= 1 or use_cache is False:
return False
if hasattr(self.config, "mem_len") and self.config.mem_len == 0:
return False
return True
def enforce_repetition_penalty_(self, lprobs, batch_size, num_beams, prev_output_tokens, repetition_penalty):
"""
Enforce the repetition penalty (from the `CTRL paper <https://arxiv.org/abs/1909.05858>`__).
"""
for i in range(batch_size * num_beams):
for previous_token in set(prev_output_tokens[i].tolist()):
        # if score < 0 then the repetition penalty has to be multiplied to reduce the previous token probability
if lprobs[i, previous_token] < 0:
lprobs[i, previous_token] *= repetition_penalty
else:
lprobs[i, previous_token] /= repetition_penalty
def postprocess_next_token_scores(
self,
scores,
input_ids,
no_repeat_ngram_size,
bad_words_ids,
cur_len,
min_length,
max_length,
eos_token_id,
repetition_penalty,
batch_size,
num_beams,
):
# repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
if repetition_penalty != 1.0:
self.enforce_repetition_penalty_(
scores, batch_size, num_beams, input_ids, repetition_penalty,
)
# set eos token prob to zero if min_length is not reached
if eos_token_id is not None and cur_len < min_length:
scores[:, eos_token_id] = -float("inf")
if no_repeat_ngram_size > 0:
# calculate a list of banned tokens to prevent repetitively generating the same ngrams
num_batch_hypotheses = batch_size * num_beams
# from fairseq: https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345
banned_batch_tokens = calc_banned_ngram_tokens(
input_ids, num_batch_hypotheses, no_repeat_ngram_size, cur_len
)
for i, banned_tokens in enumerate(banned_batch_tokens):
scores[i, banned_tokens] = -float("inf")
if bad_words_ids is not None:
# Exclude EOS token (already processed)
bad_words_ids = list(filter(lambda bad_token_seq: bad_token_seq != [eos_token_id], bad_words_ids))
# calculate a list of banned tokens according to bad words
banned_tokens = calc_banned_bad_words_ids(input_ids.tolist(), bad_words_ids)
# Modify the scores in place by setting the banned tokens logits to `-inf`
set_scores_to_inf_for_banned_tokens(scores, banned_tokens)
return scores
@torch.no_grad()
def generate(
self,
input_ids: typing.Optional[torch.LongTensor] = None,
max_length: typing.Optional[int] = None,
min_length: typing.Optional[int] = None,
do_sample: typing.Optional[bool] = None,
early_stopping: typing.Optional[bool] = None,
num_beams: typing.Optional[int] = None,
temperature: typing.Optional[float] = None,
top_k: typing.Optional[int] = None,
top_p: typing.Optional[float] = None,
repetition_penalty: typing.Optional[float] = None,
bad_words_ids: typing.Optional[typing.Iterable[int]] = None,
bos_token_id: typing.Optional[int] = None,
pad_token_id: typing.Optional[int] = None,
eos_token_id: typing.Optional[int] = None,
length_penalty: typing.Optional[float] = None,
no_repeat_ngram_size: typing.Optional[int] = None,
num_return_sequences: typing.Optional[int] = None,
attention_mask: typing.Optional[torch.LongTensor] = None,
decoder_start_token_id: typing.Optional[int] = None,
use_cache: typing.Optional[bool] = None,
**model_specific_kwargs
) -> torch.LongTensor:
r"""
Generates sequences for models with a language modeling head. The method currently supports greedy decoding,
beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.
Adapted in part from `Facebook's XLM beam search code
<https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__.
Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the
attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values
indicated are the default values of those config.
Most of these parameters are explained in more detail in `this blog post
<https://huggingface.co/blog/how-to-generate>`__.
Parameters:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
The sequence used as a prompt for the generation. If :obj:`None` the method initializes
it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`.
max_length (:obj:`int`, `optional`, defaults to 20):
The maximum length of the sequence to be generated.
min_length (:obj:`int`, `optional`, defaults to 10):
The minimum length of the sequence to be generated.
do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use sampling ; use greedy decoding otherwise.
early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not.
num_beams (:obj:`int`, `optional`, defaults to 1):
Number of beams for beam search. 1 means no beam search.
        temperature (:obj:`float`, `optional`, defaults to 1.0):
The value used to module the next token probabilities.
top_k (:obj:`int`, `optional`, defaults to 50):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_p (:obj:`float`, `optional`, defaults to 1.0):
If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or
higher are kept for generation.
repetition_penalty (:obj:`float`, `optional`, defaults to 1.0):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
pad_token_id (:obj:`int`, `optional`):
The id of the `padding` token.
bos_token_id (:obj:`int`, `optional`):
The id of the `beginning-of-sequence` token.
eos_token_id (:obj:`int`, `optional`):
The id of the `end-of-sequence` token.
length_penalty (:obj:`float`, `optional`, defaults to 1.0):
Exponential penalty to the length. 1.0 means no penalty.
Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in
order to encourage the model to produce longer sequences.
no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0):
If set to int > 0, all ngrams of that size can only occur once.
bad_words_ids(:obj:`typing.List[int]`, `optional`):
          List of token ids that are not allowed to be generated. In order to get the tokens of the words that
should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`.
num_return_sequences(:obj:`int`, `optional`, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for
tokens that are not masked, and 0 for masked tokens.
If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_start_token_id (:obj:`int`, `optional`):
If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token.
use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should use the past last key/values attentions (if applicable to the model) to
speed up decoding.
model_specific_kwargs:
Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model.
Return:
:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`:
The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or
shorter if all batches finished early due to the :obj:`eos_token_id`.
Examples::
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
outputs = model.generate(max_length=40) # do greedy decoding
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.
input_context = 'The dog'
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling
for i in range(3): # 3 output sequences were generated
print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.
input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences
print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))
tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer
model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache.
      input_context = 'My cute dog' # an arbitrary prompt for gpt2
bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]
input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context
outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated
"""
# We cannot generate if the model does not have a LM head
if self.get_output_embeddings() is None:
raise AttributeError(
"You tried to generate sequences with a model that does not have a LM Head."
"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )"
)
max_length = max_length if max_length is not None else self.config.max_length
min_length = min_length if min_length is not None else self.config.min_length
do_sample = do_sample if do_sample is not None else self.config.do_sample
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
use_cache = use_cache if use_cache is not None else self.config.use_cache
num_beams = num_beams if num_beams is not None else self.config.num_beams
temperature = temperature if temperature is not None else self.config.temperature
top_k = top_k if top_k is not None else self.config.top_k
top_p = top_p if top_p is not None else self.config.top_p
repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
no_repeat_ngram_size = (
no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size
)
bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
)
decoder_start_token_id = (
decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id
)
if input_ids is not None:
      batch_size = input_ids.shape[0] # overridden by the input batch_size
else:
batch_size = 1
assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer."
assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean."
assert isinstance(use_cache, bool), "`use_cache` should be a boolean."
assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
assert temperature > 0, "`temperature` should be strictly positive."
assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
assert input_ids is not None or (
isinstance(bos_token_id, int) and bos_token_id >= 0
), "If input_ids is not defined, `bos_token_id` should be a positive integer."
assert pad_token_id is None or (
isinstance(pad_token_id, int) and (pad_token_id >= 0)
), "`pad_token_id` should be a positive integer."
assert (eos_token_id is None) or (
isinstance(eos_token_id, int) and (eos_token_id >= 0)
), "`eos_token_id` should be a positive integer."
assert length_penalty > 0, "`length_penalty` should be strictly positive."
assert (
isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0
), "`no_repeat_ngram_size` should be a positive integer."
assert (
isinstance(num_return_sequences, int) and num_return_sequences > 0
), "`num_return_sequences` should be a strictly positive integer."
assert (
bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)
), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated"
if input_ids is None:
assert isinstance(bos_token_id, int) and bos_token_id >= 0, (
"you should either supply a context to complete as `input_ids` input "
"or a `bos_token_id` (integer >= 0) as a first token to start the generation."
)
input_ids = torch.full(
(batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,
)
else:
assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."
    # do not allow duplicate outputs when greedy decoding
if do_sample is False:
if num_beams == 1:
# no_beam_search greedy generation conditions
assert (
num_return_sequences == 1
), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1"
else:
# beam_search greedy generation conditions
assert (
num_beams >= num_return_sequences
), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences"
# create attention mask if necessary
# TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140
if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):
attention_mask = input_ids.ne(pad_token_id).long()
elif attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
# set pad_token_id to eos_token_id if not set. Important that this is done after
# attention_mask is created
if pad_token_id is None and eos_token_id is not None:
pad_token_id = eos_token_id
# current position and vocab size
if hasattr(self.config, "vocab_size"):
vocab_size = self.config.vocab_size
elif (
self.config.is_encoder_decoder
and hasattr(self.config, "decoder")
and hasattr(self.config.decoder, "vocab_size")
):
vocab_size = self.config.decoder.vocab_size
# set effective batch size and effective batch multiplier according to do_sample
if do_sample:
effective_batch_size = batch_size * num_return_sequences
effective_batch_mult = num_return_sequences
else:
effective_batch_size = batch_size
effective_batch_mult = 1
if self.config.is_encoder_decoder:
if decoder_start_token_id is None:
# see if BOS token can be used for decoder_start_token_id
if bos_token_id is not None:
decoder_start_token_id = bos_token_id
elif hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id"):
decoder_start_token_id = self.config.decoder.bos_token_id
else:
raise ValueError(
"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation"
)
assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self)
assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder)
# get encoder and store encoder outputs
encoder = self.get_encoder()
encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
# Expand input ids if num_beams > 1 or num_return_sequences > 1
if num_return_sequences > 1 or num_beams > 1:
input_ids_len = input_ids.shape[-1]
input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)
attention_mask = attention_mask.unsqueeze(1).expand(
batch_size, effective_batch_mult * num_beams, input_ids_len
)
input_ids = input_ids.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
attention_mask = attention_mask.contiguous().view(
effective_batch_size * num_beams, input_ids_len
) # shape: (batch_size * num_return_sequences * num_beams, cur_len)
if self.config.is_encoder_decoder:
# create empty decoder_input_ids
input_ids = torch.full(
(effective_batch_size * num_beams, 1),
decoder_start_token_id,
dtype=torch.long,
device=next(self.parameters()).device,
)
cur_len = 1
assert (
batch_size == encoder_outputs[0].shape[0]
), f"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} "
# expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)
expanded_batch_idxs = (
torch.arange(batch_size)
.view(-1, 1)
.repeat(1, num_beams * effective_batch_mult)
.view(-1)
.to(input_ids.device)
)
# expand encoder_outputs
encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])
else:
encoder_outputs = None
cur_len = input_ids.shape[-1]
assert (
cur_len < max_length
), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`"
if num_beams > 1:
output = self._generate_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
early_stopping=early_stopping,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
num_return_sequences=num_return_sequences,
length_penalty=length_penalty,
num_beams=num_beams,
vocab_size=vocab_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
else:
output = self._generate_no_beam_search(
input_ids,
cur_len=cur_len,
max_length=max_length,
min_length=min_length,
do_sample=do_sample,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
batch_size=effective_batch_size,
encoder_outputs=encoder_outputs,
attention_mask=attention_mask,
use_cache=use_cache,
model_specific_kwargs=model_specific_kwargs,
)
return output
def _generate_no_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
pad_token_id,
eos_token_id,
batch_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example without beam search (num_beams == 1).
        All returned sequences are generated independently.
"""
# length of generated sentences / unfinished sentences
unfinished_sents = input_ids.new(batch_size).fill_(1)
sent_lengths = input_ids.new(batch_size).fill_(max_length)
past = (encoder_outputs, None) if encoder_outputs is not None else None
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs)
next_token_logits = outputs[0][:, -1, :]
scores = self.postprocess_next_token_scores(
scores=next_token_logits,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=1,
)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
if do_sample:
# Temperature (higher temperature => more likely to sample low probability tokens)
if temperature != 1.0:
scores = scores / temperature
# Top-p/top-k filtering
next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p)
# Sample
probs = torch.nn.functional.softmax(next_token_logscores, dim=-1)
next_token = torch.multinomial(probs, num_samples=1, replacement = True).squeeze(1)
else:
# Greedy decoding
next_token = torch.argmax(next_token_logits, dim=-1)
# update generations and finished sentences
if eos_token_id is not None:
# pad finished sentences if eos_token_id exist
tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)
else:
tokens_to_add = next_token
# add token and increase length by one
input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
cur_len = cur_len + 1
if eos_token_id is not None:
eos_in_sents = tokens_to_add == eos_token_id
# if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length
is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()
sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len)
# unfinished_sents is set to zero if eos in sentence
unfinished_sents.mul_((~eos_in_sents).long())
      # stop when there is a </s> in each sentence, or if we exceed the maximum length
if unfinished_sents.max() == 0:
break
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
return input_ids
def _generate_beam_search(
self,
input_ids,
cur_len,
max_length,
min_length,
do_sample,
early_stopping,
temperature,
top_k,
top_p,
repetition_penalty,
no_repeat_ngram_size,
bad_words_ids,
pad_token_id,
eos_token_id,
batch_size,
num_return_sequences,
length_penalty,
num_beams,
vocab_size,
encoder_outputs,
attention_mask,
use_cache,
model_specific_kwargs,
):
""" Generate sequences for each example with beam search.
"""
# generated hypotheses
generated_hyps = [
BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping)
for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
    # for greedy decoding, make sure that only tokens of the first beam are considered, to avoid sampling the exact same tokens across all beams
if do_sample is False:
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# cache compute states
past = (encoder_outputs, None) if encoder_outputs is not None else None
# done sentences
done = [False for _ in range(batch_size)]
while cur_len < max_length:
model_inputs = self.prepare_inputs_for_generation(
input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_specific_kwargs
)
outputs = self(**model_inputs) # (batch_size * num_beams, cur_len, vocab_size)
next_token_logits = outputs[0][:, -1, :] # (batch_size * num_beams, vocab_size)
# if model has past, then set the past variable to speed up decoding
if self._use_cache(outputs, use_cache):
past = outputs[1]
if self.config.is_encoder_decoder and do_sample is False:
# TODO (PVP) still a bit hacky here - there might be a better solution
next_token_logits = self.adjust_logits_during_generation(
next_token_logits, cur_len=cur_len, max_length=max_length
)
scores = torch.nn.functional.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size)
scores = self.postprocess_next_token_scores(
scores=scores,
input_ids=input_ids,
no_repeat_ngram_size=no_repeat_ngram_size,
bad_words_ids=bad_words_ids,
cur_len=cur_len,
min_length=min_length,
max_length=max_length,
eos_token_id=eos_token_id,
repetition_penalty=repetition_penalty,
batch_size=batch_size,
num_beams=num_beams,
)
assert scores.shape == (batch_size * num_beams, vocab_size), "Shapes of scores: {} != {}".format(
scores.shape, (batch_size * num_beams, vocab_size)
)
if do_sample:
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
# Temperature
if temperature != 1.0:
_scores = _scores / temperature
# Top-p/top-k filtering
_scores = top_k_top_p_filtering(
_scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
) # (batch_size * num_beams, vocab_size)
# re-organize to group the beam together to sample from all beam_idxs
_scores = _scores.contiguous().view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
# Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search)
probs = torch.nn.functional.softmax(_scores, dim=-1)
next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2)
# Compute next scores
next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2)
# sort the sampled vector to make sure that the first num_beams samples are the best
next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1)
next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2)
else:
next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
        # re-organize to group the beam together (we are keeping top hypotheses across beams)
next_scores = next_scores.view(
batch_size, num_beams * vocab_size
) # (batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams)
# next batch beam content
next_batch_beam = []
# for each sentence
for batch_idx in range(batch_size):
# if we are done with this sentence, add a pad token
if done[batch_idx]:
assert (
len(generated_hyps[batch_idx]) >= num_beams
), "Batch can only be done if at least {} beams have been generated".format(num_beams)
assert (
eos_token_id is not None and pad_token_id is not None
), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined"
next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch
continue
# next sentence beam content, this will get added to next_batch_beam
next_sent_beam = []
# next tokens for this sentence
for beam_token_rank, (beam_token_id, beam_token_score) in enumerate(
zip(next_tokens[batch_idx], next_scores[batch_idx])
):
# get beam and token IDs
beam_id = beam_token_id // vocab_size
token_id = beam_token_id % vocab_size
effective_beam_id = batch_idx * num_beams + beam_id
# add to generated hypotheses if end of sentence
if (eos_token_id is not None) and (token_id.item() == eos_token_id):
# if beam_token does not belong to top num_beams tokens, it should not be added
is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams
if is_beam_token_worse_than_top_num_beams:
continue
generated_hyps[batch_idx].add(
input_ids[effective_beam_id].clone(), beam_token_score.item(),
)
else:
# add next predicted token since it is not eos_token
next_sent_beam.append((beam_token_score, token_id, effective_beam_id))
# once the beam for next step is full, don't add more tokens to it.
if len(next_sent_beam) == num_beams:
break
# Check if we are done so that we can save a pad step if all(done)
done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(
next_scores[batch_idx].max().item(), cur_len
)
# update next beam content
assert len(next_sent_beam) == num_beams, "Beam should always be full"
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step"
# stop when we are done with each sentence
if all(done):
break
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_tokens = input_ids.new([x[1] for x in next_batch_beam])
beam_idx = input_ids.new([x[2] for x in next_batch_beam])
# re-order batch and update current length
input_ids = input_ids[beam_idx, :]
input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1)
cur_len = cur_len + 1
# re-order internal states
if past is not None:
past = self._reorder_cache(past, beam_idx)
# extend attention_mask for new generated input if only decoder
if self.config.is_encoder_decoder is False:
attention_mask = torch.cat(
[attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
)
# finalize all open beam hypotheses and add to generated hypotheses
for batch_idx in range(batch_size):
if done[batch_idx]:
continue
# test that beam scores match previously calculated scores if not eos and batch_idx not done
if eos_token_id is not None and all(
(token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx]
):
assert torch.all(
next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx]
), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format(
next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx],
)
# need to add best num_beams hypotheses to generated hyps
for beam_id in range(num_beams):
effective_beam_id = batch_idx * num_beams + beam_id
final_score = beam_scores[effective_beam_id].item()
final_tokens = input_ids[effective_beam_id]
generated_hyps[batch_idx].add(final_tokens, final_score)
# depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch
output_batch_size = batch_size if do_sample else batch_size * num_return_sequences
output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences
# select the best hypotheses
sent_lengths = input_ids.new(output_batch_size)
best = []
# retrieve best hypotheses
for i, hypotheses in enumerate(generated_hyps):
sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0])
for j in range(output_num_return_sequences_per_batch):
effective_batch_idx = output_num_return_sequences_per_batch * i + j
best_hyp = sorted_hyps.pop()[1]
sent_lengths[effective_batch_idx] = len(best_hyp)
best.append(best_hyp)
# shorter batches are padded
if sent_lengths.min().item() != sent_lengths.max().item():
      assert pad_token_id is not None, "`pad_token_id` has to be defined"
sent_max_len = min(sent_lengths.max().item() + 1, max_length)
decoded = input_ids.new(output_batch_size, sent_max_len).fill_(pad_token_id)
# fill with hypothesis and eos_token_id if necessary
for i, hypo in enumerate(best):
decoded[i, : sent_lengths[i]] = hypo
if sent_lengths[i] < max_length:
decoded[i, sent_lengths[i]] = eos_token_id
else:
# none of the hypotheses have an eos_token
      assert all(len(hypo) == max_length for hypo in best)
decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)
return decoded
@staticmethod
def _reorder_cache(past: typing.Tuple, beam_idx: torch.Tensor) -> typing.Tuple[torch.Tensor]:
return tuple(layer_past.index_select(1, beam_idx) for layer_past in past)
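# Illustrative numeric sketch (added for exposition; not part of the upstream API).
# It mirrors the asymmetric CTRL-style update applied by enforce_repetition_penalty_
# above: negative log-scores are multiplied by the penalty (pushed further down),
# positive scores are divided by it.
def _demo_repetition_penalty(score: float, repetition_penalty: float = 1.2) -> float:
  return score * repetition_penalty if score < 0 else score / repetition_penalty
# e.g. _demo_repetition_penalty(-1.5) lowers the score to about -1.8, while
# _demo_repetition_penalty(2.4) shrinks it to about 2.0.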
def calc_banned_ngram_tokens(prev_input_ids: torch.Tensor, num_hypos: int, no_repeat_ngram_size: int, cur_len: int) -> typing.List[typing.List[int]]:
"""Copied from fairseq for no_repeat_ngram in beam_search"""
if cur_len + 1 < no_repeat_ngram_size:
# return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
return [[] for _ in range(num_hypos)]
generated_ngrams = [{} for _ in range(num_hypos)]
for idx in range(num_hypos):
gen_tokens = prev_input_ids[idx].tolist()
generated_ngram = generated_ngrams[idx]
for ngram in zip(*[gen_tokens[i:] for i in range(no_repeat_ngram_size)]):
prev_ngram_tuple = tuple(ngram[:-1])
generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
def _get_generated_ngrams(hypo_idx):
# Before decoding the next token, prevent decoding of ngrams that have already appeared
start_idx = cur_len + 1 - no_repeat_ngram_size
ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].tolist())
return generated_ngrams[hypo_idx].get(ngram_idx, [])
banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
return banned_tokens
def calc_banned_bad_words_ids(prev_input_ids: typing.Iterable[int], bad_words_ids: typing.Iterable[int]) -> typing.Iterable[int]:
banned_tokens = []
def _tokens_match(prev_tokens, tokens):
if len(tokens) == 0:
# if bad word tokens is just one token always ban it
return True
if len(tokens) > len(prev_tokens):
# if bad word tokens are longer than prev tokens they can't be equal
return False
if prev_tokens[-len(tokens) :] == tokens:
# if tokens match
return True
else:
return False
for prev_input_ids_slice in prev_input_ids:
banned_tokens_slice = []
for banned_token_seq in bad_words_ids:
assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format(
bad_words_ids
)
if _tokens_match(prev_input_ids_slice, banned_token_seq[:-1]) is False:
# if tokens do not match continue
continue
banned_tokens_slice.append(banned_token_seq[-1])
banned_tokens.append(banned_tokens_slice)
return banned_tokens
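# Illustrative sketch (added for exposition; not part of the upstream API). Each
# bad-word sequence bans its final token only when all of its preceding tokens
# already match the tail of the generated sequence.
def _demo_calc_banned_bad_words_ids():
  prev_input_ids = [[7, 21], [7, 9]] # already generated tokens, per hypothesis
  bad_words_ids = [[21, 4], [9, 5]] # forbidden token sequences
  banned = calc_banned_bad_words_ids(prev_input_ids, bad_words_ids)
  assert banned == [[4], [5]] # first row may not emit 4 next, second row may not emit 5
  return banned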
def set_scores_to_inf_for_banned_tokens(scores: torch.Tensor, banned_tokens: typing.List[typing.List[int]]) -> None:
""" Modifies the scores in place by setting the banned token positions to `-inf`. Banned token is expected to be
a list of list of banned tokens to ban in the format [[batch index, vocabulary position],...]
Args:
scores: logits distribution of shape (batch size, vocabulary size)
banned_tokens: list of list of tokens to ban of length (batch_size)
"""
banned_mask_list = []
for idx, batch_banned_tokens in enumerate(banned_tokens):
for token in batch_banned_tokens:
banned_mask_list.append([idx, token])
if not banned_mask_list:
return
banned_mask = torch.LongTensor(banned_mask_list)
indices = torch.ones(len(banned_mask))
# A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates:
# [ 0 1 1 ]
# [ 0 0 0 ]
# [ 1 0 0 ]
banned_mask = torch.sparse.LongTensor(banned_mask.t(), indices, scores.size()).to(scores.device).to_dense().bool()
scores.masked_fill_(banned_mask, -float("inf"))
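# Illustrative sketch (added for exposition; not part of the upstream API). It
# reproduces the effect of set_scores_to_inf_for_banned_tokens with a plain dense
# boolean mask instead of the sparse-tensor construction used above.
def _demo_banned_token_masking():
  scores = torch.zeros(2, 4) # (batch size, vocabulary size)
  banned = [[1, 3], []] # banned vocabulary positions per batch row
  mask = torch.zeros_like(scores, dtype=torch.bool)
  for row, cols in enumerate(banned):
    for col in cols:
      mask[row, col] = True
  scores = scores.masked_fill(mask, -float("inf"))
  assert scores[0, 1].item() == -float("inf") and bool(torch.isfinite(scores[1]).all())
  return scores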
def top_k_top_p_filtering(
logits: torch.Tensor,
top_k: int = 0,
top_p: float = 1.0,
filter_value: float = -float("Inf"),
min_tokens_to_keep: int = 1,
) -> torch.Tensor:
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
Make sure we keep at least min_tokens_to_keep per batch example in the output
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold (token with 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits
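# Illustrative sketch (added for exposition; not part of the upstream API). With
# top_k=2 only the two highest logits survive; everything else is pushed to -inf,
# so the subsequent softmax assigns it zero probability.
def _demo_top_k_filtering():
  logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
  filtered = top_k_top_p_filtering(logits.clone(), top_k=2)
  assert filtered[0, 0].item() == 2.0 and filtered[0, 1].item() == 1.0
  assert filtered[0, 2].item() == -float("inf") and filtered[0, 3].item() == -float("inf")
  return filtered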
class BeamHypotheses(object):
def __init__(self, num_beams, max_length, length_penalty, early_stopping):
"""
Initialize n-best list of hypotheses.
"""
self.max_length = max_length - 1 # ignoring bos_token
self.length_penalty = length_penalty
self.early_stopping = early_stopping
self.num_beams = num_beams
self.beams = []
self.worst_score = 1e9
def __len__(self):
"""
Number of hypotheses in the list.
"""
return len(self.beams)
def add(self, hyp, sum_logprobs):
"""
Add a new hypothesis to the list.
"""
score = sum_logprobs / len(hyp) ** self.length_penalty
if len(self) < self.num_beams or score > self.worst_score:
self.beams.append((score, hyp))
if len(self) > self.num_beams:
sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)])
del self.beams[sorted_scores[0][1]]
self.worst_score = sorted_scores[1][0]
else:
self.worst_score = min(score, self.worst_score)
def is_done(self, best_sum_logprobs, cur_len):
"""
    If there are enough hypotheses and none of the hypotheses being generated
    can become better than the worst one in the heap, then we are done with this sentence.
"""
if len(self) < self.num_beams:
return False
elif self.early_stopping:
return True
else:
cur_score = best_sum_logprobs / cur_len ** self.length_penalty
ret = self.worst_score >= cur_score
return ret
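# Illustrative sketch (added for exposition; not part of the upstream API). Scores
# are length-normalised sum_logprobs; once the list is full, a weaker hypothesis is
# simply rejected and the worst retained score is tracked.
def _demo_beam_hypotheses():
  hyps = BeamHypotheses(num_beams=2, max_length=10, length_penalty=1.0, early_stopping=False)
  hyps.add(torch.LongTensor([1, 2, 3]), sum_logprobs=-3.0) # score -3.0 / 3 = -1.0
  hyps.add(torch.LongTensor([1, 2]), sum_logprobs=-1.0) # score -1.0 / 2 = -0.5
  hyps.add(torch.LongTensor([1, 2, 3, 4]), sum_logprobs=-8.0) # score -2.0, rejected
  assert len(hyps) == 2 and hyps.worst_score == -1.0
  assert hyps.is_done(best_sum_logprobs=-5.0, cur_len=5) # -5.0 / 5 = -1.0 cannot beat -1.0
  return hyps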
| 45,882 | 44.160433 | 242 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/datasets.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This module
provides torch Dataset and Sampler classes that stream batches of training
data to the torch BERT models.
"""
import typing
import pickle
import functools
import json
import numpy as np
import pathlib
import glob
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import monitors
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.models import sequence_masking
from deeplearning.benchpress.models import lm_data_generator
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class OnlineDataset(torch.utils.data.Dataset):
r"""Online pre-processing dataset of raw corpus.
This dataset holds path to raw corpus and yields
pre-processed instances on the fly.
Arguments:
    dg (MaskLMDataGenerator): Data generator providing the corpus path, tokenizer and masking configuration.
    is_train (bool): Whether this dataset serves the training split or the validation split.
"""
def __init__(self, dg: lm_data_generator.MaskLMDataGenerator, is_train: bool):
super(OnlineDataset, self).__init__()
full_dataset = self.load_data(dg.cache.path / "{}corpus.pkl".format("pre_" if dg.pre_train else ""))
"""
    TODO: you'd better change the is_train check to something more generic.
"""
if is_train:
self.dataset = full_dataset[:int(len(full_dataset) * (1 - (dg.config.validation_split / 100)))]
else:
self.dataset = full_dataset[int(len(full_dataset) * (1 - (dg.config.validation_split / 100))):]
self.feature_encoder = dg.feature_encoder
self.cache_path = dg.cache.path
self.size = len(self.dataset)
self.cur_step = 0
self.steps_per_epoch = dg.steps_per_epoch * dg.training_opts.batch_size
self.hlen_monitor = None
if is_train:
if (self.cache_path / "{}hole_length_mon{}.pkl".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else "")).exists():
with open(self.cache_path / "{}hole_length_mon{}.pkl".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""), 'rb') as infile:
self.hlen_monitor = pickle.load(infile)
else:
self.hlen_monitor = monitors.NormalizedFrequencyMonitor(self.cache_path, "{}online_hole_length{}".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""))
"""
TODO, add custom config just like in lm_data_generator
for val sets / sample sets etc.
"""
if dg.config.HasField("mask"):
self.func = functools.partial(sequence_masking.MaskSequence,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
pickled_tokenizer = dg.tokenizer,
training_opts = dg.training_opts,
is_torch = True,
config = dg.config,
)
elif dg.config.HasField("hole"):
distribution = distributions.Distribution.FromHoleConfig(
dg.config.hole, dg.cache.path, "hole_length_online"
)
self.func = functools.partial(sequence_masking.HoleSequence,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
masked_lm_prob = dg.training_opts.masked_lm_prob,
distribution = distribution,
tokenizer = dg.tokenizer,
)
elif dg.config.HasField("mask_seq"):
distribution = distributions.Distribution.FromHoleConfig(
dg.config.mask_seq, dg.cache.path, "mask_seq_length_online"
)
self.func = functools.partial(sequence_masking.HoleSequenceSeqMasks,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
masked_lm_prob = dg.training_opts.masked_lm_prob,
distribution = distribution,
tokenizer = dg.tokenizer,
)
return
def __len__(self):
return self.size
def __getitem__(self, idx):
self.cur_step += 1
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
if not self.feature_encoder:
k = self.func(self.dataset[idx])
else:
k = self.func(self.dataset[idx][0])
k['input_features'] = self.dataset[idx][1]
if self.hlen_monitor:
self.hlen_monitor.register([x for x in k['masked_lm_lengths'] if x >= 0])
if self.cur_step % self.steps_per_epoch == 0:
self.hlen_monitor.plot()
with open(self.cache_path / "hole_length_mon{}.pkl".format("_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""), 'wb') as outf:
pickle.dump(self.hlen_monitor, outf)
# raise NotImplementedError("Fix a) init state of rngen)
return k
def load_data(self, dataset: pathlib.Path) -> typing.List[np.array]:
if dataset.exists():
with open(dataset, 'rb') as infile:
return pickle.load(infile)
else:
raise FileNotFoundError(dataset)
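# Illustrative sketch (added for exposition; not used by the pipeline). It shows
# how the validation_split percentage above carves the loaded corpus into a
# training slice and a validation slice.
def _demo_validation_split():
  full_dataset = list(range(100)) # stand-in for the unpickled corpus
  validation_split = 10 # percent, as in dg.config.validation_split
  cut = int(len(full_dataset) * (1 - (validation_split / 100)))
  train, validation = full_dataset[:cut], full_dataset[cut:]
  assert len(train) == 90 and len(validation) == 10
  return train, validation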
class LazyOnlineDataset(torch.utils.data.Dataset):
r"""Dataset as a concatenation of multiple datasets.
This class is useful to assemble different existing datasets
and instantiate them lazily, to avoid loading them all in
  memory at the same time.
Arguments:
    dg (MaskLMDataGenerator): Data generator providing the corpus chunk paths and masking configuration.
    is_train (bool): Whether this dataset serves the training split or the validation split.
"""
@staticmethod
def cumsum(sequence: typing.List[pathlib.Path], length_cache: pathlib.Path):
    lts, r, s = None, [], 0 # Cached lengths dict, cumulative lengths, running total.
## If lengths cache exists, just load the dictionary.
if length_cache.exists():
with open(length_cache, 'r') as inf:
lts = json.load(inf)
## Iterate every dataset chunk, and fix the cumulative length distribution.
for e in sequence:
if lts:
lt = lts[pathlib.Path(e).name]
else:
with open(e, 'rb') as infile:
length = len(pickle.load(infile))
lt = length
assert lt > 0, "Dataset {} is empty".format(e)
r.append(lt + s)
s += lt
## If lengths cache had not been created, fix it now.
if not lts and environment.WORLD_RANK == 0:
lts = {}
s = 0
for e, rx in zip(sequence, r):
lts[pathlib.Path(e).name] = rx - s
s = rx
with open(length_cache, 'w') as outf:
json.dump(lts, outf)
return r
@property
def num_datasets(self):
return len(self.datasets)
def __init__(self, dg: lm_data_generator.MaskLMDataGenerator, is_train: bool):
super(LazyOnlineDataset, self).__init__()
self.datasets = glob.glob(str(dg.cache.path / "{}corpus_*.pkl".format("pre_" if dg.pre_train else "")))
self.cumulative_sizes = self.cumsum(self.datasets, dg.cache.path / "pre_lengths_cache.json")
self.feature_encoder = dg.feature_encoder
self.curr_dset_idx = None
self.dataset = None
self.is_train = is_train
"""
    TODO: you'd better change the is_train check to something more generic.
"""
self.vfactor = lambda l: int(l * (1 - (dg.config.validation_split / 100)))
self.cache_path = dg.cache.path
self.cur_step = 0
self.steps_per_epoch = dg.steps_per_epoch * dg.training_opts.batch_size
self.hlen_monitor = None
if is_train:
if (self.cache_path / "{}hole_length_mon{}.pkl".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else "")).exists():
with open(self.cache_path / "{}hole_length_mon{}.pkl".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""), 'rb') as infile:
self.hlen_monitor = pickle.load(infile)
else:
self.hlen_monitor = monitors.NormalizedFrequencyMonitor(self.cache_path, "{}online_hole_length{}".format("pre_" if dg.pre_train else "", "_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""))
"""
TODO, add custom config just like in lm_data_generator
for val sets / sample sets etc.
"""
self.tokenizer = dg.tokenizer
if dg.config.HasField("mask"):
self.func = functools.partial(sequence_masking.MaskSequence,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
pickled_tokenizer = dg.tokenizer,
training_opts = dg.training_opts,
is_torch = True,
config = dg.config,
)
elif dg.config.HasField("hole"):
distribution = distributions.Distribution.FromHoleConfig(
dg.config.hole, dg.cache.path, "hole_length_online"
)
self.func = functools.partial(sequence_masking.HoleSequence,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
masked_lm_prob = dg.training_opts.masked_lm_prob,
distribution = distribution,
tokenizer = dg.tokenizer,
)
elif dg.config.HasField("mask_seq"):
distribution = distributions.Distribution.FromHoleConfig(
dg.config.mask_seq, dg.cache.path, "mask_seq_length_online"
)
self.func = functools.partial(sequence_masking.HoleSequenceSeqMasks,
train_set = is_train,
max_predictions = dg.training_opts.max_predictions_per_seq,
masked_lm_prob = dg.training_opts.masked_lm_prob,
distribution = distribution,
tokenizer = dg.tokenizer,
)
return
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
self.cur_step += 1
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
import bisect
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if self.curr_dset_idx != dataset_idx:
self.curr_dset_idx = dataset_idx
with open(self.datasets[dataset_idx], 'rb') as infile:
self.dataset = pickle.load(infile)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
k = self.func(self.dataset[sample_idx])
if self.hlen_monitor:
self.hlen_monitor.register([x for x in k['masked_lm_lengths'] if x >= 0])
if self.cur_step % self.steps_per_epoch == 0:
self.hlen_monitor.plot()
with open(self.cache_path / "hole_length_mon{}.pkl".format("_{}".format(environment.WORLD_RANK) if environment.WORLD_SIZE > 1 else ""), 'wb') as outf:
pickle.dump(self.hlen_monitor, outf)
return k
class LazyConcatDataset(torch.utils.data.Dataset):
r"""Dataset as a concatenation of multiple datasets.
This class is useful to assemble different existing datasets
and instantiate them lazily, to avoid loading them all in
  memory at the same time.
Arguments:
datasets (sequence): List of paths for datasets to be concatenated
"""
@staticmethod
def cumsum(sequence: typing.List[pathlib.Path]):
r, s = [], 0
for e in sequence:
lt = len(torch.load(e))
assert lt > 0, "Dataset {} is empty".format(e)
r.append(lt + s)
s += lt
return r
@property
def num_datasets(self):
return len(self.datasets)
def __init__(self, datasets: typing.List[pathlib.Path]):
super(LazyConcatDataset, self).__init__()
assert len(datasets) > 0, 'Empty list of datasets provided.'
self.datasets = datasets
self.cumulative_sizes = self.cumsum(self.datasets)
self.curr_dset_idx = None
self.dataset = None
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
import bisect
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if self.curr_dset_idx != dataset_idx:
self.curr_dset_idx = dataset_idx
self.dataset = torch.load(self.datasets[dataset_idx])
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return self.dataset[sample_idx]
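# Illustrative sketch (added for exposition; not used by the pipeline). It mirrors
# the cumulative-sizes lookup in __getitem__ above: bisect finds which chunk a
# global index falls into, and subtracting the previous cumulative size yields the
# local sample index.
def _demo_cumulative_index_lookup():
  import bisect
  cumulative_sizes = [100, 250, 400] # three chunks holding 100, 150 and 150 samples
  idx = 260 # global index into the concatenated dataset
  dataset_idx = bisect.bisect_right(cumulative_sizes, idx)
  sample_idx = idx if dataset_idx == 0 else idx - cumulative_sizes[dataset_idx - 1]
  assert (dataset_idx, sample_idx) == (2, 10)
  return dataset_idx, sample_idx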
class LazyRandomSampler(torch.utils.data.Sampler):
r"""Samples elements randomly. If without replacement, then sample from a shuffled dataset.
If with replacement, then user can specify :attr:`num_samples` to draw.
Arguments:
data_source (Dataset): dataset to sample from
replacement (bool): samples are drawn with replacement if ``True``, default=``False``
num_samples (int): number of samples to draw, default=`len(dataset)`. This argument
is supposed to be specified only when `replacement` is ``True``.
generator (Generator): Generator used in sampling.
"""
def __init__(self, data_source, replacement = False, num_samples = None, generator = None):
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
self.generator = generator
self.distributed = True if environment.WORLD_SIZE > 1 else False
self.dataset_idx = self.__datasetIdx_iter__
self.epoch = None
if not isinstance(self.replacement, bool):
raise TypeError("replacement should be a boolean value, but got "
"replacement={}".format(self.replacement))
if self._num_samples is not None and not replacement:
raise ValueError("With replacement=False, num_samples should not be specified, "
"since a random permute will be performed.")
if not isinstance(self.num_samples, int) or self.num_samples <= 0:
raise ValueError("num_samples should be a positive integer "
"value, but got num_samples={}".format(self.num_samples))
@property
def num_samples(self):
# dataset size might change at runtime
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
@property
def num_datasets(self):
if isinstance(self.data_source, LazyConcatDataset) or isinstance(self.data_source, LazyOnlineDataset):
return self.data_source.num_datasets
else:
return 1
@property
def __datasetIdx_iter__(self):
dataset_idx = torch.randperm(self.num_datasets, generator = self.generator).tolist()
self.dataset_tensor = iter(dataset_idx)
return self.dataset_tensor
def __iter__(self):
try:
dataset_idx = next(self.dataset_tensor)
except StopIteration:
dataset_idx = next(self.__datasetIdx_iter__)
lb, ub = self.data_source.cumulative_sizes[dataset_idx - 1] if dataset_idx else 0, self.data_source.cumulative_sizes[dataset_idx]
if isinstance(self.data_source, LazyOnlineDataset):
clen = ub - lb
if self.data_source.is_train:
bounds = (lb, lb + self.data_source.vfactor(clen))
else:
bounds = (lb + self.data_source.vfactor(clen), ub)
else:
bounds = (lb, ub)
if self.distributed:
self.generator = torch.Generator()
self.generator.manual_seed(self.epoch)
if self.replacement:
if self._num_samples is None:
size = bounds[1] - bounds[0]
else:
size = self._num_samples // self.num_datasets
rand_tensor = torch.randint(low = bounds[0], high = bounds[1], size = (size,), generator = self.generator).tolist()
else:
rand_tensor = [x + bounds[0] for x in torch.randperm(bounds[1] - bounds[0], generator = self.generator).tolist()]
if self.distributed:
rounded_total = (len(rand_tensor) // environment.WORLD_SIZE) * environment.WORLD_SIZE
rand_tensor = rand_tensor[environment.WORLD_RANK:rounded_total:environment.WORLD_SIZE]
return iter(rand_tensor)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch: int) -> None:
"""
Sets epoch for deterministic runs across DDP.
"""
self.epoch = epoch
return
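# Illustrative sketch (added for exposition; not used by the pipeline). It shows the
# rank-strided sharding LazyRandomSampler applies when WORLD_SIZE > 1: the shuffled
# index list is truncated to a multiple of the world size and every rank keeps every
# WORLD_SIZE-th entry, starting at its own rank.
def _demo_distributed_shard():
  world_size, world_rank = 2, 1 # hypothetical two-rank run, viewed from rank 1
  rand_tensor = [7, 3, 9, 0, 5, 2, 8] # a shuffled index list
  rounded_total = (len(rand_tensor) // world_size) * world_size
  shard = rand_tensor[world_rank:rounded_total:world_size]
  assert rounded_total == 6 and shard == [3, 0, 2]
  return shard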
| 17,820 | 39.410431 | 221 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/torch_bert.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BenchPress language model training and sampling wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import shutil
import multiprocessing
import functools
import humanize
import typing
import pathlib
import datetime
import time
import numpy as np
from absl import flags
import tqdm
from collections import OrderedDict
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.samplers import sample_observers
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.proto import sampler_pb2
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.models.torch_bert import model
from deeplearning.benchpress.models.torch_bert import config
from deeplearning.benchpress.models.torch_bert import optimizer
from deeplearning.benchpress.models.torch_bert import hooks
from deeplearning.benchpress.models.torch_bert.data_generator import torchLMDataGenerator
from deeplearning.benchpress.util import logging as l
from eupy.hermes import client
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"reward_compilation",
-1,
"Select to integrate LLVM compiler into training regime."
"During training, the target token will be asked to fill the first token of the hole."
"If this flag is selected to True, the model will fill entirely the hole, as in inference."
"The fully generated sample will be checked for syntactic correctness with LLVM."
"If the sample compiles, then loss will be zero-ed for that instance, hence will be rewarded."
"[Default: -1]: do not use comp-rewarded training."
"Any integer >= 0: Kick-in this mode after this training step. 0 uses this method from start."
)
flags.DEFINE_boolean(
"validate_per_epoch",
True,
"Calculate and plot validation loss per end of epoch."
)
flags.DEFINE_integer(
"eval_steps_per_epoch",
1000,
"Set validation steps at the end of epoch for validation loss calculation."
)
flags.DEFINE_boolean(
"is_trt",
False,
"Use TensorRT for the sampling model."
)
def worker(src, sequence_length, tokenizer):
src = list(tokenizer.TokenizeString(src))
src = [tokenizer.startToken] + src + [tokenizer.endToken]
src = src + [tokenizer.padToken] * max(0, sequence_length - len(src))
return src[:sequence_length]
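# Illustrative sketch (not in the original file): with sequence_length = 8 and a
# tokenizer that splits a source string into four tokens, worker(src, 8, tokenizer)
# returns [startToken, t0, t1, t2, t3, endToken, padToken, padToken][:8], i.e. the
# result always has exactly `sequence_length` entries, truncated if the source is longer.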
class torchBert(backends.BackendBase):
class BertEstimator(typing.NamedTuple):
"""Named tuple to wrap BERT pipeline."""
model : typing.TypeVar('nn.Module')
data_generator : torchLMDataGenerator
optimizer : typing.Any
scheduler : typing.Any
class SampleBertEstimator(typing.NamedTuple):
"""Named tuple for sampling BERT."""
model : typing.List[typing.TypeVar('nn.Module')]
data_generator : torchLMDataGenerator
@property
def hidden_state_size(self):
# return self.config.architecture.max_position_embeddings * self.config.architecture.hidden_size ## Get hidden state as is.
# return self.config.architecture.hidden_size ## Get probs from prediction logits for existing token.
return ((self.config.architecture.max_position_embeddings // 16) - 1) * ((self.config.architecture.hidden_size // 16) - 1) ## Apply pooling to hidden state.
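    # Worked example (illustrative, assuming max_position_embeddings = 768 and
    # hidden_size = 768): ((768 // 16) - 1) * ((768 // 16) - 1) = 47 * 47 = 2209,
    # which matches the 47 x 47 grid produced per sequence by the
    # AvgPool2d(32, stride = 16) pooling applied in ExtractHidden below.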
def __repr__(self):
return "BenchPress"
def __init__(self, *args, **kwargs):
super(torchBert, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import pytorch
if not pytorch.initialized:
pytorch.initPytorch()
if self.config.architecture.HasField("feature_encoder") and self.config.architecture.feature_encoder:
self.feature_encoder = True
self.feature_tokenizer = tokenizers.FeatureTokenizer.FromArgs(
self.config.architecture.feature_singular_token_thr,
self.config.architecture.feature_max_value_token,
self.config.architecture.feature_token_range
)
self.feature_sequence_length = self.config.architecture.feature_sequence_length
else:
self.feature_encoder = False
self.feature_tokenizer = None
self.feature_sequence_length = None
self.pytorch = pytorch
self.torch = pytorch.torch
self.torch_tpu_available = pytorch.torch_tpu_available
self.torch.manual_seed(self.config.training.random_seed)
self.torch.cuda.manual_seed_all(self.config.training.random_seed)
self.bertAttrs = {}
self.featureAttrs = {}
self.bert_config = None
self.train = None
self.sample = None
self.predict_generator = None
self.sampler = None
self.train_batch_size = None
self.eval_batch_size = None
self.learning_rate = None
self.num_train_steps = None
self.ckpt_path = self.cache.path / "checkpoints"
self.sample_path = self.cache.path / "samples"
self.logfile_path = self.cache.path / "logs"
if self.config.HasField("pre_train_corpus"):
self.pre_logfile_path = self.logfile_path / "pre_train"
self.telemetry = telemetry.TrainingLogger(self.logfile_path)
if self.config.HasField("pre_train_corpus"):
self.pre_telemetry = telemetry.TrainingLogger(self.logfile_path / "pre_train")
self.is_validated = False
self.trained = False
l.logger().info("BERT Model config initialized in {}".format(self.cache.path))
return
def _ConfigModelParams(self, is_sampling):
"""General model hyperparameters initialization."""
self.bertAttrs = {
"vocab_size" : self.tokenizer.vocab_size,
"hidden_size" : self.config.architecture.hidden_size,
"num_hidden_layers" : self.config.architecture.num_hidden_layers,
"num_attention_heads" : self.config.architecture.num_attention_heads,
"intermediate_size" : self.config.architecture.intermediate_size,
"hidden_act" : self.config.architecture.hidden_act,
"hidden_dropout_prob" : self.config.architecture.hidden_dropout_prob,
"attention_probs_dropout_prob" : self.config.architecture.attention_probs_dropout_prob,
"max_position_embeddings" : self.config.architecture.max_position_embeddings,
"type_vocab_size" : self.config.architecture.type_vocab_size,
"initializer_range" : self.config.architecture.initializer_range,
"layer_norm_eps" : self.config.architecture.layer_norm_eps,
"pad_token_id" : self.tokenizer.padToken,
}
if self.feature_encoder:
self.featureAttrs = {
"feature_encoder" : self.feature_encoder,
"feature_sequence_length" : self.feature_sequence_length,
"feature_embedding_size" : self.config.architecture.feature_embedding_size,
"feature_pad_idx" : self.feature_tokenizer.padToken,
"feature_dropout_prob" : self.config.architecture.feature_dropout_prob,
"feature_vocab_size" : len(self.feature_tokenizer),
"feature_num_attention_heads" : self.config.architecture.feature_num_attention_heads,
"feature_transformer_feedforward" : self.config.architecture.feature_transformer_feedforward,
"feature_layer_norm_eps" : self.config.architecture.feature_layer_norm_eps,
"feature_num_hidden_layers" : self.config.architecture.feature_num_hidden_layers,
}
self.bert_config = config.BertConfig.from_dict(
self.bertAttrs,
**self.featureAttrs,
xla_device = self.torch_tpu_available,
reward_compilation = FLAGS.reward_compilation,
is_sampling = is_sampling,
)
return
def _ConfigTrainParams(self,
data_generator: torchLMDataGenerator,
pre_train: bool,
) -> None:
"""
Model parameter initialization for training and validation.
"""
self._ConfigModelParams(is_sampling = False)
self.train_batch_size = self.config.training.batch_size
self.eval_batch_size = self.config.training.batch_size
self.learning_rate = self.config.training.adam_optimizer.initial_learning_rate_micros / 1e6
self.num_warmup_steps = self.config.training.num_warmup_steps if not pre_train else self.config.training.num_prewarmup_steps
self.max_grad_norm = 1.0
self.steps_per_epoch = data_generator.steps_per_epoch
self.current_step = None
self.num_epochs = data_generator.num_epochs
self.num_train_steps = self.steps_per_epoch * self.num_epochs
self.max_eval_steps = FLAGS.max_eval_steps
self.validation_results_file = "val_results.txt"
self.validation_results_path = os.path.join(str(self.logfile_path if not pre_train else self.pre_logfile_path), self.validation_results_file)
m = model.BertForPreTraining(
self.bert_config,
tokenizer = self.tokenizer,
target_lm = "hole" if self.config.training.data_generator.HasField("hole") else "mask"
).to(self.pytorch.offset_device)
if self.pytorch.num_nodes > 1:
distrib.barrier()
m = self.torch.nn.parallel.DistributedDataParallel(
m,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
find_unused_parameters = True,
)
elif self.pytorch.num_gpus > 1:
m = self.torch.nn.DataParallel(m)
opt, lr_scheduler = optimizer.create_optimizer_and_scheduler(
model = m,
num_train_steps = self.num_train_steps,
warmup_steps = self.num_warmup_steps,
learning_rate = self.learning_rate,
)
self.train = torchBert.BertEstimator(
m, data_generator, opt, lr_scheduler
)
l.logger().info(self.GetShortSummary())
return
def _ConfigSampleParams(self,
data_generator: torchLMDataGenerator,
sampler: samplers.Sampler,
) -> None:
"""
Model parameter initialization for inference.
"""
self._ConfigModelParams(is_sampling = True)
self.sampler = sampler
self.temperature = sampler.temperature
if sampler.sequence_length > self.bertAttrs['max_position_embeddings']:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(sampler.sequence_length, self.bertAttrs['max_position_embeddings']))
if FLAGS.is_trt:
mdl = model.BertForPreTrainingTRT
else:
mdl = model.BertForPreTraining
m = mdl(
self.bert_config,
tokenizer = self.tokenizer,
use_categorical = FLAGS.categorical_sampling,
temperature = self.temperature,
target_lm = "hole" if self.config.training.data_generator.HasField("hole") else "mask"
).to(self.pytorch.offset_device)
if self.pytorch.num_nodes > 1:
m = self.torch.nn.parallel.DistributedDataParallel(
m,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
find_unused_parameters = True,
)
elif self.pytorch.num_gpus > 1:
m = self.torch.nn.DataParallel(m)
if FLAGS.is_trt:
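      # NOTE (assumption): this TRT branch expects per-device model replicas, with
      # `d` being the matching list of torch devices for those replicas.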
for mdl_instance, dev in zip(m, d):
mdl_instance.init_engine(self.cache, dev.index, sampler.batch_size, sampler.sequence_length, self.tokenizer.vocab_size, self.config.architecture.max_position_embeddings)
self.sample = torchBert.SampleBertEstimator(m, data_generator)
l.logger().info("Initialized model sampler in {}".format(self.sampler.cache.path))
return
def samplesWithCategorical(self):
return FLAGS.categorical_sampling
def GetEncoderModule(self,
with_checkpoint : bool = False,
without_label_head : bool = True,
**kwargs,
) -> 'torch.nn.Module':
"""Initialize BERT as decoder."""
attrs = copy.copy(self.bertAttrs)
if not with_checkpoint:
attrs = {
k: v for k, v in kwargs.items()
}
elif len(kwargs.keys()) > 0:
l.logger().warn("Encoder module with_checkpoint will not override max position embeddings, pad and vocab size!")
generic_config = config.BertConfig.from_dict(
attrs,
# **self.featureAttrs,
xla_device = self.torch_tpu_available,
reward_compilation = -1,
      # is_sampling is hard-coded here so the returned module can be initialized; this does not prohibit proper re-training.
is_sampling = False,
)
m = model.BertForPreTraining(
generic_config,
tokenizer = self.tokenizer,
target_lm = "hole" if self.config.training.data_generator.HasField("hole") else "mask",
without_label_head = without_label_head,
)
if with_checkpoint:
temp_estimator = torchBert.SampleBertEstimator(m, None)
self.loadCheckpoint(temp_estimator)
return temp_estimator.model
else:
return m
def GetDecoderModule(self,
with_checkpoint : bool = False,
without_label_head : bool = False,
**kwargs,
) -> 'torch.nn.Module':
"""Return internal BERT auto-encoder module."""
attrs = copy.copy(self.bertAttrs)
if not with_checkpoint:
attrs = {
k: v for k, v in kwargs.items()
}
elif len(kwargs.keys()) > 0:
l.logger().warn("Decoder module with_checkpoint will not override max position embeddings, pad and vocab size!")
generic_config = config.BertConfig.from_dict(
attrs,
xla_device = self.torch_tpu_available,
reward_compilation = -1,
      # is_sampling is hard-coded here so the returned module can be initialized; this does not prohibit proper re-training.
is_sampling = False,
is_decoder = True,
add_cross_attention = True,
)
m = copy.deepcopy(model.BertForPreTraining(
generic_config,
tokenizer = self.tokenizer,
target_lm = "hole" if self.config.training.data_generator.HasField("hole") else "mask",
without_label_head = without_label_head,
))
if with_checkpoint:
temp_estimator = torchBert.SampleBertEstimator(m, None)
self.loadCheckpoint(temp_estimator, without_label_head = without_label_head, is_decoder = True)
return temp_estimator.model
else:
return m
def to_device(self, inputs) -> 'torch.Tensor':
"""
Move input tensors to torch device and return them.
"""
inputs['input_ids'] = inputs['input_ids'].to(self.pytorch.device)
inputs['input_mask'] = inputs['input_mask'].to(self.pytorch.device)
inputs['position_ids'] = inputs['position_ids'].to(self.pytorch.device)
inputs['mask_labels'] = inputs['mask_labels'].to(self.pytorch.device)
if 'input_features' in inputs:
inputs['input_features'] = inputs['input_features'].to(self.pytorch.device)
else:
inputs['input_features'] = None
return inputs
def model_step(self,
model : 'torch.nn.Module',
inputs : typing.Dict[str, 'torch.Tensor'],
is_validation : bool = False,
step : int = -1,
extract_hidden_state: bool = False,
) -> typing.Dict[str, 'torch.Tensor']:
"""
Perform a training step on a batch of inputs.
"""
outputs = model(
input_ids = inputs['input_ids'],
attention_mask = inputs['input_mask'],
position_ids = inputs['position_ids'],
input_features = inputs['input_features'],
masked_lm_labels = inputs['mask_labels'],
is_validation = is_validation,
step = step,
extract_hidden_state = extract_hidden_state,
)
return outputs
def sample_model_step(self,
model : typing.List['torch.nn.Module'],
inputs : typing.Dict[str, 'torch.Tensor'],
iteration : int = None,
extract_hidden_state : bool = False,
) -> typing.Dict[str, typing.List[typing.List[int]]]:
"""
Specialized forward function.
Dispatches model replicas across all GPUs, one process each.
Inputs must be three-dimensional:
workload_size x batch_size x sequence_length
"""
start = time.time()
outputs = {
'generated_samples': [], 'sample_indices': [],
'input_ids': [], 'masked_lm_lengths': []
}
if extract_hidden_state:
outputs['hidden_state'] = []
if iteration is not None:
desc = "Sampling iteration: {}".format(iteration)
else:
desc = "Sampling"
wload_size = len(inputs['input_ids']) * len(inputs['input_ids'][0])
inputs = self.to_device(inputs)
if environment.WORLD_RANK == 0:
bar = tqdm.auto.trange(wload_size, desc=desc, leave = False, position = 0)
samples, sample_indices = model(
workload = (
inputs['input_ids'],
inputs['input_mask'],
inputs['position_ids'],
inputs['input_features'],
),
bar = bar if environment.WORLD_RANK == 0 else None,
)
outputs['generated_samples'] = samples.detach()
outputs['sample_indices'] = sample_indices.detach()
outputs['input_ids'] = self.torch.reshape(inputs['input_ids'], tuple(samples.shape))
outputs['masked_lm_lengths'] = self.torch.reshape(inputs['masked_lm_lengths'].to(self.pytorch.device), (samples.shape[0], -1))
if extract_hidden_state:
outputs['hidden_state'] = self.ExtractHidden(samples)
outputs['generated_samples'] = list(outputs['generated_samples'].cpu().numpy())
outputs['sample_indices'] = list(outputs['sample_indices'].cpu().numpy())
outputs['input_ids'] = list(outputs['input_ids'].cpu().numpy())
outputs['masked_lm_lengths'] = list(outputs['masked_lm_lengths'].cpu().numpy())
if extract_hidden_state:
outputs['hidden_state'] = list(outputs['hidden_state'].cpu().numpy())
end = time.time()
return outputs, end-start
def PreTrain(self,
corpus,
test_sampler: typing.Optional[samplers.Sampler] = None,
**unused_kwargs
) -> None:
"""
Pre-training entry point.
"""
self.Train(corpus, test_sampler, pre_train = True)
return
def Train(self,
corpus,
test_sampler : typing.Optional[samplers.Sampler] = None,
pre_train : bool = False,
**unused_kwargs
) -> None:
"""
Main training entry point.
"""
if FLAGS.only_sample:
del self.train
self.train = None
return
self._ConfigTrainParams(
torchLMDataGenerator.TrainMaskLMBatchGenerator(
corpus, self.config.training,
self.cache.path,
self.config.training.num_pretrain_steps if pre_train else None,
pre_train,
self.feature_encoder,
self.feature_tokenizer,
self.feature_sequence_length,
), pre_train
)
self.current_step = self.loadCheckpoint(self.train, pre_train = pre_train)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if self.current_step >= 0:
l.logger().info("Loaded checkpoint step {}".format(self.current_step))
self.current_step = max(0, self.current_step)
if self.current_step < self.num_train_steps:
self.train.model.zero_grad()
## Set batch size in case of TPU training or distributed training.
if self.torch_tpu_available:
total_train_batch_size = self.train_batch_size * self.pytorch.torch_xla.xrt_world_size()
else:
total_train_batch_size = (
self.train_batch_size
* (self.torch.distributed.get_world_size() if self.pytorch.num_nodes > 1 else 1)
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
self.train.data_generator.dataloader, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
else:
loader = self.train.data_generator.dataloader
# Get dataloader iterator and setup hooks.
batch_iterator = iter(loader)
if self.is_world_process_zero():
train_hook = hooks.tensorMonitorHook(
self.logfile_path if not pre_train else self.pre_logfile_path, self.current_step, min(self.steps_per_epoch, FLAGS.monitor_frequency)
)
if FLAGS.reward_compilation >= 0 and not pre_train:
correct_sample_obs = sample_observers.SamplesDatabaseObserver(
self.logfile_path / "correct_samples.db"
)
else:
correct_sample_obs = None
total_steps = self.config.training.num_pretrain_steps if pre_train else self.config.training.num_train_steps
l.logger().info(
"Splitting {} steps into {} equivalent epochs, {} steps each. Rejected {} redundant step(s)".format(
self.num_train_steps, self.num_epochs,
self.steps_per_epoch, total_steps - self.num_train_steps
)
)
try:
self.train.model.train()
epoch_iter = tqdm.auto.trange(self.num_epochs, desc="Epoch", leave = False) if self.is_world_process_zero() else range(self.num_epochs)
for epoch in epoch_iter:
# In distributed mode, calling the set_epoch() method at
# the beginning of each epoch before creating the DataLoader iterator
# is necessary to make shuffling work properly across multiple epochs.
# Otherwise, the same ordering will be always used.
if self.pytorch.num_nodes > 1:
loader.sampler.set_epoch(epoch)
if epoch < self.current_step // self.steps_per_epoch:
continue # Stupid bar won't resume.
batch_iter = tqdm.auto.trange(self.steps_per_epoch, desc="Batch", leave = False) if self.is_world_process_zero() else range(self.steps_per_epoch)
for step in batch_iter:
if self.is_world_process_zero():
start = datetime.datetime.utcnow()
try:
inputs = next(batch_iterator)
except StopIteration:
# dataloader has different len() than steps_per_epoch.
# This is the easiest way to infinite-loop dataloaders in pytorch.
batch_iterator = iter(loader)
inputs = next(batch_iterator)
self.current_step += 1
# Move inputs to torch device.
inputs = self.to_device(inputs)
# Run model step on batch
step_out = self.model_step(self.train.model, inputs, step = epoch * self.steps_per_epoch + step)
# Collect losses and backpropagate
total_loss = step_out['total_loss'].mean()
total_loss.backward()
self.torch.nn.utils.clip_grad_norm_(self.train.model.parameters(), self.max_grad_norm)
if self.torch_tpu_available:
self.pytorch.torch_xla.optimizer_step(self.train.optimizer)
else:
self.train.optimizer.step()
self.train.scheduler.step()
## Collect tensors for logging.
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
total_loss = [self.torch.zeros(tuple(step_out['total_loss' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
masked_lm_loss = [self.torch.zeros(tuple(step_out['masked_lm_loss' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
# next_sentence_loss = [self.torch.zeros(tuple(step_out['next_sentence_loss'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
masked_lm_lengths = [self.torch.zeros(tuple(inputs ['masked_lm_lengths' ].shape), dtype = self.torch.int64 ).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(masked_lm_loss, step_out["masked_lm_loss"])
# self.torch.distributed.all_gather(next_sentence_loss, step_out["next_sentence_loss"])
self.torch.distributed.all_gather(masked_lm_lengths, inputs['masked_lm_lengths'].to(self.pytorch.device))
self.torch.distributed.all_gather(total_loss, step_out['total_loss'])
else:
total_loss = step_out['total_loss' ].unsqueeze(0).cpu()
masked_lm_loss = step_out['masked_lm_loss' ].unsqueeze(0).cpu()
# next_sentence_loss = step_out['next_sentence_loss'].unsqueeze(0).cpu()
masked_lm_lengths = inputs['masked_lm_lengths' ].cpu()
if self.is_world_process_zero():
exec_time_ms = int(round((datetime.datetime.utcnow() - start).total_seconds() * 1000))
if FLAGS.reward_compilation >= 0 and FLAGS.reward_compilation <= epoch * self.steps_per_epoch + step and not pre_train:
## Logging when compiler reward is enabled in training.
## This is not compatible with using DDP, and basically compiler-rewarded training is deprecated and proven to be wrong and inefficient.
correct_samples = [(x, y) for en, (x, y) in enumerate(zip(inputs['input_ids'].cpu().numpy(), step_out['generated_samples'].cpu().numpy())) if step_out['compile_status'][en] == 1]
for s in correct_samples:
feature_vector = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(s[1]))
correct_sample_obs.OnSample(model_pb2.Sample(
train_step = self.current_step,
sample_feed = self.tokenizer.tokensToString(s[0], ignore_token = self.tokenizer.padToken).replace("\\n", "\n"),
text = self.tokenizer.tokensToString(s[1], ignore_token = self.tokenizer.padToken).replace("\\n", "\n"),
encoded_text = ",".join([str(t) for t in s[1]]),
sample_indices = '',
encoded_sample_indices = '',
sample_time_ms = int(round(exec_time_ms / self.train_batch_size)),
feature_vector = "\n".join(["{}:{}".format(k, v) for (k, v) in feature_vector.items()]),
num_tokens = len([x for x in s[1] if x != self.tokenizer.padToken]),
categorical_sampling = False,
compile_status = True,
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
)
if not pre_train:
## Fine-tuning logging.
train_hook.step(
masked_lm_loss = sum([ml.mean().item() for ml in masked_lm_loss]) / len(masked_lm_loss),
# next_sentence_loss = sum([nsl.mean().item() for nsl in next_sentence_loss]) / len(next_sentence_loss),
total_loss = sum([tl.mean().item() for tl in total_loss]) / len(total_loss),
learning_rate = self.train.scheduler.get_last_lr()[0],
num_correct_samples = (correct_sample_obs.sample_id if correct_sample_obs is not None else None),
batch_avg_hole_len = sum([sum([int(l) for l in b if l != -1]) / len([int(l) for l in b if l != -1])
for b in masked_lm_lengths]) / len(masked_lm_lengths),
batch_execution_time_ms = exec_time_ms,
time_per_sample_ms = exec_time_ms / self.train_batch_size,
)
else:
## Pre-training logging.
train_hook.step(
masked_lm_loss = sum([ml.mean().item() for ml in masked_lm_loss]) / len(masked_lm_loss),
# next_sentence_loss = sum([nsl.mean().item() for nsl in next_sentence_loss]) / len(next_sentence_loss),
total_loss = sum([tl.mean().item() for tl in total_loss]) / len(total_loss),
learning_rate = self.train.scheduler.get_last_lr()[0],
batch_avg_hole_len = sum([sum([int(l) for l in b if l != -1]) / len([int(l) for l in b if l != -1])
for b in masked_lm_lengths]) / len(masked_lm_lengths),
batch_execution_time_ms = exec_time_ms,
time_per_sample_ms = exec_time_ms / self.train_batch_size,
)
self.train.model.zero_grad()
if self.current_step == 0:
l.logger().info("Starting Loss: {}".format(sum([tl.mean().item() for tl in total_loss]) / len(total_loss)))
# End of Epoch
self.saveCheckpoint(self.train, pre_train)
if self.is_world_process_zero():
set_mail = "Epoch {} Loss: {}\n".format(self.current_step // self.steps_per_epoch, train_hook.epoch_loss)
l.logger().info("Epoch {} Loss: {}".format(self.current_step // self.steps_per_epoch, train_hook.epoch_loss))
if FLAGS.validate_per_epoch > 0 and self.train.data_generator.config.validation_split > 0:
val_ml_loss = self.Validate(per_epoch = True, pre_train = pre_train)
if self.is_world_process_zero():
train_hook.end_epoch(
val_masked_lm_loss = val_ml_loss,
# val_next_sentence_loss = val_nsp_loss,
val_total_loss = val_ml_loss # + val_nsp_loss,
)
set_mail += "Validation Loss: {}\n".format(val_ml_loss)
elif self.is_world_process_zero():
train_hook.end_epoch()
if FLAGS.notify_me:
client.getClient().send_message("clgen:torch_bert", set_mail)
if self.torch_tpu_available:
self.pytorch.torch_xla.master_print(self.pytorch.torch_xla_met.metrics_report())
if FLAGS.sample_per_epoch > 0:
if self.is_world_process_zero():
sampler, observers = self._getTestSampler(test_sampler, self.config.training.sequence_length)
self.InitSampling(sampler, self.config.training.random_seed)
for _ in range(FLAGS.sample_per_epoch):
start_time = datetime.datetime.utcnow()
self.InitSampleBatch(sampler)
org_inputs, input_ids, samples, indices = self.SampleNextIndices()
end_time = datetime.datetime.utcnow()
for org, inp, sample, idxs in zip(org_inputs, input_ids, samples, indices):
try:
stdout = opencl.Compile(self.tokenizer.ArrayToCode(sample))
compile_flag = 1
except ValueError:
compile_flag = 0
feature_vector = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(sample))
sample_proto = model_pb2.Sample(
train_step = self.current_step,
sample_feed = sampler.start_text,
original_input = self.tokenizer.tokensToString(org, with_formatting = True, ignore_token = self.tokenizer.padToken),
text = self.tokenizer.tokensToString(sample, with_formatting = True, ignore_token = self.tokenizer.padToken).replace("\\n", "\n"),
encoded_text = ",".join([str(t) for t in sample]),
sample_indices = ','.join([self.tokenizer.decoder[idx].replace('\n', '\\n') for idx in idxs]).replace('\n', '\\n'),
encoded_sample_indices = ','.join([str(idx) for idx in idxs]),
sample_time_ms = int(round(1000 * ((end_time - start_time) / sampler.batch_size).total_seconds())),
feature_vector = "\n".join(["{}:{}".format(k, v) for (k, v) in feature_vector.items()]),
num_tokens = len(sample),
compile_status = compile_flag,
categorical_sampling = self.samplesWithCategorical(),
date_added = datetime.datetime.utcnow().strftime("%m/%d/%Y, %H:%M:%S"),
)
for obs in observers:
obs.OnSample(sample_proto)
distrib.barrier()
except KeyboardInterrupt:
pass
if not FLAGS.force_eval:
_ = self.Validate(pre_train = pre_train)
if FLAGS.force_eval and not self.is_validated:
_ = self.Validate(pre_train = pre_train)
del self.train
self.train = None
return
def TrainBatch(self, inputs) -> None:
raise NotImplementedError
return
def Validate(self, per_epoch = False, pre_train = False) -> float:
"""
Validation function for torch BERT.
Arguments:
per_epoch: Set True if is called at the end of (pre)training epoch.
If true, no analytical results are appended to database.
Instead, only loss is monitored and plotted.
"""
if ( (per_epoch and FLAGS.eval_steps_per_epoch <= 0)
or (not per_epoch and FLAGS.max_eval_steps <= 0)
or self.config.training.data_generator.validation_split == 0):
l.logger().info("Skipping BERT Validation.")
      return None
avg_mask_loss = []
avg_nsp_loss = []
preds = None
label_ids = None
self.train.model.eval()
for set_idx, (set_name, dataloader) in enumerate(self.train.data_generator.eval_dataloaders()):
l.logger().info("BERT Validation on {}".format(set_name))
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
dataloader, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
else:
loader = dataloader
if self.pytorch.num_nodes > 1:
loader.sampler.set_epoch(set_idx)
if not per_epoch and self.is_world_process_zero():
val_hook = hooks.validationSampleHook(
url = "sqlite:///{}".format(str((self.logfile_path if not pre_train else self.pre_logfile_path) / "validation_samples.db")),
tokenizer = self.tokenizer,
model_step = self.current_step
)
eval_iterator = iter(loader)
eval_steps = FLAGS.max_eval_steps if not per_epoch else FLAGS.eval_steps_per_epoch
try:
      eval_iter = tqdm.auto.trange(eval_steps, desc="Eval step", leave = False) if self.is_world_process_zero() else range(eval_steps)
for step in eval_iter:
try:
inputs = next(eval_iterator)
except StopIteration:
eval_iterator = iter(loader)
inputs = next(eval_iterator)
inputs = self.to_device(inputs)
with self.torch.no_grad():
step_out = self.model_step(self.train.model, inputs, is_validation = True)
if not per_epoch and self.is_world_process_zero():
val_hook.step(inputs, step_out)
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
masked_lm_loss = [self.torch.zeros(tuple(step_out['masked_lm_loss' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
# next_sentence_loss = [self.torch.zeros(tuple(step_out['next_sentence_loss'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(masked_lm_loss, step_out["masked_lm_loss"])
# self.torch.distributed.all_gather(next_sentence_loss, step_out["next_sentence_loss"])
else:
masked_lm_loss = step_out['masked_lm_loss' ].cpu()
# next_sentence_loss = step_out['next_sentence_loss'].cpu()
avg_mlm_loss = [x.mean().item() for x in masked_lm_loss]
avg_mask_loss.append(sum(avg_mlm_loss) / len(avg_mlm_loss))
# avg_nsp_loss.append(next_sentence_loss.mean().item())
except KeyboardInterrupt:
pass
if self.is_world_process_zero() and avg_mask_loss and not per_epoch:
val_hook.final(set_name, sum(avg_mask_loss) / len(avg_mask_loss))
if self.pytorch.torch_tpu_available:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
self.pytorch.torch_xla_model.master_print(self.pytorch.torch_xla_met.metrics_report())
if not per_epoch:
self.is_validated = True
try:
return sum(avg_mask_loss) / len(avg_mask_loss)
except ZeroDivisionError:
      return float('inf')
def InitSampling(self,
sampler : samplers.Sampler,
seed : typing.Optional[int] = None,
corpus = None,
) -> None:
"""This is called only once. Performs basic initialization of sampling"""
sample_batch_size = sampler.batch_size
if self.pytorch.num_nodes == 1 and self.pytorch.num_gpus > 1 and sample_batch_size < self.pytorch.num_gpus:
l.logger().warn("Sampler's batch size {}, too small for {} GPUs. Increasing to {}".format(
sample_batch_size,
self.pytorch.num_gpus,
self.pytorch.num_gpus
)
)
sample_batch_size = self.pytorch.num_gpus
data_generator = torchLMDataGenerator.SampleMaskLMBatchGenerator(
self.config.training, sampler, self.tokenizer, self.config.training.random_seed, sample_batch_size,
self.config.architecture.max_position_embeddings, self.cache.path, corpus,
self.feature_encoder,
self.feature_tokenizer,
self.feature_sequence_length,
)
self._ConfigSampleParams(data_generator, sampler)
ckpt_step = self.loadCheckpoint(self.sample)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if ckpt_step >= 0:
l.logger().info("Loaded checkpoint step {}".format(ckpt_step))
self.step_inputs = None
self.loader = None
self.pred_iterator = None
l.logger().info("Initialized model samples in {}".format(self.sample_path / self.sampler.hash))
return
def InitSampleBatch(self, sampler: samplers.Sampler, **kwargs) -> None:
"""Batch-specific initialization. Called once when a new batch is going to be generated"""
workload_size = kwargs.get('workload_size', None)
if sampler.is_live:
# For live sampling, start text must be re-instated at each iteration.
self.sample = self.sample._replace(
data_generator = torchLMDataGenerator.SampleMaskLMBatchGenerator(
self.config.training, sampler, self.tokenizer, 0, sampler.batch_size,
self.config.architecture.max_position_embeddings, self.cache.path,
self.feature_encoder,
self.feature_tokenizer,
self.feature_sequence_length,
)
)
self.step_inputs, self.loader, self.pred_iterator = None, None, None
if self.loader is None:
if self.torch_tpu_available:
self.loader = self.pytorch.torch_ploader.ParallelLoader(
self.sample.data_generator.dataloader, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
else:
self.loader = self.sample.data_generator.dataloader
if not sampler.is_active:
if self.pred_iterator is None:
self.pred_iterator = iter(self.loader)
try:
inputs = next(self.pred_iterator)
except StopIteration:
self.pred_iterator = iter(self.loader)
inputs = next(self.pred_iterator)
if workload_size is None:
## I think this dictionary holds tensors of the following size:
## [num_gpus x batch_size x seq_len] if only one node works.
## Otherwise, [1 x batch_size x seq_len] since each process manages its own GPU.
padded_wsize = self.pytorch.num_gpus if environment.WORLD_SIZE == 1 and self.pytorch.num_gpus > 1 else 1
else:
## If a workload is specified, then after you pad to the dimension of GPU or num processes
## Divide the size by GPU size or num processes size.
padded_wsize = (
(max(1, workload_size // (self.pytorch.num_gpus * sampler.batch_size))) * self.pytorch.num_gpus
if environment.WORLD_SIZE == 1 and self.pytorch.num_gpus > 1
else max(1, (workload_size // (self.pytorch.num_nodes * sampler.batch_size)) * self.pytorch.num_nodes))
self.step_inputs = {
x: inputs[x].unsqueeze(0).repeat(padded_wsize, 1, 1)
for x in inputs
}
# This loop below is purely for proper printing reasons:
sample_text = set(
[self.tokenizer.tokensToString(
seq.cpu().numpy(), ignore_token = self.tokenizer.padToken
) for seq in inputs['input_ids']]
)
for seq in sample_text:
self.sampler.setStartText(seq)
self.sampler.Specialize(self.tokenizer)
return
def SampleNextIndices(
self, *unused_args, **unused_kwargs
) -> typing.Tuple[np.array, np.array, np.array, np.array]:
"""Called iteratively to build a single batch of samples, until termination criteria stops calling"""
del unused_kwargs
del unused_args
if self.sample is None:
raise ValueError("Bert sampler has not been initialized.")
with self.torch.no_grad():
if self.sampler.is_active:
try:
return self.sample.data_generator.ActiveGeneration(self, self.sample)
except StopIteration:
raise StopIteration
else:
if self.sampler.is_live and self.feature_encoder:
batch_features = []
for _ in range(self.sampler.batch_size):
feat_space = ""
while feat_space not in {"GreweFeatures", "AutophaseFeatures", "InstCountFeatures"}:
feat_space = input("Select feature space: [g/a/i]/[GreweFeatures/AutophaseFeatures/InstCountFeatures]: ")
if feat_space == "a":
feat_space = "AutophaseFeatures"
elif feat_space == "g":
feat_space = "GreweFeatures"
elif feat_space == "i":
feat_space = "InstCountFeatures"
input_features = {
k: -1 for k in extractor.extractors[feat_space].KEYS()
}
for k in input_features.keys():
if k not in {"F2:coalesced/mem", "F4:comp/mem"}:
prompt = input("{}: ".format(k))
                if prompt == "":
val = 0
else:
val = int(prompt)
input_features[k] = val
batch_features.append(
self.feature_tokenizer.TokenizeFeatureVector(input_features, feat_space, self.feature_sequence_length)
)
self.step_inputs['input_features'] = self.torch.LongTensor(batch_features).unsqueeze(0)
elif self.feature_encoder and 'input_features' not in self.step_inputs:
feat_space = "GreweFeatures"
batch_features = []
for _ in range(self.sampler.batch_size):
input_features = {
k: -1 for k in extractor.extractors[feat_space].KEYS()
}
for k in input_features.keys():
if k not in {"F2:coalesced/mem", "F4:comp/mem"}:
input_features[k] = int(np.random.poisson(8))
print(input_features)
try:
input_features["F2:coalesced/mem"] = input_features["coalesced"] / input_features["mem"]
except ZeroDivisionError:
input_features["F2:coalesced/mem"] = 0
try:
input_features["F4:comp/mem"] = input_features["comp"] / input_features["mem"]
except ZeroDivisionError:
input_features["F4:comp/mem"] = 0
batch_features.append(
self.feature_tokenizer.TokenizeFeatureVector(input_features, feat_space, self.feature_sequence_length)
)
self.step_inputs['input_features'] = self.torch.LongTensor(batch_features).unsqueeze(0)
step_out, time = self.sample_model_step(
self.sample.model,
self.step_inputs,
)
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
generated_samples = [self.torch.zeros(tuple(step_out['generated_samples'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
sample_indices = [self.torch.zeros(tuple(step_out['sample_indices' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(generated_samples, step_out["generated_samples"])
self.torch.distributed.all_gather(sample_indices, step_out["sample_indices"])
raise NotImplementedError("This will not work because generated_samples and sample indices are lists and not tensors")
else:
generated_samples = step_out['generated_samples']
sample_indices = step_out['sample_indices']
return (
self.step_inputs['original_input'].cpu().view(-1, self.step_inputs['original_input'].shape[2]).numpy(),
self.step_inputs['input_ids'].cpu().view(-1, self.sampler.sequence_length).numpy(),
generated_samples,
sample_indices
)
def EncodeInputs(self, srcs: typing.List[np.array]) -> typing.List[np.array]:
"""
According to each LM's rules, encode a list of source codes to encoded arrays
ready to be fed into the model.
Args:
src: List of source codes.
Returns:
A list of encoded numpy arrays.
"""
sequence_length = self.config.architecture.max_position_embeddings
pool = multiprocessing.Pool(min(os.cpu_count(), len(srcs)))
encoded = []
it = pool.imap(functools.partial(worker, sequence_length = sequence_length, tokenizer = self.tokenizer), srcs, chunksize = 256)
for enc in tqdm.tqdm(it, total = len(srcs), desc = "Encode Inputs", leave = False):
encoded.append(enc)
pool.close()
return encoded
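  # Illustrative usage (assumption, not part of the original file):
  #   encoded = lm.EncodeInputs(["kernel void A(global int* a) { a[get_global_id(0)] = 0; }"])
  #   hidden = lm.ExtractHidden(encoded)
  # where `lm` is an initialized torchBert instance with a loaded sampling model.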
def ExtractHidden(self, encoded: typing.List[np.array]) -> np.array:
"""
Extract hidden state from backend language model.
Args:
encoded: A list of input ids that will be provided to the LM.
Has to be two-dimensional: [num_sequences X sequence_length]
Returns:
The hidden state of the provided inputs.
"""
if not isinstance(encoded, self.torch.Tensor):
workload_input_ids = self.torch.LongTensor(encoded).to(self.pytorch.device)
else:
workload_input_ids = encoded
hidden_states = self.torch.zeros(
[workload_input_ids.shape[0], self.hidden_state_size],
dtype = self.torch.float32,
)
bar = tqdm.tqdm(total = workload_input_ids.shape[0], desc = "Extract Hidden State", leave = False)
with self.torch.no_grad():
for idx in range(0, workload_input_ids.shape[0], self.sampler.batch_size):
input_ids = workload_input_ids[idx : idx + self.sampler.batch_size].to(self.pytorch.device)
input_mask = (input_ids != self.tokenizer.padToken)
position_ids = self.torch.arange(input_ids.shape[-1], dtype = self.torch.int64).unsqueeze(0).repeat(input_ids.shape[0], 1).to(self.pytorch.device)
prediction_scores, hidden_state = self.sample.model(
input_ids = input_ids,
attention_mask = input_mask,
position_ids = position_ids,
extract_hidden_state = True,
)
real_batch_size = input_ids.shape[0]
###########################################
"""
TODO Research: Hidden states are collected from prediction_scores and have a shape of [seq_length x 1].
At each index lies the prob of the respective token in the input sequence.
"""
# sequence_length = workload_input_ids.shape[-1]
# hidden_states[idx: idx + real_batch_size] = prediction_scores[:, range(sequence_length), input_ids][range(real_batch_size), range(real_batch_size)].detach().cpu()
"""
TODO Research: Hidden states are collected from the encoder's outputs [seq_len x hidden_size]. Flatten everything out.
"""
# hidden_states[idx: idx + real_batch_size] = hidden_state.reshape((real_batch_size, -1)).detach().cpu()
"""
TODO Research: Hidden states are collected from the encoder's input (seq_len x hidden_size) and then they are avg pooled to easily reduce dimensions.
"""
hidden_states[idx: idx + real_batch_size] = self.torch.nn.AvgPool2d(32, stride = 16, count_include_pad = False)(hidden_state.detach()).reshape(real_batch_size, -1).cpu() # hidden_state.reshape((real_batch_size, -1)).detach().cpu()
###########################################
bar.update(real_batch_size)
return hidden_states
def _getTestSampler(self, test_sampler, sequence_length):
if test_sampler is None or test_sampler.is_live or test_sampler.is_active:
if self.config.training.data_generator.HasField("hole"):
sampler_str = [
"start_text: \"[START]kernel void A([HOLE]}[END]\"",
"batch_size: 2",
"sequence_length: {}".format(sequence_length),
"temperature_micros: 700000",
]
elif self.config.training.data_generator.HasField("mask_seq"):
sampler_str = [
"start_text: \"[START]kernel void A(" + ''.join(["[MASK]"] * (sequence_length - 7)) + "}[END]\"",
"batch_size: 2",
"sequence_length: {}".format(sequence_length),
"temperature_micros: 700000",
]
mock_config = pbutil.FromString('\n'.join(sampler_str), sampler_pb2.Sampler())
sampler = samplers.Sampler(mock_config, sample_db_name = "epoch_samples.db")
else:
sampler = test_sampler
if sampler.isFixedStr:
sampler.Specialize(self.tokenizer)
observers = [sample_observers.PrintSampleObserver()]
if FLAGS.store_samples_db:
observers.append(sample_observers.SamplesDatabaseObserver(
self.sample_path / sampler.hash / sampler.sample_db_name
)
)
sampler.symlinkModelDB(
self.sample_path / sampler.hash,
self.hash
)
return sampler, observers
def saveCheckpoint(self, estimator, pre_train):
"""
Saves model, scheduler, optimizer checkpoints per epoch.
"""
if self.is_world_process_zero():
ckpt_comp = lambda x: self.ckpt_path / "{}{}-{}.pt".format("pre_" if pre_train else "", x, self.current_step)
if self.torch_tpu_available:
if self.pytorch.torch_xla_model.rendezvous("saving_checkpoint"):
self.pytorch.torch_xla_model.save(estimator.model, ckpt_comp("model"))
self.pytorch.torch_xla.rendezvous("saving_optimizer_states")
self.pytorch.torch_xla.save(estimator.optimizer.state_dict(), ckpt_comp("optimizer"))
self.pytorch.torch_xla.save(estimator.scheduler.state_dict(), ckpt_comp("scheduler"))
else:
if isinstance(estimator.model, self.torch.nn.DataParallel):
self.torch.save(estimator.model.module.state_dict(), ckpt_comp("model"))
else:
self.torch.save(estimator.model.state_dict(), ckpt_comp("model"))
self.torch.save(estimator.optimizer.state_dict(), ckpt_comp("optimizer"))
self.torch.save(estimator.scheduler.state_dict(), ckpt_comp("scheduler"))
with open(self.ckpt_path / "checkpoint.meta", 'a') as mf:
mf.write("{}train_step: {}\n".format("pre_" if pre_train else "", self.current_step))
if pre_train:
mf = open(self.ckpt_path / "checkpoint.meta", 'r')
cf = mf.read()
mf.close()
if "train_step: 0" not in cf:
with open(self.ckpt_path / "checkpoint.meta", 'w') as mf:
mf.write(cf + "train_step: 0\n")
for x in {"model"}:
shutil.copyfile(str(ckpt_comp(x)), str(self.ckpt_path / "{}-0.pt".format(x)))
return
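  # Illustrative layout of checkpoints/checkpoint.meta after a few fine-tuning epochs
  # (step numbers are made up):
  #   train_step: 5000
  #   train_step: 10000
  #   train_step: 15000
  # loadCheckpoint() parses these lines to locate the most recent, or a user-selected,
  # checkpoint step.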
def loadCheckpoint(self,
estimator: typing.Union[
typing.TypeVar('torchBert.BertEstimator'),
typing.TypeVar('torchBert.SampleBertEstimator')
],
pre_train : bool = False,
without_label_head : bool = False,
is_decoder : bool = False,
) -> int:
"""
Load model checkpoint. Loads either most recent epoch, or selected checkpoint through FLAGS.
"""
if not (self.ckpt_path / "checkpoint.meta").exists():
return -1
with open(self.ckpt_path / "checkpoint.meta", 'r') as mf:
if pre_train:
key = "pre_train_step"
exclude = "None"
else:
key = "train_step"
exclude = "pre_train_step"
get_step = lambda x: int(x.replace("\n", "").replace("{}: ".format(key), ""))
lines = mf.readlines()
entries = set({get_step(x) for x in lines if key in x and exclude not in x})
if FLAGS.select_checkpoint_step == -1 or pre_train:
ckpt_step = max(entries)
else:
if FLAGS.select_checkpoint_step in entries:
ckpt_step = FLAGS.select_checkpoint_step
else:
raise ValueError("{} not found in checkpoint folder.".format(FLAGS.select_checkpoint_step))
ckpt_comp = lambda x: self.ckpt_path / "{}{}-{}.pt".format("pre_" if pre_train else "", x, ckpt_step)
if isinstance(estimator.model, self.torch.nn.DataParallel):
try:
if without_label_head:
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if "cls.predictions." not in k:
new_state_dict[k] = v
estimator.model.module.load_state_dict(new_state_dict, strict = False)
else:
estimator.model.module.load_state_dict(
self.torch.load(ckpt_comp("model")),
strict = False if is_decoder else True,
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
if not without_label_head or (without_label_head and "cls.predictions." not in name):
new_state_dict[name] = v
estimator.model.module.load_state_dict(new_state_dict, strict = False if is_decoder or without_label_head else True)
else:
try:
if without_label_head:
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if "cls.predictions." not in k:
new_state_dict[k] = v
estimator.model.load_state_dict(new_state_dict, strict = False)
else:
estimator.model.load_state_dict(
self.torch.load(ckpt_comp("model")),
strict = False if is_decoder else True,
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model"), map_location = lambda storage, loc: storage).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
if not without_label_head or (without_label_head and "cls.predictions." not in name):
new_state_dict[name] = v
estimator.model.load_state_dict(new_state_dict, strict = False if without_label_head or is_decoder else True)
if isinstance(estimator, torchBert.BertEstimator):
if estimator.optimizer is not None and estimator.scheduler is not None and ckpt_step > 0:
estimator.optimizer.load_state_dict(
self.torch.load(ckpt_comp("optimizer"), map_location=self.pytorch.device)
)
estimator.scheduler.load_state_dict(
self.torch.load(ckpt_comp("scheduler"), map_location=self.pytorch.device)
)
estimator.model.eval()
return ckpt_step
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on
several machines, this is only going to be :obj:`True` for one process).
"""
if self.torch_tpu_available:
return self.pytorch.torch_xla_model.is_master_ordinal(local=False)
elif self.pytorch.num_nodes > 1:
return self.torch.distributed.get_rank() == 0
else:
return True
def count_parameters(self, model) -> int:
"""
Count and print the number of trainable parameters for the model.
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def find_unused_parameters(self, model: 'torch.nn.Module') -> None:
"""
Find parameters that are unused for loss computation.
"""
param_names = []
for name, param in model.named_parameters():
if param.grad is None:
param_names.append(name)
if param_names:
l.logger().warn("Unused parameters:\n{}".format('\n'.join(param_names)))
else:
l.logger().info("No unused parameters found for grad computation.")
return
def GetShortSummary(self) -> str:
return (
"\n"
f"{model_pb2.NetworkArchitecture.Backend.Name(self.config.architecture.backend)} "
"network: "
"\n"
f" Total trainable parameters: {humanize.intcomma(self.count_parameters(self.train.model))}"
"\n"
f" hidden_size: {self.config.architecture.hidden_size}"
"\n"
f" #hidden_layers: {self.config.architecture.num_hidden_layers}"
"\n"
f" #attention_heads: {self.config.architecture.num_attention_heads}"
"\n"
f" intermediate_size: {self.config.architecture.intermediate_size}"
"\n"
f" hidden_act: {self.config.architecture.hidden_act}"
"\n"
) + (self.train.data_generator.GetShortSummary() if self.train else "")
def InferenceManifest(self) -> typing.List[pathlib.Path]:
"""Return the list of files which are required for model inference.
Returns:
A list of absolute paths.
"""
    # The PyTorch checkpoint files.
paths = [ path.absolute() for path in (self.cache.path / "checkpoints").iterdir() ]
paths += [ path.absolute() for path in (self.cache.path / "logs").iterdir() ]
paths += [ path.absolute() for path in (self.cache.path / "samples").iterdir() ]
# paths += self.data_generator.InferenceManifest # TODO
return sorted(paths)
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/activations.py
# coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from deeplearning.benchpress.util.pytorch import torch
def swish(x):
return x * torch.sigmoid(x)
def _gelu_python(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
This is now written in C in torch.nn.functional
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
""" Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
Also see https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
if torch.__version__ < "1.4.0":
gelu = _gelu_python
else:
gelu = torch.nn.functional.gelu
def gelu_fast(x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
ACT2FN = {
"relu": torch.nn.functional.relu,
"swish": swish,
"gelu": gelu,
"tanh": torch.tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
}
def get_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
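# Illustrative usage (not part of the original file):
#   act = get_activation("gelu_new")
#   y = act(torch.randn(4, 8)) # element-wise activation, output has the same shape
# Unknown names raise a KeyError listing the supported keys of ACT2FN.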
BenchPress | BenchPress-master/deeplearning/benchpress/models/torch_bert/optimizer.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import typing
from deeplearning.benchpress.util.pytorch import torch
def create_optimizer_and_scheduler(model,
num_train_steps: int,
warmup_steps: int,
learning_rate: float,
adam_beta1 = 0.9,
adam_beta2 = 0.999,
adam_epsilon = 1e-6,
weight_decay = 0.01,
):
"""
Setup the optimizer and the learning rate scheduler.
  We provide a reasonable default that works well for BERT-style training; callers that need a
  different optimizer or schedule can construct their own instead of using this helper.
"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
opt = AdamW(
optimizer_grouped_parameters,
lr = learning_rate,
betas = (adam_beta1, adam_beta2),
eps = adam_epsilon,
)
lr_scheduler = get_linear_schedule_with_warmup(
opt, num_warmup_steps = warmup_steps, num_training_steps = num_train_steps
)
return opt, lr_scheduler
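# Illustrative usage (assumption, not part of the original file):
#   opt, sched = create_optimizer_and_scheduler(model, num_train_steps = 10000,
#                                               warmup_steps = 1000, learning_rate = 2e-5)
#   then, once per training step: loss.backward(); opt.step(); sched.step(); model.zero_grad()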
def get_constant_schedule(optimizer: torch.optim.Optimizer, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate, using the learning rate set in optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
return torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: torch.optim.Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
            The total number of training steps.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
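# Worked example (illustrative): with num_warmup_steps = 100 and num_training_steps = 1000
# the multiplier is 0.5 at step 50 (warmup), 1.0 at step 100, 0.5 again at step 550,
# and 0.0 at step 1000, i.e. linear warmup followed by linear decay to zero.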
def get_cosine_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`int`, `optional`, defaults to 1):
The number of hard restarts to use.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
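# Editor's note (worked values; num_warmup_steps=10, num_training_steps=110 and
# num_cycles=2 are assumptions for illustration): the multiplier follows
# 0.5 * (1 + cos(pi * ((2 * progress) % 1.0))), so it decays from 1.0 towards 0.0,
# then restarts at 1.0 when progress reaches 0.5 (step 60) and decays again.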
class AdamW(torch.optim.Optimizer):
"""
Implements Adam algorithm with weight decay fix as introduced in
`Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.
Parameters:
params (:obj:`typing.Iterable[torch.nn.parameter.Parameter]`):
typing.Iterable of parameters to optimize or dictionaries defining parameter groups.
lr (:obj:`float`, `optional`, defaults to 1e-3):
The learning rate to use.
betas (:obj:`typing.Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
Adam's betas parameters (b1, b2).
eps (:obj:`float`, `optional`, defaults to 1e-6):
Adam's epsilon for numerical stability.
weight_decay (:obj:`float`, `optional`, defaults to 0):
Decoupled weight decay to apply.
correct_bias (:obj:`bool`, `optional`, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in the BERT TF repository they use :obj:`False`).
"""
def __init__(
self,
params: typing.Iterable[torch.nn.parameter.Parameter],
lr: float = 1e-3,
betas: typing.Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.0,
correct_bias: bool = True,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
super().__init__(params, defaults)
def step(self, closure: typing.Callable = None):
"""
Performs a single optimization step.
Arguments:
closure (:obj:`typing.Callable`, `optional`): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
return loss
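# --- Editor's usage sketch (illustrative only; the toy model, data and hyperparameters
# are assumptions) ---
# Minimal training loop combining the decoupled-weight-decay AdamW above with a warmup
# schedule; note that weight decay is applied directly to the parameters, not through
# the loss.
def _example_adamw_usage(num_steps: int = 100):
    model = torch.nn.Linear(4, 1)
    optimizer = AdamW(model.parameters(), lr=1e-3, weight_decay=0.01)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=num_steps)
    for _ in range(num_steps):
        loss = model(torch.randn(8, 4)).pow(2).mean()
        loss.backward()
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()
    return model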
| 12,217 | 40 | 129 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/incoder/example_api.py | import typing
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
# signals the start of a document
BOS = "<|endoftext|>"
# signals the end of a generated infill
EOM = "<|endofmask|>"
def make_sentinel(i):
# signals (1) a location to insert an infill and (2) the start of the infill generation
return f"<|mask:{i}|>"
def generate(model: torch.nn.Module, inp: str, tokenizer, max_to_generate: int=128, temperature: float=0.2):
"""
    Do standard left-to-right completion of the prefix `inp` by sampling from the model
"""
input_ids = tokenizer(inp, return_tensors="pt").input_ids.to(pytorch.device)
max_length = max_to_generate + input_ids.flatten().size(0)
if max_length > 2048:
print("warning: max_length {} is greater than the context window {}".format(max_length, 2048))
with torch.no_grad():
output = model.generate(input_ids=input_ids, do_sample=True, top_p = 0.95, temperature=temperature, max_length=max_length)
detok_hypo_str = tokenizer.decode(output.flatten())
if detok_hypo_str.startswith(BOS):
detok_hypo_str = detok_hypo_str[len(BOS):]
return detok_hypo_str
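# --- Editor's usage sketch (illustrative only; loading the public incoder-1B checkpoint
# through HuggingFace `transformers` is an assumption made for the example, in BenchPress
# the model and tokenizer come from the Incoder backend) ---
def _example_generate(prompt: str = "kernel void A(global int* a) {"):
    import transformers
    tokenizer = transformers.AutoTokenizer.from_pretrained("facebook/incoder-1B")
    model = transformers.AutoModelForCausalLM.from_pretrained("facebook/incoder-1B").to(pytorch.device)
    return generate(model, prompt, tokenizer, max_to_generate=64, temperature=0.2)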
def infill(model, inp: str, tokenizer, max_to_generate: int=128, temperature: float=0.7, extra_sentinel: bool=True, max_retries: int=1):
"""
Generate infills to complete a partial document, e.g.
[A C E] -> [A B C D E], where B and D are infills that have been generated.
    inp: str. One string instance to input for sampling, with each infill location marked by '<insert>'.
max_to_generate: int. maximum number of tokens to generate. Keep in mind
that the model context size is 2048.
temperature: float. temperature parameter for sampling.
extra_sentinel: bool. we recommend setting this to True, as it makes it
easier for the model to end generated infills. See the footnote in
section 2.2 of our paper for details.
max_retries: int. if > 1, use rejection sampling to keep sampling infills until
all infills sample a completion token.
returns a dictionary containing the following:
text: str, the completed document (with infills inserted)
parts: List[str], length N. Same as passed to the method
infills: List[str], length N-1. The list of infills generated
retries_attempted: number of retries used (if max_retries > 1)
"""
parts = inp.split('<insert>')
assert isinstance(parts, list)
retries_attempted = 0
done = False
while (not done) and (retries_attempted < max_retries):
retries_attempted += 1
infills = []
complete = []
## (1) build the prompt
if len(parts) == 1:
raise OSError
prompt = parts[0]
completion = generate(model, prompt, tokenizer, max_to_generate, temperature)
# completion = completion[len(prompt):]
if EOM not in completion:
completion += EOM
completion = completion[:completion.index(EOM) + len(EOM)]
infilled = completion[:-len(EOM)]
infills.append(infilled)
return {
'text': completion, # str, the completed document (with infills inserted)
'parts': parts, # List[str], length N. Same as passed to the method
'infills': infills, # List[str], length N-1. The list of infills generated
'retries_attempted': retries_attempted, # number of retries used (if max_retries > 1)
}
else:
prompt = ""
# encode parts separated by sentinel
for sentinel_ix, part in enumerate(parts):
prompt += part
if extra_sentinel or (sentinel_ix < len(parts) - 1):
prompt += make_sentinel(sentinel_ix)
done = True
## (2) generate infills
for sentinel_ix, part in enumerate(parts[:-1]):
complete.append(part)
prompt += make_sentinel(sentinel_ix)
# TODO: this is inefficient as it requires re-encoding prefixes repeatedly
completion = generate(model, prompt, tokenizer, max_to_generate, temperature)
completion = completion[len(prompt):]
if EOM not in completion:
completion += EOM
done = False
completion = completion[:completion.index(EOM) + len(EOM)]
infilled = completion[:-len(EOM)]
infills.append(infilled)
complete.append(infilled)
prompt += completion
complete.append(parts[-1])
text = ''.join(complete)
return {
'text': text, # str, the completed document (with infills inserted)
'parts': parts, # List[str], length N. Same as passed to the method
'infills': infills, # List[str], length N-1. The list of infills generated
'retries_attempted': retries_attempted, # number of retries used (if max_retries > 1)
}
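# --- Editor's usage sketch (illustrative only; the prompt and sampling parameters are
# assumptions) ---
# `infill` expects each insertion point to be marked with the literal string '<insert>';
# the returned dict carries the stitched document and the list of generated infills.
def _example_infill(model, tokenizer):
    prompt = "kernel void A(global int* a) {\n<insert>\n}"
    out = infill(model, prompt, tokenizer, max_to_generate=64, temperature=0.7)
    return out['text'], out['infills']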
| 4,614 | 40.954545 | 136 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/incoder/incoder.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API Calls to FAIR-Incoder."""
import typing
import time
import tqdm
import transformers
import numpy as np
from absl import flags
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.preprocessors import opencl
from deeplearning.benchpress.util import plotter
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.models import backends
from deeplearning.benchpress.models import telemetry
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.models.incoder import example_api
from deeplearning.benchpress.models.incoder.data_generator import IncoderDataGenerator
from deeplearning.benchpress.util import logging as l
transformers.set_seed(np.random.RandomState().randint(0, 2**32-1) % (1 + environment.WORLD_RANK))
FLAGS = flags.FLAGS
flags.DEFINE_string(
"custom_incoder_ckpt",
None,
"Select your own path to Incoder version instead of using the standard HF ones."
)
class Incoder(backends.BackendBase):
"""
API Class for incoder collected from huggingface.
"""
class TrainEstimator(typing.NamedTuple):
"""Named tuple to wrap Incoder pipeline."""
model : typing.TypeVar('nn.Module')
data_generator : IncoderDataGenerator
optimizer : typing.Any
scheduler : typing.Any
class SampleEstimator(typing.NamedTuple):
"""Named tuple for sampling Incoder."""
model : typing.List[typing.TypeVar('nn.Module')]
data_generator : IncoderDataGenerator
@property
def hidden_state_size(self):
return -1
def __init__(self, *args, **kwargs):
super(Incoder, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import pytorch
if not pytorch.initialized:
pytorch.initPytorch()
self.pytorch = pytorch
self.torch = pytorch.torch
self.torch_tpu_available = pytorch.torch_tpu_available
self.torch.manual_seed(np.random.RandomState().randint(0, 2**32-1) % (1 + environment.WORLD_RANK))
self.torch.cuda.manual_seed_all(np.random.RandomState().randint(0, 2**32-1) % (1 + environment.WORLD_RANK))
self.incoder_version = kwargs.pop("incoder_version")
self.train = None
self.sample = None
self.predict_generator = None
self.sampler = None
self.train_batch_size = None
self.eval_batch_size = None
self.learning_rate = None
self.num_train_steps = None
self.ckpt_path = self.cache.path / "checkpoints"
self.sample_path = self.cache.path / "samples"
self.logfile_path = self.cache.path / "logs"
if self.config.HasField("pre_train_corpus"):
self.pre_logfile_path = self.logfile_path / "pre_train"
self.telemetry = telemetry.TrainingLogger(self.logfile_path)
if self.config.HasField("pre_train_corpus"):
self.pre_telemetry = telemetry.TrainingLogger(self.logfile_path / "pre_train")
self.is_validated = False
self.trained = False
l.logger().info("{} initialized".format(self.incoder_version))
return
def _ConfigModelParams(self, is_sampling):
"""General model hyperparameters initialization."""
##! Placeholder for now. If need be, will be populated.
return
def _ConfigSampleParams(self,
data_generator: IncoderDataGenerator,
sampler: samplers.Sampler,
) -> None:
"""
Model parameter initialization for inference.
"""
self._ConfigModelParams(is_sampling = True)
self.sampler = sampler
self.temperature = sampler.temperature
kwargs = {}
if self.incoder_version == "facebook/incoder-6B":
# the arguments added below will load a half precision version of the model,
# which requires less RAM than loading the full float32 version. this
# should fit in ~16GB of RAM
# NOTE: half precision should *not* be used if you plan to fine-tune the
# model. You'll need full precision and a lot of GPU memory. We have not
# tested fine-tuning in `transformers` (the model was trained in fairseq)
kwargs = dict(
revision = "float16",
torch_dtype = self.torch.float16,
low_cpu_mem_usage = True,
)
if FLAGS.custom_incoder_ckpt is None:
m = transformers.AutoModelForCausalLM.from_pretrained(
self.incoder_version, **kwargs
).to(self.pytorch.offset_device)
else:
l.logger().warn("Using custom Incoder checkpoint at {}".format(FLAGS.custom_incoder_ckpt))
m = transformers.AutoModelForCausalLM.from_pretrained(
FLAGS.custom_incoder_ckpt, **kwargs
).to(self.pytorch.offset_device)
if self.pytorch.num_nodes == 1 and self.pytorch.num_gpus > 1:
l.logger().warn("HuggingFace 'generate' function does not support DataParallel. If you want multi-GPU sampling, go to DDP.")
self.sample = Incoder.SampleEstimator(m, data_generator)
l.logger().info("Initialized model sampler in {}".format(self.sampler.cache.path))
return
def samplesWithCategorical(self) -> bool:
return True
def model_step(self) -> 'torch.Tensor':
raise NotImplementedError
return
def sample_model_step(self,
model : typing.List[typing.TypeVar('torch.nn.Module')],
inputs : typing.Dict[str, typing.TypeVar('torch.Tensor')],
is_live : bool = False,
iteration : int = None,
) -> typing.Dict[str, typing.List[typing.List[int]]]:
"""
Specialized forward function.
Dispatches model replicas across all GPUs, one process each.
Inputs must be three-dimensional:
workload_size x batch_size x sequence_length
"""
start = time.time()
total_seqs = inputs['input_ids'].shape[0] * inputs['input_ids'].shape[1]
max_to_generate = self.sampler.sequence_length - 3
outputs = {
'generated_samples': self.torch.zeros((total_seqs, self.sampler.sequence_length), dtype = self.torch.int64).to(self.pytorch.device),
'sample_indices': self.torch.zeros((total_seqs, max_to_generate), dtype = self.torch.int64).to(self.pytorch.device),
'input_ids': [], 'masked_lm_lengths': []
}
if iteration is not None:
desc = "Sampling iteration: {}".format(iteration)
else:
desc = "Sampling"
s_idx = 0
if environment.WORLD_RANK == 0:
bar = tqdm.tqdm(total = total_seqs, desc = desc)
else:
bar = None
for batch in inputs['input_ids']:
for seq in batch:
seq = [x for x in seq if x != self.tokenizer.padToken]
incode = self.tokenizer.ArrayToCode(seq).replace("<|mask:0|>", "<insert>") # This is a text where pad has been stripped off.
incode = "<| file ext=.cl |>\n{}\n<|/ file |>".format(incode)
incoded = example_api.infill(
model,
incode,
self.tokenizer.get_hf_tokenizer(),
max_to_generate = max_to_generate - len(seq) - 13,
temperature = self.temperature,
extra_sentinel = True,
max_retries = 1,
)
try:
          # This is a proper hack right here.
opening = lambda x: "<| file ext=.cl |>\n{}void".format(x)
if opening("") in incoded['text']:
incoded['text'] = opening("kernel ") + incoded['text'][len(opening("")):]
incoded['text'] = incoded['text'].replace("kernel A(", "kernel void A(")
text = opencl.ExtractSingleKernels(incoded['text'])[0] # Collect only the first kernel generated, ignore the rest.
except IndexError:
l.logger().warn(incoded['text'], ddp_nodes = True)
text = incoded['text']
text = text.replace("<| file ext=.cl |>\n", "").replace("\n<|/ file |>", "")
while "\n\n" in text:
text = text.replace("\n\n", "\n")
while text[-1] == "\n":
text = text[:-1]
sample = self.tokenizer.TokenizeString(text)[:self.sampler.sequence_length]
sample += [self.tokenizer.padToken] * (self.sampler.sequence_length - len(sample))
sample = self.torch.LongTensor(sample).to(self.pytorch.device)
indices = self.tokenizer.TokenizeString(incoded['infills'][0])[:max_to_generate]
indices += [self.tokenizer.padToken] * (max_to_generate - len(indices))
indices = self.torch.LongTensor(indices).to(self.pytorch.device)
outputs['generated_samples'][s_idx] = sample
outputs['sample_indices'][s_idx] = indices
s_idx += 1
if environment.WORLD_RANK == 0:
bar.update(1)
outputs['input_ids'] = inputs['input_ids'].reshape(-1, self.sampler.sequence_length).to(self.pytorch.device)
outputs['masked_lm_lengths'] = inputs['masked_lm_lengths'].reshape(-1, 1).to(self.pytorch.device)
outputs['generated_samples'] = list(outputs['generated_samples'].cpu().numpy())
outputs['sample_indices'] = list(outputs['sample_indices'].cpu().numpy())
outputs['input_ids'] = list(outputs['input_ids'].cpu().numpy())
outputs['masked_lm_lengths'] = list(outputs['masked_lm_lengths'].cpu().numpy())
end = time.time()
return outputs, end-start
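  # Editor's note (illustrative shapes, values assumed for the example): with
  # workload_size = 2, batch_size = 4 and sequence_length = 768,
  # inputs['input_ids'] is a [2, 4, 768] tensor and the returned
  # 'generated_samples' / 'sample_indices' lists each contain 2 * 4 = 8 sequences.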
def PreTrain(self, *args, **kwargs) -> None:
l.logger().warn("Pre-training is not supported yet for Incoder. Moving on.")
return
def Train(self, *args, **kwargs) -> None:
    l.logger().warn("Training is not supported yet for Incoder. Moving on.")
return
def Validate(self, *args, **kwargs) -> None:
    l.logger().warn("Validation is not supported yet for Incoder. Moving on.")
return
def InitSampling(self,
sampler : samplers.Sampler,
seed : typing.Optional[int] = None,
corpus = None,
) -> None:
"""This is called only once. Performs basic initialization of sampling"""
sample_batch_size = sampler.batch_size
##! TODO: Replace with incoder data generator
data_generator = IncoderDataGenerator.SampleMaskLMBatchGenerator(
self.config.training, sampler, self.tokenizer, seed, sample_batch_size,
sampler.sequence_length, self.cache.path, corpus)
##! TODO: Maybe initialize inline here instead of elaborating in separate function.
self._ConfigSampleParams(data_generator, sampler)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
self.step_inputs = None
self.loader = None
self.pred_iterator = None
l.logger().info("Initialized model samples in {}".format(self.sample_path / self.sampler.hash))
return
def InitSampleBatch(self, sampler: samplers.Sampler, **kwargs) -> None:
"""Batch-specific initialization. Called once when a new batch is going to be generated"""
workload_size = kwargs.get('workload_size', None)
if self.loader is None:
if self.torch_tpu_available:
self.loader = self.pytorch.torch_ploader.ParallelLoader(
self.sample.data_generator.dataloader, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
else:
self.loader = self.sample.data_generator.dataloader
if not sampler.is_active:
if self.pred_iterator is None:
self.pred_iterator = iter(self.loader)
try:
inputs = next(self.pred_iterator)
except StopIteration:
self.pred_iterator = iter(self.loader)
inputs = next(self.pred_iterator)
if workload_size is None:
## I think this dictionary holds tensors of the following size:
## [num_gpus x batch_size x seq_len] if only one node works.
## Otherwise, [1 x batch_size x seq_len] since each process manages its own GPU.
padded_wsize = self.pytorch.num_gpus if environment.WORLD_SIZE == 1 else 1
else:
## If a workload is specified, then after you pad to the dimension of GPU or num processes
## Divide the size by GPU size or num processes size.
padded_wsize = (
(max(1, workload_size // (self.pytorch.num_gpus * sampler.batch_size))) * self.pytorch.num_gpus
if environment.WORLD_SIZE == 1
else (workload_size // (self.pytorch.num_nodes * sampler.batch_size)) * self.pytorch.num_nodes)
self.step_inputs = {
x: inputs[x].unsqueeze(0).repeat(padded_wsize, 1, 1)
for x in inputs
}
# This loop below is purely for proper printing reasons:
sample_text = set(
[self.tokenizer.tokensToString(
seq.cpu().numpy(), ignore_token = self.tokenizer.padToken
) for seq in inputs['input_ids']]
)
for seq in sample_text:
self.sampler.setStartText(seq)
self.sampler.Specialize(self.tokenizer)
return
def SampleNextIndices(
self, *unused_args, **unused_kwargs
) -> typing.Tuple[np.array, np.array, np.array, np.array]:
"""Called iteratively to build a single batch of samples, until termination criteria stops calling"""
del unused_kwargs
del unused_args
if self.sample is None:
raise ValueError("Incoder sampler has not been initialized.")
with self.torch.no_grad():
if self.sampler.is_active:
try:
return self.sample.data_generator.ActiveGeneration(self, self.sample)
except StopIteration:
raise StopIteration
else:
##!TODO: just call model's forward function. No need to do more.
step_out, time = self.sample_model_step(
self.sample.model,
self.step_inputs,
is_live = self.sampler.is_live
)
if self.pytorch.num_nodes > 1:
distrib.barrier()
generated_samples = [self.torch.zeros(tuple(step_out['generated_samples'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
sample_indices = [self.torch.zeros(tuple(step_out['sample_indices' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(generated_samples, step_out["generated_samples"])
self.torch.distributed.all_gather(sample_indices, step_out["sample_indices"])
raise NotImplementedError("This will not work because generated_samples and sample indices are lists and not tensors")
else:
generated_samples = step_out['generated_samples']
sample_indices = step_out['sample_indices']
if self.sampler.is_live and input("Show logits figure ? [y/!y]") == "y":
if self.pytorch.num_nodes > 1:
prediction_scores = [self.torch.zeros(tuple(step_out['prediction_scores'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
distrib.barrier()
self.torch.distributed.all_gather(prediction_scores, step_out["prediction_scores"])
else:
prediction_scores = step_out['prediction_scores'].cpu()
for hole, indcs in zip(prediction_scores, sample_indices):
plotter.LogitsStepsDistrib(
x = self.torch.nn.Softmax(dim = 1)(self.torch.FloatTensor(hole[:10])).numpy(),
atoms = [self.tokenizer.decoder[i] for i in range(self.tokenizer.vocab_size)],
sample_indices = [self.tokenizer.decoder[i] for i in indcs[0]][:10],
plot_name = "sampling_distrib",
title = "Sampling distribution dim 1",
x_name = "Probs / sample step",
)
return (
self.step_inputs['original_input'].cpu().view(-1, self.step_inputs['original_input'].shape[2]).numpy(),
self.step_inputs['input_ids'].cpu().view(-1, self.sampler.sequence_length).numpy(),
generated_samples,
sample_indices
)
class Incoder1B(Incoder):
"""
Specified class for 'small' 1B parameter Incoder.
"""
def __init__(self, *args, **kwargs):
kwargs["incoder_version"] = "facebook/incoder-1B"
super(Incoder1B, self).__init__(*args, **kwargs)
return
def __repr__(self) -> str:
return "Incoder1B"
class Incoder6B(Incoder):
"""
Specified class for regular 6B parameter Incoder.
"""
def __init__(self, *args, **kwargs):
kwargs["incoder_version"] = "facebook/incoder-6B"
super(Incoder6B, self).__init__(*args, **kwargs)
return
def __repr__(self) -> str:
return "Incoder6B"
| 17,189 | 41.339901 | 202 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/incoder/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import typing
import numpy as np
import math
import pathlib
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.samplers import samplers
from deeplearning.benchpress.proto import model_pb2
from deeplearning.benchpress.corpuses import corpuses
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import active_feed_database
from deeplearning.benchpress.models.torch_bert import data_generator as torch_data_generator
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
class IncoderDataGenerator(torch_data_generator.torchLMDataGenerator):
"""Data generator subclass designed for Incoder model."""
@classmethod
def TrainMaskLMBatchGenerator(cls,
corpus : corpuses.Corpus,
training_opts : model_pb2.TrainingOptions,
cache_path : pathlib.Path,
num_train_steps : int = None,
pre_train : bool = False,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> 'IncoderDataGenerator':
"""Initializes data generator for training."""
d = super(IncoderDataGenerator, IncoderDataGenerator()).TrainMaskLMBatchGenerator(
corpus, training_opts, cache_path, num_train_steps, pre_train,
feature_encoder, feature_tokenizer, feature_sequence_length,
)
return d
@classmethod
def SampleMaskLMBatchGenerator(cls,
model_opts : model_pb2.TrainingOptions,
sampler : samplers.Sampler,
tokenizer : tokenizers.TokenizerBase,
seed : int,
sample_batch_size : int,
max_position_embeddings : int,
cache_path : pathlib.Path,
corpus : corpuses.Corpus = None,
feature_encoder : bool = False,
feature_tokenizer : tokenizers.FeatureTokenizer = None,
feature_sequence_length : int = None,
) -> 'IncoderDataGenerator':
"""Initializes data generator for inference."""
d = super(IncoderDataGenerator, IncoderDataGenerator()).SampleMaskLMBatchGenerator(
model_opts, sampler, tokenizer, seed,
sample_batch_size, max_position_embeddings, cache_path, corpus,
feature_encoder, feature_tokenizer, feature_sequence_length
)
return d
def __init__(self):
super(IncoderDataGenerator, self).__init__()
return
def initOrGetQueue(self, target_features: typing.Dict[str, float] = None) -> np.array:
"""
If feed queue is not initialized, initialize it by getting new datapoint.
Otherwise, don't do anything as feed_queue is already loaded from checkpoint.
Adds datapoint to InputFeed table of database.
Returns:
Starting input feed of sampling.
"""
if not self.feed_queue:
if FLAGS.start_from_cached and target_features is not None:
cached_samples = [[x.sample, {':'.join(f.split(':')[:-1]): float(f.split(':')[-1]) for f in x.output_features.split('\n')}, -1] for x in self.active_db.get_data]
if len(cached_samples) == 0:
return self.initOrGetQueue()
else:
for idx, cs in enumerate(cached_samples):
cached_samples[idx][-1] = self.feat_sampler.calculate_distance(cs[1])
sorted_cache_samples = sorted(cached_samples, key = lambda x: x[-1])
for scs in sorted_cache_samples[:self.sampler.config.sample_corpus.corpus_config.active.active_search_width]:
tokenized = self.tokenizer.TokenizeString(scs[0])
padded = self._padToMaxPosition(tokenized)[:self.sampler.sequence_length]
if padded[0] == self.tokenizer.padToken:
l.logger().error("Pad token was found again at the beginning of the sequence.")
l.logger().error(scs[0])
l.logger().error(tokenized)
l.logger().error(padded)
encoded = self._padToMaxPosition([int(x) for x in tokenized])[:self.sampler.sequence_length]
assert encoded[0] != self.tokenizer.padToken, encoded
self.feed_queue.append(
torch_data_generator.ActiveSampleFeed(
input_feed = encoded,
input_features = scs[1],
input_score = scs[-1],
gen_id = 0,
)
)
self.addToDB(
active_feed_database.ActiveInput.FromArgs(
tokenizer = self.tokenizer, id = self.active_db.input_count,
input_feed = encoded, input_features = scs[1],
)
)
else:
try:
cf = next(self.loader).squeeze(0)
except StopIteration:
self.loader = iter(self.dataloader)
cf = next(self.loader).squeeze(0)
cf = [int(x) for x in cf]
assert cf[0] != self.tokenizer.padToken, cf
self.feed_queue.append(
torch_data_generator.ActiveSampleFeed(
input_feed = cf,
input_features = extractor.ExtractFeatures(self.tokenizer.ArrayToCode(cf), [self.feat_sampler.feature_space])[self.feat_sampler.feature_space],
input_score = math.inf,
gen_id = 0,
)
)
self.addToDB(
active_feed_database.ActiveInput.FromArgs(
tokenizer = self.tokenizer, id = self.active_db.input_count,
input_feed = cf, input_features = self.feed_queue[-1].input_features,
)
)
l.logger().info("Feed queue input scores: {}".format(', '.join([str(round(c.input_score, 3)) for c in self.feed_queue])))
return self.feed_queue[0].input_feed | 7,533 | 48.565789 | 169 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/models/tf_bert/model.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
from deeplearning.benchpress.util.tf import tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by the layer normalization layers.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
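# --- Editor's usage sketch (illustrative hyperparameters) ---
# Round-tripping a config through its dict / JSON helpers, one way hyperparameters can
# be persisted and restored.
def _example_config_roundtrip():
  config = BertConfig(vocab_size=32000, hidden_size=512, num_hidden_layers=8,
                      num_attention_heads=8, intermediate_size=1024)
  clone = BertConfig.from_dict(config.to_dict())
  assert clone.to_json_string() == config.to_json_string()
  return clone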
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=6, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.compat.v1.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.compat.v1.variable_scope(scope, default_name="bert"):
with tf.compat.v1.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob,
layer_norm_eps=config.layer_norm_eps,
)
with tf.compat.v1.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
layer_norm_eps=config.layer_norm_eps,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.compat.v1.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.compat.v1.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def _get_masked_lm_output(bert_config,
input_tensor,
output_weights,
positions,
label_ids,
label_weights
):
"""Get loss and log probs for the masked LM."""
input_tensor = _gather_indexes(input_tensor, positions)
with tf.compat.v1.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.compat.v1.variable_scope("transform"):
input_tensor = tf.compat.v1.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=get_activation(bert_config.hidden_act),
kernel_initializer=create_initializer(
bert_config.initializer_range))
input_tensor = layer_norm(input_tensor, eps = bert_config.layer_norm_eps)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.compat.v1.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
def _get_next_sentence_output(bert_config,
input_tensor,
labels
):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.compat.v1.variable_scope("cls/seq_relationship"):
output_weights = tf.compat.v1.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=create_initializer(bert_config.initializer_range))
output_bias = tf.compat.v1.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def _gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
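# --- Editor's sketch (illustrative only) ---
# _gather_indexes flattens the batch so that per-example positions can be gathered in
# one call; e.g. a [2, 4, 3] tensor with positions [[0, 2], [1, 3]] yields a [4, 3]
# tensor (two gathered positions per example).
def _example_gather_indexes():
  sequence = tf.reshape(tf.range(24, dtype=tf.float32), [2, 4, 3])
  positions = tf.constant([[0, 2], [1, 3]])
  return _gather_indexes(sequence, positions) # shape [4, 3]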
def gelu(x):
"""Gaussian Error Linear Unit.
  This is a smoother version of the ReLU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
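# Editor's note (approximate values of the tanh approximation above): gelu(0) == 0,
# gelu(1) ≈ 0.841, gelu(-1) ≈ -0.159; for large |x| the output approaches x on the
# positive side and 0 on the negative side, which is the "smoother ReLU" behaviour
# described in the docstring.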
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
  # We assume that anything that's not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name_to_variable[name]
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, eps = 1e-12, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.keras.layers.LayerNormalization(
axis = -1, epsilon=eps, dtype=tf.float32, name = name
)(input_tensor)
def layer_norm_and_dropout(input_tensor, dropout_prob, eps = 1e-12, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, eps = eps, name = name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.gather()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.compat.v1.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1,
layer_norm_eps=1e-12,
):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
      raise ValueError("`token_type_ids` must be specified if "
"`use_token_type` is True.")
token_type_table = tf.compat.v1.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.debugging.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.compat.v1.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob, layer_norm_eps)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
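# --- Editor's sketch (illustrative only) ---
# For a single sequence of length 3 whose last position is padding, the 2D mask
# [[1, 1, 0]] broadcasts to a [1, 3, 3] mask in which every query position may attend
# to the first two keys only.
def _example_attention_mask():
  input_ids = tf.constant([[7, 8, 0]])
  input_mask = tf.constant([[1, 1, 0]])
  return create_attention_mask_from_input_mask(input_ids, input_mask) # shape [1, 3, 3]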
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.compat.v1.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.compat.v1.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.compat.v1.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
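# Illustrative sketch (not used by the model code above): shows how the
# additive mask turns a 1/0 attention mask into a large negative bias, so
# masked positions receive (numerically) zero probability after the softmax.
def _attention_mask_adder_example():
  mask = tf.constant([[1.0, 1.0, 0.0]])    # 1 = attend, 0 = masked
  scores = tf.constant([[2.0, 1.0, 3.0]])  # raw attention scores
  adder = (1.0 - mask) * -10000.0          # [0, 0, -10000]
  return tf.nn.softmax(scores + adder)     # ~[0.73, 0.27, 0.00]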
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
layer_norm_eps=1e-12,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.compat.v1.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.compat.v1.variable_scope("attention"):
attention_heads = []
with tf.compat.v1.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.compat.v1.variable_scope("output"):
attention_output = tf.compat.v1.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input, eps = layer_norm_eps)
# The activation is only applied to the "intermediate" hidden layer.
with tf.compat.v1.variable_scope("intermediate"):
intermediate_output = tf.compat.v1.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.compat.v1.variable_scope("output"):
layer_output = tf.compat.v1.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output, eps = layer_norm_eps)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
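# Minimal usage sketch for `transformer_model` (not used elsewhere). It is
# built inside an explicit TF1-style graph, since the tf.compat.v1 layers
# above are not meant for eager execution; shapes assume the defaults
# (hidden_size=768, 12 layers, 12 heads).
def _transformer_model_usage_sketch():
  graph = tf.Graph()
  with graph.as_default():
    inputs = tf.zeros([2, 16, 768])  # [batch_size, seq_length, hidden_size]
    output = transformer_model(input_tensor=inputs)
  return output.shape  # [2, 16, 768]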
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
      raised.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
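# Illustrative sketch for `get_shape_list` (not used elsewhere): for a
# placeholder with an unknown batch dimension, the known dimension comes back
# as a Python int and the unknown one as a scalar Tensor.
def _get_shape_list_example():
  graph = tf.Graph()
  with graph.as_default():
    x = tf.compat.v1.placeholder(tf.float32, shape=[None, 7])
    shape = get_shape_list(x, expected_rank=2)
  return shape  # [<dynamic batch-size Tensor>, 7]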
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
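# Round-trip sketch for the two reshape helpers above (not used elsewhere),
# built in graph mode because `get_shape_list` reads `tensor.name`.
def _reshape_roundtrip_example():
  graph = tf.Graph()
  with graph.as_default():
    x = tf.zeros([2, 3, 4])
    flat = reshape_to_matrix(x)  # shape [6, 4]
    restored = reshape_from_matrix(flat, get_shape_list(x))
  return restored.shape  # [2, 3, 4]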
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.compat.v1.get_variable_scope().name
raise ValueError("For the tensor `{}` in scope `{}`, the actual rank `{}` (shape = {}) is not equal to the expected rank `{}`"
.format(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
| 42,818 | 38.464516 | 130 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/pytorch.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A wrapper module to include pytorch with some options"""
from absl import flags
import os
import datetime
from deeplearning.benchpress.util import gpu
from deeplearning.benchpress.util import environment
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"pt_cpu_only",
False,
"Do not use GPU/TPU in pytorch session."
)
import torch
try:
import torch_xla.core.xla_model
import torch_xla.debug.metrics
import torch_xla.distributed.parallel_loader
  # Grab the helper submodules before rebinding `torch_xla` to xla_model,
  # otherwise the attribute lookups below would fail.
  torch_xla_met = torch_xla.debug.metrics
  torch_ploader = torch_xla.distributed.parallel_loader
  torch_xla = torch_xla.core.xla_model
torch_tpu_available = True
except ImportError:
torch_tpu_available = False
offset_device = None
devices = None
device = None
num_gpus = None
num_nodes = None
initialized = False
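# Minimal usage sketch (hypothetical caller): modules are expected to call
# initPytorch() once at startup and then read the module-level globals, e.g.
#
#   from deeplearning.benchpress.util import pytorch
#   pytorch.initPytorch()
#   model = MyModel().to(pytorch.device)  # MyModel is a stand-in torch.nn.Module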
def initPytorch() -> None:
global torch_tpu_available
global offset_device
global devices
global device
global num_gpus
global num_nodes
global initialized
if FLAGS.pt_cpu_only:
device = torch.device("cpu")
num_gpus = 0
num_nodes = 1
elif torch_tpu_available:
device = torch_xla.xla_device()
num_gpus = 0
elif environment.WORLD_SIZE == 1 and torch.cuda.is_available():
# if num_gpus is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
offset_device = torch.device("cuda:0")
device = torch.device("cuda:0")
available_gpus = gpu.getGPUID()
devices = ["cuda:{}".format(str(x['id'])) for x in available_gpus]
num_nodes = 1
num_gpus = torch.cuda.device_count()
if device.type == "cuda":
torch.cuda.set_device(device)
else:
# Here, we'll use torch.distributed.
    # Initializes the distributed backend which will take care of synchronizing nodes/GPUs.
    # This branch will trigger DistributedDataParallel instead of simple DP.
# Distributed training prohibits manual selection of GPUs and takes for granted that cuda is available.
ddp_backend = "nccl" if torch.cuda.is_available() else "gloo"
tcp_store = torch.distributed.TCPStore(
environment.MASTER_ADDR,
environment.MASTER_PORT,
environment.WORLD_SIZE,
environment.WORLD_RANK == 0
)
torch.distributed.init_process_group(
backend = ddp_backend,
store = tcp_store,
rank = environment.WORLD_RANK,
world_size = environment.WORLD_SIZE,
timeout = datetime.timedelta(days = 3)
)
num_nodes = torch.distributed.get_world_size()
num_gpus = torch.cuda.device_count()
if num_gpus == 0:
device = torch.device('cpu', environment.LOCAL_RANK)
offset_device = torch.device('cpu', environment.LOCAL_RANK)
else:
device = torch.device("cuda", environment.LOCAL_RANK)
offset_device = torch.device("cuda", environment.LOCAL_RANK)
torch.cuda.set_device(device)
initialized = True
  return
 | 3,833 | 32.33913 | 107 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/util/distrib.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cluster node handling for Distributed model training and sampling"""
import glob
import os
import sys
import pickle
import time
import pathlib
import typing
import functools
import tqdm
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.util import pytorch
torch = pytorch.torch
MASTER_PORT = environment.MASTER_PORT
MASTER_ADDR = environment.MASTER_ADDR
LOCAL_RANK = environment.LOCAL_RANK
WORLD_RANK = environment.WORLD_RANK
WORLD_SIZE = environment.WORLD_SIZE
PATH = None
LOCK_TYPES = [
'barrier-lock-',
'barrier-escape-',
'critical-lock-',
'index-',
'msg-'
]
def barrier(fn: typing.Callable = None) -> None:
"""
Node processes are blocked until all nodes have reached this checkpoint.
!!Warning!!: This function must not be called under a child process or thread.
"""
if environment.WORLD_SIZE > 1:
if pytorch.num_gpus > 0:
torch.distributed.barrier(device_ids = [environment.LOCAL_RANK])
else:
torch.distributed.barrier()
return
else:
return
# if WORLD_SIZE > 1:
# if PATH is None:
# raise FileNotFoundError("Distributed env path has not been set!")
# with open(PATH / "barrier-lock-{}".format(WORLD_RANK), 'w') as outf:
# outf.write("{}\n".format(WORLD_RANK))
# outf.flush()
# barriers = glob.glob(str(PATH / "barrier-lock-*"))
# while len(barriers) < WORLD_SIZE:
# if fn:
# fn()
# time.sleep(0.5)
# barriers = glob.glob(str(PATH / "barrier-lock-*"))
# with open(PATH / "barrier-escape-{}".format(WORLD_RANK), 'w') as outf:
# outf.write("{}\n".format(WORLD_RANK))
# outf.flush()
# while len(barriers) > 0:
# barriers = glob.glob(str(PATH / "barrier-lock-*"))
# escapes = glob.glob(str(PATH / "barrier-escape-*"))
# if WORLD_RANK == 0 and len(escapes) == WORLD_SIZE:
# for be in escapes:
# os.remove(str(be))
# for b in barriers:
# os.remove(str(b))
# else:
# time.sleep(0.2)
# time.sleep(0.5)
return
def lock() -> None:
"""
#####!!!! DEPRECATED. WILL BE REMOVED SOON.
Acquire lockfile to proceed to critical section.
"""
## Corner-case where no DDP is used.
if WORLD_SIZE == 1:
return
## Busy waiting to acquire lock.
while len(glob.glob(str(PATH / "critical-lock-*"))) > 0:
time.sleep(0.5)
## Register lockfile.
if (PATH / "critical-lock-{}".format(WORLD_RANK)).exists():
raise ValueError("Node {} lock already exists.".format(WORLD_RANK))
with open(PATH / "critical-lock-{}".format(WORLD_RANK), 'w') as outf:
outf.write("{}\n".format(WORLD_RANK))
outf.flush()
## Maybe more than one processes are here already. Prioritize by id.
## Unlock and Re-lock if you are not the minimum privileged id.
locks = glob.glob(str(PATH / "critical-lock-*"))
if len(locks) > 1:
min_id = min([int(x.split('critical-lock-')[-1]) for x in locks])
if WORLD_RANK != min_id:
unlock()
lock()
return
def unlock() -> None:
"""
#####!!!! DEPRECATED. WILL BE REMOVED SOON.
Release node lock.
"""
if WORLD_SIZE == 1:
return
if not (PATH / "critical-lock-{}".format(WORLD_RANK)).exists():
raise FileNotFoundError("Node {} lock missing.".format(WORLD_RANK))
exc_counter = 0
while (PATH / "critical-lock-{}".format(WORLD_RANK)).exists():
try:
os.remove(PATH / "critical-lock-{}".format(WORLD_RANK))
except FileNotFoundError as e:
exc_counter += 1
if exc_counter > 500:
raise e
time.sleep(0.5)
return
def broadcast(msg: str = None) -> None:
"""
Node broadcasts a message to all other nodes.
This function is not process-safe. User must ensure one node calls it
and all reads have been complete before re-writing.
"""
if environment.WORLD_SIZE == 1:
return msg
msg = [msg] * environment.WORLD_SIZE
torch.distributed.broadcast_object_list(msg, src = 0)
return msg[0]
def get_consistent(msg: typing.Any) -> typing.Any:
"""
All nodes become consistent on a set of discrete chunks of data.
All nodes must get updated with the same merged blob.
"""
if environment.WORLD_SIZE == 1:
return msg
consistent_array = [None for _ in range(environment.WORLD_SIZE)]
torch.distributed.all_gather_object(consistent_array, [msg])
return [i for rank in consistent_array for i in rank[0]]
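# Illustrative sketch of the collective helpers above. In a hypothetical
# 2-node run where rank 0 holds ["a", "b"] and rank 1 holds ["c"],
# get_consistent() returns ["a", "b", "c"] on every rank, while broadcast()
# copies rank 0's message to every rank. With WORLD_SIZE == 1 both helpers
# short-circuit, which is the only path this sketch exercises:
def _collectives_single_node_example():
  if environment.WORLD_SIZE == 1:
    assert broadcast("hello") == "hello"
    assert get_consistent([1, 2]) == [1, 2]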
def init(path: pathlib.Path) -> None:
"""
Initialize parent directory for distrib coordination.
"""
global PATH
if isinstance(path, str):
PATH = pathlib.Path(path).resolve()
else:
PATH = path
cleanup()
return
def cleanup() -> None:
"""
Cleanup any distributed lock files used.
"""
for tp in LOCK_TYPES:
for f in glob.glob(str(PATH / "{}{}".format(tp, WORLD_RANK))):
os.remove(f)
barrier()
return
class ProgressBar(object):
"""
Creates a distributed progressbar.
All nodes write their current index to a distinct file.
Only master node reads the indices and updates the progressbar.
"""
def __init__(self, total: int, offset: int, desc: str = ""):
self.total = total
self.offset = offset
self.path = PATH
self.n = 0 # tqdm compatibility getter.
if self.path is None:
raise FileNotFoundError("Distributed env path has not been set!")
if WORLD_RANK == 0:
self.bar = tqdm.tqdm(total = total, desc = desc, leave = True)
return
def _fetch_indices(self, idx: int) -> int:
"""
Master node reads current workload indices of all nodes.
"""
total = idx - self.offset
for n in range(1, WORLD_SIZE):
if (self.path / "index-{}".format(n)).exists():
try:
with open(self.path / "index-{}".format(n), 'r') as inf:
total += int(inf.read())
except Exception:
pass
return total
def _write_index(self, idx: int) -> None:
"""
Update personal node dictionary with current index.
"""
with open(self.path / "index-{}".format(WORLD_RANK), 'w') as outf:
outf.write(str(idx - self.offset))
outf.flush()
return
def update(self, idx: int, flush: bool = False) -> None:
"""
Master node updates the bar,
slave nodes update their indices.
"""
if (idx - self.offset) % 100 == 0 or flush:
if WORLD_RANK == 0:
total_idx = self._fetch_indices(idx)
self.bar.update(total_idx - self.bar.n)
self.bar.refresh()
else:
self._write_index(idx)
return
def finalize(self, idx: int) -> None:
"""
Do a final bar update and cleanup progressbar object.
"""
fn = functools.partial(self.update, idx = idx, flush = True)
barrier(fn)
if WORLD_RANK == 0:
indices = glob.glob(str(PATH / "index-*"))
for ip in indices:
os.remove(str(ip))
self.bar.close()
return
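# Minimal usage sketch (hypothetical workload), assuming init() has already
# pointed every node to a shared coordination directory:
#
#   init(pathlib.Path("/shared/coordination"))
#   bar = ProgressBar(total = len(work), offset = 0, desc = "work")
#   for idx, item in enumerate(work):
#     ...  # each node processes its share of `work`
#     bar.update(idx)
#   bar.finalize(idx)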
| 7,491 | 28.380392 | 80 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/feature_transformer.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Experimental transformer for feature space mapping.
"""
import math
import typing
import copy
import time
import pathlib
import tqdm
import multiprocessing
import pickle
from absl import app
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.corpuses import encoded
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import distributions
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.models.torch_bert import optimizer
from deeplearning.benchpress.models.torch_bert import hooks
from deeplearning.benchpress.experiments import workers
torch = pytorch.torch
ENCODED_DB_PATH = "/home/foivos/unique_encoded.db"
TOKENIZER_PATH = "/home/foivos/backup_tokenizer.pkl"
class TransformerModel(torch.nn.Module):
def __init__(self, ntoken: int, d_model: int, nhead: int, d_hid: int,
nlayers: int, pad_idx, dropout: float = 0.5):
super().__init__()
self.model_type = 'Transformer'
self.embed = torch.nn.Embedding(ntoken, d_model, padding_idx = pad_idx)
self.pos_encoder = PositionalEncoding(d_model, dropout)
self.target_embed = torch.nn.Embedding(ntoken, d_model)
self.target_pos_encoder = PositionalEncoding(d_model, dropout)
self.d_model = d_model
encoder_layers = torch.nn.TransformerEncoderLayer(d_model, nhead, d_hid, dropout, batch_first = True)
encoder_norm = torch.nn.LayerNorm(d_model, eps=1e-5)
self.transformer_encoder = torch.nn.TransformerEncoder(encoder_layers, nlayers, encoder_norm)
decoder_layer = torch.nn.TransformerDecoderLayer(d_model, nhead, d_hid, dropout, batch_first = True)
decoder_norm = torch.nn.LayerNorm(d_model, eps=1e-5)
self.transformer_decoder = torch.nn.TransformerDecoder(decoder_layer, nlayers, decoder_norm)
self.linear = torch.nn.Linear(d_model, ntoken)
self.init_weights()
def init_weights(self) -> None:
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
# self.decoder.bias.data.zero_()
# self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src: torch.Tensor, target: torch.Tensor, src_mask: torch.Tensor = None, src_key_padding_mask = None) -> torch.Tensor:
"""
Args:
src: Tensor, shape [seq_len, batch_size]
src_mask: Tensor, shape [seq_len, seq_len]
Returns:
output Tensor of shape [seq_len, batch_size, ntoken]
"""
src1 = self.embed(src) * math.sqrt(self.d_model)
src2 = self.pos_encoder(src1)
output1 = self.transformer_encoder(src2, mask = src_mask, src_key_padding_mask = src_key_padding_mask)
tgt1 = self.embed(target) * math.sqrt(self.d_model)
tgt2 = self.pos_encoder(tgt1)
output2 = self.transformer_decoder(tgt2, output1)
output3 = self.linear(output2)
# print(src.shape)
# print(src1.shape)
# print(src2.shape)
# print(output1.shape)
# print(output2.shape)
# print(output3.shape)
# input()
return output3
class PositionalEncoding(torch.nn.Module):
def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
super().__init__()
self.dropout = torch.nn.Dropout(p=dropout)
position = torch.arange(max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
pe = torch.zeros(max_len, 1, d_model)
pe[:, 0, 0::2] = torch.sin(position * div_term)
pe[:, 0, 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x: Tensor, shape [seq_len, batch_size, embedding_dim]
"""
x = x + self.pe[:x.size(0)]
return self.dropout(x)
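# Tiny sanity sketch for the encoding above (not used in training): with
# d_model = 4 and dropout disabled, the vector added at position 0 is
# [sin(0), cos(0), sin(0), cos(0)] = [0, 1, 0, 1].
def _positional_encoding_example():
  pe = PositionalEncoding(d_model = 4, dropout = 0.0, max_len = 8)
  return pe(torch.zeros(1, 1, 4))  # tensor([[[0., 1., 0., 1.]]])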
class FeatureDataset(torch.utils.data.Dataset):
def __init__(self, corpus: typing.List[typing.Dict[str, typing.Dict[str, float]]]) -> None:
self.corpus = corpus
self.feat_tokenizer = tokenizers.FeatureTokenizer.FromFeatures(768, 65536, 2048)
self.dataset = self.compute_dataset()
return
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx: int):
if idx < 0:
if -idx > len(self):
raise ValueError
idx = len(self) + idx
return self.dataset[idx]
def compute_dataset(self):
seq_len = 256
f_len = {
"GreweFeatures": 6,
"AutophaseFeatures": 56,
"InstCountFeatures": 70,
}
pad_len = seq_len - sum(list(f_len.values()))
dataset = []
for dp in self.corpus:
for fspace in {"GreweFeatures", "AutophaseFeatures", "InstCountFeatures"}:
inp = []
for n, x in dp[fspace].items():
if n not in {"F2:coalesced/mem", "F4:comp/mem"}:
try:
x = int(x)
except Exception:
continue
inp.append(self.feat_tokenizer.TokenizeFeature(int(x)))
assert len(inp) == f_len[fspace], len(inp)
target_feats = dp["AutophaseFeatures"]
target = []
for x in target_feats.values():
try:
x = int(x)
except Exception:
continue
target.append(self.feat_tokenizer.TokenizeFeature(int(x)))
assert len(target) == f_len["AutophaseFeatures"], len(target)
if fspace == "GreweFeatures":
d = {
'inputs' : torch.LongTensor(inp + [self.feat_tokenizer.padToken] * (f_len["AutophaseFeatures"] + f_len["InstCountFeatures"] + pad_len)),
'target' : torch.LongTensor(target)
}
elif fspace == "AutophaseFeatures":
d = {
'inputs' : torch.LongTensor([self.feat_tokenizer.padToken] * f_len["GreweFeatures"] + inp + [self.feat_tokenizer.padToken] * (f_len["InstCountFeatures"] + pad_len)),
'target' : torch.LongTensor(target)
}
else:
d = {
'inputs' : torch.LongTensor([self.feat_tokenizer.padToken] * (f_len["GreweFeatures"] + f_len["AutophaseFeatures"]) + inp + [self.feat_tokenizer.padToken] * pad_len),
'target' : torch.LongTensor(target)
}
d['padding_mask'] = d['inputs'] == self.feat_tokenizer.padToken
dataset.append(d)
return dataset
def generate_square_subsequent_mask(sz: int) -> torch.Tensor:
"""Generates an upper-triangular matrix of -inf, with zeros on diag."""
return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)
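# Illustrative values for the helper above: generate_square_subsequent_mask(3)
# returns
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# i.e. every position may attend to itself and to earlier positions only.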
def data_process(raw_text_iter: torch.utils.data.dataset.IterableDataset) -> torch.Tensor:
"""Converts raw text into a flat Tensor."""
data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]
return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
def batchify(data: torch.Tensor, bsz: int) -> torch.Tensor:
"""Divides the data into bsz separate sequences, removing extra elements
that wouldn't cleanly fit.
Args:
data: Tensor, shape [N]
bsz: int, batch size
Returns:
Tensor of shape [N // bsz, bsz]
"""
seq_len = data.size(0) // bsz
data = data[:seq_len * bsz]
data = data.view(bsz, seq_len).t().contiguous()
return data.to(device)
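# Worked example of the batching arithmetic above (the module-level `device`
# it moves data to is assumed to be defined by the caller): a flat tensor of
# N = 26 tokens with bsz = 4 keeps seq_len * bsz = 6 * 4 = 24 tokens and is
# returned as a [6, 4] tensor whose column i holds the i-th contiguous chunk
# of 6 tokens.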
def get_data_features(db, tokenizer, size_limit = None) -> typing.List[typing.Dict[str, typing.Dict[str, float]]]:
"""
Get or set feature with data list of tuples.
"""
datapoints = []
db_feats = db.get_data_features(tokenizer, size_limit)
for inp in tqdm.tqdm(db_feats, total = len(db_feats), desc = "Fetch data"):
feats = workers.ContentFeat(inp)
if len(inp) == 2:
src, _ = inp
include = ""
else:
src, include, _ = inp
try:
datapoints.append({
"GreweFeatures" : feats["GreweFeatures"],
"AutophaseFeatures" : feats["AutophaseFeatures"],
"InstCountFeatures" : feats["InstCountFeatures"],
})
except KeyError as e:
l.logger().warn(e)
return datapoints
def Train(feat_vecs):
size = len(feat_vecs)
train_data, val_data = feat_vecs[:(9 * size) // 10], feat_vecs[(9 * size) // 10:]
device = 'cuda'
num_epochs = 30
batch_size = 32
num_warmup_steps = 5000
learning_rate = 45 / 1e6
train_dataset = FeatureDataset(train_data)
val_dataset = FeatureDataset(val_data)
vocab_size = len(train_dataset.feat_tokenizer)
emsize = 64 # embedding dimension
d_hid = 128 # dimension of the feedforward network model in nn.TransformerEncoder
nlayers = 2 # number of nn.TransformerEncoderLayer in nn.TransformerEncoder
nhead = 2 # number of heads in nn.MultiheadAttention
dropout = 0.1 # dropout probability
model = TransformerModel(
vocab_size,
emsize,
nhead,
d_hid,
nlayers,
train_dataset.feat_tokenizer.padToken,
dropout
).to(device)
## Define dataloaders.
train_loader = torch.utils.data.dataloader.DataLoader(
dataset = train_dataset,
batch_size = batch_size,
sampler = torch.utils.data.RandomSampler(train_dataset, replacement = False),
num_workers = 0,
drop_last = False,
)
val_loader = torch.utils.data.dataloader.DataLoader(
dataset = val_dataset,
batch_size = batch_size,
sampler = torch.utils.data.RandomSampler(val_dataset, replacement = False),
num_workers = 0,
drop_last = False,
)
  ## Also create scheduler and optimizer.
opt, scheduler = optimizer.create_optimizer_and_scheduler(
model = model,
num_train_steps = (num_epochs * len(train_dataset)) // batch_size,
warmup_steps = num_warmup_steps,
learning_rate = learning_rate,
)
loss_fn = torch.nn.CrossEntropyLoss()
model.zero_grad()
hook_path = pathlib.Path("./feat_reconstruction").resolve()
hook_path.mkdir(exist_ok = True, parents = True)
train_hook = hooks.tensorMonitorHook(hook_path, 0, 50)
val_hook = hooks.tensorMonitorHook(pathlib.Path("./feat_reconstruction").resolve(), 0, 10)
for ep in tqdm.tqdm(range(num_epochs), desc = "Epoch", leave = False):
model.train()
for batch in tqdm.tqdm(train_loader, total = len(train_loader), desc = "Batch", leave = False):
inp, att, target = batch['inputs'], batch['padding_mask'], batch['target']
output = model(inp.to(device), target.to(device), src_key_padding_mask = att.to(device))
loss = loss_fn(output.view(-1, len(train_dataset.feat_tokenizer)), target.to(device).view(-1))
opt.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
opt.step()
scheduler.step()
train_hook.step(total_loss = loss.item())
l.logger().info("Epoch {} loss {}".format(ep, train_hook.epoch_loss))
train_hook.end_epoch()
model.eval()
for batch in tqdm.tqdm(train_loader, total = len(train_loader), desc = "Val Train Batch", leave = False):
inp, att, target = batch['inputs'], batch['padding_mask'], batch['target']
output = model(inp.to(device), target.to(device), src_key_padding_mask = att.to(device))
loss = loss_fn(output.view(-1, len(train_dataset.feat_tokenizer)), target.to(device).view(-1))
euclids = []
accuracy = []
for bi in range(output.size(0)):
raw_out = torch.argmax(output[bi], dim = 1).cpu()
targ = target[bi].cpu()
assert len(raw_out) == len(targ), "{}/{}".format(len(raw_out), len(targ))
dist = 0.0
for vi in range(len(targ)):
dist += (targ[vi] - raw_out[vi])**2
euclids.append(math.sqrt(dist))
accuracy.append(len(torch.where(targ == raw_out)[0]) / len(targ))
mean_dist = sum(euclids) / len(euclids)
mean_accuracy = sum(accuracy) / len(accuracy)
val_hook.step(val_train_loss = loss.item(), val_train_dist = mean_dist, val_train_accuracy = mean_accuracy)
for batch in tqdm.tqdm(val_loader, total = len(val_loader), desc = "Val Batch", leave = False):
inp, att, target = batch['inputs'], batch['padding_mask'], batch['target']
output = model(inp.to(device), target.to(device) ) #, src_key_padding_mask = att.to(device))
loss = loss_fn(output.view(-1, len(train_dataset.feat_tokenizer)), target.to(device).view(-1))
euclids = []
accuracy = []
for bi in range(output.size(0)):
raw_out = torch.argmax(output[bi], dim = 1).cpu()
targ = target[bi].cpu()
assert len(raw_out) == len(targ), "{}/{}".format(len(raw_out), len(targ))
dist = 0.0
for vi in range(len(targ)):
dist += (targ[vi] - raw_out[vi])**2
euclids.append(math.sqrt(dist))
accuracy.append(len(torch.where(targ == raw_out)[0]) / len(targ))
mean_dist = sum(euclids) / len(euclids)
mean_accuracy = sum(accuracy) / len(accuracy)
val_hook.step(val_loss = loss.item(), val_dist = mean_dist, val_accuracy = mean_accuracy)
return
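# Worked illustration of the per-sample metrics above (hypothetical values):
# for a target vector [3, 5, 2] and argmax prediction [3, 4, 2], the
# Euclidean distance is sqrt((5 - 4)**2) = 1.0 and the accuracy is 2/3,
# since two of the three positions match exactly.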
def Validate(model, tokenizer, train_loader, val_loader):
return
def main(*args):
db = encoded.EncodedContentFiles(url = "sqlite:///{}".format(ENCODED_DB_PATH), must_exist = True)
tokenizer = tokenizers.TokenizerBase.FromFile(pathlib.Path(TOKENIZER_PATH).resolve())
feat_vecs = get_data_features(db, tokenizer)
Train(feat_vecs)
return
if __name__ == "__main__":
app.run(main)
| 13,876 | 35.518421 | 185 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/scratchpad/test_torch_sampler.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file defines the streaming generators for model training data.
We train models on overlapping one-hot encoded text sequences. For a corpus of
a reasonable size, the full training data may not fit in memory. This modules
provides Python Generator classes for use by a sequential Keras model's
fit_generator() method to stream batches of training data.
"""
import torch
WORLD_SIZE = 16
dataset_tensor = [0, 1, 2, 3]
cumulative_sizes = [100000, 200000, 300000, 400000]
dset_iter = iter(dataset_tensor)
def get_rand_tensor(epoch, dset_idx, world_rank):
# global dataset_tensor
# global dset_iter
# try:
# dataset_idx = next(dset_iter)
# except StopIteration:
# dset_iter = iter(dataset_tensor)
# dataset_idx = next(dset_iter)
dataset_idx = dset_idx
lb, ub = cumulative_sizes[dataset_idx - 1] if dataset_idx else 0, cumulative_sizes[dataset_idx]
bounds = (lb, ub)
generator = torch.Generator()
generator.manual_seed(epoch)
size = bounds[1] - bounds[0]
rand_tensor = [x + bounds[0] for x in torch.randperm(bounds[1] - bounds[0], generator = generator).tolist()]
rounded_total = (len(rand_tensor) // WORLD_SIZE) * WORLD_SIZE
# print(rounded_total, rand_tensor, world_rank, rounded_total, WORLD_SIZE)
rand_tensor = rand_tensor[world_rank:rounded_total:WORLD_SIZE]
return rand_tensor
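# Worked example of the sharding slice above: for dset_idx = 0 the bounds are
# (0, 100000), so rounded_total = (100000 // 16) * 16 = 100000 and world_rank
# r receives rand_tensor[r], rand_tensor[r + 16], ... -- 6250 indices per
# rank with no overlap, which is exactly what the loop below checks for
# ranks 0-3.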
for y in range(20):
idx = y
l1, l2, l3, l4 = get_rand_tensor(0, idx%4, 0), get_rand_tensor(0, idx%4, 1), get_rand_tensor(0, idx%4, 2), get_rand_tensor(0, idx%4, 3)
visited = set()
for x in l1 + l2 + l3 + l4:
if x in visited:
print(visited)
print(x)
raise ValueError("Ton ipiame!")
else:
visited.add(x)
print("Ok")
| 2,289 | 33.179104 | 137 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/data_generator.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data generators for active learning committee.
"""
import typing
import copy
import pathlib
import numpy as np
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import logging as l
class ListTrainDataloader(torch.utils.data.Dataset):
"""
Modular dataloading class for downstream tasks.
"""
def __init__(self,
dataset : typing.List[typing.Tuple[typing.List, typing.List]],
lazy : bool = False,
):
super(ListTrainDataloader, self).__init__()
## The dataset here should be a list, and each entry
## must be a tuple containing the input and the target vector.
if len(dataset) <= 0 and not lazy:
l.logger().warn("Active learning committee dataset is empty. Make sure this is expected behavior.")
self.compute_dataset(dataset)
return
def compute_dataset(self, dataset) -> None:
"""
Convert list dataset to torch tensors.
"""
self.dataset = []
for dp in dataset:
if len(dp) == 2:
inp, targ = dp
self.dataset.append(
{
'input_ids' : torch.FloatTensor(inp),
'target_ids': torch.LongTensor(targ),
}
)
elif len(dp) == 3:
inp, targ, idx = dp
self.dataset.append(
{
'input_ids' : torch.FloatTensor(inp),
'target_ids': torch.LongTensor(targ),
'idx' : torch.LongTensor(idx),
}
)
return
def get_batched_dataset(self) -> typing.Dict[str, np.array]:
"""
Batch the whole dataset by keys and return it.
"""
return {
'input_ids' : np.asarray([x['input_ids'].numpy() for x in self.dataset]),
'target_ids' : np.asarray([x['target_ids'].numpy() for x in self.dataset]),
}
def get_random_subset(self, num: int, seed: int = None) -> 'ListTrainDataloader':
"""
Get a sample of num random samples from dataset.
"""
ret = ListTrainDataloader([], lazy = True)
num = min(num, len(self.dataset))
if seed:
generator = torch.Generator()
generator.manual_seed(seed)
else:
generator = None
    rand = set(torch.randperm(len(self.dataset), generator = generator).tolist()[:num])
ret.dataset = [x for idx, x in enumerate(self.dataset) if idx in rand]
return ret
def get_sliced_subset(self, l: int = None, r: int = None) -> 'ListTrainDataloader':
"""
Implement slice operation of current List Dataset.
"""
ret = ListTrainDataloader([], lazy = True)
if l is None:
l = 0
if r is None:
r = len(self.dataset)
ret.dataset = self.dataset[l:r]
return ret
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> typing.Dict[str, torch.Tensor]:
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
return self.dataset[idx]
def __add__(self, dl: 'ListTrainDataloader') -> 'ListTrainDataloader':
ret = ListTrainDataloader([], lazy = True)
ret.dataset = copy.copy(self.dataset)
if dl:
ret.dataset += dl.dataset
return ret
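# Minimal usage sketch (hypothetical feature vectors): each entry is an
# (input, target) pair of plain Python lists, converted to float/long tensors.
def _list_train_dataloader_example():
  dl = ListTrainDataloader([([0.1, 0.2], [1]), ([0.3, 0.4], [0])])
  merged = dl + dl.get_sliced_subset(0, 1)    # "+" concatenates the datasets
  return len(merged), merged[0]['input_ids']  # (3, tensor([0.1000, 0.2000]))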
class DictPredictionDataloader(torch.utils.data.Dataset):
"""
Dataloading class that takes datapoint dictionary.
"""
def __init__(self,
dataset: typing.List[typing.Dict[str, typing.List]],
lazy : bool = False,
):
super(DictPredictionDataloader, self).__init__()
if len(dataset) <= 0 and not lazy:
      raise ValueError("Sample dataset is empty.")
self.compute_dataset(dataset)
return
def compute_dataset(self,
dataset: typing.List[typing.Dict[str, typing.List]]
) -> None:
"""
Batch the whole dataset by keys and return it.
"""
self.dataset = []
for idx, dp in enumerate(dataset):
self.dataset.append(
{
'idx' : torch.LongTensor([idx]),
'static_features' : torch.FloatTensor(dp['static_features']),
'runtime_features' : torch.LongTensor(dp['runtime_features']),
'input_ids' : torch.FloatTensor(dp['input_ids']),
}
)
return
def get_batched_dataset(self) -> typing.Dict[str, np.array]:
"""
Batch the whole dataset by keys and return it.
"""
return {
'idx' : np.asarray([x['idx'].numpy() for x in self.dataset]),
'static_features' : np.asarray([x['static_features'].numpy() for x in self.dataset]),
'runtime_features' : np.asarray([x['runtime_features'].numpy() for x in self.dataset]),
'input_ids' : np.asarray([x['input_ids'].numpy() for x in self.dataset]),
}
def get_random_subset(self, num: int, seed: int = None) -> 'DictPredictionDataloader':
"""
Get a sample of num random samples from dataset.
"""
ret = DictPredictionDataloader([], lazy = True)
num = min(num, len(self.dataset))
if seed:
generator = torch.Generator()
generator.manual_seed(seed)
else:
generator = None
rand = set(torch.randperm(len(self.dataset), generator = generator).tolist()[:num])
ret.dataset = [x for idx, x in enumerate(self.dataset) if idx in rand]
return ret
def get_sliced_subset(self, l: int = None, r: int = None) -> 'DictPredictionDataloader':
"""
Implement slice operation of current List Dataset.
"""
ret = DictPredictionDataloader([], lazy = True)
if l is None:
l = 0
if r is None:
r = len(self.dataset)
ret.dataset = self.dataset[l:r]
return ret
def __len__(self) -> int:
return len(self.dataset)
def __getitem__(self, idx: int) -> typing.Dict[str, torch.Tensor]:
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
return self.dataset[idx]
def __add__(self, dl: 'DictPredictionDataloader') -> 'DictPredictionDataloader':
ret = DictPredictionDataloader([], lazy = True)
ret.dataset = copy.copy(self.dataset)
if dl:
ret.dataset += dl.dataset
return ret
| 6,852 | 31.478673 | 105 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/active_models.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Active Learning feature space models."""
import pathlib
import typing
from deeplearning.benchpress.util import pbutil
from deeplearning.benchpress.util import commit
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.active_models import downstream_tasks
from deeplearning.benchpress.active_models.committee import active_committee
from deeplearning.benchpress.active_models.committee import config as com_config
from deeplearning.benchpress.active_models.expected_error_reduction import eer
from deeplearning.benchpress.active_models.expected_error_reduction import config as eer_config
from deeplearning.benchpress.proto import active_learning_pb2
from absl import flags
from deeplearning.benchpress.util import logging as l
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
"disable_active_learning",
False,
"Set True to disable active learner from learning feature space."
"All candidate feature vectors have equal likelihood of being important"
)
flags.DEFINE_integer(
"num_active_samples",
256,
"Select number of points you want to sample with active learner."
)
def AssertConfigIsValid(config: active_learning_pb2.ActiveLearner) -> active_learning_pb2.ActiveLearner:
"""
Parse proto description and check for validity.
"""
pbutil.AssertFieldConstraint(
config,
"downstream_task",
lambda x: x in downstream_tasks.TASKS,
"Downstream task has to be one of {}".format(', '.join([str(x) for x in downstream_tasks.TASKS]))
)
pbutil.AssertFieldIsSet(config, "training_corpus")
pbutil.AssertFieldIsSet(config, "num_train_steps")
pbutil.AssertFieldIsSet(config, "random_seed")
if config.downstream_task in {"Grewe", "FeatureLessGrewe"}:
pbutil.AssertFieldIsSet(config, "top_k")
p = pathlib.Path(config.training_corpus).resolve()
if not p.exists() and config.num_train_steps > 0:
raise FileNotFoundError(p)
if config.HasField("query_by_committee"):
com_config.AssertConfigIsValid(config.query_by_committee)
elif config.HasField("expected_error_reduction"):
eer_config.AssertConfigIsValid(config.expected_error_reduction)
else:
raise NotImplementedError(config)
return config
class Model(object):
"""Predictive models for active learning.
Please note model instances should be treated as immutable. Upon
instantiation, a model's properties are used to determine its hash. If you
modify a property after instantiation, the hash will be out of date, which
can lead to bad things happening.
"""
def __init__(self,
config : active_learning_pb2.ActiveLearner,
cache_path : pathlib.Path,
):
"""Instantiate a model.
Args:
config: A Model message.
Raises:
TypeError: If the config argument is not a Model proto.
UserError: In case on an invalid config.
"""
# Error early, so that a cache isn't created.
if not isinstance(config, active_learning_pb2.ActiveLearner):
t = type(config).__name__
raise TypeError(f"Config must be an ActiveLearner proto. Received: '{t}'")
self.config = active_learning_pb2.ActiveLearner()
# Validate config options.
self.config.CopyFrom(AssertConfigIsValid(config))
self.cache_path = cache_path / "active_model"
if environment.WORLD_RANK == 0:
self.cache_path.mkdir(exist_ok = True, parents = True)
(self.cache_path / "samples").mkdir(exist_ok = True)
distrib.barrier()
(self.cache_path / "downstream_task").mkdir(exist_ok = True, parents = True)
self.downstream_task = downstream_tasks.DownstreamTask.FromTask(
self.config.downstream_task,
pathlib.Path(self.config.training_corpus).resolve(),
self.cache_path / "downstream_task",
self.config.random_seed,
top_k = self.config.top_k if self.config.HasField("top_k") else None,
test_db = pathlib.Path(self.config.test_db).resolve() if self.config.HasField("test_db") else None
)
if environment.WORLD_RANK == 0:
## Store current commit
commit.saveCommit(self.cache_path)
if self.config.HasField("query_by_committee"):
self.backend = active_committee.QueryByCommittee(self.config, self.cache_path, self.downstream_task)
elif self.config.HasField("expected_error_reduction"):
self.backend = eer.ExpectedErrorReduction(self.config, self.cache_path, self.downstream_task)
l.logger().info("Initialized {} in {}".format(self.backend, self.cache_path))
return
def Train(self, **kwargs) -> "Model":
"""Train the model.
Returns:
The model instance.
Raises:
UnableToAcquireLockError: If the model is locked (i.e. there is another
process currently modifying the model).
"""
if FLAGS.disable_active_learning:
l.logger().warn("Active learning has been disabled. Skip training.")
else:
self.backend.Train(**kwargs)
return self
def UpdateLearn(self, update_dataloader: 'torch.utils.data.Dataset') -> None:
"""
Train-update active learner with new generated datapoints.
"""
if FLAGS.disable_active_learning:
l.logger().warn("Active learning has been disabled. Skip update training.")
else:
self.Train(update_dataloader = update_dataloader)
return
def Sample(self, num_samples: int = None) -> typing.List[typing.Dict[str, float]]:
"""
Sample the active learner.
Knowing a downstream task, the active learning model samples
and returns the datapoints that are deemed valuable.
"""
sample_set = self.downstream_task.sample_space(num_samples = FLAGS.num_active_samples if num_samples is None else num_samples)
if FLAGS.disable_active_learning:
l.logger().warn("Active learning has been disabled. Skip update training.")
l.logger().warn("This is passive learning mode to illustrate AL's significance.")
l.logger().warn("Instead of querying, a random datapoint is returned.")
return [
{
'idx' : int(x['idx']),
'static_features' : self.downstream_task.VecToStaticFeatDict(x['static_features'].numpy()),
'runtime_features': self.downstream_task.VecToRuntimeFeatDict(x['runtime_features'].numpy()),
'input_features' : self.downstream_task.VecToInputFeatDict(x['input_ids'].numpy()),
} for x in
sample_set.get_random_subset(num = len(sample_set), seed = self.config.random_seed).dataset
]
else:
return self.backend.Sample(sample_set = sample_set)
def SamplerCache(self, sampler: 'samplers.Sampler') -> pathlib.Path:
"""Get the path to a sampler cache.
Args:
sampler: A Sampler instance.
Returns:
A path to a directory. Note that this directory may not exist - it is
created only after a call to Sample().
"""
return self.cache_path / "samples" / sampler.hash
@property
def is_trained(self) -> bool:
return self.backend.is_trained
| 7,599 | 37.77551 | 130 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/downstream_tasks.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module specifies the range of available
downstream tasks that the committee can be trained on.
The input and output features per downstream task are defined.
"""
import pathlib
import pickle
import math
import functools
import typing
import tqdm
import copy
import multiprocessing
import time
import numpy as np
from deeplearning.benchpress.active_models import data_generator
from deeplearning.benchpress.active_models import downstream_data
from deeplearning.benchpress.experiments import cldrive
from deeplearning.benchpress.features import extractor
from deeplearning.benchpress.features import grewe
from deeplearning.benchpress.features import hidden_state
from deeplearning.benchpress.corpuses import tokenizers
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import http_server
from deeplearning.benchpress.util import crypto
from deeplearning.benchpress.util import logging as l
from deeplearning.benchpress.models.torch_bert.data_generator import JSON_to_ActiveSample
from deeplearning.benchpress.models.torch_bert.data_generator import ActiveSample_to_JSON
from absl import app, flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"server_tokenizer",
None,
"Set path for tokenizer to be used by downstream server."
)
flags.DEFINE_string(
"server_cldrive_cache",
None,
"Set path for cldrive_cache to be used by downstream server."
)
flags.DEFINE_boolean(
"only_optimal_gsize",
False,
"If True, only the best matching global size to transferred_bytes will be executed. Otherwise, everything."
)
def ExtractorWorker(cldrive_entry: cldrive.CLDriveSample, fspace: str):
"""
Worker that extracts features and buffers cldrive entry, to maintain consistency
among multiprocessed data.
"""
features = extractor.ExtractFeatures(cldrive_entry.source, [fspace])
if fspace in features and features[fspace]:
return features[fspace], cldrive_entry
return None
class DownstreamTask(object):
"""
Downstream Task generic class.
"""
@classmethod
def FromTask(cls,
task : str,
corpus_path : pathlib.Path,
cache_path : pathlib.Path,
random_seed : int,
**kwargs
) -> "DownstreamTask":
return TASKS[task](corpus_path, cache_path, random_seed, **kwargs)
def __init__(self,
name : str,
cache_path : pathlib.Path,
task_type : typing.Callable,
random_seed : int,
use_as_server : bool
) -> None:
self.name = name
self.random_seed = random_seed
self.cache_path = cache_path
if environment.WORLD_RANK == 0 and not use_as_server:
self.downstream_data = downstream_data.DownstreamData(
"sqlite:///{}/downstream_data.db".format(cache_path),
task_type = task_type,
must_exist = False,
)
return
def step_generation(self, candidates: typing.List['ActiveSample']) -> None:
raise NotImplementedError("Abstract Class")
def saveCheckpoint(self) -> None:
raise NotImplementedError("Abstract Class")
def loadCheckpoint(self) -> None:
raise NotImplementedError("Abstract Class")
class GreweAbstract(DownstreamTask):
"""
An abstract class for Grewe CPU vs GPU -related downstream tasks.
"""
@property
def runtime_features_size(self) -> int:
return 2
@property
def static_features_size(self) -> int:
return len(self.static_features_labels)
@property
def output_size(self) -> int:
return 2
@property
def output_labels(self) -> typing.Tuple[str, str]:
return ["CPU", "GPU"]
@property
def output_ids(self) -> typing.Tuple[str, str]:
return [0, 1]
@property
def test_set(self) -> 'torch.Dataset':
if self.test_db:
if not self.test_dataset:
data = [x for x in self.test_db.get_valid_data(dataset = "GPGPU_benchmarks")]
features_iter = extractor.ExtractFeaturesIter([x.source for x in data], [self.feature_space])[self.feature_space]
test_data = []
for dp, features in tqdm.tqdm(zip(data, features_iter), total = len(data), desc = "Test Set"):
test_data.append(
(
self.InputtoEncodedVector(features, dp.transferred_bytes, dp.local_size),
[self.TargetLabeltoID(dp.status)],
[int(dp.id)],
)
)
self.test_dataset = data_generator.ListTrainDataloader(test_data)
self.saveCheckpoint()
return self.test_dataset
else:
return None
def __init__(self,
name : str,
cache_path : pathlib.Path,
task_type : typing.Callable,
random_seed : int,
top_k : int,
use_as_server : bool,
test_db : pathlib.Path = None,
) -> None:
super(GreweAbstract, self).__init__(
name,
cache_path,
task_type,
random_seed,
use_as_server,
)
if not use_as_server:
self.top_k = top_k
if test_db:
self.test_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(str(test_db)), must_exist = True)
else:
self.test_db = None
self.test_dataset = None
return
def setup_server(self) -> None:
"""
This is server mode.
In server mode, initialize the serving process.
"""
if environment.WORLD_RANK == 0:
self.cl_proc, self.work_flag, self.read_queue, self.write_queues, self.reject_queues = http_server.start_server_process()
return
def TargetIDtoLabels(self, id: int) -> str:
"""
Integer ID to label of predictive model.
"""
return {
0: "CPU",
1: "GPU",
}[id]
def TargetLabeltoID(self, label: str) -> int:
"""
Predictive label to ID.
"""
return {
"CPU": 0,
"GPU": 1,
}[label]
def TargetLabeltoEncodedVector(self, label: str) -> typing.List[int]:
"""
Label to target vector.
"""
return {
"CPU": [1, 0],
"GPU": [0, 1],
}[label]
def StaticFeatDictToVec(self, static_feats: typing.Dict[str, float]) -> typing.List[float]:
"""
Process grewe static features dictionary into list of floats to be passed as tensor.
"""
return [static_feats[key] for key in self.static_features_labels]
def VecToStaticFeatDict(self, feature_values: typing.List[float]) -> typing.Dict[str, float]:
"""
Process float vector of feature values to dictionary of features.
"""
return {key: val for key, val in zip(self.static_features_labels, feature_values)}
def VecToRuntimeFeatDict(self, runtime_values: typing.List[int]) -> typing.Dict[str, int]:
"""
Process runtime int values to runtime features dictionary.
"""
trb, ls = runtime_values
return {
'transferred_bytes' : int(trb),
'local_size' : int(ls),
}
def VecToInputFeatDict(self, input_ids: typing.List[float]) -> typing.Dict[str, float]:
"""
Convert to dictionary of predictive model input features.
"""
return {
k: v for k, v in zip(self.input_labels, input_ids)
}
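  # Tiny illustration of the conversions above (hypothetical values):
  # TargetLabeltoID("GPU") == 1, TargetLabeltoEncodedVector("GPU") == [0, 1],
  # TargetIDtoLabels(1) == "GPU", and VecToRuntimeFeatDict([1024, 256]) ==
  # {'transferred_bytes': 1024, 'local_size': 256}.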
def CollectSingleRuntimeFeature(self,
sample: 'ActiveSample',
tokenizer: 'tokenizers.TokenizerBase',
store_rejects: bool = False,
) -> typing.Tuple[typing.List['ActiveSample'], typing.List['ActiveSample']]:
"""
Overloaded function to compute runtime features for a single instance.
"""
def create_sample(s: 'ActiveSample', cached: cldrive.CLDriveSample, trb: int, gs: int) -> typing.List['ActiveSample']:
nrfeats = copy.deepcopy(s.runtime_features)
nrfeats['transferred_bytes'] = trb
nrfeats['global_size'] = int(2**gs)
nrfeats['label'] = cached.status
if nrfeats['label'] in {"CPU", "GPU"}:
nrfeats['cpu_transfer_ns'] = self.corpus_db.reduce_execution_times(cached.cpu_transfer_time_ns)
nrfeats['cpu_kernel_ns'] = self.corpus_db.reduce_execution_times(cached.cpu_kernel_time_ns)
nrfeats['gpu_transfer_ns'] = self.corpus_db.reduce_execution_times(cached.gpu_transfer_time_ns)
nrfeats['gpu_kernel_ns'] = self.corpus_db.reduce_execution_times(cached.gpu_kernel_time_ns)
return s._replace(runtime_features = nrfeats)
exp_tr_bytes = sample.runtime_features['transferred_bytes']
local_size = sample.runtime_features['local_size']
found = False
found_bytes = None
gsize = int(max(1, math.log2(local_size)))
opt_gsize = gsize
code = tokenizer.ArrayToCode(sample.sample)
new_samples = []
rejects = []
last_cached = None
while not found and gsize <= 20:
sha256 = crypto.sha256_str(code + "BenchPress" + str(2**gsize) + str(local_size))
if sha256 in self.corpus_db.status_cache:
cached = self.corpus_db.get_entry(code, "BenchPress", int(2**gsize), int(local_size))
else:
## If not cached, compute.
cached = self.corpus_db.update_and_get(
code,
sample.features,
"BenchPress",
global_size = int(2**gsize),
local_size = int(local_size),
num_runs = 10000,
timeout = 60,
)
if cached is not None and cached.status in {"CPU", "GPU"}:
        ## If element execution has succeeded.
tr_bytes = cached.transferred_bytes
if FLAGS.only_optimal_gsize:
## only_optimal_size means you compute only one gsize combination.
## The one that falls closest to the targeted transferred_bytes.
if tr_bytes < exp_tr_bytes or found_bytes is None or abs(exp_tr_bytes - tr_bytes) < abs(exp_tr_bytes - found_bytes):
## If bytes still slide below than expected,
## OR bytes are more than expected but it's the first successful execution,
## OR if bytes just surpassed the expected tr bytes and the distance from target is closer than the previous tr_bytes,
## Then update the optimal global size and the found bytes.
opt_gsize = gsize
found_bytes = tr_bytes
last_cached = cached
if tr_bytes >= exp_tr_bytes:
## Set this to True only when you surpass the expected.
## Only then you can be sure that you got as close as possible to the optimal.
found = True
else:
s = create_sample(
s = sample,
cached = cached,
trb = tr_bytes,
gs = gsize
)
if s.runtime_features['label'] in {"CPU", "GPU"}:
new_samples.append(s)
elif store_rejects:
rejects.append(s)
elif cached is not None:
        ## If execution failed, store to rejects using the expected transferred bytes.
if store_rejects:
rejects.append(
create_sample(
s = sample,
cached = cached,
trb = exp_tr_bytes,
gs = gsize,
)
)
gsize += 1
if FLAGS.only_optimal_gsize:
## If only the optimal size is needed and the execution has succeeded,
## create a new copy of the sample
if found_bytes:
s = create_sample(sample, last_cached, found_bytes, opt_gsize)
if s.runtime_features['label'] in {"CPU", "GPU"}: ## This check is redundant, but better safe than sorry.
new_samples = [s]
      elif store_rejects:
        rejects.append(
          create_sample(
            s      = sample,
            cached = last_cached,
            trb    = exp_tr_bytes,
            gs     = gsize,
          )
        )
return new_samples, rejects
def CollectRuntimeFeatures(self,
samples : typing.List['ActiveSample'],
tokenizer : 'tokenizers.TokenizerBase',
) -> typing.List['ActiveSample']:
"""
Collect the top_k samples that can run on CLDrive and set their global size
to the appropriate value so it can match the transferred bytes.
Args:
samples:
List of Active Samples collected from LM inference.
tokenizer:
Tokenizer.
"""
if FLAGS.use_http_server:
## For server mode, master node, sleep while the backend is still working.
if environment.WORLD_RANK == 0:
new_samples = []
while int(http_server.client_status_request()[1]) >= 300: # While the backend is WORKING
## Backend is working.
time.sleep(2)
while int(http_server.client_status_request()[1]) != 200:
## While more samples.
new_samples += http_server.client_get_request()
time.sleep(1)
if environment.WORLD_SIZE > 1:
distrib.broadcast(new_samples)
else:
# Else synchronize with new data.
new_samples = distrib.broadcast()
distrib.barrier()
new_samples = [JSON_to_ActiveSample(x) for x in new_samples]
if self.top_k != -1:
## Return only the results that come from the top_k code samples.
top_k_codes = set()
return_samples = []
for s in sorted([x for x in new_samples if x.runtime_features['label'] in {"CPU", "GPU"}], key = lambda x: x.score):
key = ''.join([str(x) for x in s.sample])
if key not in top_k_codes and len(top_k_codes) < self.top_k:
top_k_codes.add(key)
return_samples.append(s)
elif key in top_k_codes:
return_samples.append(s)
l.logger().warn("Collected {} new samples from {} top_k code".format(len(return_samples), len(top_k_codes)))
return return_samples
else:
l.logger().warn("Collected {} new samples from http server".format(len(new_samples)))
return sorted([x for x in new_samples if x.runtime_features['label'] in {"CPU", "GPU"}], key = lambda x: x.score)
else:
## If not server mode, compute locally labels for each sample.
new_samples = []
total = 0
for sample in tqdm.tqdm(sorted(samples, key = lambda x: x.score), total = len(samples), desc = "CLDrive", leave = False):
ret, rej = self.CollectSingleRuntimeFeature(sample, tokenizer)
if len(ret) > 0:
total += 1
for s in ret:
if s.runtime_features['label'] in {"CPU", "GPU"}:
new_samples.append(s)
if self.top_k != -1 and total >= self.top_k:
return new_samples
return new_samples
def UpdateDataGenerator(self,
new_samples : typing.List['ActiveSample'],
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
                           ) -> typing.Tuple[typing.List, data_generator.ListTrainDataloader]:
"""
    Collect new generated samples, find their runtime features and process them into a torch dataset.
"""
new_samples = self.CollectRuntimeFeatures(new_samples, tokenizer)
self.UpdateDownstreamDatabase(new_samples, target_features, tokenizer)
updated_dataset = [
(
self.InputtoEncodedVector(entry.features,
entry.runtime_features['transferred_bytes'],
entry.runtime_features['local_size']
),
[self.TargetLabeltoID(entry.runtime_features['label'])]
) for entry in new_samples
]
if len(updated_dataset) == 0:
l.logger().warn("Update dataset is empty.")
return updated_dataset, data_generator.ListTrainDataloader(updated_dataset, lazy = True)
def UpdateTrainDataset(self, updated_dataloader: data_generator.ListTrainDataloader) -> None:
"""
After active learner has been updated, store updated samples to original train dataset.
"""
self.data_generator = self.data_generator + updated_dataloader
self.saveCheckpoint()
def step_generation(self, candidates: typing.List['ActiveSample']) -> None:
"""
End of LM generation's epoch hook.
"""
if FLAGS.use_http_server:
serialized = []
for cand in candidates:
serialized.append(
ActiveSample_to_JSON(cand)
)
http_server.client_put_request(serialized)
return
def ServeRuntimeFeatures(self, tokenizer: 'tokenizers.TokenizerBase') -> None:
"""
In server mode, listen to the read queue, collect runtime features,
append to local cache and publish to write queue for the client to fetch.
    This is currently implemented only for the HTTP server, not for the socket server.
"""
try:
while self.cl_proc.is_alive():
if not self.read_queue.empty():
self.work_flag.value = True
source, serialized = self.read_queue.get()
sample = JSON_to_ActiveSample(serialized)
ret, rej = self.CollectSingleRuntimeFeature(sample, tokenizer, store_rejects = True)
for x in ret:
self.write_queues[source].append(ActiveSample_to_JSON(x))
for x in rej:
self.reject_queues[source].append(ActiveSample_to_JSON(x))
else:
self.work_flag.value = False
time.sleep(1)
except KeyboardInterrupt:
pass
return
def saveCheckpoint(self) -> None:
"""
Store data generator.
"""
if environment.WORLD_RANK == 0:
with open(self.cache_path / "downstream_task_dg.pkl", 'wb') as outf:
pickle.dump(
{
'data_generator': self.data_generator,
'rand_generator': self.rand_generator.get_state(),
'test_dataset' : self.test_dataset,
},
outf
)
return
  def loadCheckpoint(self) -> typing.Optional[typing.Dict[str, typing.Any]]:
"""
Load state of downstream task.
"""
if (self.cache_path / "downstream_task_dg.pkl").exists():
distrib.lock()
with open(self.cache_path / "downstream_task_dg.pkl", 'rb') as infile:
data = pickle.load(infile)
infile.close()
while not infile.closed:
time.sleep(1)
if environment.WORLD_SIZE > 1:
time.sleep(30)
distrib.unlock()
return data
else:
return None
class Grewe(GreweAbstract):
"""
Specification class for Grewe et al. CGO 2013 predictive model.
  This class is responsible for fetching the raw data and acting as a tokenizer
  for the data, since the data generator should be agnostic of the labels.
"""
@property
def input_size(self) -> int:
return 4
@property
def static_features_labels(self) -> typing.List[str]:
return grewe.KEYS
@property
def input_labels(self) -> typing.List[str]:
return [
"tr_bytes/(comp+mem)",
"coalesced/mem",
"localmem/(mem+wgsize)",
"comp/mem"
]
@property
def feature_space(self) -> str:
return "GreweFeatures"
def __init__(self,
corpus_path : pathlib.Path,
cache_path : pathlib.Path,
random_seed : int,
top_k : int,
use_as_server : bool = False,
test_db : pathlib.Path = None,
**unused_kwargs,
) -> None:
del unused_kwargs
super(Grewe, self).__init__(
"Grewe",
cache_path,
downstream_data.GreweInstance,
random_seed,
top_k,
use_as_server,
test_db,
)
self.corpus_path = corpus_path
self.corpus_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(str(self.corpus_path)))
if use_as_server:
self.setup_server()
else:
      ## The random generator is set up later in setup_dataset; define feature generation bounds here.
self.rand_generator = None
self.gen_bounds = {
'comp' : (1, 300),
'rational' : (0, 50),
'mem' : (1, 50),
'localmem' : (0, 50),
'coalesced' : (0, 10),
'atomic' : (0, 10),
'transferred_bytes': (1, 31), # 2**pow,
'local_size' : (1, 10), # 2**pow,
}
return
def __repr__(self) -> str:
return "Grewe"
def setup_dataset(self, num_train_steps: int = None) -> None:
"""
Fetch data and preprocess into corpus for Grewe's predictive model.
"""
checkpointed = self.loadCheckpoint()
if checkpointed:
self.data_generator = checkpointed['data_generator']
self.rand_generator = np.random.RandomState()
self.test_dataset = checkpointed['test_dataset']
self.rand_generator.set_state(checkpointed['rand_generator'])
self.dataset = self.data_generator.dataset
else:
self.rand_generator = np.random
self.rand_generator.seed(self.random_seed)
self.dataset = []
data = [x for x in self.corpus_db.get_valid_data(dataset = "GitHub")] ## TODO: Here you must get original training dataset instead of random github benchmarks.
pool = multiprocessing.Pool()
it = pool.imap_unordered(functools.partial(ExtractorWorker, fspace = self.feature_space), data)
idx = 0
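      ## Workers extract Grewe features for each CLDrive entry in parallel; every
      ## successful extraction is encoded into an (input vector, label id) pair.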
try:
loop = tqdm.tqdm(it, total = len(data), desc = "Grewe corpus setup", leave = False) if environment.WORLD_RANK == 0 else it
for dp in loop:
if dp:
feats, entry = dp
self.dataset.append(
(
self.InputtoEncodedVector(feats, entry.transferred_bytes, entry.local_size),
[self.TargetLabeltoID(entry.status)]
)
)
idx += 1
# if idx >= 100:
# break
pool.close()
except Exception as e:
pool.terminate()
raise e
# pool.terminate()
if num_train_steps:
self.data_generator = data_generator.ListTrainDataloader(self.dataset[:num_train_steps])
else:
self.data_generator = data_generator.ListTrainDataloader(self.dataset)
self.saveCheckpoint()
return
def UpdateDownstreamDatabase(self,
new_samples : typing.List[typing.Dict[str, typing.Any]],
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
) -> None:
"""
Update exported database of downstream task.
"""
if environment.WORLD_RANK == 0:
cur_sample_ep = self.downstream_data.sampling_epoch
self.downstream_data.add_epoch(
new_samples, cur_sample_ep, target_features, tokenizer
)
distrib.barrier()
return
def sample_space(self, num_samples: int = 512) -> data_generator.DictPredictionDataloader:
"""
    Fetch Grewe predictive model's feature space and randomly return num_samples samples
    to evaluate. Each predictive model sample is mapped as a value to its static features
    key.
"""
samples = []
samples_hash = set()
for x in range(num_samples):
fvec = {
k: self.rand_generator.randint(self.gen_bounds[k][0], self.gen_bounds[k][1])
for k in self.static_features_labels if k not in {"F2:coalesced/mem", "F4:comp/mem"}
}
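      ## The derived ratio features are recomputed from the randomly drawn raw
      ## counters so that the sampled feature vector stays self-consistent.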
try:
fvec['F2:coalesced/mem'] = fvec['coalesced'] / fvec['mem']
except ZeroDivisionError:
fvec['F2:coalesced/mem'] = 0.0
try:
fvec['F4:comp/mem'] = fvec['comp'] / fvec['mem']
except ZeroDivisionError:
fvec['F4:comp/mem'] = 0.0
transferred_bytes = 2**self.rand_generator.randint(self.gen_bounds['transferred_bytes'][0], self.gen_bounds['transferred_bytes'][1])
local_size = 2**self.rand_generator.randint(self.gen_bounds['local_size'][0], self.gen_bounds['local_size'][1])
inp_ids = self.InputtoEncodedVector(fvec, transferred_bytes, local_size)
if str(inp_ids) not in samples_hash:
samples.append(
{
'static_features' : self.StaticFeatDictToVec(fvec),
'runtime_features' : [transferred_bytes, local_size],
'input_ids' : inp_ids,
}
)
samples_hash.add(str(inp_ids))
return data_generator.DictPredictionDataloader(samples)
def InputtoEncodedVector(self,
static_feats : typing.Dict[str, float],
transferred_bytes : int,
local_size : int,
) -> typing.List[float]:
"""
Encode consistently raw features to Grewe's predictive model inputs.
"""
try:
i1 = transferred_bytes / (static_feats['comp'] + static_feats['mem'])
except ZeroDivisionError:
i1 = 0.0
try:
i2 = static_feats['coalesced'] / static_feats['mem']
except ZeroDivisionError:
i2 = 0.0
try:
i3 = (static_feats['localmem'] / static_feats['mem']) * local_size
except ZeroDivisionError:
i3 = 0.0
try:
i4 = static_feats['comp'] / static_feats['mem']
except ZeroDivisionError:
i4 = 0.0
return [i1, i2, i3, i4]
class FeatureLessGrewe(GreweAbstract):
"""
A feature-less implementation of Grewe's CPU vs GPU model.
  This task uses the language model's hidden outputs as features
instead of manually selecting the compiler features.
"""
@property
def input_size(self) -> int:
return self.static_features_size + self.runtime_features_size
@property
def static_features_labels(self) -> typing.List[str]:
return hidden_state.KEYS
@property
def input_labels(self) -> typing.List[str]:
return self.static_features_labels + ["transferred_bytes", "local_size"]
@property
def feature_space(self) -> str:
return "HiddenState"
def __init__(self,
corpus_path : pathlib.Path,
cache_path : pathlib.Path,
random_seed : int,
top_k : int,
use_as_server : bool = False,
test_db : pathlib.Path = None,
**unused_kwargs,
) -> None:
del unused_kwargs
super(FeatureLessGrewe, self).__init__(
"FeatureLessGrewe",
cache_path,
downstream_data.FeatureLessGreweInstance,
random_seed,
top_k,
use_as_server,
test_db,
)
self.corpus_path = corpus_path
self.corpus_db = cldrive.CLDriveExecutions(url = "sqlite:///{}".format(str(self.corpus_path)))
if use_as_server:
self.setup_server()
else:
      ## The random generator and datasets are set up later in setup_dataset; define runtime feature bounds here.
self.dataset = None
self.data_generator = None
self.rand_generator = None
self.gen_bounds = {
'transferred_bytes': (1, 31), # 2**pow,
'local_size' : (1, 10), # 2**pow,
}
return
def __repr__(self) -> str:
return "FeatureLessGrewe"
def setup_dataset(self, **kwargs) -> None:
"""
Function that initializes all initial data/data types needed for downstream task.
The predictive model will not be trained on initial data, therefore data generator
is initialized here as empty.
    A test set is needed for this task: the CSV file with the labelled, human-written
    benchmarks, which is going to be the evaluator.
"""
checkpointed = self.loadCheckpoint()
if checkpointed:
self.data_generator = checkpointed['data_generator']
self.rand_generator = np.random.RandomState()
self.rand_generator.set_state(checkpointed['rand_generator'])
self.test_dataset = checkpointed['test_dataset']
self.dataset = self.data_generator.dataset
else:
## For Expected Error Reduction, no human benchmarks are used for initial training.
self.data_generator = data_generator.ListTrainDataloader([])
self.dataset = []
self.rand_generator = np.random
self.rand_generator.seed(self.random_seed)
self.saveCheckpoint()
return
def sample_space(self, num_samples: int = 128) -> data_generator.DictPredictionDataloader:
"""
    Fetch the hidden state's feature space [1 x hidden_state_size], with values drawn
    uniformly from [-1, 1], and randomly return num_samples samples to evaluate. Each
    predictive model sample is mapped as a value to its static features key.
"""
samples = []
samples_hash = set()
for _ in range(num_samples):
random_values = self.rand_generator.uniform(-1, 1, self.static_features_size)
fvec = {
k: v
for k, v in zip(self.static_features_labels, random_values)
}
transferred_bytes = 2**self.rand_generator.randint(self.gen_bounds['transferred_bytes'][0], self.gen_bounds['transferred_bytes'][1])
local_size = 2**self.rand_generator.randint(self.gen_bounds['local_size'][0], self.gen_bounds['local_size'][1])
inp_ids = self.InputtoEncodedVector(fvec, transferred_bytes, local_size)
if str(inp_ids) not in samples_hash:
samples.append(
{
'static_features' : self.StaticFeatDictToVec(fvec),
'runtime_features' : [transferred_bytes, local_size],
'input_ids' : inp_ids,
}
)
samples_hash.add(str(inp_ids))
return data_generator.DictPredictionDataloader(samples)
def UpdateDownstreamDatabase(self,
new_samples : typing.List[typing.Dict[str, typing.Any]],
target_features : typing.Dict[str, float],
tokenizer : 'tokenizers.TokenizerBase',
) -> None:
"""
Update exported database of downstream task.
"""
if environment.WORLD_RANK == 0:
cur_sample_ep = self.downstream_data.sampling_epoch
extended_samples = []
memo = {}
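      ## Grewe features are extracted once per unique token sequence (memoized by
      ## the stringified sample) and attached to each sample before exporting.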
for sample in new_samples:
key = ','.join([str(x) for x in sample.sample])
if key not in memo:
src = tokenizer.ArrayToCode(sample.sample)
memo[key] = extractor.ExtractFeatures(src, ["GreweFeatures"])["GreweFeatures"]
extended_samples.append((sample, memo[key]))
self.downstream_data.add_epoch(
extended_samples, cur_sample_ep, target_features, tokenizer
)
distrib.barrier()
return
def InputtoEncodedVector(self,
static_feats : typing.Dict[str, float],
transferred_bytes : int,
local_size : int,
) -> typing.List[float]:
"""
Encode consistently LM's hidden output features to Grewe's predictive model inputs.
"""
return [
static_feats[l] for l in self.static_features_labels
] + [math.log2(transferred_bytes), math.log2(local_size)]
def VecToRuntimeFeatDict(self, runtime_values: typing.List[int]) -> typing.Dict[str, int]:
"""
Process runtime int values to runtime features dictionary.
"""
trb, ls = runtime_values
return {
'transferred_bytes' : int(trb),
'local_size' : int(ls),
}
TASKS = {
"Grewe" : Grewe,
"FeatureLessGrewe" : FeatureLessGrewe,
}
def main(*args, **kwargs) -> None:
if FLAGS.server_tokenizer is None:
raise ValueError("Please define --server_tokenizer")
if FLAGS.server_cldrive_cache is None:
raise ValueError("Please define --server_cldrive_cache")
tokenizer_path = pathlib.Path(FLAGS.server_tokenizer).resolve()
cldrive_cache = pathlib.Path(FLAGS.server_cldrive_cache).resolve()
if not tokenizer_path.exists():
raise FileNotFoundError(tokenizer_path)
# if not cldrive_cache.exists():
# raise FileNotFoundError(cldrive_cache)
if not FLAGS.use_http_server and not FLAGS.use_socket_server:
raise ValueError("This booting point is supposed to work as server. Set your flags appropriately.")
tokenizer = tokenizers.TokenizerBase.FromFile(tokenizer_path)
task = DownstreamTask.FromTask("FeatureLessGrewe", cldrive_cache, "/tmp/", 0, top_k = -1, use_as_server = True)
task.ServeRuntimeFeatures(tokenizer)
return
if __name__ == "__main__":
app.run(main)
exit()
| 32,769 | 35.451613 | 165 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/committee/active_committee.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Array of NN models used for Active Learning Query-By-Committee.
This module handles
a) the passive training of the committee,
b) the confidence level of the committee for a datapoint (using entropy)
"""
import typing
import datetime
import tqdm
import pathlib
import pickle
import copy
import math
import numpy as np
from deeplearning.benchpress.models.torch_bert import optimizer
from deeplearning.benchpress.models.torch_bert import hooks
from deeplearning.benchpress.active_models import backends
from deeplearning.benchpress.active_models import data_generator
from deeplearning.benchpress.active_models.committee import models
from deeplearning.benchpress.active_models.committee import config
from deeplearning.benchpress.active_models.committee import committee_database
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
class QueryByCommittee(backends.BackendBase):
class TrainingOpts(typing.NamedTuple):
"""Wrapper class for training options"""
train_batch_size : int
learning_rate : float
num_warmup_steps : int
max_grad_norm : float
steps_per_epoch : int
num_epochs : int
num_train_steps : int
n_clusters : int
init : str
n_init : int
max_iter : int
tol : float
algorithm : str
n_neighbors : int
weights : str
leaf_size : int
p : float
class CommitteeEstimator(typing.NamedTuple):
"""Named tuple to wrap BERT pipeline."""
model : typing.TypeVar('nn.Module')
data_generator : 'torch.utils.data.Dataset'
optimizer : typing.Any
scheduler : typing.Any
training_opts : 'TrainingOpts'
sha256 : str
config : config.ModelConfig
train_fn : typing.Callable
sample_fn : typing.Callable
def __repr__(self):
return "QueryByCommittee"
def __init__(self, *args, **kwargs):
super(QueryByCommittee, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import pytorch
if not pytorch.initialized:
pytorch.initPytorch()
self.pytorch = pytorch
self.torch = pytorch.torch
self.torch_tpu_available = pytorch.torch_tpu_available
self.torch.manual_seed(self.config.random_seed)
self.torch.cuda.manual_seed_all(self.config.random_seed)
self.ckpt_path = self.cache_path / "checkpoints"
self.sample_path = self.cache_path / "samples"
self.logfile_path = self.cache_path / "logs"
self.validation_results_file = "val_results.txt"
self.validation_results_path = self.logfile_path / self.validation_results_file
self.committee = None
self.is_validated = False
self.is_trained = False
self.committee_samples = committee_database.CommitteeSamples(
url = "sqlite:///{}".format(str(self.sample_path / "samples.db")),
must_exist = False,
)
self.sample_epoch = self.committee_samples.cur_sample_epoch
l.logger().info("Active Committee config initialized in {}".format(self.cache_path))
return
def _ConfigModelParams(self,
data_generator : 'torch.utils.data.Dataset' = None,
is_sampling : bool = False
) -> None:
"""
Model parameter initialization.
"""
if not self.committee:
self.committee = []
self.committee_configs = config.ModelConfig.FromConfig(
self.config.query_by_committee, self.downstream_task, self.config.num_train_steps
)
for idx, cconfig in enumerate(self.committee_configs):
training_opts = QueryByCommittee.TrainingOpts(
train_batch_size = cconfig.batch_size,
learning_rate = cconfig.learning_rate,
num_warmup_steps = cconfig.num_warmup_steps,
max_grad_norm = cconfig.max_grad_norm,
steps_per_epoch = cconfig.steps_per_epoch,
num_epochs = cconfig.num_epochs,
num_train_steps = cconfig.num_train_steps,
n_clusters = cconfig.n_clusters,
init = cconfig.init,
n_init = cconfig.n_init,
max_iter = cconfig.max_iter,
tol = cconfig.tol,
algorithm = cconfig.algorithm,
n_neighbors = cconfig.n_neighbors,
weights = cconfig.weights,
leaf_size = cconfig.leaf_size,
p = cconfig.p,
)
cm = models.CommitteeModels.FromConfig(idx, cconfig)
if not is_sampling and isinstance(cm, self.torch.nn.Module):
opt, lr_scheduler = optimizer.create_optimizer_and_scheduler(
model = cm,
num_train_steps = 10**5,
warmup_steps = training_opts.num_warmup_steps,
learning_rate = training_opts.learning_rate,
)
else:
opt, lr_scheduler = None, None
self.committee.append(
QueryByCommittee.CommitteeEstimator(
model = cm,
data_generator = copy.deepcopy(data_generator),
optimizer = opt,
scheduler = lr_scheduler,
training_opts = training_opts,
sha256 = cconfig.sha256,
config = cconfig,
train_fn = self.TrainNNMember if isinstance(cm, self.torch.nn.Module) else self.TrainUnsupervisedMember,
sample_fn = self.SampleNNMember if isinstance(cm, self.torch.nn.Module) else self.SampleUnsupervisedMember,
)
)
      (self.ckpt_path / cconfig.sha256).mkdir(exist_ok = True, parents = True)
      (self.logfile_path / cconfig.sha256).mkdir(exist_ok = True, parents = True)
l.logger().info(self.GetShortSummary())
for member in self.committee:
self.committee_samples.add_member(
member_id = member.model.id,
member_name = member.config.name,
type = "supervised" if isinstance(member.model, self.torch.nn.Module) else "unsupervised",
configuration = member.config.config,
)
return
def model_step(self,
model: 'torch.nn.module',
inputs: typing.Dict[str, 'torch.Tensor'],
is_sampling: bool = False
) -> float:
"""
Run forward function for member model.
"""
outputs = model(
input_ids = inputs['input_ids'].to(self.pytorch.device),
target_ids = inputs['target_ids'].to(self.pytorch.device) if not is_sampling else None,
is_sampling = is_sampling,
)
return outputs
def TrainNNMember(self, member: 'QueryByCommittee.CommitteeEstimator', **kwargs) -> None:
"""
Member-dispatching function for loading checkpoint, training and saving back.
"""
update_dataloader = kwargs.get('update_dataloader', None)
model = member.model.to(self.pytorch.offset_device)
model_name = "{}-{}".format(member.config.name, member.model.id)
data_generator = (
member.data_generator
if update_dataloader is None
else update_dataloader
# + member.data_generator.get_random_subset(
# max(0, abs(len(update_dataloader) - member.training_opts.num_train_steps)))
)
if len(data_generator) == 0:
return
optimizer = member.optimizer
scheduler = member.scheduler
member_path = self.ckpt_path / member.sha256
member_log_path = self.logfile_path / member.sha256
# if self.pytorch.num_nodes > 1:
# distrib.barrier()
# model = self.torch.nn.parallel.DistributedDataParallel(
# model,
# device_ids = [self.pytorch.offset_device],
# output_device = self.pytorch.offset_device,
# )
if self.pytorch.num_gpus > 1:
model = self.torch.nn.DataParallel(model)
current_step = self.loadCheckpoint(model, member_path, optimizer, scheduler)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if current_step >= 0:
l.logger().info("{}: Loaded checkpoint step {}".format(model_name, current_step))
current_step = max(0, current_step)
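    ## When training on an update dataloader, continue for as many extra steps as the
    ## update requires on top of the restored step; otherwise cap at the configured num_train_steps.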
num_train_steps = min((len(data_generator) + member.training_opts.train_batch_size) // member.training_opts.train_batch_size, member.training_opts.num_train_steps) if update_dataloader is None else ((len(update_dataloader) + member.training_opts.train_batch_size) // member.training_opts.train_batch_size) + current_step
if current_step < num_train_steps:
model.zero_grad()
# if self.pytorch.num_nodes <= 1:
sampler = self.torch.utils.data.RandomSampler(data_generator, replacement = False)
# else:
# sampler = self.torch.utils.data.DistributedSampler(
# data_generator,
# num_replicas = self.pytorch.num_nodes,
# rank = self.pytorch.torch.distributed.get_rank()
# )
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = data_generator,
batch_size = member.training_opts.train_batch_size,
sampler = (sampler
if not self.pytorch.torch_tpu_available or self.pytorch.torch_xla.xrt_world_size() <= 1
else self.torch.utils.data.distributed.DistributedSampler(
dataset = data_generator,
num_replicas = self.pytorch.num_nodes if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.xrt_world_size(),
rank = self.pytorch.torch.distributed.get_rank() if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False # if environment.WORLD_SIZE == 1 else True,
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
data_generator, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
# Get dataloader iterator and setup hooks.
batch_iterator = iter(loader)
if self.is_world_process_zero():
train_hook = hooks.tensorMonitorHook(
member_log_path, current_step, min((len(data_generator) + member.training_opts.train_batch_size) // member.training_opts.train_batch_size, member.training_opts.steps_per_epoch, 50)
)
try:
with self.torch.enable_grad():
model.train()
# epoch_iter = tqdm.auto.trange(member.training_opts.num_epochs, desc="Epoch", leave = False) if self.is_world_process_zero() else range(member.training_opts.num_epochs)
epoch = num_train_steps // member.training_opts.steps_per_epoch
# In distributed mode, calling the set_epoch() method at
# the beginning of each epoch before creating the DataLoader iterator
# is necessary to make shuffling work properly across multiple epochs.
# Otherwise, the same ordering will be always used.
# if self.pytorch.num_nodes > 1:
# loader.sampler.set_epoch(epoch)
batch_iter = tqdm.tqdm(batch_iterator, desc="Batch", leave = False) if self.is_world_process_zero() else batch_iterator
for inputs in batch_iter:
if self.is_world_process_zero():
start = datetime.datetime.utcnow()
# Run model step on inputs
step_out = self.model_step(model, inputs)
# Backpropagate losses
total_loss = step_out['total_loss'].mean()
total_loss.backward()
self.torch.nn.utils.clip_grad_norm_(model.parameters(), member.training_opts.max_grad_norm)
if self.torch_tpu_available:
self.pytorch.torch_xla.optimizer_step(optimizer)
else:
optimizer.step()
scheduler.step()
## Collect tensors for logging.
# if self.pytorch.num_nodes > 1:
# total_loss = [self.torch.zeros(tuple(step_out['total_loss'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
# self.torch.distributed.all_gather(total_loss, step_out["total_loss"])
# else:
total_loss = step_out['total_loss'].unsqueeze(0).cpu()
if self.is_world_process_zero():
train_hook.step(
train_step = current_step,
total_loss = sum([tl.mean().item() for tl in total_loss]) / len(total_loss),
)
model.zero_grad()
if current_step == 0:
l.logger().info("{}: Starting Loss: {}".format(model_name, sum([tl.mean().item() for tl in total_loss]) / len(total_loss)))
current_step += 1
# End of epoch
self.saveCheckpoint(
model,
member_path,
optimizer = optimizer,
scheduler = scheduler,
step = current_step
)
# if self.pytorch.num_nodes > 1:
# loader.sampler.set_epoch(epoch)
if self.is_world_process_zero():
try:
l.logger().info("{}: Epoch {} Loss: {}".format(model_name, current_step // member.training_opts.steps_per_epoch, train_hook.epoch_loss))
except ZeroDivisionError:
l.logger().error(
"Hook has crashed again: current_step: {}, step_freq: {}, flush_freq: {}, train_step: {}".format(
train_hook.current_step, train_hook.step_freq, train_hook.flush_freq,
current_step
)
)
train_hook.end_epoch()
if self.torch_tpu_available:
self.pytorch.torch_xla.master_print(self.pytorch.torch_xla_met.metrics_report())
except KeyboardInterrupt:
pass
return
def TrainUnsupervisedMember(self, member: 'QueryByCommittee.CommitteeEstimator', **kwargs) -> None:
"""
Train non-NeuralNetwork based architectures, such as DecisionTrees or KMeans.
"""
update_dataloader = kwargs.get('update_dataloader', None)
model = member.model
model_name = "{}-{}".format(member.config.name, member.model.id)
data_generator = member.data_generator + update_dataloader
if len(data_generator) == 0:
return
train_dataset = data_generator.get_batched_dataset()
member_path = self.ckpt_path / member.sha256
member_log_path = self.logfile_path / member.sha256
current_step = self.loadCheckpoint(model, member_path)
if current_step >= 0:
l.logger().info("{}: Loaded checkpoint step {}".format(model_name, current_step))
if current_step < 0 or update_dataloader is not None:
current_step = max(0, current_step)
outputs = model(
input_ids = train_dataset['input_ids'],
target_ids = train_dataset['target_ids'],
is_sampling = False,
)
self.saveCheckpoint(
model,
member_path,
step = current_step + 1,
)
l.logger().info("{}: Trained with {} instances".format(model_name, len(train_dataset['input_ids'])))
return
def Train(self, **kwargs) -> None:
"""
Training point of active learning committee.
"""
# Configure committee members.
update_dataloader = kwargs.get('update_dataloader', None)
if update_dataloader is None:
l.logger().info("Initial committee training.")
self._ConfigModelParams(self.downstream_task.data_generator)
if not self.is_trained or update_dataloader is not None:
if self.is_world_process_zero():
for member in self.committee:
member.train_fn(member, update_dataloader = update_dataloader)
self.is_trained = True
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
return
def Validate(self) -> None:
"""
Perform validation for committee members.
"""
raise NotImplementedError
return
def SampleNNMember(self,
member : 'QueryByCommittee.CommitteeEstimator',
sample_set : 'torch.utils.data.Dataset',
) -> typing.Dict[str, typing.List]:
"""
Sample member of committee. Return predicted label.
"""
model = member.model.to(self.pytorch.offset_device)
model_name = "{}-{}".format(member.config.name, member.model.id)
member_path = self.ckpt_path / member.sha256
member_log_path = self.logfile_path / member.sha256
if self.pytorch.num_nodes > 1:
distrib.barrier()
model = self.torch.nn.parallel.DistributedDataParallel(
model,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
)
elif self.pytorch.num_gpus > 1:
model = self.torch.nn.DataParallel(model)
current_step = self.loadCheckpoint(model, member_path)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if current_step < 0:
l.logger().warn("{}: You are trying to sample an untrained model.".format(model_name))
current_step = max(0, current_step)
if self.pytorch.num_nodes <= 1:
sampler = self.torch.utils.data.SequentialSampler(sample_set)
else:
sampler = self.torch.utils.data.DistributedSampler(
sample_set,
num_replicas = self.pytorch.num_nodes,
rank = self.pytorch.torch.distributed.get_rank(),
shuffle = False,
drop_last = False,
)
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = sample_set,
batch_size = member.training_opts.train_batch_size,
sampler = (sampler
if self.pytorch.num_nodes <= 1 or not self.pytorch.torch_tpu_available or self.pytorch.torch_xla.xrt_world_size() <= 1
else self.torch.utils.data.distributed.DistributedSampler(
dataset = sample_set,
num_replicas = self.pytorch.num_nodes if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.xrt_world_size(),
rank = self.pytorch.torch.distributed.get_rank() if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False # True if environment.WORLD_SIZE > 1 else False,
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
sample_set, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
# Get dataloader iterator and setup hooks.
model.eval()
predictions = {
'train_step' : current_step,
'idx' : None,
'static_features' : None,
'runtime_features': None,
'input_ids' : None,
'predictions' : None,
}
it = tqdm.tqdm(loader, desc="Sample member", leave = False) if self.is_world_process_zero() else loader
for batch in it:
out = self.model_step(model, batch, is_sampling = True)
for key in set(predictions.keys()) - set({'train_step'}):
r = batch[key] if key != "predictions" else out['output_label']
if predictions[key] is None:
predictions[key] = r
else:
predictions[key] = self.torch.cat(
(predictions[key], r),
0
)
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
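      ## Gather the per-rank predictions from every node, then restore the original
      ## dataset ordering using the gathered sample indices.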
idx = [self.torch.zeros(tuple(predictions['idx' ].shape), dtype = self.torch.int64).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
static_features = [self.torch.zeros(tuple(predictions['static_features' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
runtime_features = [self.torch.zeros(tuple(predictions['runtime_features'].shape), dtype = self.torch.int64).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
input_ids = [self.torch.zeros(tuple(predictions['input_ids' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
output_label = [self.torch.zeros(tuple(predictions['predictions' ].shape), dtype = self.torch.int64).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(idx, predictions["idx" ].to(self.pytorch.device))
self.torch.distributed.all_gather(static_features, predictions["static_features" ].to(self.pytorch.device))
self.torch.distributed.all_gather(runtime_features, predictions["runtime_features"].to(self.pytorch.device))
self.torch.distributed.all_gather(input_ids, predictions["input_ids" ].to(self.pytorch.device))
self.torch.distributed.all_gather(output_label, predictions["predictions" ])
predictions['idx'] = self.torch.cat(idx)
predictions['static_features'] = self.torch.cat(static_features)
predictions['runtime_features'] = self.torch.cat(runtime_features)
predictions['input_ids'] = self.torch.cat(input_ids)
predictions['predictions'] = self.torch.cat(output_label)
idx = self.torch.zeros(tuple(predictions['idx' ].shape), dtype = self.torch.int64).to(self.pytorch.device)
static_features = self.torch.zeros(tuple(predictions['static_features' ].shape), dtype = self.torch.float32).to(self.pytorch.device)
runtime_features = self.torch.zeros(tuple(predictions['runtime_features'].shape), dtype = self.torch.int64).to(self.pytorch.device)
input_ids = self.torch.zeros(tuple(predictions['input_ids' ].shape), dtype = self.torch.float32).to(self.pytorch.device)
output_label = self.torch.zeros(tuple(predictions['predictions' ].shape), dtype = self.torch.int64).to(self.pytorch.device)
for x, i in enumerate(predictions['idx']):
idx [int(i)] = predictions['idx'] [x]
static_features [int(i)] = predictions['static_features'] [x]
runtime_features[int(i)] = predictions['runtime_features'][x]
input_ids [int(i)] = predictions['input_ids'] [x]
output_label [int(i)] = predictions['predictions'] [x]
predictions['idx'] = idx
predictions['static_features'] = static_features
predictions['runtime_features'] = runtime_features
predictions['input_ids'] = input_ids
predictions['predictions'] = output_label
for key in set(predictions.keys()) - set({'train_step'}):
if key == 'predictions':
predictions[key] = [self.downstream_task.TargetIDtoLabels(int(x)) for x in predictions[key].cpu().numpy()]
elif key == "runtime_features":
predictions[key] = [[int(y) for y in x.cpu().numpy()] for x in predictions[key]]
elif key == "idx":
predictions[key] = [int(x.cpu().numpy()) for x in predictions[key]]
else:
predictions[key] = [[float(y) for y in x.cpu().numpy()] for x in predictions[key]]
return predictions
def SampleUnsupervisedMember(self,
member : 'QueryByCommittee.CommitteeEstimator',
sample_set : 'torch.utils.data.Dataset',
) -> typing.Dict[str, typing.List]:
"""
Sample non-NeuralNetwork based architectures, such as DecisionTrees or KMeans.
"""
model = member.model
model_name = "{}-{}".format(member.config.name, member.model.id)
sample_dataset = sample_set.get_batched_dataset()
member_path = self.ckpt_path / member.sha256
member_log_path = self.logfile_path / member.sha256
current_step = self.loadCheckpoint(model, member_path)
if current_step < 0:
l.logger().warn("{}: You are trying to sample an untrained model.".format(model_name))
current_step = max(0, current_step)
if self.is_world_process_zero():
outputs = model(
input_ids = sample_dataset['input_ids'],
is_sampling = True,
)
predictions = {
'train_step' : current_step,
'idx' : [int(x) for x in sample_dataset['idx']],
'static_features' : sample_dataset['static_features'],
'runtime_features': sample_dataset['runtime_features'],
'input_ids' : sample_dataset['input_ids'],
'predictions' : [self.downstream_task.TargetIDtoLabels(i) for i in outputs['predicted_labels']],
}
distrib.broadcast(predictions)
else:
predictions = distrib.broadcast()
distrib.barrier()
return predictions
def SampleCommittee(self,
sample_set: 'torch.utils.data.Dataset',
) -> typing.Dict[
'QueryByCommittee.CommitteeEstimator',
typing.Dict[str, 'torch.Tensor']
]:
"""
Sample committee with a set of inputs.
Return a dictionary mapped from each member to the
total workload computed by a committee member.
"""
self._ConfigModelParams()
committee_predictions = {}
for member in self.committee:
key = "{}_{}".format(member.config.name, member.model.id)
committee_predictions[key] = member.sample_fn(member, sample_set)
return committee_predictions
def Sample(self, sample_set: 'torch.Dataset') -> typing.List[typing.Dict[str, float]]:
"""
Active learner sampling.
This method queries all committee members and measures their cross-entropy to validate
the usefulness of parts of the feature space.
"""
# Ask the committee for their predictions.
committee_predictions = self.SampleCommittee(sample_set)
space_samples = []
for nsample in range(len(sample_set)):
# Get the feature vectors for each sample.
for model, samples in committee_predictions.items():
if nsample != samples['idx'][nsample]:
raise ValueError("{} Mismatch in sample output: Expected {} but had {}".format(model, nsample, samples['idx'][nsample]))
static_feats = self.downstream_task.VecToStaticFeatDict(samples['static_features'][nsample])
run_feats = self.downstream_task.VecToRuntimeFeatDict(samples['runtime_features'][nsample])
input_feats = self.downstream_task.VecToInputFeatDict(samples['input_ids'][nsample])
break
# Calculate entropy for that sample.
ent = self.entropy([x['predictions'][nsample] for x in committee_predictions.values()])
# Save the dictionary entry.
space_samples.append({
'train_step' : {k: v['train_step'] for k, v in committee_predictions.items()},
'static_features' : static_feats,
'runtime_features' : run_feats,
'input_features' : input_feats,
'member_predictions' : {k: v['predictions'][nsample] for k, v in committee_predictions.items()},
'entropy' : ent,
})
# Add everything to database.
self.committee_samples.add_samples(self.sample_epoch, space_samples)
self.sample_epoch += 1
return sorted(space_samples, key = lambda x: x['entropy'], reverse = True)
def entropy(self, labels, base=None):
""" Computes entropy of label distribution. """
if len(labels) <= 1:
return 0
value,counts = np.unique(labels, return_counts=True)
probs = counts / len(labels)
n_classes = np.count_nonzero(probs)
if n_classes <= 1:
return 0
entropy = 0.0
# Compute entropy
base = math.e if base is None else base
for p in probs:
entropy -= p * math.log(p, base)
return entropy
def saveCheckpoint(self,
model : 'torch.nn.Module',
path : pathlib.Path,
optimizer = None,
scheduler = None,
step : int = None,
) -> None:
"""
Saves model, scheduler, optimizer checkpoints per epoch.
"""
if self.is_world_process_zero():
ckpt_comp = lambda x: path / "{}-{}.pt".format(x, step)
if isinstance(model, self.torch.nn.Module):
if self.torch_tpu_available:
if self.pytorch.torch_xla_model.rendezvous("saving_checkpoint"):
self.pytorch.torch_xla_model.save(model, ckpt_comp("model"))
self.pytorch.torch_xla.rendezvous("saving_optimizer_states")
self.pytorch.torch_xla.save(optimizer.state_dict(), ckpt_comp("optimizer"))
self.pytorch.torch_xla.save(scheduler.state_dict(), ckpt_comp("scheduler"))
else:
if isinstance(model, self.torch.nn.DataParallel):
self.torch.save(model.module.state_dict(), ckpt_comp("model"))
else:
self.torch.save(model.state_dict(), ckpt_comp("model"))
self.torch.save(optimizer.state_dict(), ckpt_comp("optimizer"))
self.torch.save(scheduler.state_dict(), ckpt_comp("scheduler"))
else:
checkpoint_dict = model.get_checkpoint_state()
with open(ckpt_comp("model"), 'wb') as outf:
pickle.dump(checkpoint_dict, outf)
with open(path / "checkpoint.meta", 'a') as mf:
mf.write("train_step: {}\n".format(step))
return
def loadCheckpoint(self,
model : 'torch.nn.Module',
path : pathlib.Path,
optimizer = None,
scheduler = None
) -> int:
"""
Load model checkpoint. Loads either most recent epoch, or selected checkpoint through FLAGS.
"""
if not (path / "checkpoint.meta").exists():
return -1
with open(path / "checkpoint.meta", 'r') as mf:
key = "train_step"
get_step = lambda x: int(x.replace("\n", "").replace("{}: ".format(key), ""))
lines = mf.readlines()
entries = set({get_step(x) for x in lines if key in x})
if FLAGS.select_checkpoint_step == -1:
ckpt_step = max(entries)
    elif FLAGS.select_checkpoint_step in entries:
      ckpt_step = FLAGS.select_checkpoint_step
    else:
      raise ValueError("{} not found in checkpoint folder.".format(FLAGS.select_checkpoint_step))
ckpt_comp = lambda x: path / "{}-{}.pt".format(x, ckpt_step)
if isinstance(model, self.torch.nn.DataParallel):
try:
model.module.load_state_dict(
self.torch.load(ckpt_comp("model"))
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
model.module.load_state_dict(new_state_dict)
model.eval()
elif isinstance(model, self.torch.nn.Module):
try:
model.load_state_dict(
self.torch.load(ckpt_comp("model"), map_location=self.pytorch.device)
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
model.eval()
else:
checkpoint_dict = pickle.load(open(ckpt_comp("model"), 'rb'))
model.load_checkpoint_state(checkpoint_dict)
return ckpt_step
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on
several machines, this is only going to be :obj:`True` for one process).
"""
if self.torch_tpu_available:
return self.pytorch.torch_xla_model.is_master_ordinal(local=False)
elif self.pytorch.num_nodes > 1:
return self.torch.distributed.get_rank() == 0
else:
return True
def GetShortSummary(self) -> str:
return "Short summary TODO"
| 33,727 | 42.407979 | 324 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/committee/models.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Here all the committee members are defined.
"""
import math
import sys
import typing
import numpy as np
from sklearn import cluster as sklearn_cluster
from sklearn import neighbors as sklearn_neighbors
from deeplearning.benchpress.active_models.committee import config
from deeplearning.benchpress.models.torch_bert import activations
from deeplearning.benchpress.util import pytorch
from deeplearning.benchpress.util.pytorch import torch
from deeplearning.benchpress.util import logging as l
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
ACT2FN = {
"gelu" : activations.gelu,
"relu" : torch.nn.functional.relu,
"swish" : activations.swish,
"gelu_new" : activations.gelu_new,
"mish" : mish,
"softmax" : torch.nn.Softmax
}
class CommitteeModels(object):
"""
Abstract representation of model committee.
"""
@classmethod
def FromConfig(cls, id: int, config: config.ModelConfig) -> "CommitteeModels":
return {
'MLP' : MLP,
'KMeans' : KMeans,
'KNN' : KNN,
}[config.name](id, config)
def __init__(self, id: int):
super(CommitteeModels, self).__init__()
self.id = id
return
def forward(self, *args, **kwargs) -> None:
raise NotImplementedError("Abstract class.")
def get_checkpoint_state(self, *args, **kwargs) -> None:
raise NotImplementedError("Only for non-NN modules")
def load_checkpoint_state(self, *args, **kwargs) -> None:
raise NotImplementedError("Only for non-NN modules")
class MLP(CommitteeModels, torch.nn.Module):
"""
A modular MLP model that supports Linear, Dropout, LayerNorm and activations.
"""
def __init__(self, id: int, config: config.ModelConfig):
super(MLP, self).__init__(id)
self.config = config.layer_config
self.layers = []
layers = {
'Embedding' : torch.nn.Embedding,
'Linear' : torch.nn.Linear,
'Dropout' : torch.nn.Dropout,
'LayerNorm' : torch.nn.LayerNorm,
}
layers.update(ACT2FN)
self.layers = torch.nn.ModuleList([layers[layer[0]](**layer[1]) for layer in self.config])
return
def calculate_loss(self,
outputs: torch.Tensor,
target_ids: torch.Tensor,
) -> torch.Tensor:
"""
Categorical cross-entropy function.
"""
## Calculate categorical label loss.
loss_fn = torch.nn.CrossEntropyLoss()
label_loss = loss_fn(outputs.to(torch.float32), target_ids.squeeze(1))
## Calculate top-1 accuracy of predictions across batch.
hits, total = 0, int(outputs.size(0))
for out, target in zip(torch.argmax(outputs, dim = 1), target_ids):
if out == target:
hits += 1
return label_loss, torch.FloatTensor([hits / total])
def forward(self,
input_ids : torch.Tensor,
target_ids : torch.Tensor = None,
is_sampling : bool = False
) -> torch.Tensor:
"""
Args:
input_ids: Input features for training or prediction.
target_ids: Target tokens to predict during training.
static_features: List of static input features of respective sample to predict.
is_sampling: Select between training and sampling method.
"""
device = input_ids.get_device()
device = device if device >= 0 else 'cpu'
out = input_ids
for layer in self.layers:
out = layer(out)
if not is_sampling:
total_loss, batch_accuracy = self.calculate_loss(out, target_ids)
return {
'total_loss' : total_loss,
'accuracy' : batch_accuracy.to(device),
'output_probs' : out,
'output_label' : torch.argmax(out)
}
else:
return {
'output_probs' : out,
'output_label' : torch.argmax(out, dim = 1),
}
class KMeans(CommitteeModels):
"""
Wrapper class to manage, fit and predict KMeans clusters.
"""
def __init__(self, id: int, config: config.ModelConfig):
super(KMeans, self).__init__(id)
self.config = config
self.target_ids = self.config.downstream_task.output_ids
self.kmeans = sklearn_cluster.KMeans(
n_clusters = self.config.n_clusters,
init = self.config.init,
n_init = self.config.n_init,
max_iter = self.config.max_iter,
tol = self.config.tol,
algorithm = self.config.algorithm,
)
## The following two variables are the model's attributes.
self.classifier = None
self.cluster_map = {}
return
def __call__(self,
input_ids : np.array,
target_ids : np.array = None,
is_sampling : bool = False
) -> None:
if not is_sampling:
## Create a map for labels from target ids, and cluster IDS.
self.cluster_map = {
cluster_id: [0] * self.config.num_labels for cluster_id in range(self.config.n_clusters)
}
self.classifier = self.kmeans.fit(input_ids)
for cluster_id, target_id in zip(self.classifier.labels_, target_ids):
self.cluster_map[cluster_id][int(target_id)] += 1
return {
'cluster_map' : self.cluster_map,
'cluster_labels' : self.classifier.labels_,
}
else:
target_labels = []
if not self.classifier:
for idx, _ in enumerate(input_ids):
target_labels.append(
np.random.choice(a = np.arange(self.config.num_labels))
)
return {
'cluster_labels' : [],
'predicted_labels' : target_labels,
}
else:
cluster_labels = self.classifier.predict(input_ids)
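        ## For every predicted cluster, sample a label according to the label
        ## histogram that cluster accumulated during fitting (uniform if empty).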
for x in cluster_labels:
p = [(y / sum(self.cluster_map[x]) if sum(self.cluster_map[x]) else 0.5) for y in self.cluster_map[x]]
p = p / np.array(p).sum()
target_labels.append(
np.random.choice(a = np.arange(self.config.num_labels), p = p)
)
return {
'cluster_labels' : cluster_labels,
'predicted_labels' : target_labels,
}
def get_checkpoint_state(self) -> typing.Dict[str, typing.Any]:
"""
Return the blob that is to be checkpointed.
"""
return {
'kmeans' : self.classifier,
'cluster_map' : self.cluster_map,
}
def load_checkpoint_state(self, checkpoint_state: typing.Dict[str, typing.Any]) -> None:
"""
Load the checkpoints to the class states.
"""
self.classifier = checkpoint_state['kmeans']
self.cluster_map = checkpoint_state['cluster_map']
return
class KNN(CommitteeModels):
"""
Wrapper class to manage, fit and predict KNN algorithm.
"""
def __init__(self, id: int, config: config.ModelConfig):
super(KNN, self).__init__(id)
self.config = config
self.knn = sklearn_neighbors.KNeighborsRegressor(
n_neighbors = self.config.n_neighbors,
weights = self.config.weights,
algorithm = self.config.algorithm,
leaf_size = self.config.leaf_size,
p = self.config.p,
n_jobs = -1,
)
## The model's attributes
self.classifier = None
return
def __call__(self,
input_ids : np.array,
target_ids : np.array = None,
is_sampling : bool = False,
) -> typing.Dict[str, np.array]:
if not is_sampling:
self.classifier = self.knn.fit(input_ids, target_ids)
return {}
else:
if not self.classifier:
return {
'predicted_labels' : [np.random.choice(a = np.arange(self.config.num_labels)) for x in input_ids]
}
else:
labels = self.classifier.predict(input_ids)
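        ## The regressor outputs a continuous value; round it (with an epsilon nudge)
        ## to the nearest integer label id.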
return {
'predicted_labels' : [int(round(float(x) + sys.float_info.epsilon)) for x in labels]
}
def get_checkpoint_state(self) -> typing.Dict[str, typing.Any]:
"""
Return the blob that is to be checkpointed.
"""
return {'knn' : self.classifier,}
def load_checkpoint_state(self, checkpoint_state: typing.Dict[str, typing.Any]) -> None:
"""
Load the checkpoints to the class states.
"""
self.classifier = checkpoint_state['knn']
return | 8,757 | 31.557621 | 112 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/committee/optimizer.py | # coding=utf-8
# Copyright 2022 The Google AI Language Team Authors, The HuggingFace Inc. team and Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import typing
from deeplearning.benchpress.util.pytorch import torch
def create_optimizer_and_scheduler(model,
num_train_steps: int,
warmup_steps: int,
learning_rate: float,
adam_beta1 = 0.9,
adam_beta2 = 0.999,
adam_epsilon = 1e-6,
weight_decay = 0.01,
):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
  Trainer's init through :obj:`optimizers`, or override this method in a subclass.
"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
opt = AdamW(
optimizer_grouped_parameters,
lr = learning_rate,
betas = (adam_beta1, adam_beta2),
eps = adam_epsilon,
)
lr_scheduler = get_linear_schedule_with_warmup(
opt, num_warmup_steps = warmup_steps, num_training_steps = num_train_steps
)
return opt, lr_scheduler
def get_constant_schedule(optimizer: torch.optim.Optimizer, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate, using the learning rate set in optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
return torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: torch.optim.Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
        The total number of training steps.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
      The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`int`, `optional`, defaults to 1):
The number of hard restarts to use.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
class AdamW(torch.optim.Optimizer):
"""
Implements Adam algorithm with weight decay fix as introduced in
`Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.
Parameters:
params (:obj:`typing.Iterable[torch.nn.parameter.Parameter]`):
typing.Iterable of parameters to optimize or dictionaries defining parameter groups.
lr (:obj:`float`, `optional`, defaults to 1e-3):
The learning rate to use.
betas (:obj:`typing.Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
Adam's betas parameters (b1, b2).
eps (:obj:`float`, `optional`, defaults to 1e-6):
Adam's epsilon for numerical stability.
weight_decay (:obj:`float`, `optional`, defaults to 0):
Decoupled weight decay to apply.
correct_bias (:obj:`bool`, `optional`, defaults to `True`):
      Whether or not to correct bias in Adam (for instance, in the BERT TF repository they use :obj:`False`).
"""
def __init__(
self,
params: typing.Iterable[torch.nn.parameter.Parameter],
lr: float = 1e-3,
betas: typing.Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.0,
correct_bias: bool = True,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
super().__init__(params, defaults)
def step(self, closure: typing.Callable = None):
"""
Performs a single optimization step.
Arguments:
closure (:obj:`typing.Callable`, `optional`): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
return loss
| 12,214 | 39.989933 | 129 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/expected_error_reduction/model.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Here the models used by the Expected Error Reduction learner are defined.
"""
import typing
from deeplearning.benchpress.active_models.expected_error_reduction import config
from deeplearning.benchpress.models.torch_bert import activations
from deeplearning.benchpress.util.pytorch import torch
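# Mish activation: x * tanh(softplus(x)), available as one of the selectable activations below.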
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
ACT2FN = {
"gelu" : activations.gelu,
"relu" : torch.nn.ReLU,
"swish" : activations.swish,
"gelu_new" : activations.gelu_new,
"mish" : mish,
"softmax" : torch.nn.Softmax
}
class MLP(torch.nn.Module):
"""
A modular MLP model that supports Linear, Dropout, LayerNorm and activations.
"""
def __init__(self, config: config.ModelConfig):
super(MLP, self).__init__()
self.config = config.layer_config
self.layers = []
layers = {
'Embedding' : torch.nn.Embedding,
'Linear' : torch.nn.Linear,
'Dropout' : torch.nn.Dropout,
'LayerNorm' : torch.nn.LayerNorm,
}
layers.update(ACT2FN)
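    # config.layer_config is an ordered list of (layer name, kwargs) pairs; each entry is
    # instantiated from the mapping above and applied in sequence in forward().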
self.layers = torch.nn.ModuleList([layers[layer[0]](**layer[1]) for layer in self.config])
self.softmax = torch.nn.Softmax(dim = 1)
return
def calculate_loss(self,
outputs: torch.Tensor,
target_ids: torch.Tensor,
) -> torch.Tensor:
"""
Categorical cross-entropy function.
"""
## Calculate categorical label loss.
loss_fn = torch.nn.CrossEntropyLoss()
label_loss = loss_fn(outputs.to(torch.float32), target_ids.squeeze(1))
## Calculate probs
probs = self.softmax(outputs.clone().detach())
## Calculate top-1 accuracy of predictions across batch.
hits, total = 0, int(outputs.size(0))
for out, target in zip(torch.argmax(outputs, dim = 1), target_ids):
if out == target:
hits += 1
return label_loss, probs, torch.FloatTensor([hits / total])
def forward(self,
input_ids : torch.Tensor,
target_ids : torch.Tensor = None,
is_sampling : bool = False
) -> torch.Tensor:
"""
Args:
input_ids: Input features for training or prediction.
target_ids: Target tokens to predict during training.
is_sampling: Select between training and sampling method.
"""
device = input_ids.get_device()
device = device if device >= 0 else 'cpu'
out = input_ids
for layer in self.layers:
out = layer(out)
if not is_sampling:
total_loss, probs, batch_accuracy = self.calculate_loss(out, target_ids)
return {
'total_loss' : total_loss,
'accuracy' : batch_accuracy.to(device),
'output_probs' : probs,
'output_label' : torch.argmax(out, dim = -1).unsqueeze(-1),
}
else:
return {
'output_probs' : self.softmax(out.clone().detach()),
'output_label' : torch.argmax(out, dim = -1).unsqueeze(-1),
}
| 3,600 | 32.036697 | 94 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/expected_error_reduction/eer.py | # coding=utf-8
# Copyright 2022 Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A neural architecture for downstream task label prediction.
This head is used for feature-less learning to target benchmarks.
"""
import typing
import collections
import tqdm
from deeplearning.benchpress.models.torch_bert import hooks
from deeplearning.benchpress.active_models import backends
from deeplearning.benchpress.active_models import data_generator
from deeplearning.benchpress.active_models.expected_error_reduction import optimizer
from deeplearning.benchpress.active_models.expected_error_reduction import model
from deeplearning.benchpress.active_models.expected_error_reduction import config
from deeplearning.benchpress.active_models.expected_error_reduction import eer_database
from deeplearning.benchpress.util import environment
from deeplearning.benchpress.util import distrib
from deeplearning.benchpress.util import logging as l
from absl import flags
FLAGS = flags.FLAGS
class ExpectedErrorReduction(backends.BackendBase):
class TrainingOpts(typing.NamedTuple):
"""Wrapper class for training options"""
train_batch_size : int
learning_rate : float
num_warmup_steps : int
max_grad_norm : float
steps_per_epoch : int
num_epochs : int
num_train_steps : int
class Estimator(typing.NamedTuple):
"""Named tuple to wrap BERT pipeline."""
model : typing.TypeVar('nn.Module')
data_generator : 'torch.utils.data.Dataset'
optimizer : typing.Any
scheduler : typing.Any
def __repr__(self):
return "ExpectedErrorReduction"
def __init__(self, *args, **kwargs):
super(ExpectedErrorReduction, self).__init__(*args, **kwargs)
from deeplearning.benchpress.util import pytorch
if not pytorch.initialized:
pytorch.initPytorch()
self.pytorch = pytorch
self.torch = pytorch.torch
self.torch_tpu_available = pytorch.torch_tpu_available
self.torch.manual_seed(self.config.random_seed)
self.torch.cuda.manual_seed_all(self.config.random_seed)
self.ckpt_path = self.cache_path / "checkpoints"
self.sample_path = self.cache_path / "samples"
self.logfile_path = self.cache_path / "logs"
if environment.WORLD_RANK == 0:
self.ckpt_path.mkdir(exist_ok = True, parents = True)
self.sample_path.mkdir(exist_ok = True, parents = True)
self.logfile_path.mkdir(exist_ok = True, parents = True)
self.validation_results_file = "val_results.txt"
self.validation_results_path = self.logfile_path / self.validation_results_file
self.model_config = None
self.training_opts = None
self.train = None
self.sample = None
self.is_validated = False
self.is_trained = False
self.eer_samples = eer_database.EERSamples(
url = "sqlite:///{}".format(str(self.sample_path / "samples.db")),
must_exist = False,
)
self.sample_epoch = self.eer_samples.cur_sample_epoch
l.logger().info("Active ExpectedErrorReduction config initialized in {}".format(self.cache_path))
return
def _ConfigModelParams(self) -> None:
"""
Generic initialization.
"""
self.model_config = config.ModelConfig.FromConfig(
self.config.expected_error_reduction,
self.downstream_task,
self.config.num_train_steps
)
self.training_opts = ExpectedErrorReduction.TrainingOpts(
train_batch_size = self.model_config.batch_size,
learning_rate = self.model_config.learning_rate,
num_warmup_steps = self.model_config.num_warmup_steps,
max_grad_norm = self.model_config.max_grad_norm,
steps_per_epoch = self.model_config.steps_per_epoch,
num_epochs = self.model_config.num_epochs,
num_train_steps = self.model_config.num_train_steps,
)
return
def _ConfigTrainParams(self, data_generator: 'torch.utils.data.Dataset') -> None:
"""
Model parameter initialization.
"""
if not self.train:
self._ConfigModelParams()
cm = model.MLP(self.model_config).to(self.pytorch.device)
if self.pytorch.num_nodes > 1:
distrib.barrier()
cm = self.torch.nn.parallel.DistributedDataParallel(
cm,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
)
elif self.pytorch.num_gpus > 1:
cm = self.torch.nn.DataParallel(cm)
opt, lr_scheduler = optimizer.create_optimizer_and_scheduler(
model = cm,
num_train_steps = self.training_opts.num_train_steps,
warmup_steps = self.training_opts.num_warmup_steps,
learning_rate = self.training_opts.learning_rate,
weight_decay = 0.0,
)
self.train = ExpectedErrorReduction.Estimator(
model = cm,
data_generator = data_generator,
optimizer = opt,
scheduler = lr_scheduler,
)
l.logger().info(self.GetShortSummary())
return
def _ConfigSampleParams(self) -> None:
"""
Model parameter initialization.
"""
if not self.sample:
self._ConfigModelParams()
cm = model.MLP(self.model_config).to(self.pytorch.device)
if self.pytorch.num_nodes > 1:
distrib.barrier()
cm = self.torch.nn.parallel.DistributedDataParallel(
cm,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
)
elif self.pytorch.num_gpus > 1:
cm = self.torch.nn.DataParallel(cm)
self.sample = ExpectedErrorReduction.Estimator(
model = cm,
data_generator = None,
optimizer = None,
scheduler = None,
)
l.logger().info(self.GetShortSummary())
return
def model_step(self,
model : 'torch.nn.Module',
inputs : typing.Dict[str, 'torch.Tensor'],
is_sampling : bool = False
) -> float:
"""
Run forward function for member model.
"""
return model(
input_ids = inputs['input_ids'].to(self.pytorch.device),
target_ids = inputs['target_ids'].to(self.pytorch.device) if not is_sampling else None,
is_sampling = is_sampling,
)
def Train(self, **kwargs) -> None:
"""
Train the AL predictive model.
"""
# The update dataloader for when you want to step-train after collecting target benchmark.
update_dataloader = kwargs.get('update_dataloader', None)
# Temp estimator, for when you are temp-training a model version during EER Sample.
update_estimator = kwargs.get('eer_estimator', None)
if not update_estimator:
# If not a temp estimator, then create the standard train estimator if not already created.
self._ConfigTrainParams(self.downstream_task.data_generator)
train_estimator = update_estimator if update_estimator else self.train
if update_dataloader is None and update_estimator is None:
l.logger().info("Initial EER model training.")
# self.Validate()
if not self.is_trained or update_dataloader is not None or update_estimator:
data_generator = (
train_estimator.data_generator
if update_dataloader is None
else update_dataloader
# + train_estimator.data_generator.get_random_subset(
# max(0, abs(len(update_dataloader) - self.training_opts.num_train_steps)))
)
if len(data_generator) == 0:
return
# ## TODO: Dummy code. If active learner can't learn on test set, then features suck.
# Toggle this to train on test set. Used for evaluation purposes.
# elif not update_estimator:
# data_generator = self.downstream_task.test_set
# Empty cache for GPU environments.
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
# Load most recent checkpoint to estimator, if not temp-model.
if not update_estimator:
current_step = self.loadCheckpoint(train_estimator)
if current_step >= 0:
l.logger().info("EER: Loaded checkpoint step {}".format(current_step))
current_step = max(0, current_step)
num_train_steps = min(
(len(data_generator) + self.training_opts.train_batch_size) // self.training_opts.train_batch_size,
self.training_opts.num_train_steps
) if update_dataloader is None else ((len(update_dataloader) + self.training_opts.train_batch_size) // self.training_opts.train_batch_size) + current_step
else:
current_step = 0
num_train_steps = len(data_generator)
if current_step < num_train_steps:
train_estimator.model.zero_grad()
# Setup sampler and data loader.
if self.pytorch.num_nodes <= 1:
sampler = self.torch.utils.data.RandomSampler(data_generator, replacement = False)
else:
sampler = self.torch.utils.data.DistributedSampler(
data_generator,
num_replicas = self.pytorch.num_nodes,
rank = self.pytorch.torch.distributed.get_rank()
)
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = data_generator,
batch_size = self.training_opts.train_batch_size,
sampler = (sampler
if not self.pytorch.torch_tpu_available or self.pytorch.torch_xla.xrt_world_size() <= 1
else self.torch.utils.data.distributed.DistributedSampler(
dataset = data_generator,
num_replicas = self.pytorch.num_nodes if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.xrt_world_size(),
rank = self.pytorch.torch.distributed.get_rank() if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False if environment.WORLD_SIZE == 1 else True,
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
data_generator, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
# Get dataloader iterator and setup hooks.
batch_iterator = iter(loader)
if self.is_world_process_zero() and not update_estimator:
# Monitoring hook.
train_hook = hooks.tensorMonitorHook(
self.logfile_path,
current_step,
min(
(len(data_generator) + self.training_opts.train_batch_size) // self.training_opts.train_batch_size,
self.training_opts.steps_per_epoch, 50
)
)
try:
with self.torch.enable_grad():
train_estimator.model.train()
# epoch_iter = tqdm.auto.trange(self.training_opts.num_epochs, desc="Epoch", leave = False) if self.is_world_process_zero() else range(self.training_opts.num_epochs)
# In distributed mode, calling the set_epoch() method at
# the beginning of each epoch before creating the DataLoader iterator
# is necessary to make shuffling work properly across multiple epochs.
# Otherwise, the same ordering will be always used.
if self.pytorch.num_nodes > 1:
loader.sampler.set_epoch(current_step)
batch_iter = tqdm.tqdm(batch_iterator, desc="Batch", leave = False) if self.is_world_process_zero() else batch_iterator
for inputs in batch_iter:
# Run model step on inputs
step_out = self.model_step(train_estimator.model, inputs)
# Backpropagate losses
total_loss = step_out['total_loss'].mean()
total_loss.backward()
self.torch.nn.utils.clip_grad_norm_(train_estimator.model.parameters(), self.training_opts.max_grad_norm)
if self.torch_tpu_available:
self.pytorch.torch_xla.optimizer_step(train_estimator.optimizer)
else:
train_estimator.optimizer.step()
train_estimator.scheduler.step()
## Collect tensors for logging.
if self.pytorch.num_nodes > 1:
total_loss = [
self.torch.zeros(tuple(step_out['total_loss'].shape), dtype = self.torch.float32).to(self.pytorch.device)
for _ in range(self.torch.distributed.get_world_size())
]
self.torch.distributed.all_gather(total_loss, step_out["total_loss"])
else:
total_loss = step_out['total_loss'].unsqueeze(0).cpu()
if self.is_world_process_zero() and not update_estimator:
train_hook.step(
train_step = current_step,
total_loss = sum([tl.mean().item() for tl in total_loss]) / len(total_loss),
)
train_estimator.model.zero_grad()
if current_step == 0 and update_estimator is None:
l.logger().info("EER: Starting Loss: {}".format(sum([tl.mean().item() for tl in total_loss]) / len(total_loss)))
current_step += 1
# End of epoch
if not update_estimator:
self.saveCheckpoint(train_estimator, current_step = current_step)
if self.is_world_process_zero() and not update_estimator:
try:
l.logger().info(
"EER: Step {} Loss: {}".format(
current_step, train_hook.epoch_loss
)
)
except ZeroDivisionError:
l.logger().error(
"Hook has crashed again: current_step: {}, step_freq: {}, flush_freq: {}, train_step: {}".format(
train_hook.current_step, train_hook.step_freq, train_hook.flush_freq,
current_step
)
)
val_accuracy = self.Validate()
train_hook.end_epoch(
**{"val_{}_accuracy".format(key): val for key, val in val_accuracy.items()}
)
if self.torch_tpu_available:
self.pytorch.torch_xla.master_print(self.pytorch.torch_xla_met.metrics_report())
except KeyboardInterrupt:
pass
self.is_trained = True
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
return
  def Validate(self, **kwargs) -> typing.Dict[str, float]:
"""
    Run validation to measure accuracy on the downstream task's selected test set, if one exists.
"""
# Load the test database from the downstream task.
test_set = self.downstream_task.test_set
# If non-empty.
if test_set:
_ = self.loadCheckpoint(self.train)
self.train.model.zero_grad()
# Setup sampler and dataloader.
if self.pytorch.num_nodes <= 1:
sampler = self.torch.utils.data.SequentialSampler(test_set)
else:
sampler = self.torch.utils.data.DistributedSampler(
test_set,
num_replicas = self.pytorch.num_nodes,
rank = self.pytorch.torch.distributed.get_rank()
)
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = test_set,
batch_size = self.training_opts.train_batch_size,
sampler = (sampler
if not self.pytorch.torch_tpu_available or self.pytorch.torch_xla.xrt_world_size() <= 1
else self.torch.utils.data.distributed.DistributedSampler(
dataset = test_set,
num_replicas = self.pytorch.num_nodes if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.xrt_world_size(),
rank = self.pytorch.torch.distributed.get_rank() if not self.pytorch.torch_tpu_available else self.pytorch.torch_xla.get_ordinal()
)
),
num_workers = 0,
drop_last = False,
)
# Set dataloader in case of TPU training.
if self.torch_tpu_available:
loader = self.pytorch.torch_ploader.ParallelLoader(
          test_set, [self.pytorch.device]
).per_device_loader(self.pytorch.device)
# Setup iterator and accuracy metrics.
batch_iter = tqdm.tqdm(iter(loader), desc = "Test Set", leave = False) if self.is_world_process_zero() else iter(loader)
accuracy = {}
missed_idxs = {}
with self.torch.no_grad():
self.train.model.eval()
if self.pytorch.num_nodes > 1:
loader.sampler.set_epoch(0)
# Run inference.
for inputs in batch_iter:
step_out = self.model_step(self.train.model, inputs)
## Collect tensors for logging.
if self.pytorch.num_nodes > 1:
output_label = [
self.torch.zeros(tuple(step_out['output_label'].shape), dtype = self.torch.int64).to(self.pytorch.device)
for _ in range(self.torch.distributed.get_world_size())
]
target_ids = [
self.torch.zeros(tuple(inputs['target_ids'].shape), dtype = self.torch.int64).to(self.pytorch.device)
for _ in range(self.torch.distributed.get_world_size())
]
self.torch.distributed.all_gather(output_label, step_out['output_label'])
self.torch.distributed.all_gather(target_ids, inputs['target_ids'].to(self.pytorch.device))
else:
output_label = step_out['output_label'].unsqueeze(0)
target_ids = inputs ['target_ids'].unsqueeze(0).to(self.pytorch.device)
# Group accuracy stats by label.
# Assign to the first index the count of correct predictions.
# Assign to the second index the total predictions.
for id, label in zip(self.downstream_task.output_ids, self.downstream_task.output_labels):
if label not in accuracy:
accuracy[label] = [0, 0]
accuracy[label][0] += int(self.torch.sum((output_label == id) & (target_ids == id)).cpu())
accuracy[label][1] += int(self.torch.sum(target_ids == id).cpu())
for out, tar, idx in zip(step_out['output_label'], inputs['target_ids'], inputs['idx']):
if int(tar) != int(out):
if int(tar) not in missed_idxs:
missed_idxs[int(tar)] = []
missed_idxs[int(tar)].append(int(idx))
# You may want to all gather that.
epoch_accuracy = {
k: v[0] / v[1] for k, v in accuracy.items()
}
distrib.barrier()
l.logger().error("Total data: {},\nValidation stats: {}\n{}".format(len(test_set), epoch_accuracy, accuracy))
l.logger().error("Missed indices: {}".format(missed_idxs))
return epoch_accuracy
def Sample(self, sample_set: 'torch.Dataset') -> typing.List[typing.Dict[str, float]]:
"""
Active learner sampling.
sample_set contains random datapoints provided by the downstream task.
Expected Error Reduction algorithm is going to be applied for each datapoint for each label class.
"""
l.logger().error("Problem #2: Check that for DDP, every one gets the chunk they must.")
self._ConfigSampleParams()
current_step = self.loadCheckpoint(self.sample)
if self.pytorch.num_gpus > 0:
self.torch.cuda.empty_cache()
if current_step < 0:
l.logger().warn("EER: You are trying to sample an untrained model.")
current_step = max(0, current_step)
## If DDP, each node will work separately on chunks of the unlabelled dataset.
node_size = len(sample_set) // environment.WORLD_SIZE
node_rem = len(sample_set) % environment.WORLD_SIZE
node_set = sample_set.get_sliced_subset(environment.WORLD_RANK * node_size, (1 + environment.WORLD_RANK) * node_size)
if environment.WORLD_RANK == environment.WORLD_SIZE - 1 and node_rem > 0:
node_set += sample_set.get_sliced_subset((1 + environment.WORLD_RANK) * node_size)
node_loader = self.torch.utils.data.dataloader.DataLoader(
dataset = node_set,
batch_size = 1,
sampler = self.torch.utils.data.SequentialSampler(node_set),
num_workers = 0,
drop_last = False,
)
node_losses = {
'input_ids' : self.torch.zeros([len(node_set), self.downstream_task.input_size], dtype = self.torch.float32),
'static_features' : self.torch.zeros([len(node_set), self.downstream_task.static_features_size], dtype = self.torch.float32),
'runtime_features' : self.torch.zeros([len(node_set), self.downstream_task.runtime_features_size], dtype = self.torch.int64),
'posterior_probs' : self.torch.zeros([len(node_set), self.downstream_task.output_size], dtype = self.torch.float32),
'aggregated_entropy' : self.torch.zeros([len(node_set), self.downstream_task.output_size], dtype = self.torch.float32),
'expected_error_rate' : self.torch.zeros([len(node_set), 1], dtype = self.torch.float32),
}
self.sample.model.eval()
for idx, unl_train_point in tqdm.tqdm(enumerate(iter(node_loader)), total = len(node_loader), desc = "D + (x, y)"):
node_losses['input_ids'][idx] = unl_train_point['input_ids']
node_losses['static_features'][idx] = unl_train_point['static_features']
node_losses['runtime_features'][idx] = unl_train_point['runtime_features']
for out_label in self.downstream_task.output_ids:
## For (x, y) run model inference to obtain p(x|y)
with self.torch.no_grad():
out = self.model_step(self.sample.model, unl_train_point, is_sampling = True)
node_losses['posterior_probs'][idx][out_label] = out['output_probs'].squeeze(0)[out_label]
## Extend Dataset D+: D + (x, y)
# extended_dataset = self.downstream_task.dataset + {'input_ids': unl_train_point, 'target_ids': out_label}
extended_datapoint = data_generator.ListTrainDataloader([], lazy = True)
extended_datapoint.dataset = [
{
'input_ids': unl_train_point['input_ids'].squeeze(0),
'target_ids': self.torch.LongTensor([out_label]),
}
]
extended_dataset = self.downstream_task.data_generator + extended_datapoint
## Copy the model to a temp one.
new_model = model.MLP(self.model_config).to(self.pytorch.device)
if self.pytorch.num_nodes > 1:
distrib.barrier()
new_model.load_state_dict(self.sample.model.module.state_dict())
new_model = self.torch.nn.parallel.DistributedDataParallel(
new_model,
device_ids = [self.pytorch.offset_device],
output_device = self.pytorch.offset_device,
)
elif self.pytorch.num_gpus > 1:
new_model.load_state_dict(self.sample.model.module.state_dict())
new_model = self.torch.nn.DataParallel(new_model)
else:
new_model.load_state_dict(self.sample.model.state_dict())
## Define optimizer, scheduler for training regime.
opt, lr_scheduler = optimizer.create_optimizer_and_scheduler(
model = new_model,
num_train_steps = len(extended_dataset),
warmup_steps = 0,
learning_rate = self.training_opts.learning_rate,
weight_decay = 0.0,
)
dp_estimator = ExpectedErrorReduction.Estimator(
model = new_model,
data_generator = extended_dataset,
optimizer = opt,
scheduler = lr_scheduler,
)
## Train the new model here.
self.Train(eer_estimator = dp_estimator)
## Run the new model on the unlabelled dataset to estimate future errors.
loader = self.torch.utils.data.dataloader.DataLoader(
dataset = node_set,
batch_size = self.training_opts.train_batch_size,
sampler = self.torch.utils.data.SequentialSampler(node_set),
num_workers = 0,
drop_last = False,
)
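        # Estimate the retrained model's expected future error: accumulate its mean loss
        # over the whole unlabelled chunk, assuming in turn every possible target label.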
aggr_entropy = 0.0
target_ids = self.torch.zeros(
[self.downstream_task.output_size, self.training_opts.train_batch_size, 1], dtype = self.torch.int64
)
with self.torch.no_grad():
for tid in self.downstream_task.output_ids:
target_ids[tid,:] = tid
for unl_batch in iter(loader):
for target_id_batch in target_ids:
out = self.model_step(new_model, {'input_ids': unl_batch['input_ids'], 'target_ids': target_id_batch}, is_sampling = False)
aggr_entropy += out['total_loss'].mean()
node_losses['aggregated_entropy'][idx][out_label] = aggr_entropy
node_losses['expected_error_rate'][idx] = sum(
[node_losses['posterior_probs'][idx][L] * node_losses['aggregated_entropy'][idx][L]
for L in self.downstream_task.output_ids]
)
if self.pytorch.num_nodes > 1:
self.torch.distributed.barrier()
input_ids = [self.torch.zeros(tuple(node_losses['input_ids' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
static_features = [self.torch.zeros(tuple(node_losses['static_features' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
      runtime_features    = [self.torch.zeros(tuple(node_losses['runtime_features' ].shape), dtype = self.torch.int64).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
posterior_probs = [self.torch.zeros(tuple(node_losses['posterior_probs' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
aggregated_entropy = [self.torch.zeros(tuple(node_losses['aggregated_entropy' ].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
expected_error_rate = [self.torch.zeros(tuple(node_losses['expected_error_rate'].shape), dtype = self.torch.float32).to(self.pytorch.device) for _ in range(self.torch.distributed.get_world_size())]
self.torch.distributed.all_gather(input_ids, node_losses['input_ids' ])
self.torch.distributed.all_gather(static_features, node_losses['static_features' ])
self.torch.distributed.all_gather(runtime_features, node_losses['runtime_features' ])
self.torch.distributed.all_gather(posterior_probs, node_losses['posterior_probs' ])
self.torch.distributed.all_gather(aggregated_entropy, node_losses['aggregated_entropy' ])
self.torch.distributed.all_gather(expected_error_rate, node_losses['expected_error_rate'])
      # Concatenate the chunks gathered from every rank into single tensors.
      input_ids           = self.torch.cat(input_ids,           dim = 0)
      static_features     = self.torch.cat(static_features,     dim = 0)
      runtime_features    = self.torch.cat(runtime_features,    dim = 0)
      posterior_probs     = self.torch.cat(posterior_probs,     dim = 0)
      aggregated_entropy  = self.torch.cat(aggregated_entropy,  dim = 0)
      expected_error_rate = self.torch.cat(expected_error_rate, dim = 0)
expected_losses = {
'input_ids' : input_ids,
'static_features' : static_features,
'runtime_features' : runtime_features,
'posterior_probs' : posterior_probs,
'aggregated_entropy' : aggregated_entropy,
'expected_error_rate' : expected_error_rate,
}
else:
expected_losses = node_losses
expected_losses['input_ids'] = expected_losses['input_ids' ].cpu().numpy()
expected_losses['static_features'] = expected_losses['static_features' ].cpu().numpy()
expected_losses['runtime_features'] = expected_losses['runtime_features' ].cpu().numpy()
expected_losses['posterior_probs'] = expected_losses['posterior_probs' ].cpu().numpy()
expected_losses['aggregated_entropy'] = expected_losses['aggregated_entropy' ].cpu().numpy()
expected_losses['expected_error_rate'] = expected_losses['expected_error_rate'].cpu().numpy()
space_samples = []
for idx in range(len(expected_losses['input_ids'])):
space_samples.append({
'input_ids' : self.downstream_task.VecToInputFeatDict(expected_losses['input_ids' ][idx]),
'static_features' : self.downstream_task.VecToStaticFeatDict(expected_losses['static_features' ][idx]),
'runtime_features' : self.downstream_task.VecToRuntimeFeatDict(expected_losses['runtime_features'][idx]),
'posterior_probs' : expected_losses['posterior_probs' ][idx],
'aggregated_entropy' : expected_losses['aggregated_entropy' ][idx],
'expected_error_rate' : expected_losses['expected_error_rate'][idx],
})
return sorted(space_samples, key = lambda x: x['expected_error_rate'])
def saveCheckpoint(self,
estimator : 'ExpectedErrorReduction.Estimator',
current_step : int
) -> None:
"""
Saves model, scheduler, optimizer checkpoints per epoch.
"""
if self.is_world_process_zero():
ckpt_comp = lambda x: self.ckpt_path / "{}-{}.pt".format(x, current_step)
if self.torch_tpu_available:
if self.pytorch.torch_xla_model.rendezvous("saving_checkpoint"):
self.pytorch.torch_xla_model.save(estimator.model, ckpt_comp("model"))
self.pytorch.torch_xla.rendezvous("saving_optimizer_states")
self.pytorch.torch_xla.save(estimator.optimizer.state_dict(), ckpt_comp("optimizer"))
self.pytorch.torch_xla.save(estimator.scheduler.state_dict(), ckpt_comp("scheduler"))
else:
if isinstance(estimator.model, self.torch.nn.DataParallel):
self.torch.save(estimator.model.module.state_dict(), ckpt_comp("model"))
else:
self.torch.save(estimator.model.state_dict(), ckpt_comp("model"))
self.torch.save(estimator.optimizer.state_dict(), ckpt_comp("optimizer"))
self.torch.save(estimator.scheduler.state_dict(), ckpt_comp("scheduler"))
with open(self.ckpt_path / "checkpoint.meta", 'a') as mf:
mf.write("train_step: {}\n".format(current_step))
return
def loadCheckpoint(self, estimator: 'ExpectedErrorReduction.Estimator') -> int:
"""
    Load model checkpoint. Loads either the most recent epoch or a checkpoint selected through FLAGS.
"""
if not (self.ckpt_path / "checkpoint.meta").exists():
return -1
with open(self.ckpt_path / "checkpoint.meta", 'r') as mf:
key = "train_step"
get_step = lambda x: int(x.replace("\n", "").replace("{}: ".format(key), ""))
lines = mf.readlines()
entries = set({get_step(x) for x in lines if key in x})
if FLAGS.select_checkpoint_step == -1:
ckpt_step = max(entries)
else:
if FLAGS.select_checkpoint_step in entries:
ckpt_step = FLAGS.select_checkpoint_step
else:
raise ValueError("{} not found in checkpoint folder.".format(FLAGS.select_checkpoint_step))
ckpt_comp = lambda x: self.ckpt_path / "{}-{}.pt".format(x, ckpt_step)
if isinstance(estimator.model, self.torch.nn.DataParallel):
try:
estimator.model.module.load_state_dict(
self.torch.load(ckpt_comp("model")),
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
new_state_dict = collections.OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
estimator.model.module.load_state_dict(new_state_dict)
else:
try:
estimator.model.load_state_dict(
self.torch.load(ckpt_comp("model")),
)
except RuntimeError:
"""
Pytorch doesn't love loading a DataParallel checkpoint
to a simple model. So, the following hack is needed
to remove the 'module.' prefix from state keys.
OR it might as well need the opposite. Transitioning from
single to multiple GPUs will mean that 'module.' prefix is missing
"""
new_state_dict = collections.OrderedDict()
for k, v in self.torch.load(ckpt_comp("model")).items():
if k[:7] == 'module.':
name = k[7:] # remove `module.`
else:
name = 'module.' + k # Add 'module.'
new_state_dict[name] = v
estimator.model.load_state_dict(new_state_dict)
if estimator.optimizer is not None and estimator.scheduler is not None and ckpt_step > 0:
estimator.optimizer.load_state_dict(
self.torch.load(ckpt_comp("optimizer"), map_location=self.pytorch.device)
)
estimator.scheduler.load_state_dict(
self.torch.load(ckpt_comp("scheduler"), map_location=self.pytorch.device)
)
estimator.model.eval()
return ckpt_step
def is_world_process_zero(self) -> bool:
"""
Whether or not this process is the global main process (when training in a distributed fashion on
several machines, this is only going to be :obj:`True` for one process).
"""
if self.torch_tpu_available:
return self.pytorch.torch_xla_model.is_master_ordinal(local=False)
elif self.pytorch.num_nodes > 1:
return self.torch.distributed.get_rank() == 0
else:
return True
  def GetShortSummary(self) -> str:
return "Short Summary TODO" | 34,761 | 45.164675 | 203 | py |
BenchPress | BenchPress-master/deeplearning/benchpress/active_models/expected_error_reduction/optimizer.py | # coding=utf-8
# Copyright 2022 The Google AI Language Team Authors, The HuggingFace Inc. team and Foivos Tsimpourlas.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for BERT model."""
import math
import typing
from deeplearning.benchpress.util.pytorch import torch
def create_optimizer_and_scheduler(model,
num_train_steps: int,
warmup_steps: int,
learning_rate: float,
adam_beta1 = 0.9,
adam_beta2 = 0.999,
adam_epsilon = 1e-6,
weight_decay = 0.01,
):
"""
Setup the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass.
"""
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
opt = AdamW(
optimizer_grouped_parameters,
lr = learning_rate,
betas = (adam_beta1, adam_beta2),
eps = adam_epsilon,
)
lr_scheduler = get_linear_schedule_with_warmup(
opt, num_warmup_steps = warmup_steps, num_training_steps = num_train_steps
)
return opt, lr_scheduler
def get_constant_schedule(optimizer: torch.optim.Optimizer, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate, using the learning rate set in optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
return torch.optim.lr_scheduler.LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: torch.optim.Optimizer, num_warmup_steps: int, last_epoch: int = -1):
"""
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
increases linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
"""
Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
      The total number of training steps.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step: int):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
return max(
0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
      The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer: torch.optim.Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
num_warmup_steps (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`int`, `optional`, defaults to 1):
The number of hard restarts to use.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1, num_warmup_steps))
progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda, last_epoch)
class AdamW(torch.optim.Optimizer):
"""
Implements Adam algorithm with weight decay fix as introduced in
`Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.
Parameters:
params (:obj:`typing.Iterable[torch.nn.parameter.Parameter]`):
typing.Iterable of parameters to optimize or dictionaries defining parameter groups.
lr (:obj:`float`, `optional`, defaults to 1e-3):
The learning rate to use.
betas (:obj:`typing.Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
Adam's betas parameters (b1, b2).
eps (:obj:`float`, `optional`, defaults to 1e-6):
Adam's epsilon for numerical stability.
weight_decay (:obj:`float`, `optional`, defaults to 0):
Decoupled weight decay to apply.
correct_bias (:obj:`bool`, `optional`, defaults to `True`):
      Whether or not to correct bias in Adam (for instance, in the BERT TF repository they use :obj:`False`).
"""
def __init__(
self,
params: typing.Iterable[torch.nn.parameter.Parameter],
lr: float = 1e-3,
betas: typing.Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-6,
weight_decay: float = 0.0,
correct_bias: bool = True,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1]))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias)
super().__init__(params, defaults)
def step(self, closure: typing.Callable = None):
"""
Performs a single optimization step.
Arguments:
closure (:obj:`typing.Callable`, `optional`): A closure that reevaluates the model and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
# Decay the first and second moment running average coefficient
# In-place operations to update the averages at the same time
exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
denom = exp_avg_sq.sqrt().add_(group["eps"])
step_size = group["lr"]
if group["correct_bias"]: # No bias correction for Bert
bias_correction1 = 1.0 - beta1 ** state["step"]
bias_correction2 = 1.0 - beta2 ** state["step"]
step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
p.data.addcdiv_(exp_avg, denom, value=-step_size)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
# Add weight decay at the end (fixed version)
if group["weight_decay"] > 0.0:
p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"])
return loss
| 12,214 | 39.989933 | 129 | py |
DeepGlow | DeepGlow-main/DeepGlow/DGmain.py | import numpy as np
from tensorflow import keras
import importlib.resources
class Emulator(object):
def __init__(self, simtype='ism'):
if simtype == 'ism':
with importlib.resources.path('DeepGlow', 'data') as data_path:
scale_path = data_path / "scale_facs_ism_final.csv"
scale_txt = scale_path.absolute().as_posix()
scale_facs = np.loadtxt(scale_txt)
self.Xmean = scale_facs[:-234][::2]
self.Xstd = scale_facs[:-234][1::2]
self.Ymean = scale_facs[-234:][::2]
self.Ystd = scale_facs[-234:][1::2]
model_path = data_path / "model-ism-final.hdf5"
model_hdf = model_path.absolute().as_posix()
self.NNmodel = keras.models.load_model(model_hdf, compile=False)
elif simtype == 'wind':
with importlib.resources.path('DeepGlow', 'data') as data_path:
scale_path = data_path / "scale_facs_wind_final.csv"
scale_txt = scale_path.absolute().as_posix()
scale_facs = np.loadtxt(scale_txt)
self.Xmean = scale_facs[:-234][::2]
self.Xstd = scale_facs[:-234][1::2]
self.Ymean = scale_facs[-234:][::2]
self.Ystd = scale_facs[-234:][1::2]
model_path = data_path / "model-wind-final.hdf5"
model_hdf = model_path.absolute().as_posix()
self.NNmodel = keras.models.load_model(model_hdf, compile=False)
# Fixed model parameters
self.Nparams = 8
self.nDP = 117
self.sdays = 60*60*24
self.tcomp = np.geomspace(0.1, 1000, self.nDP)*self.sdays
self.ref_d_L = 50*3.08567758 * 1e24
def flux(self, params, t_obs, nu_obs):
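        # params: [z, log10(d_L/1e28), log10(E_iso/1e53), log10(n_ref), theta_c,
        #          theta_jn/theta_c, p, log10(eps_e_bar), log10(eps_B), log10(xi_N)];
        # t_obs is given in seconds, with t_obs and nu_obs in the observer frame.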
z = params[0]
log10_d_L_28_ = params[1]
log10_E_iso_53_ = params[2]
log10_n_ref_ = params[3]
theta_c = params[4]
theta_jn = params[5]*theta_c
p = params[6]
log10_eps_e_bar_ = params[7]
log10_eps_B_ = params[8]
log10_xi_N_ = params[9]
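        # Convert parameters from log10 units; the accelerated-electron fraction xi_N
        # is absorbed into the energy, density and microphysics parameters below.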
xi_N = 10**log10_xi_N_
E0 = (10**(log10_E_iso_53_)) * 1e53 * (xi_N)
d_L = (10**(log10_d_L_28_)) * 1e28
n0 = (10**(log10_n_ref_)) * (xi_N)
ee = (10**(log10_eps_e_bar_)) * ((p-1)/(p-2)) * (1.0/xi_N)
eB = (10**(log10_eps_B_)) * (1.0/xi_N)
t_obs = t_obs / (1+z)
nu_obs = nu_obs*(1+z)
if theta_jn == 0:
theta_jn = 1e-6
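        # Evaluate the network once per unique observing frequency on the fixed time grid,
        # then interpolate each light curve onto the requested observation times.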
nu_unique = np.unique(nu_obs)
nu_inds = [np.where(nu_obs == nu)[0] for nu in nu_unique]
f_obs = np.zeros(len(t_obs))
inp_arr = np.zeros((len(nu_unique), self.Nparams))
inp_arr[:, :] = [(np.log10(E0), np.log10(theta_jn), np.log10(theta_c), np.log10(
n0), p, np.log10(ee), np.log10(eB), np.log10(nu)) for nu in nu_unique]
outY_unscaled = 10**((self.NNmodel((inp_arr - self.Xmean) /
self.Xstd)) * self.Ystd + self.Ymean).numpy()
for i, nu in enumerate(nu_unique):
t_nu = t_obs[nu_inds[i]]
dataOut = np.interp(t_nu, self.tcomp, outY_unscaled[i, :])
f_obs[nu_inds[i]] = dataOut
f_obs = f_obs*(1.0+z)/((d_L/self.ref_d_L)**2)
return f_obs
| 3,302 | 43.04 | 88 | py |
DeepGlow | DeepGlow-main/paper/boxfit_clrDLtrain.py | from CLR.clr_callback import CyclicLR
from sklearn.preprocessing import StandardScaler
import numpy as np
from tensorflow.keras.losses import MeanAbsoluteError
from keras import layers
import keras
from tensorflow.keras import backend as K
import tensorflow as tf
import pandas as pd
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
train_features = pd.read_csv(
'boxfitdata/boxfit_ism_final_trainfeatures.csv')
test_features = pd.read_csv(
'boxfitdata/boxfit_ism_final_testfeatures.csv')
train_labels = pd.read_csv(
'boxfitdata/boxfit_ism_final_trainlabels.csv')
test_labels = pd.read_csv(
'boxfitdata/boxfit_ism_final_testlabels.csv')
scaler_in = StandardScaler()
scaler_out = StandardScaler()
train_features_scaled = scaler_in.fit_transform(train_features)
train_labels_scaled = scaler_out.fit_transform(train_labels)
test_features_scaled = scaler_in.transform(test_features)
test_labels_scaled = scaler_out.transform(test_labels)
filepath = 'boxfitfinal/'
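# Masked mean absolute error: entries with NaN targets are zeroed in both tensors and the
# MAE is rescaled by (total entries / non-zero targets) so masked values do not dilute the loss.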
def masked_metric(y_true, y_pred):
mae = MeanAbsoluteError()
y_true_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_true), y_true)
y_pred_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_pred), y_pred)
non_zero_cor = tf.cast(tf.size(y_true_no_nan, out_type=tf.int32), dtype=tf.float32) / \
tf.math.count_nonzero(y_true_no_nan, dtype=tf.float32)
return mae(y_true_no_nan, y_pred_no_nan)*non_zero_cor
class CustomAccuracy(tf.keras.losses.Loss):
def __init__(self):
super().__init__()
def call(self, y_true, y_pred):
mae = MeanAbsoluteError()
y_true_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_true), y_true)
y_pred_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_pred), y_pred)
non_zero_cor = tf.cast(tf.size(y_true_no_nan, out_type=tf.int32), dtype=tf.float32) / \
tf.math.count_nonzero(y_true_no_nan, dtype=tf.float32)
return mae(y_true_no_nan, y_pred_no_nan)*non_zero_cor
def build_and_compile_model():
model = keras.Sequential([
layers.Dense(
1000, input_dim=train_features_scaled.shape[1], activation='softplus'),
layers.Dense(1000, activation='softplus'),
layers.Dense(1000, activation='softplus'),
layers.Dense(117, activation='linear')
])
model.compile(loss=CustomAccuracy(), metrics=[masked_metric],
optimizer=keras.optimizers.Nadam(0.001))
return model
trainingsizes = [int(len(train_features_scaled)/32), int(len(train_features_scaled)/16), int(
len(train_features_scaled)/8), int(len(train_features_scaled)/4), int(len(train_features_scaled)/2)]
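# Learning-curve study: retrain from scratch on nested subsets of the training data
# (1/32, 1/16, 1/8, 1/4 and 1/2 of the full training set).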
for Ntrain in trainingsizes:
dnn_model = build_and_compile_model()
dnn_model.summary()
train_features_scaled_subset = train_features_scaled[0:Ntrain]
train_labels_scaled_subset = train_labels_scaled[0:Ntrain]
batch_size = 128
clr_step_size = int(4 * (len(train_features_scaled_subset)/batch_size))
base_lr = 1e-4
max_lr = 1e-2
mode = 'triangular2'
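    # Cyclical learning rate between 1e-4 and 1e-2 with a half-cycle of 4 epochs;
    # the 'triangular2' policy halves the cycle amplitude after every full cycle.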
clr = CyclicLR(base_lr=base_lr, max_lr=max_lr,
step_size=clr_step_size, mode=mode)
history = dnn_model.fit(train_features_scaled_subset, train_labels_scaled_subset,
validation_split=0.0, batch_size=batch_size, verbose=1, epochs=200, callbacks=[clr])
dnn_model.save(filepath+'boxfit_ism_stdsc_'+str(Ntrain)+'.h5')
test_predictions_scaled = dnn_model.predict(test_features_scaled)
test_predictions_unscaled = scaler_out.inverse_transform(
test_predictions_scaled)
test_predictions = 10**test_predictions_unscaled
test_labels_lin = 10**test_labels
err = np.abs(test_predictions-test_labels_lin)/test_labels_lin
err = err.values.flatten()
print('errors <0.1: '+str(len(err[err < 0.1])/len(err)))
print('errors >0.2: '+str(len(err[err > 0.2])/len(err)))
print('median errors: '+str(np.nanmedian(err)))
| 4,003 | 36.773585 | 112 | py |
DeepGlow | DeepGlow-main/paper/boxfit_clrDLtrain_wind.py | from CLR.clr_callback import CyclicLR
from sklearn.preprocessing import StandardScaler
import numpy as np
from tensorflow.keras.losses import MeanAbsoluteError
from keras import layers
import keras
from tensorflow.keras import backend as K
import tensorflow as tf
import pandas as pd
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import matplotlib
matplotlib.use('Agg')
train_features = pd.read_csv(
'boxfitdata/boxfit_wind_final_trainfeatures.csv')
test_features = pd.read_csv(
'boxfitdata/boxfit_wind_final_testfeatures.csv')
train_labels = pd.read_csv(
'boxfitdata/boxfit_wind_final_trainlabels.csv')
test_labels = pd.read_csv(
'boxfitdata/boxfit_wind_final_testlabels.csv')
scaler_in = StandardScaler()
scaler_out = StandardScaler()
train_features_scaled = scaler_in.fit_transform(train_features)
test_features_scaled = scaler_in.transform(test_features)
train_labels_scaled = scaler_out.fit_transform(train_labels)
test_labels_scaled = scaler_out.transform(test_labels)
batch_size = 128
clr_step_size = int(4 * (len(train_features_scaled)/batch_size))
base_lr = 1e-4
max_lr = 1e-2
mode = 'triangular2'
clr = CyclicLR(base_lr=base_lr, max_lr=max_lr,
step_size=clr_step_size, mode=mode)
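# The cyclical learning-rate callback above sweeps the learning rate between base_lr=1e-4
# and max_lr=1e-2; with step_size = 4 * (training samples / batch_size) each half-cycle
# lasts roughly four epochs, and the 'triangular2' policy halves the cycle amplitude after
# every full cycle.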
filepath = 'boxfitfinal/'
filepath_intermediate = 'boxfitfinal/model-wind-stdsc-{epoch:02d}.hdf5'
model_checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath_intermediate, monitor='loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=20)
def masked_metric(y_true, y_pred):
mae = MeanAbsoluteError()
y_true_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_true), y_true)
y_pred_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_pred), y_pred)
non_zero_cor = tf.cast(tf.size(y_true_no_nan, out_type=tf.int32), dtype=tf.float32) / \
tf.math.count_nonzero(y_true_no_nan, dtype=tf.float32)
return mae(y_true_no_nan, y_pred_no_nan)*non_zero_cor
class CustomAccuracy(tf.keras.losses.Loss):
def __init__(self):
super().__init__()
def call(self, y_true, y_pred):
mae = MeanAbsoluteError()
y_true_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_true), y_true)
y_pred_no_nan = tf.where(tf.math.is_nan(
y_true), tf.zeros_like(y_pred), y_pred)
non_zero_cor = tf.cast(tf.size(y_true_no_nan, out_type=tf.int32), dtype=tf.float32) / \
tf.math.count_nonzero(y_true_no_nan, dtype=tf.float32)
return mae(y_true_no_nan, y_pred_no_nan)*non_zero_cor
def build_and_compile_model():
model = keras.Sequential([
layers.Dense(
1000, input_dim=train_features_scaled.shape[1], activation='softplus'),
layers.Dense(1000, activation='softplus'),
layers.Dense(1000, activation='softplus'),
layers.Dense(117, activation='linear')
])
model.compile(loss=CustomAccuracy(), metrics=[masked_metric],
optimizer=keras.optimizers.Nadam(0.001))
return model
dnn_model = build_and_compile_model()
dnn_model.summary()
history = dnn_model.fit(train_features_scaled, train_labels_scaled,
validation_split=0.0, batch_size=batch_size, verbose=1, epochs=2000, callbacks=[clr, model_checkpoint_callback])
dnn_model.save(filepath+'boxfit_wind_final_stdsc.h5')
test_predictions_scaled = dnn_model.predict(test_features_scaled)
test_predictions_unscaled = scaler_out.inverse_transform(
test_predictions_scaled)
test_predictions = 10**test_predictions_unscaled
test_labels = 10**test_labels
err = np.abs(test_predictions-test_labels)/test_predictions
err = err.values.flatten()
print('errors <0.1: '+str(len(err[err < 0.1])/len(err)))
print('errors >0.2: '+str(len(err[err > 0.2])/len(err)))
print('median errors: '+str(np.nanmedian(err)))
| 3,833 | 35.169811 | 136 | py |
GL-AT | GL-AT-master/pytorch/inference.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
def audio_tagging(args):
"""Inference audio tagging result of an audio clip.
"""
    # Arguments & parameters
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
sample_rate = config.sample_rate
classes_num = config.classes_num
labels = config.labels
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
batch_output_dict = model(waveform, None)
clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0]
"""(classes_num,)"""
sorted_indexes = np.argsort(clipwise_output)[::-1]
# Print audio tagging top probabilities
for k in range(10):
print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],
clipwise_output[sorted_indexes[k]]))
# Print embedding
if 'embedding' in batch_output_dict.keys():
embedding = batch_output_dict['embedding'].data.cpu().numpy()[0]
print('embedding: {}'.format(embedding.shape))
return clipwise_output, labels
def sound_event_detection(args):
"""Inference sound event detection result of an audio clip.
"""
    # Arguments & parameters
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
sample_rate = config.sample_rate
classes_num = config.classes_num
labels = config.labels
frames_per_second = sample_rate // hop_size
# Paths
fig_path = os.path.join('results', '{}.png'.format(get_filename(audio_path)))
create_folder(os.path.dirname(fig_path))
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
batch_output_dict = model(waveform, None)
framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
"""(time_steps, classes_num)"""
print('Sound event detection result (time_steps x classes_num): {}'.format(
framewise_output.shape))
sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
top_k = 10 # Show top results
top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]
"""(time_steps, top_k)"""
# Plot result
stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=window_size,
hop_length=hop_size, window='hann', center=True)
frames_num = stft.shape[-1]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
axs[0].set_ylabel('Frequency bins')
axs[0].set_title('Log spectrogram')
axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
axs[1].xaxis.set_ticks(np.arange(0, frames_num, frames_per_second))
axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / frames_per_second))
axs[1].yaxis.set_ticks(np.arange(0, top_k))
axs[1].yaxis.set_ticklabels(np.array(labels)[sorted_indexes[0 : top_k]])
axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
axs[1].set_xlabel('Seconds')
axs[1].xaxis.set_ticks_position('bottom')
plt.tight_layout()
plt.savefig(fig_path)
print('Save sound event detection visualization to {}'.format(fig_path))
return framewise_output, labels
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_at = subparsers.add_parser('audio_tagging')
parser_at.add_argument('--window_size', type=int, default=1024)
parser_at.add_argument('--hop_size', type=int, default=320)
parser_at.add_argument('--mel_bins', type=int, default=64)
parser_at.add_argument('--fmin', type=int, default=50)
parser_at.add_argument('--fmax', type=int, default=14000)
parser_at.add_argument('--model_type', type=str, required=True)
parser_at.add_argument('--checkpoint_path', type=str, required=True)
parser_at.add_argument('--audio_path', type=str, required=True)
parser_at.add_argument('--cuda', action='store_true', default=False)
parser_sed = subparsers.add_parser('sound_event_detection')
parser_sed.add_argument('--window_size', type=int, default=1024)
parser_sed.add_argument('--hop_size', type=int, default=320)
parser_sed.add_argument('--mel_bins', type=int, default=64)
parser_sed.add_argument('--fmin', type=int, default=50)
parser_sed.add_argument('--fmax', type=int, default=14000)
parser_sed.add_argument('--model_type', type=str, required=True)
parser_sed.add_argument('--checkpoint_path', type=str, required=True)
parser_sed.add_argument('--audio_path', type=str, required=True)
parser_sed.add_argument('--cuda', action='store_true', default=False)
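    # Example invocation (illustrative; checkpoint and audio paths are placeholders):
    #   python pytorch/inference.py audio_tagging --model_type=Cnn10 \
    #       --checkpoint_path=Cnn10_mAP=0.380.pth --audio_path=example.wav --cuda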
args = parser.parse_args()
if args.mode == 'audio_tagging':
audio_tagging(args)
elif args.mode == 'sound_event_detection':
sound_event_detection(args)
else:
raise Exception('Error argument!') | 7,172 | 34.161765 | 101 | py |
GL-AT | GL-AT-master/pytorch/main.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
from sklearn import metrics
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import (create_folder, get_filename, create_logging, Mixup,
StatisticsContainer)
from models import *
from pytorch_utils import (move_data_to_device, count_parameters, count_flops,
do_mixup)
from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler,
AlternateTrainSampler, EvaluateSampler, collate_fn)
from evaluate import Evaluator
import config
from losses import get_loss_func
def get_train_sampler(balanced):
    """Get the train sampler class for the given balancing strategy.
    Args:
      balanced: 'none' | 'balanced' | 'alternate'
    Returns:
      _Sampler: train sampler class (instantiated by the caller)
    """
    if balanced == 'none':
        _Sampler = TrainSampler
    elif balanced == 'balanced':
        _Sampler = BalancedTrainSampler
    elif balanced == 'alternate':
        _Sampler = AlternateTrainSampler
    else:
        raise Exception('Incorrect balanced argument!')
    return _Sampler
def train(args):
"""Train AudioSet tagging model.
Args:
dataset_dir: str
workspace: str
      data_type: 'balanced_train' | 'full_train'
frames_per_second: int
mel_bins: int
model_type: str
loss_type: 'clip_bce'
balanced: 'none' | 'balanced' | 'alternate'
augmentation: 'none' | 'mixup'
batch_size: int
learning_rate: float
resume_iteration: int
early_stop: int
accumulation_steps: int
cuda: bool
"""
    # Arguments & parameters
workspace = args.workspace
data_type = args.data_type
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
loss_type = args.loss_type
balanced = args.balanced
augmentation = args.augmentation
batch_size = args.batch_size
learning_rate = args.learning_rate
resume_iteration = args.resume_iteration
early_stop = args.early_stop
N = args.N
length = args.length
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
filename = args.filename
num_workers = 8
sample_rate = config.sample_rate
clip_samples = config.clip_samples
classes_num = config.classes_num
loss_func = get_loss_func(loss_type)
# Paths
black_list_csv = None
train_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
'{}.h5'.format(data_type))
eval_bal_indexes_hdf5_path = os.path.join(workspace,
'hdf5s', 'indexes', 'balanced_train.h5')
eval_test_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
'eval.h5')
checkpoints_dir = os.path.join(workspace, 'checkpoints', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'N={},length={}'.format(N,length))
create_folder(checkpoints_dir)
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'N={},length={}'.format(N,length),
'statistics.pkl')
create_folder(os.path.dirname(statistics_path))
logs_dir = os.path.join(workspace, 'logs', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'N={},length={}'.format(N,length))
create_logging(logs_dir, filemode='w')
logging.info(args)
if 'cuda' in str(device):
logging.info('Using GPU.')
device = 'cuda'
else:
logging.info('Using CPU.')
device = 'cpu'
# Model
Model_G = eval(model_type)
model_G = Model_G(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
model_G.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/Wavegram_Logmel_Cnn14_mAP=0.439.pth')['model'])
# model_G.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/Cnn10_mAP=0.380.pth')['model'])
# model_G.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/ResNet38_mAP=0.434.pth')['model'])
Model = eval(model_type+'_local')
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num, N=N, length=length)
params_num = count_parameters(model)
# flops_num = count_flops(model, clip_samples)
logging.info('Parameters num: {}'.format(params_num))
# logging.info('Flops num: {:.3f} G'.format(flops_num / 1e9))
# Dataset will be used by DataLoader later. Dataset takes a meta as input
# and return a waveform and a target.
dataset = AudioSetDataset(clip_samples=clip_samples, classes_num=classes_num)
# Train sampler
if balanced == 'none':
Sampler = TrainSampler
elif balanced == 'balanced':
Sampler = BalancedTrainSampler
elif balanced == 'alternate':
Sampler = AlternateTrainSampler
train_sampler = Sampler(
indexes_hdf5_path=train_indexes_hdf5_path,
batch_size=batch_size * 2 if 'mixup' in augmentation else batch_size,
black_list_csv=black_list_csv)
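    # When mixup is enabled the sampler draws twice the nominal batch size; do_mixup later
    # combines consecutive pairs of clips, so the effective number of (mixed) training
    # examples per optimisation step is still batch_size.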
# Evaluate sampler
eval_bal_sampler = EvaluateSampler(
indexes_hdf5_path=eval_bal_indexes_hdf5_path, batch_size=batch_size)
eval_test_sampler = EvaluateSampler(
indexes_hdf5_path=eval_test_indexes_hdf5_path, batch_size=batch_size)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=train_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
eval_bal_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=eval_bal_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
eval_test_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=eval_test_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
if 'mixup' in augmentation:
mixup_augmenter = Mixup(mixup_alpha=1.)
# Evaluator
evaluator = Evaluator(model=model,model_G=model_G)
# Statistics
statistics_container = StatisticsContainer(statistics_path)
# Optimizer
optimizer = optim.Adam(model.parameters(), lr=learning_rate,
betas=(0.9, 0.999), eps=1e-08, weight_decay=0., amsgrad=True)
train_bgn_time = time.time()
# Resume training
if resume_iteration > 0:
resume_checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.pth'.format(resume_iteration))
logging.info('Loading checkpoint {}'.format(resume_checkpoint_path))
checkpoint = torch.load(resume_checkpoint_path)
model.load_state_dict(checkpoint['model'])
train_sampler.load_state_dict(checkpoint['sampler'])
statistics_container.load_state_dict(resume_iteration)
iteration = checkpoint['iteration']
else:
iteration = 0
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
model_G.to(device)
time1 = time.time()
for batch_data_dict in train_loader:
"""batch_data_dict: {
'audio_name': (batch_size [*2 if mixup],),
'waveform': (batch_size [*2 if mixup], clip_samples),
'target': (batch_size [*2 if mixup], classes_num),
            (if mixup is used) 'mixup_lambda': (batch_size * 2,)}
"""
# Evaluate
if (iteration % 2000 == 0 and iteration > resume_iteration):
train_fin_time = time.time()
bal_statistics = evaluator.evaluate(eval_bal_loader)
test_statistics = evaluator.evaluate(eval_test_loader)
logging.info('Validate bal mAP: {:.3f}'.format(
np.mean(bal_statistics['average_precision'])))
logging.info('Validate test mAP: {:.3f}'.format(
np.mean(test_statistics['average_precision'])))
statistics_container.append(iteration, bal_statistics, data_type='bal')
statistics_container.append(iteration, test_statistics, data_type='test')
statistics_container.dump()
train_time = train_fin_time - train_bgn_time
validate_time = time.time() - train_fin_time
logging.info(
'iteration: {}, train time: {:.3f} s, validate time: {:.3f} s'
''.format(iteration, train_time, validate_time))
logging.info('------------------------------------')
train_bgn_time = time.time()
# Save model
if iteration % 2000 == 0:
checkpoint = {
'iteration': iteration,
'model': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'sampler': train_sampler.state_dict()}
checkpoint_path = os.path.join(
checkpoints_dir, '{}_iterations.pth'.format(iteration))
torch.save(checkpoint, checkpoint_path)
logging.info('Model saved to {}'.format(checkpoint_path))
# Mixup lambda
if 'mixup' in augmentation:
batch_data_dict['mixup_lambda'] = mixup_augmenter.get_lambda(
batch_size=len(batch_data_dict['waveform']))
# Move data to device
for key in batch_data_dict.keys():
batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device)
model_G.eval()
if 'mixup' in augmentation:
batch_output_dict_G = model_G(batch_data_dict['waveform'],
batch_data_dict['mixup_lambda'])
"""{'clipwise_output': (batch_size, classes_num), ...}"""
else:
batch_output_dict_G = model_G(batch_data_dict['waveform'], None)
"""{'clipwise_output': (batch_size, classes_num), ...}"""
# Forward
model.train()
if 'mixup' in augmentation:
batch_output_dict = model(batch_data_dict['waveform'], batch_output_dict_G['clipwise_output'], batch_output_dict_G['feature_map'],
batch_data_dict['mixup_lambda'])
"""{'clipwise_output': (batch_size, classes_num), ...}"""
batch_target_dict = {'target': do_mixup(batch_data_dict['target'],
batch_data_dict['mixup_lambda'])}
"""{'target': (batch_size, classes_num)}"""
else:
batch_output_dict = model(batch_data_dict['waveform'], batch_output_dict_G['clipwise_output'], batch_output_dict_G['feature_map'], None)
"""{'clipwise_output': (batch_size, classes_num), ...}"""
batch_target_dict = {'target': batch_data_dict['target']}
"""{'target': (batch_size, classes_num)}"""
# Loss
loss = loss_func(batch_output_dict, batch_target_dict)
# Backward
loss.backward()
# print(loss)
optimizer.step()
optimizer.zero_grad()
if iteration % 400 == 0:
            print('--- Iteration: {}, train time: {:.3f} s / 400 iterations ---'\
.format(iteration, time.time() - time1))
time1 = time.time()
iteration += 1
# Stop learning
if iteration == early_stop:
break
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--workspace', type=str, required=True)
parser_train.add_argument('--data_type', type=str, default='full_train', choices=['balanced_train', 'full_train'])
parser_train.add_argument('--window_size', type=int, default=1024)
parser_train.add_argument('--hop_size', type=int, default=320)
parser_train.add_argument('--mel_bins', type=int, default=64)
parser_train.add_argument('--fmin', type=int, default=50)
parser_train.add_argument('--fmax', type=int, default=14000)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--loss_type', type=str, default='clip_bce', choices=['clip_bce'])
parser_train.add_argument('--balanced', type=str, default='balanced', choices=['none', 'balanced', 'alternate'])
parser_train.add_argument('--augmentation', type=str, default='mixup', choices=['none', 'mixup'])
parser_train.add_argument('--batch_size', type=int, default=32)
parser_train.add_argument('--N', type=int, default=5)
parser_train.add_argument('--length', type=int, default=2)
parser_train.add_argument('--learning_rate', type=float, default=1e-3)
parser_train.add_argument('--resume_iteration', type=int, default=0)
parser_train.add_argument('--early_stop', type=int, default=1000000)
parser_train.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
else:
raise Exception('Error argument!') | 15,044 | 37.676093 | 148 | py |
GL-AT | GL-AT-master/pytorch/losses.py | import torch
import torch.nn.functional as F
def clip_bce(output_dict, target_dict):
"""Binary crossentropy loss.
"""
return F.binary_cross_entropy(
output_dict['local_prob'], target_dict['target'])
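# Note: the BCE is computed on 'local_prob', the output of the local branch being trained;
# the fused global+local probability 'prob' is only used at evaluation time (see the
# Evaluator in evaluate.py).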
def get_loss_func(loss_type):
if loss_type == 'clip_bce':
return clip_bce | 308 | 21.071429 | 57 | py |
GL-AT | GL-AT-master/pytorch/test.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
def audio_tagging(args):
"""Inference audio tagging result of an audio clip.
"""
    # Arguments & parameters
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
sample_rate = config.sample_rate
classes_num = config.classes_num
labels = config.labels
# Model
checkpoint_path = '/data/dean/panns/audioset_tagging_cnn/pytorch/Cnn10_mAP=0.380.pth'
checkpoint_path_2 = '/data/dean/audioset_tagging_cnn/workspaces/checkpoints/main/sample_rate=32000,window_size=1024,hop_size=320,mel_bins=64,fmin=50,fmax=14000/data_type=full_train/Cnn10/loss_type=clip_bce/balanced=balanced/augmentation=none/batch_size=32/N=5,length=2/198000_iterations.pth'
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
Model_2 = eval(model_type+'_local')
model_2 = Model_2(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num, N=5, length=2)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
checkpoint_2 = torch.load(checkpoint_path_2, map_location=device)
model_2.load_state_dict(checkpoint_2['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
model_2 = torch.nn.DataParallel(model_2)
if 'cuda' in str(device):
model.to(device)
model_2.to(device)
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform_ = waveform
# stft_ = librosa.core.stft(y=waveform,n_fft=window_size,hop_length=hop_size).T
# melW = librosa.filters.mel(sr=sample_rate,n_fft=window_size,n_mels=mel_bins,fmin=fmin,fmax=fmax).T
# mel_spec = np.dot(np.abs(stft_)**2,melW)
# logmel = librosa.core.power_to_db(mel_spec,ref=1.0,amin=1e-10,top_db=None)
# logmel = logmel.astype(np.float32)
# logmel = np.transpose(logmel,(1,0))
# plt.imshow(logmel,cmap=plt.cm.jet)
# plt.axis('off')
# fig = plt.gcf()
# height,width=logmel.shape
# fig.set_size_inches(width/40.,height/40.)
# plt.gca().xaxis.set_major_locator(plt.NullLocator())
# plt.gca().yaxis.set_major_locator(plt.NullLocator())
# plt.subplots_adjust(top=1,bottom=0,right=1,left=0,hspace=0,wspace=0)
# plt.margins(0,0)
# plt.savefig('waveform.png',dpi=200,pad_inches=0)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
model_2.eval()
batch_output_dict_2 = model(waveform, None)
batch_output_dict = model_2(waveform, batch_output_dict_2['clipwise_output'], batch_output_dict_2['feature_map'], None)
clipwise_output = batch_output_dict['prob'].data.cpu().numpy()[0]
"""(classes_num,)"""
sorted_indexes = np.argsort(clipwise_output)[::-1]
# Print audio tagging top probabilities
for k in range(10):
print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],
clipwise_output[sorted_indexes[k]]))
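    # The five hard-coded slices below are 64000-sample windows (2 s at the 32 kHz sample
    # rate used here), presumably the N=5 locally attended regions found for this clip;
    # each block renders the log-mel spectrogram of one region and saves it as waveform<k>.png.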
waveform_1 = waveform_[109395:173395]
stft_ = librosa.core.stft(y=waveform_1,n_fft=window_size,hop_length=hop_size).T
melW = librosa.filters.mel(sr=sample_rate,n_fft=window_size,n_mels=mel_bins,fmin=fmin,fmax=fmax).T
mel_spec = np.dot(np.abs(stft_)**2,melW)
logmel = librosa.core.power_to_db(mel_spec,ref=1.0,amin=1e-10,top_db=None)
logmel = logmel.astype(np.float32)
logmel = np.transpose(logmel,(1,0))
plt.imshow(logmel,cmap=plt.cm.jet)
plt.axis('off')
fig = plt.gcf()
height,width=logmel.shape
fig.set_size_inches(width/40.,height/40.)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,right=1,left=0,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig('waveform1.png',dpi=200,pad_inches=0)
waveform_2 = waveform_[34976:98976]
stft_ = librosa.core.stft(y=waveform_2,n_fft=window_size,hop_length=hop_size).T
melW = librosa.filters.mel(sr=sample_rate,n_fft=window_size,n_mels=mel_bins,fmin=fmin,fmax=fmax).T
mel_spec = np.dot(np.abs(stft_)**2,melW)
logmel = librosa.core.power_to_db(mel_spec,ref=1.0,amin=1e-10,top_db=None)
logmel = logmel.astype(np.float32)
logmel = np.transpose(logmel,(1,0))
plt.imshow(logmel,cmap=plt.cm.jet)
plt.axis('off')
fig = plt.gcf()
height,width=logmel.shape
fig.set_size_inches(width/40.,height/40.)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,right=1,left=0,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig('waveform2.png',dpi=200,pad_inches=0)
waveform_3 = waveform_[146604:210604]
stft_ = librosa.core.stft(y=waveform_3,n_fft=window_size,hop_length=hop_size).T
melW = librosa.filters.mel(sr=sample_rate,n_fft=window_size,n_mels=mel_bins,fmin=fmin,fmax=fmax).T
mel_spec = np.dot(np.abs(stft_)**2,melW)
logmel = librosa.core.power_to_db(mel_spec,ref=1.0,amin=1e-10,top_db=None)
logmel = logmel.astype(np.float32)
logmel = np.transpose(logmel,(1,0))
plt.imshow(logmel,cmap=plt.cm.jet)
plt.axis('off')
fig = plt.gcf()
height,width=logmel.shape
fig.set_size_inches(width/40.,height/40.)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,right=1,left=0,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig('waveform3.png',dpi=200,pad_inches=0)
waveform_4 = waveform_[49860:113860]
stft_ = librosa.core.stft(y=waveform_4,n_fft=window_size,hop_length=hop_size).T
melW = librosa.filters.mel(sr=sample_rate,n_fft=window_size,n_mels=mel_bins,fmin=fmin,fmax=fmax).T
mel_spec = np.dot(np.abs(stft_)**2,melW)
logmel = librosa.core.power_to_db(mel_spec,ref=1.0,amin=1e-10,top_db=None)
logmel = logmel.astype(np.float32)
logmel = np.transpose(logmel,(1,0))
plt.imshow(logmel,cmap=plt.cm.jet)
plt.axis('off')
fig = plt.gcf()
height,width=logmel.shape
fig.set_size_inches(width/40.,height/40.)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,right=1,left=0,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig('waveform4.png',dpi=200,pad_inches=0)
waveform_5 = waveform_[5209:69209]
stft_ = librosa.core.stft(y=waveform_5,n_fft=window_size,hop_length=hop_size).T
melW = librosa.filters.mel(sr=sample_rate,n_fft=window_size,n_mels=mel_bins,fmin=fmin,fmax=fmax).T
mel_spec = np.dot(np.abs(stft_)**2,melW)
logmel = librosa.core.power_to_db(mel_spec,ref=1.0,amin=1e-10,top_db=None)
logmel = logmel.astype(np.float32)
logmel = np.transpose(logmel,(1,0))
plt.imshow(logmel,cmap=plt.cm.jet)
plt.axis('off')
fig = plt.gcf()
height,width=logmel.shape
fig.set_size_inches(width/40.,height/40.)
plt.gca().xaxis.set_major_locator(plt.NullLocator())
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.subplots_adjust(top=1,bottom=0,right=1,left=0,hspace=0,wspace=0)
plt.margins(0,0)
plt.savefig('waveform5.png',dpi=200,pad_inches=0)
return clipwise_output, labels
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_at = subparsers.add_parser('audio_tagging')
parser_at.add_argument('--window_size', type=int, default=1024)
parser_at.add_argument('--hop_size', type=int, default=320)
parser_at.add_argument('--mel_bins', type=int, default=64)
parser_at.add_argument('--fmin', type=int, default=50)
parser_at.add_argument('--fmax', type=int, default=14000)
parser_at.add_argument('--model_type', type=str, required=True)
parser_at.add_argument('--audio_path', type=str, required=True)
parser_at.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
if args.mode == 'audio_tagging':
audio_tagging(args)
else:
raise Exception('Error argument!') | 9,038 | 40.847222 | 295 | py |
GL-AT | GL-AT-master/pytorch/evaluate.py | from sklearn import metrics
from pytorch_utils import forward
class Evaluator(object):
def __init__(self, model, model_G):
"""Evaluator.
Args:
model: object
"""
self.model = model
self.model_G = model_G
def evaluate(self, data_loader):
"""Forward evaluation data and calculate statistics.
Args:
data_loader: object
Returns:
statistics: dict,
{'average_precision': (classes_num,), 'auc': (classes_num,)}
"""
# Forward
output_dict = forward(
model=self.model,
model_G=self.model_G,
generator=data_loader,
return_target=True)
clipwise_output = output_dict['prob'] # (audios_num, classes_num)
target = output_dict['target'] # (audios_num, classes_num)
average_precision = metrics.average_precision_score(
target, clipwise_output, average=None)
auc = metrics.roc_auc_score(target, clipwise_output, average=None)
statistics = {'average_precision': average_precision, 'auc': auc}
return statistics | 1,176 | 25.75 | 76 | py |
GL-AT | GL-AT-master/pytorch/finetune_template.py | import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
import torch
torch.backends.cudnn.benchmark=True
torch.manual_seed(0)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import get_filename
from models import *
import config
class Transfer_Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, freeze_base):
"""Classifier for a new task using pretrained Cnn14 as a sub module.
"""
super(Transfer_Cnn14, self).__init__()
audioset_classes_num = 527
self.base = Cnn14(sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, audioset_classes_num)
# Transfer to another task layer
self.fc_transfer = nn.Linear(2048, classes_num, bias=True)
if freeze_base:
# Freeze AudioSet pretrained layers
for param in self.base.parameters():
param.requires_grad = False
self.init_weights()
def init_weights(self):
init_layer(self.fc_transfer)
def load_from_pretrain(self, pretrained_checkpoint_path):
checkpoint = torch.load(pretrained_checkpoint_path)
self.base.load_state_dict(checkpoint['model'])
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, data_length)
"""
output_dict = self.base(input, mixup_lambda)
embedding = output_dict['embedding']
clipwise_output = torch.log_softmax(self.fc_transfer(embedding), dim=-1)
output_dict['clipwise_output'] = clipwise_output
return output_dict
def train(args):
    # Arguments & parameters
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
pretrained_checkpoint_path = args.pretrained_checkpoint_path
freeze_base = args.freeze_base
device = 'cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu'
sample_rate = config.sample_rate
classes_num = config.classes_num
pretrain = True if pretrained_checkpoint_path else False
# Model
Model = eval(model_type)
model = Model(sample_rate, window_size, hop_size, mel_bins, fmin, fmax,
classes_num, freeze_base)
# Load pretrained model
if pretrain:
logging.info('Load pretrained model from {}'.format(pretrained_checkpoint_path))
model.load_from_pretrain(pretrained_checkpoint_path)
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in device:
model.to(device)
print('Load pretrained model successfully!')
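# Example invocation (illustrative; the pretrained checkpoint path is a placeholder):
#   python pytorch/finetune_template.py train --window_size=1024 --hop_size=320 \
#       --mel_bins=64 --fmin=50 --fmax=14000 --model_type=Transfer_Cnn14 \
#       --pretrained_checkpoint_path=Cnn14_mAP=0.431.pth --freeze_base --cuda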
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
# Train
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--window_size', type=int, required=True)
parser_train.add_argument('--hop_size', type=int, required=True)
parser_train.add_argument('--mel_bins', type=int, required=True)
parser_train.add_argument('--fmin', type=int, required=True)
parser_train.add_argument('--fmax', type=int, required=True)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--pretrained_checkpoint_path', type=str)
parser_train.add_argument('--freeze_base', action='store_true', default=False)
parser_train.add_argument('--cuda', action='store_true', default=False)
# Parse arguments
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
else:
raise Exception('Error argument!') | 3,979 | 30.587302 | 88 | py |
GL-AT | GL-AT-master/pytorch/models.py | import os
import sys
import math
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from torch.nn.parameter import Parameter
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from pytorch_utils import do_mixup, interpolate, pad_framewise_output
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class Cnn10(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(Cnn10, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.fc1 = nn.Linear(512, 512, bias=True)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
feature_map = x
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'feature_map': feature_map}
return output_dict
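# Cnn10_local is the local branch of GL-AT built around a second, AudioSet-pretrained Cnn10.
# The global clipwise probabilities select the top-N candidate classes, framewise scores for
# those classes are obtained by applying the local network's fc layers to the global feature
# map, N short waveform regions are cropped around the framewise score peaks, the local Cnn10
# re-scores each crop, and the final clip-level prediction averages the max- and mean-pooled
# combinations of the global and local outputs.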
class Cnn10_local(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, N, length):
super(Cnn10_local, self).__init__()
self.N = N
self.length = length
self.length_all = 10
self.duration = int(sample_rate*self.length)
self.duration_all = int(sample_rate*self.length_all)
self.local_net = Cnn10(sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num)
self.local_net.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/Cnn10_mAP=0.380.pth')['model'])
self.fc1 = nn.Linear(512, 512, bias=True)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
for name, module in self.local_net._modules.items():
if name == 'fc1':
self.fc1 = module
if name == 'fc_audioset':
self.fc_audioset = module
def forward(self, input, global_prob, feature_map, mixup_lambda=None):
"""
Input: (batch_size, data_length)""" #64, 1, 1001, 64
x = input #64, 320000
sorted, indices = torch.sort(global_prob, dim=1, descending=True)
indices = indices[:,:self.N]#bs,N
feature_map = feature_map.transpose(1, 2)
feature_map = F.dropout(feature_map, p=0.5, training=self.training)
embedding_L = F.relu_(self.fc1(feature_map))
embedding_L = F.dropout(embedding_L, p=0.5, training=self.training)
frame_prob = torch.sigmoid(self.fc_audioset(embedding_L))
# frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)#bs,T,527
frame_prob = frame_prob.transpose(1,2)#bs,527,T
maps = torch.zeros(frame_prob.size(0),self.N,frame_prob.size(2)).cuda()#bs,N,T
for i in range(indices.size(0)):
maps[i] = torch.index_select(frame_prob[i],0,indices[i])
local_regions = self.region_select(maps,x)
local_ = local_regions.view(local_regions.size(0)*local_regions.size(1),local_regions.size(2))
local_prob = self.local_net(local_,mixup_lambda)['clipwise_output']
local_prob = local_prob.view(local_regions.size(0),local_regions.size(1),-1)
(local_prob1, _) = torch.max(local_prob, dim=1)
local_prob2 = torch.mean(local_prob, dim=1)
local_prob = 0.5*(local_prob1 + local_prob2)
# local_prob = torch.clamp(local_prob, 1e-7, 1 - 1e-7) # bs,527
prob = torch.cat([global_prob[:,None,:],local_prob[:,None,:]],1)
(prob1, _) = torch.max(prob, dim=1)
prob2 = torch.mean(prob,dim=1)
prob = 0.5*(prob1 + prob2)
# prob = torch.clamp(prob, 1e-7, 1 - 1e-7)
output_dict = {'local_prob': local_prob, 'prob': prob}
return output_dict
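    # region_select crops, for each clip, self.N windows of self.duration samples centred on
    # the frames where the framewise scores of the selected classes peak. After a window is
    # taken, frames within +/- over_range of that peak are zeroed in the score maps so that
    # later selections are pushed towards other parts of the clip; windows that would run past
    # the clip boundaries are clamped to the first or last self.duration samples.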
def region_select(self, maps, x):
        local_regions = torch.zeros(x.size(0),self.N, self.duration).cuda()#bs,N,duration
over_range = int(self.duration*maps.size(-1)/self.duration_all)//4
for i in range(maps.size(0)):
for j in range(maps.size(1)):
map_ = maps[i,j]
(_,max_index)=torch.max(map_,dim=0)
index_ = max_index
max_index = int(self.duration_all*max_index/len(map_))
index_l = max_index-self.duration//2
index_r = max_index+self.duration//2
if index_r >= self.duration_all:
local_regions[i,j,:] = x[i,self.duration_all-self.duration:self.duration_all]
# print(self.duration_all-self.duration,self.duration_all)
maps[i,:,-2*over_range:]=0.
elif index_l < 0:
local_regions[i,j,:] = x[i,:self.duration]
# print(0,self.duration)
maps[i,:,:2*over_range]=0.
else:
local_regions[i,j,:] = x[i,index_l:index_r]
# print(index_l,index_r)
maps[i,:,index_-over_range:index_+over_range]=0.
return local_regions
def _resnet_conv3x3(in_planes, out_planes):
#3x3 convolution with padding
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,
padding=1, groups=1, bias=False, dilation=1)
def _resnet_conv1x1(in_planes, out_planes):
#1x1 convolution
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, bias=False)
class _ResnetBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(_ResnetBasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('_ResnetBasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in _ResnetBasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.stride = stride
self.conv1 = _resnet_conv3x3(inplanes, planes)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = _resnet_conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
self.init_weights()
def init_weights(self):
init_layer(self.conv1)
init_bn(self.bn1)
init_layer(self.conv2)
init_bn(self.bn2)
nn.init.constant_(self.bn2.weight, 0)
def forward(self, x):
identity = x
if self.stride == 2:
out = F.avg_pool2d(x, kernel_size=(2, 2))
else:
out = x
out = self.conv1(out)
out = self.bn1(out)
out = self.relu(out)
out = F.dropout(out, p=0.1, training=self.training)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(identity)
out += identity
out = self.relu(out)
return out
class _ResnetBottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(_ResnetBottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
self.stride = stride
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = _resnet_conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = _resnet_conv3x3(width, width)
self.bn2 = norm_layer(width)
self.conv3 = _resnet_conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.init_weights()
def init_weights(self):
init_layer(self.conv1)
init_bn(self.bn1)
init_layer(self.conv2)
init_bn(self.bn2)
init_layer(self.conv3)
init_bn(self.bn3)
nn.init.constant_(self.bn3.weight, 0)
def forward(self, x):
identity = x
if self.stride == 2:
x = F.avg_pool2d(x, kernel_size=(2, 2))
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = F.dropout(out, p=0.1, training=self.training)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(identity)
out += identity
out = self.relu(out)
return out
class _ResNet(nn.Module):
def __init__(self, block, layers, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(_ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
if stride == 1:
downsample = nn.Sequential(
_resnet_conv1x1(self.inplanes, planes * block.expansion),
norm_layer(planes * block.expansion),
)
init_layer(downsample[0])
init_bn(downsample[1])
elif stride == 2:
downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=2),
_resnet_conv1x1(self.inplanes, planes * block.expansion),
norm_layer(planes * block.expansion),
)
init_layer(downsample[1])
init_bn(downsample[2])
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class ResNet38(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(ResNet38, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
# self.conv_block2 = ConvBlock(in_channels=64, out_channels=64)
self.resnet = _ResNet(block=_ResnetBasicBlock, layers=[3, 4, 6, 3], zero_init_residual=True)
self.conv_block_after1 = ConvBlock(in_channels=512, out_channels=2048)
self.fc1 = nn.Linear(2048, 2048)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
self.init_weights()
def init_weights(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training, inplace=True)
x = self.resnet(x)
x = F.avg_pool2d(x, kernel_size=(2, 2))
x = F.dropout(x, p=0.2, training=self.training, inplace=True)
x = self.conv_block_after1(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training, inplace=True)
x = torch.mean(x, dim=3)
feature_map=x
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'feature_map': feature_map}
return output_dict
class ResNet38_local(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, N, length):
super(ResNet38_local, self).__init__()
self.N = N
self.length = length
self.length_all = 10
self.duration = int(sample_rate*self.length)
self.duration_all = int(sample_rate*self.length_all)
self.local_net = ResNet38(sample_rate, window_size, hop_size, mel_bins, fmin, fmax, classes_num)
self.local_net.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/ResNet38_mAP=0.434.pth')['model'])
self.fc1 = nn.Linear(2048, 2048)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
for name, module in self.local_net._modules.items():
if name == 'fc1':
self.fc1 = module
if name == 'fc_audioset':
self.fc_audioset = module
def forward(self, input, global_prob, feature_map, mixup_lambda=None):
"""
Input: (batch_size, data_length)""" #64, 1, 1001, 64
x = input #64, 320000
sorted, indices = torch.sort(global_prob, dim=1, descending=True)
indices = indices[:,:self.N]#bs,N
feature_map = feature_map.transpose(1, 2)
feature_map = F.dropout(feature_map, p=0.5, training=self.training)
embedding_L = F.relu_(self.fc1(feature_map))
embedding_L = F.dropout(embedding_L, p=0.5, training=self.training)
frame_prob = torch.sigmoid(self.fc_audioset(embedding_L))
# frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)#bs,T,527
frame_prob = frame_prob.transpose(1,2)#bs,527,T
maps = torch.zeros(frame_prob.size(0),self.N,frame_prob.size(2)).cuda()#bs,N,T
for i in range(indices.size(0)):
maps[i] = torch.index_select(frame_prob[i],0,indices[i])
local_regions = self.region_select(maps,x)
local_ = local_regions.view(local_regions.size(0)*local_regions.size(1),local_regions.size(2))
local_prob = self.local_net(local_,mixup_lambda)['clipwise_output']
local_prob = local_prob.view(local_regions.size(0),local_regions.size(1),-1)
(local_prob1, _) = torch.max(local_prob, dim=1)
local_prob2 = torch.mean(local_prob, dim=1)
local_prob = 0.5*(local_prob1 + local_prob2)
# local_prob = torch.clamp(local_prob, 1e-7, 1 - 1e-7) # bs,527
prob = torch.cat([global_prob[:,None,:],local_prob[:,None,:]],1)
(prob1, _) = torch.max(prob, dim=1)
prob2 = torch.mean(prob,dim=1)
prob = 0.5*(prob1 + prob2)
# prob = torch.clamp(prob, 1e-7, 1 - 1e-7)
output_dict = {'local_prob': local_prob, 'prob': prob}
return output_dict
def region_select(self, maps, x):
        local_regions = torch.zeros(x.size(0),self.N, self.duration).cuda()#bs,N,duration
over_range = int(self.duration*maps.size(-1)/self.duration_all)//4
for i in range(maps.size(0)):
for j in range(maps.size(1)):
map_ = maps[i,j]
(_,max_index)=torch.max(map_,dim=0)
index_ = max_index
max_index = int(self.duration_all*max_index/len(map_))
index_l = max_index-self.duration//2
index_r = max_index+self.duration//2
if index_r >= self.duration_all:
local_regions[i,j,:] = x[i,self.duration_all-self.duration:self.duration_all]
# print(self.duration_all-self.duration,self.duration_all)
maps[i,:,-2*over_range:]=0.
elif index_l < 0:
local_regions[i,j,:] = x[i,:self.duration]
# print(0,self.duration)
maps[i,:,:2*over_range]=0.
else:
local_regions[i,j,:] = x[i,index_l:index_r]
# print(index_l,index_r)
maps[i,:,index_-over_range:index_+over_range]=0.
return local_regions
class ConvPreWavBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvPreWavBlock, self).__init__()
self.conv1 = nn.Conv1d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=3, stride=1,
padding=1, bias=False)
self.conv2 = nn.Conv1d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=3, stride=1, dilation=2,
padding=2, bias=False)
self.bn1 = nn.BatchNorm1d(out_channels)
self.bn2 = nn.BatchNorm1d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
x = F.max_pool1d(x, kernel_size=pool_size)
return x
class Wavegram_Logmel_Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(Wavegram_Logmel_Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.pre_conv0 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=11, stride=5, padding=5, bias=False)
self.pre_bn0 = nn.BatchNorm1d(64)
self.pre_block1 = ConvPreWavBlock(64, 64)
self.pre_block2 = ConvPreWavBlock(64, 128)
self.pre_block3 = ConvPreWavBlock(128, 128)
self.pre_block4 = ConvBlock(in_channels=4, out_channels=64)
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=128, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
self.fc1 = nn.Linear(2048, 2048, bias=True)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_layer(self.pre_conv0)
init_bn(self.pre_bn0)
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
a1 = F.relu_(self.pre_bn0(self.pre_conv0(input[:, None, :])))
a1 = self.pre_block1(a1, pool_size=4)
a1 = self.pre_block2(a1, pool_size=4)
a1 = self.pre_block3(a1, pool_size=4)
a1 = a1.reshape((a1.shape[0], -1, 32, a1.shape[-1])).transpose(2, 3)
a1 = self.pre_block4(a1, pool_size=(2, 1))
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# if self.training:
# x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
a1 = do_mixup(a1, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = torch.cat((x, a1), dim=1)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
feature_map = x
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'feature_map': feature_map}
return output_dict
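# Hedged usage sketch (editor's note): the hyperparameters below are the usual
# PANNs AudioSet settings and are assumptions, not values read from this file.
#   model = Wavegram_Logmel_Cnn14(sample_rate=32000, window_size=1024,
#                                 hop_size=320, mel_bins=64, fmin=50,
#                                 fmax=14000, classes_num=527)
#   waveform = torch.randn(2, 320000)           # (batch_size, data_length)
#   out = model(waveform)
#   out['clipwise_output'].shape                # torch.Size([2, 527])
#   out['feature_map'].shape                    # (2, 2048, T), T ~ 31 frames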
class Wavegram_Logmel_Cnn14_local(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, N, length):
super(Wavegram_Logmel_Cnn14_local, self).__init__()
self.N = N
self.length = length
self.length_all = 10
self.duration = int(sample_rate*self.length)
self.duration_all = int(sample_rate*self.length_all)
self.local_net = Wavegram_Logmel_Cnn14(sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num)
self.local_net.load_state_dict(torch.load('/data/dean/panns/audioset_tagging_cnn/pytorch/Wavegram_Logmel_Cnn14_mAP=0.439.pth')['model'])
self.fc1 = nn.Linear(2048, 2048, bias=True)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
for name, module in self.local_net._modules.items():
if name == 'fc1':
self.fc1 = module
if name == 'fc_audioset':
self.fc_audioset = module
def forward(self, input, global_prob, feature_map, mixup_lambda=None):
"""
Input: (batch_size, data_length)""" #64, 1, 1001, 64
x = input #64, 320000
        _, indices = torch.sort(global_prob, dim=1, descending=True)
indices = indices[:,:self.N]#bs,N
feature_map = feature_map.transpose(1, 2)
feature_map = F.dropout(feature_map, p=0.5, training=self.training)
embedding_L = F.relu_(self.fc1(feature_map))
embedding_L = F.dropout(embedding_L, p=0.5, training=self.training)
frame_prob = torch.sigmoid(self.fc_audioset(embedding_L))
# frame_prob = torch.clamp(frame_prob, 1e-7, 1 - 1e-7)#bs,T,527
frame_prob = frame_prob.transpose(1,2)#bs,527,T
maps = torch.zeros(frame_prob.size(0),self.N,frame_prob.size(2)).cuda()#bs,N,T
for i in range(indices.size(0)):
maps[i] = torch.index_select(frame_prob[i],0,indices[i])
local_regions = self.region_select(maps,x)
local_ = local_regions.view(local_regions.size(0)*local_regions.size(1),local_regions.size(2))
local_prob = self.local_net(local_,mixup_lambda)['clipwise_output']
local_prob = local_prob.view(local_regions.size(0),local_regions.size(1),-1)
(local_prob1, _) = torch.max(local_prob, dim=1)
local_prob2 = torch.mean(local_prob, dim=1)
local_prob = 0.5*(local_prob1 + local_prob2)
# local_prob = torch.clamp(local_prob, 1e-7, 1 - 1e-7) # bs,527
prob = torch.cat([global_prob[:,None,:],local_prob[:,None,:]],1)
(prob1, _) = torch.max(prob, dim=1)
prob2 = torch.mean(prob,dim=1)
prob = 0.5*(prob1 + prob2)
# prob = torch.clamp(prob, 1e-7, 1 - 1e-7)
output_dict = {'local_prob': local_prob, 'prob': prob}
return output_dict
def region_select(self, maps, x):
        local_regions = torch.zeros(x.size(0), self.N, self.duration).cuda()  # (batch_size, N, duration)
over_range = int(self.duration*maps.size(-1)/self.duration_all)//4
for i in range(maps.size(0)):
for j in range(maps.size(1)):
map_ = maps[i,j]
(_,max_index)=torch.max(map_,dim=0)
index_ = max_index
max_index = int(self.duration_all*max_index/len(map_))
index_l = max_index-self.duration//2
index_r = max_index+self.duration//2
if index_r >= self.duration_all:
local_regions[i,j,:] = x[i,self.duration_all-self.duration:self.duration_all]
# print(self.duration_all-self.duration,self.duration_all)
maps[i,:,-2*over_range:]=0.
elif index_l < 0:
local_regions[i,j,:] = x[i,:self.duration]
# print(0,self.duration)
maps[i,:,:2*over_range]=0.
else:
local_regions[i,j,:] = x[i,index_l:index_r]
# print(index_l,index_r)
maps[i,:,index_-over_range:index_+over_range]=0.
return local_regions
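# Hedged two-stage usage sketch (editor's note): the local model re-scores the
# N highest-probability classes on short crops chosen by region_select. The
# constructor loads a hard-coded checkpoint path, so this only runs where that
# file exists; all hyperparameter values below are assumptions.
#   global_net = Wavegram_Logmel_Cnn14(32000, 1024, 320, 64, 50, 14000, 527).cuda()
#   local_net = Wavegram_Logmel_Cnn14_local(32000, 1024, 320, 64, 50, 14000, 527,
#                                           N=5, length=2.5).cuda()
#   wav = torch.randn(2, 320000).cuda()
#   g = global_net(wav)
#   out = local_net(wav, g['clipwise_output'], g['feature_map'])
#   out['prob'].shape                           # torch.Size([2, 527])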
| 33,592 | 38.708038 | 144 | py |
GL-AT | GL-AT-master/pytorch/pytorch_utils.py | import numpy as np
import time
import torch
import torch.nn as nn
def move_data_to_device(x, device):
if 'float' in str(x.dtype):
x = torch.Tensor(x)
elif 'int' in str(x.dtype):
x = torch.LongTensor(x)
else:
return x
return x.to(device)
def do_mixup(x, mixup_lambda):
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
(1, 3, 5, ...).
Args:
x: (batch_size * 2, ...)
mixup_lambda: (batch_size * 2,)
Returns:
out: (batch_size, ...)
"""
out = (x[0 :: 2].transpose(0, -1) * mixup_lambda[0 :: 2] + \
x[1 :: 2].transpose(0, -1) * mixup_lambda[1 :: 2]).transpose(0, -1)
return out
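# Hedged mini example (editor's note): with a batch of 4, samples (0, 1) and
# (2, 3) are blended pairwise, halving the effective batch size.
#   x = torch.arange(4.).reshape(4, 1)          # [[0.], [1.], [2.], [3.]]
#   lam = torch.tensor([0.7, 0.3, 0.7, 0.3])
#   do_mixup(x, lam)                            # [[0.3], [2.3]]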
def append_to_dict(dict, key, value):
if key in dict.keys():
dict[key].append(value)
else:
dict[key] = [value]
def forward(model, model_G, generator, return_input=False,
return_target=False):
"""Forward data to a model.
Args:
model: object
generator: object
return_input: bool
return_target: bool
Returns:
audio_name: (audios_num,)
clipwise_output: (audios_num, classes_num)
(ifexist) segmentwise_output: (audios_num, segments_num, classes_num)
(ifexist) framewise_output: (audios_num, frames_num, classes_num)
(optional) return_input: (audios_num, segment_samples)
(optional) return_target: (audios_num, classes_num)
"""
output_dict = {}
device = next(model.parameters()).device
time1 = time.time()
# Forward data to a model in mini-batches
for n, batch_data_dict in enumerate(generator):
# print(n)
batch_waveform = move_data_to_device(batch_data_dict['waveform'], device)
with torch.no_grad():
model.eval()
model_G.eval()
batch_output_G = model_G(batch_waveform)
batch_output = model(batch_waveform,batch_output_G['clipwise_output'],batch_output_G['feature_map'])
append_to_dict(output_dict, 'audio_name', batch_data_dict['audio_name'])
append_to_dict(output_dict, 'prob',
batch_output['prob'].data.cpu().numpy())
if 'segmentwise_output' in batch_output.keys():
append_to_dict(output_dict, 'segmentwise_output',
batch_output['segmentwise_output'].data.cpu().numpy())
if 'framewise_output' in batch_output.keys():
append_to_dict(output_dict, 'framewise_output',
batch_output['framewise_output'].data.cpu().numpy())
if return_input:
append_to_dict(output_dict, 'waveform', batch_data_dict['waveform'])
if return_target:
if 'target' in batch_data_dict.keys():
append_to_dict(output_dict, 'target', batch_data_dict['target'])
if n % 1000 == 0:
print(' --- Inference time: {:.3f} s / 1000 iterations ---'.format(
time.time() - time1))
time1 = time.time()
for key in output_dict.keys():
output_dict[key] = np.concatenate(output_dict[key], axis=0)
return output_dict
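# Hedged usage sketch (editor's note): `generator` is assumed to yield dicts with
# at least 'waveform' (and optionally 'target'); model_G is the global network and
# model the local one, mirroring the two-stage GL-AT inference.
#   results = forward(model, model_G, eval_loader, return_target=True)
#   results['prob'].shape                       # (audios_num, classes_num)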
def interpolate(x, ratio):
"""Interpolate data in time domain. This is used to compensate the
resolution reduction in downsampling of a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
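# Hedged mini example (editor's note):
#   x = torch.randn(2, 31, 527)
#   interpolate(x, ratio=32).shape              # torch.Size([2, 992, 527])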
def pad_framewise_output(framewise_output, frames_num):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
pad = framewise_output[:, -1 :, :].repeat(1, frames_num - framewise_output.shape[1], 1)
"""tensor for padding"""
output = torch.cat((framewise_output, pad), dim=1)
"""(batch_size, frames_num, classes_num)"""
return output
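# Hedged mini example (editor's note): pads by repeating the last frame.
#   y = pad_framewise_output(torch.randn(2, 992, 527), frames_num=1001)
#   y.shape                                     # torch.Size([2, 1001, 527])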
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_flops(model, audio_length):
"""Count flops. Code modified from others' implementation.
"""
multiply_adds = True
list_conv2d=[]
def conv2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_conv2d.append(flops)
list_conv1d=[]
def conv1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
list_conv1d.append(flops)
list_linear=[]
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn=[]
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu=[]
def relu_hook(self, input, output):
list_relu.append(input[0].nelement() * 2)
list_pooling2d=[]
def pooling2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_pooling2d.append(flops)
list_pooling1d=[]
def pooling1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0]
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
        list_pooling1d.append(flops)  # fixed: 1-D pooling flops previously went into list_pooling2d (total was unaffected)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, nn.Conv2d):
net.register_forward_hook(conv2d_hook)
elif isinstance(net, nn.Conv1d):
net.register_forward_hook(conv1d_hook)
elif isinstance(net, nn.Linear):
net.register_forward_hook(linear_hook)
elif isinstance(net, nn.BatchNorm2d) or isinstance(net, nn.BatchNorm1d):
net.register_forward_hook(bn_hook)
elif isinstance(net, nn.ReLU):
net.register_forward_hook(relu_hook)
elif isinstance(net, nn.AvgPool2d) or isinstance(net, nn.MaxPool2d):
net.register_forward_hook(pooling2d_hook)
elif isinstance(net, nn.AvgPool1d) or isinstance(net, nn.MaxPool1d):
net.register_forward_hook(pooling1d_hook)
else:
print('Warning: flop of module {} is not counted!'.format(net))
return
for c in childrens:
foo(c)
# Register hook
foo(model)
    device = next(model.parameters()).device
input = torch.rand(1, audio_length).to(device)
out = model(input)
total_flops = sum(list_conv2d) + sum(list_conv1d) + sum(list_linear) + \
sum(list_bn) + sum(list_relu) + sum(list_pooling2d) + sum(list_pooling1d)
return total_flops | 8,446 | 32.387352 | 127 | py |
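# Hedged usage sketch (editor's note): counts multiply-adds for one forward pass
# over a 10-second clip at 32 kHz; the model class is defined elsewhere and the
# numbers below are assumptions.
#   model = Wavegram_Logmel_Cnn14(32000, 1024, 320, 64, 50, 14000, 527)
#   flops = count_flops(model, audio_length=320000)
#   print('GFLOPs: {:.2f}'.format(flops / 1e9))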
gbm-bench | gbm-bench-master/algorithms.py | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from abc import ABC, abstractmethod
import time
import pandas as pd
import numpy as np
import dask.dataframe as dd
import dask.array as da
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
import xgboost as xgb
try:
import catboost as cat
except ImportError:
cat = None
try:
import lightgbm as lgb
except (ImportError, OSError):
lgb = None
try:
import dask_xgboost as dxgb
except ImportError:
dxgb = None
try:
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier as skhgb
except ImportError:
skhgb = None
try:
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor as skhgb_r
except ImportError:
skhgb_r = None
try:
from sklearn.ensemble import GradientBoostingClassifier as skgb
except ImportError:
skgb = None
try:
from sklearn.ensemble import GradientBoostingRegressor as skgb_r
except ImportError:
skgb_r = None
try:
from sklearn.ensemble import RandomForestClassifier as skrf
except ImportError:
skrf = None
try:
from sklearn.ensemble import RandomForestRegressor as skrf_r
except ImportError:
skrf_r = None
try:
from cuml.ensemble import RandomForestClassifier as cumlrf
except ImportError:
cumlrf = None
try:
from cuml.ensemble import RandomForestRegressor as cumlrf_r
except ImportError:
cumlrf_r = None
from datasets import LearningTask
class Timer:
def __init__(self):
self.start = None
self.end = None
self.interval = None
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
self.interval = self.end - self.start
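# Hedged usage sketch (editor's note): Timer is a plain context manager.
#   with Timer() as t:
#       run_training()            # hypothetical call
#   print(t.interval)             # elapsed wall-clock seconds as a float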
class Algorithm(ABC):
@staticmethod
def create(name): # pylint: disable=too-many-return-statements
if name == 'xgb-gpu':
return XgbGPUHistAlgorithm()
if name == 'xgb-gpu-dask':
return XgbGPUHistDaskAlgorithm()
if name == 'xgb-gpu-dask-old':
return XgbGPUHistDaskOldAlgorithm()
if name == 'xgb-cpu':
return XgbCPUHistAlgorithm()
if name == 'lgbm-cpu':
return LgbmCPUAlgorithm()
if name == 'lgbm-gpu':
return LgbmGPUAlgorithm()
if name == 'cat-cpu':
return CatCPUAlgorithm()
if name == 'cat-gpu':
return CatGPUAlgorithm()
if name == 'skhgb':
return SkHistAlgorithm()
if name == 'skgb':
return SkGradientAlgorithm()
if name == 'skrf':
return SkRandomForestAlgorithm()
if name == 'cumlrf':
return CumlRfAlgorithm()
raise ValueError("Unknown algorithm: " + name)
def __init__(self):
self.model = None
@abstractmethod
def fit(self, data, args):
pass
@abstractmethod
def test(self, data):
pass
def __enter__(self):
pass
@abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
pass
# learning parameters shared by all algorithms, using the xgboost convention
shared_params = {"max_depth": 8, "learning_rate": 0.1,
"reg_lambda": 1}
class CumlRfAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
del params["reg_lambda"]
del params["learning_rate"]
params["n_estimators"] = args.ntrees
params.update(args.extra)
return params
def fit(self, data, args):
params = self.configure(data, args)
if data.learning_task == LearningTask.REGRESSION:
with Timer() as t:
self.model = cumlrf_r(**params).fit(data.X_train, data.y_train)
return t.interval
else:
with Timer() as t:
self.model = cumlrf(**params).fit(data.X_train, data.y_train)
return t.interval
def test(self, data):
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
params.update({
"nthread": args.cpus})
if data.learning_task == LearningTask.REGRESSION:
params["objective"] = "reg:squarederror"
elif data.learning_task == LearningTask.CLASSIFICATION:
params["objective"] = "binary:logistic"
params["scale_pos_weight"] = len(data.y_train) / np.count_nonzero(data.y_train)
elif data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
params["objective"] = "multi:softmax"
params["num_class"] = np.max(data.y_test) + 1
params.update(args.extra)
return params
def fit(self, data, args):
dtrain = xgb.DMatrix(data.X_train, data.y_train)
params = self.configure(data, args)
with Timer() as t:
self.model = xgb.train(params, dtrain, args.ntrees)
return t.interval
def test(self, data):
dtest = xgb.DMatrix(data.X_test, data.y_test)
return self.model.predict(dtest)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbGPUHistAlgorithm(XgbAlgorithm):
def configure(self, data, args):
params = super(XgbGPUHistAlgorithm, self).configure(data, args)
params.update({"tree_method": "gpu_hist", "gpu_id": 0})
return params
class SkRandomForestAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
del params["reg_lambda"]
del params["learning_rate"]
params["n_estimators"] = args.ntrees
params.update(args.extra)
return params
def fit(self, data, args):
params = self.configure(data, args)
if data.learning_task == LearningTask.REGRESSION:
with Timer() as t:
self.model = skrf_r(**params).fit(data.X_train, data.y_train)
return t.interval
else:
with Timer() as t:
self.model = skrf(**params).fit(data.X_train, data.y_train)
return t.interval
def test(self, data):
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class SkGradientAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
del params["reg_lambda"]
del params["learning_rate"]
params["n_estimators"] = args.ntrees
params.update(args.extra)
return params
def fit(self, data, args):
params = self.configure(data, args)
if data.learning_task == LearningTask.REGRESSION:
with Timer() as t:
self.model = skgb_r(**params).fit(data.X_train, data.y_train)
return t.interval
else:
with Timer() as t:
self.model = skgb(**params).fit(data.X_train, data.y_train)
return t.interval
def test(self, data):
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class SkHistAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
del params["reg_lambda"]
del params["learning_rate"]
params["n_estimators"] = args.ntrees
params.update(args.extra)
return params
def fit(self, data, args):
params = self.configure(data, args)
if data.learning_task == LearningTask.REGRESSION:
with Timer() as t:
self.model = skhgb_r(**params).fit(data.X_train, data.y_train)
return t.interval
else:
with Timer() as t:
self.model = skhgb(**params).fit(data.X_train, data.y_train)
return t.interval
def test(self, data):
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbGPUHistDaskAlgorithm(XgbAlgorithm):
def configure(self, data, args):
params = super(XgbGPUHistDaskAlgorithm, self).configure(data, args)
params.update({"tree_method": "gpu_hist"})
del params['nthread'] # This is handled by dask
return params
def get_slices(self, n_slices, X, y):
n_rows_worker = int(np.ceil(len(y) / n_slices))
indices = []
count = 0
for _ in range(0, n_slices - 1):
indices.append(min(count + n_rows_worker, len(y)))
count += n_rows_worker
return np.split(X, indices), np.split(y, indices)
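    # Hedged mini example (editor's note): 10 rows split across 3 workers gives
    # chunks of 4 / 4 / 2 rows (np.split receives the boundary indices [4, 8]).
    #   Xs, ys = self.get_slices(3, np.arange(20).reshape(10, 2), np.arange(10))
    #   [a.shape for a in Xs]                   # [(4, 2), (4, 2), (2, 2)]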
def fit(self, data, args):
params = self.configure(data, args)
n_workers = None if args.gpus < 0 else args.gpus
cluster = LocalCUDACluster(n_workers=n_workers,
local_directory=args.root)
client = Client(cluster)
n_partitions = len(client.scheduler_info()['workers'])
X_sliced, y_sliced = self.get_slices(n_partitions,
data.X_train, data.y_train)
X = da.concatenate([da.from_array(sub_array) for sub_array in X_sliced])
X = X.rechunk((X_sliced[0].shape[0], data.X_train.shape[1]))
y = da.concatenate([da.from_array(sub_array) for sub_array in y_sliced])
y = y.rechunk(X.chunksize[0])
dtrain = xgb.dask.DaskDMatrix(client, X, y)
with Timer() as t:
output = xgb.dask.train(client, params, dtrain, num_boost_round=args.ntrees)
self.model = output['booster']
client.close()
cluster.close()
return t.interval
def test(self, data):
dtest = xgb.DMatrix(data.X_test, data.y_test)
self.model.set_param({'predictor': 'gpu_predictor'})
return self.model.predict(dtest)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbGPUHistDaskOldAlgorithm(XgbAlgorithm):
def configure(self, data, args):
params = super(XgbGPUHistDaskOldAlgorithm, self).configure(data, args)
params.update({"tree_method": "gpu_hist", "nthread": 1})
return params
def fit(self, data, args):
params = self.configure(data, args)
cluster = LocalCUDACluster(n_workers=None if args.gpus < 0 else args.gpus,
local_directory=args.root)
client = Client(cluster)
partition_size = 1000
if isinstance(data.X_train, np.ndarray):
X = dd.from_array(data.X_train, partition_size)
y = dd.from_array(data.y_train, partition_size)
else:
X = dd.from_pandas(data.X_train, partition_size)
y = dd.from_pandas(data.y_train, partition_size)
X.columns = [str(i) for i in range(0, X.shape[1])]
with Timer() as t:
self.model = dxgb.train(client, params, X, y, num_boost_round=args.ntrees)
client.close()
return t.interval
def test(self, data):
if isinstance(data.X_test, np.ndarray):
data.X_test = pd.DataFrame(data=data.X_test, columns=np.arange(0,
data.X_test.shape[1]),
index=np.arange(0, data.X_test.shape[0]))
data.X_test.columns = [str(i) for i in range(0, data.X_test.shape[1])]
dtest = xgb.DMatrix(data.X_test, data.y_test)
return self.model.predict(dtest)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class XgbCPUHistAlgorithm(XgbAlgorithm):
def configure(self, data, args):
params = super(XgbCPUHistAlgorithm, self).configure(data, args)
params.update({"tree_method": "hist"})
return params
class LgbmAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
params.update({"max_leaves": 256,
"nthread": args.cpus})
if data.learning_task == LearningTask.REGRESSION:
params["objective"] = "regression"
elif data.learning_task == LearningTask.CLASSIFICATION:
params["objective"] = "binary"
params["scale_pos_weight"] = len(data.y_train) / np.count_nonzero(data.y_train)
elif data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
params["objective"] = "multiclass"
params["num_class"] = np.max(data.y_test) + 1
params.update(args.extra)
return params
def fit(self, data, args):
dtrain = lgb.Dataset(data.X_train, data.y_train,
free_raw_data=False)
params = self.configure(data, args)
with Timer() as t:
self.model = lgb.train(params, dtrain, args.ntrees)
return t.interval
def test(self, data):
if data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
prob = self.model.predict(data.X_test)
return np.argmax(prob, axis=1)
return self.model.predict(data.X_test)
def __exit__(self, exc_type, exc_value, traceback):
self.model.free_dataset()
del self.model
class LgbmCPUAlgorithm(LgbmAlgorithm):
pass
class LgbmGPUAlgorithm(LgbmAlgorithm):
def configure(self, data, args):
params = super(LgbmGPUAlgorithm, self).configure(data, args)
params.update({"device": "gpu"})
return params
class CatAlgorithm(Algorithm):
def configure(self, data, args):
params = shared_params.copy()
params.update({
"thread_count": args.cpus})
if args.gpus >= 0:
params["devices"] = "0-" + str(args.gpus)
if data.learning_task == LearningTask.REGRESSION:
params["objective"] = "RMSE"
elif data.learning_task == LearningTask.CLASSIFICATION:
params["objective"] = "Logloss"
params["scale_pos_weight"] = len(data.y_train) / np.count_nonzero(data.y_train)
elif data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
params["objective"] = "MultiClassOneVsAll"
params["classes_count"] = np.max(data.y_test) + 1
params.update(args.extra)
return params
def fit(self, data, args):
dtrain = cat.Pool(data.X_train, data.y_train)
params = self.configure(data, args)
params["iterations"] = args.ntrees
self.model = cat.CatBoost(params)
with Timer() as t:
self.model.fit(dtrain)
return t.interval
def test(self, data):
dtest = cat.Pool(data.X_test)
if data.learning_task == LearningTask.MULTICLASS_CLASSIFICATION:
prob = self.model.predict(dtest)
return np.argmax(prob, axis=1)
return self.model.predict(dtest)
def __exit__(self, exc_type, exc_value, traceback):
del self.model
class CatCPUAlgorithm(CatAlgorithm):
def configure(self, data, args):
params = super(CatCPUAlgorithm, self).configure(data, args)
params.update({"task_type": "CPU"})
return params
class CatGPUAlgorithm(CatAlgorithm):
def configure(self, data, args):
params = super(CatGPUAlgorithm, self).configure(data, args)
params.update({"task_type": "GPU"})
return params
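# Hedged end-to-end sketch (editor's note): a minimal driver, assuming a dataset
# object with X_train/y_train/X_test/y_test/learning_task and an args namespace
# carrying the fields used above; the names here are hypothetical.
#   import argparse
#   args = argparse.Namespace(ntrees=100, cpus=8, gpus=-1, extra={}, root='/tmp')
#   algo = Algorithm.create('xgb-cpu')
#   with algo:
#       elapsed = algo.fit(data, args)
#       predictions = algo.test(data)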
| 17,038 | 34.204545 | 97 | py |